Fix compiler warnings. (#7974)

- Remove unused parameters. There are still many warnings that are not yet
addressed. Currently, the warnings in dmlc-core dominate the error log.
- Remove `distributed` parameter from metric.
- Fix some warnings about signed comparison.
This commit is contained in:
Jiaming Yuan
2022-06-06 22:56:25 +08:00
committed by GitHub
parent d48123d23b
commit 1a33b50a0d
46 changed files with 149 additions and 189 deletions

View File

@@ -67,7 +67,7 @@ TEST(SegmentedUnique, Basic) {
CHECK_EQ(n_uniques, 5);
std::vector<float> values_sol{0.1f, 0.2f, 0.3f, 0.62448811531066895f, 0.4f};
for (auto i = 0 ; i < values_sol.size(); i ++) {
for (size_t i = 0 ; i < values_sol.size(); i ++) {
ASSERT_EQ(d_vals_out[i], values_sol[i]);
}
@@ -84,7 +84,7 @@ TEST(SegmentedUnique, Basic) {
d_segs_out.data().get(), d_vals_out.data().get(),
thrust::equal_to<float>{});
ASSERT_EQ(n_uniques, values.size());
for (auto i = 0 ; i < values.size(); i ++) {
for (size_t i = 0 ; i < values.size(); i ++) {
ASSERT_EQ(d_vals_out[i], values[i]);
}
}

View File

@@ -315,10 +315,10 @@ TEST(Linalg, Popc) {
TEST(Linalg, Stack) {
Tensor<float, 3> l{{2, 3, 4}, kCpuId};
ElementWiseTransformHost(l.View(kCpuId), omp_get_max_threads(),
[=](size_t i, float v) { return i; });
[=](size_t i, float) { return i; });
Tensor<float, 3> r_0{{2, 3, 4}, kCpuId};
ElementWiseTransformHost(r_0.View(kCpuId), omp_get_max_threads(),
[=](size_t i, float v) { return i; });
[=](size_t i, float) { return i; });
Stack(&l, r_0);

View File

@@ -50,8 +50,8 @@ TEST(PartitionBuilder, BasicTest) {
right[i] = left_total + value_right++;
}
builder.SetNLeftElems(nid, begin, end, n_left);
builder.SetNRightElems(nid, begin, end, n_right);
builder.SetNLeftElems(nid, begin, n_left);
builder.SetNRightElems(nid, begin, n_right);
}
}
builder.CalculateRowOffsets();

View File

@@ -77,7 +77,7 @@ void TestDistributedQuantile(size_t rows, size_t cols) {
std::vector<float> hessian(rows, 1.0);
auto hess = Span<float const>{hessian};
ContainerType<use_column> sketch_distributed(n_bins, m->Info(), column_size, false, hess,
ContainerType<use_column> sketch_distributed(n_bins, m->Info(), column_size, false,
OmpGetNumThreads(0));
if (use_column) {
@@ -98,7 +98,7 @@ void TestDistributedQuantile(size_t rows, size_t cols) {
CHECK_EQ(rabit::GetWorldSize(), 1);
std::for_each(column_size.begin(), column_size.end(), [=](auto& size) { size *= world; });
m->Info().num_row_ = world * rows;
ContainerType<use_column> sketch_on_single_node(n_bins, m->Info(), column_size, false, hess,
ContainerType<use_column> sketch_on_single_node(n_bins, m->Info(), column_size, false,
OmpGetNumThreads(0));
m->Info().num_row_ = rows;
@@ -190,7 +190,7 @@ TEST(Quantile, SameOnAllWorkers) {
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(
kRows, [=](int32_t seed, size_t n_bins, MetaInfo const &info) {
kRows, [=](int32_t seed, size_t n_bins, MetaInfo const&) {
auto rank = rabit::GetRank();
HostDeviceVector<float> storage;
std::vector<FeatureType> ft(kCols);

View File

@@ -36,7 +36,7 @@ struct TestTestStatus {
XGBOOST_DEVICE void operator()() {
this->operator()(0);
}
XGBOOST_DEVICE void operator()(int _idx) {
XGBOOST_DEVICE void operator()(size_t) { // size_t for CUDA index
SPAN_ASSERT_TRUE(false, status_);
}
};
@@ -49,7 +49,7 @@ struct TestAssignment {
XGBOOST_DEVICE void operator()() {
this->operator()(0);
}
XGBOOST_DEVICE void operator()(int _idx) {
XGBOOST_DEVICE void operator()(size_t) { // size_t for CUDA index
Span<float> s1;
float arr[] = {3, 4, 5};
@@ -71,7 +71,7 @@ struct TestBeginEnd {
XGBOOST_DEVICE void operator()() {
this->operator()(0);
}
XGBOOST_DEVICE void operator()(int _idx) {
XGBOOST_DEVICE void operator()(size_t) { // size_t for CUDA index
float arr[16];
InitializeRange(arr, arr + 16);
@@ -93,7 +93,7 @@ struct TestRBeginREnd {
XGBOOST_DEVICE void operator()() {
this->operator()(0);
}
XGBOOST_DEVICE void operator()(int _idx) {
XGBOOST_DEVICE void operator()(size_t) { // size_t for CUDA index
float arr[16];
InitializeRange(arr, arr + 16);
@@ -121,7 +121,7 @@ struct TestObservers {
XGBOOST_DEVICE void operator()() {
this->operator()(0);
}
XGBOOST_DEVICE void operator()(int _idx) {
XGBOOST_DEVICE void operator()(size_t) { // size_t for CUDA index
// empty
{
float *arr = nullptr;
@@ -148,7 +148,7 @@ struct TestCompare {
XGBOOST_DEVICE void operator()() {
this->operator()(0);
}
XGBOOST_DEVICE void operator()(int _idx) {
XGBOOST_DEVICE void operator()(size_t) { // size_t for CUDA index
float lhs_arr[16], rhs_arr[16];
InitializeRange(lhs_arr, lhs_arr + 16);
InitializeRange(rhs_arr, rhs_arr + 16);
@@ -178,7 +178,7 @@ struct TestIterConstruct {
XGBOOST_DEVICE void operator()() {
this->operator()(0);
}
XGBOOST_DEVICE void operator()(int _idx) {
XGBOOST_DEVICE void operator()(size_t) { // size_t for CUDA index.
Span<float>::iterator it1;
Span<float>::iterator it2;
SPAN_ASSERT_TRUE(it1 == it2, status_);
@@ -197,7 +197,7 @@ struct TestIterRef {
XGBOOST_DEVICE void operator()() {
this->operator()(0);
}
XGBOOST_DEVICE void operator()(int _idx) {
XGBOOST_DEVICE void operator()(size_t) { // size_t for CUDA index
float arr[16];
InitializeRange(arr, arr + 16);
@@ -215,7 +215,7 @@ struct TestIterCalculate {
XGBOOST_DEVICE void operator()() {
this->operator()(0);
}
XGBOOST_DEVICE void operator()(int _idx) {
XGBOOST_DEVICE void operator()(size_t) { // size_t for CUDA index
float arr[16];
InitializeRange(arr, arr + 16);
@@ -278,7 +278,7 @@ struct TestAsBytes {
XGBOOST_DEVICE void operator()() {
this->operator()(0);
}
XGBOOST_DEVICE void operator()(int _idx) {
XGBOOST_DEVICE void operator()(size_t) { // size_t for CUDA index
float arr[16];
InitializeRange(arr, arr + 16);
@@ -313,7 +313,7 @@ struct TestAsWritableBytes {
XGBOOST_DEVICE void operator()() {
this->operator()(0);
}
XGBOOST_DEVICE void operator()(int _idx) {
XGBOOST_DEVICE void operator()(size_t) { // size_t for CUDA index
float arr[16];
InitializeRange(arr, arr + 16);

View File

@@ -34,9 +34,8 @@ TEST(ParallelFor2d, Test) {
// working space is matrix of size (kDim1 x kDim2)
std::vector<int> matrix(kDim1 * kDim2, 0);
BlockedSpace2d space(kDim1, [&](size_t i) {
return kDim2;
}, kGrainSize);
BlockedSpace2d space(
kDim1, [&](size_t) { return kDim2; }, kGrainSize);
auto old = omp_get_max_threads();
omp_set_num_threads(4);