Cleanup warnings. (#5247)

Fixes warnings reported by clang-tidy-9 and gcc-7: invalid case style, narrowing
conversions, wrong initialization order, and unused variables.
This commit is contained in:
Jiaming Yuan
2020-01-31 14:52:15 +08:00
committed by GitHub
parent adc795929a
commit fe8d72b50b
8 changed files with 260 additions and 262 deletions

View File

@@ -46,7 +46,6 @@ TEST(ParallelGHistBuilder, Reset) {
hist_builder.Reset(nthreads, kNodes, space, target_hist);
common::ParallelFor2d(space, nthreads, [&](size_t inode, common::Range1d r) {
const size_t itask = r.begin();
const size_t tid = omp_get_thread_num();
GHistRow hist = hist_builder.GetInitializedHist(tid, inode);
@@ -65,7 +64,6 @@ TEST(ParallelGHistBuilder, Reset) {
hist_builder.Reset(nthreads, kNodesExtended, space2, target_hist);
common::ParallelFor2d(space2, nthreads, [&](size_t inode, common::Range1d r) {
const size_t itask = r.begin();
const size_t tid = omp_get_thread_num();
GHistRow hist = hist_builder.GetInitializedHist(tid, inode);
@@ -80,7 +78,6 @@ TEST(ParallelGHistBuilder, Reset) {
TEST(ParallelGHistBuilder, ReduceHist) {
constexpr size_t kBins = 10;
constexpr size_t kNodes = 5;
constexpr size_t kNodesExtended = 10;
constexpr size_t kTasksPerNode = 10;
constexpr double kValue = 1.0;
const size_t nthreads = GetNThreads();
@@ -104,7 +101,6 @@ TEST(ParallelGHistBuilder, ReduceHist) {
// Simple analog of BuildHist function, works in parallel for both tree-nodes and data in node
common::ParallelFor2d(space, nthreads, [&](size_t inode, common::Range1d r) {
const size_t itask = r.begin();
const size_t tid = omp_get_thread_num();
GHistRow hist = hist_builder.GetInitializedHist(tid, inode);

View File

@@ -1,82 +1,83 @@
#include <gtest/gtest.h>
#include "../../../src/common/column_matrix.h"
#include "../../../src/common/threading_utils.h"
namespace xgboost {
namespace common {
// Checks that BlockedSpace2d enumerates a uniform (kDim1 x kDim2) space
// row-major, one unit-sized task per cell.
TEST(CreateBlockedSpace2d, Test) {
  constexpr size_t kDim1 = 5;
  constexpr size_t kDim2 = 3;
  constexpr size_t kGrainSize = 1;

  // Every row reports the same second-dimension size.
  BlockedSpace2d space(kDim1, [&](size_t i) {
    return kDim2;
  }, kGrainSize);

  ASSERT_EQ(kDim1 * kDim2, space.Size());

  // Use size_t counters: `auto i = 0` deduces int and triggers
  // signed/unsigned comparison warnings against the size_t bounds.
  for (size_t i = 0; i < kDim1; i++) {
    for (size_t j = 0; j < kDim2; j++) {
      // Task i*kDim2 + j belongs to row i and covers [j, j + kGrainSize).
      ASSERT_EQ(space.GetFirstDimension(i*kDim2 + j), i);
      ASSERT_EQ(j, space.GetRange(i*kDim2 + j).begin());
      ASSERT_EQ(j + kGrainSize, space.GetRange(i*kDim2 + j).end());
    }
  }
}
// Checks that ParallelFor2d over a uniform space touches every cell of the
// working matrix exactly once.
TEST(ParallelFor2d, Test) {
  constexpr size_t kDim1 = 100;
  constexpr size_t kDim2 = 15;
  constexpr size_t kGrainSize = 2;

  // working space is matrix of size (kDim1 x kDim2)
  std::vector<int> matrix(kDim1 * kDim2, 0);
  BlockedSpace2d space(kDim1, [&](size_t i) {
    return kDim2;
  }, kGrainSize);

  ParallelFor2d(space, 4, [&](size_t i, Range1d r) {
    for (auto j = r.begin(); j < r.end(); ++j) {
      matrix[i*kDim2 + j] += 1;
    }
  });

  // size_t counter avoids the int-vs-size_t signed/unsigned comparison
  // warning that `auto i = 0` produces against kDim1 * kDim2.
  for (size_t i = 0; i < kDim1 * kDim2; i++) {
    ASSERT_EQ(matrix[i], 1);
  }
}
// Checks that ParallelFor2d handles rows of very different lengths: it should
// split each row into blocks of at most kGrainSize and still visit every
// element exactly once.
TEST(ParallelFor2dNonUniform, Test) {
  constexpr size_t kDim1 = 5;
  constexpr size_t kGrainSize = 256;

  // The distribution across the second dimension is quite non-uniform, but
  // ParallelFor2d should split rows into blocks with max size = kGrainSize
  // and process them in a balanced manner (optimal performance).
  std::vector<size_t> dim2 { 1024, 500, 255, 5, 10000 };
  BlockedSpace2d space(kDim1, [&](size_t i) {
    return dim2[i];
  }, kGrainSize);

  std::vector<std::vector<int>> working_space(kDim1);
  // size_t counters throughout: `auto i = 0` deduces int and triggers
  // signed/unsigned comparison warnings against the size_t bounds.
  for (size_t i = 0; i < kDim1; i++) {
    working_space[i].resize(dim2[i], 0);
  }

  ParallelFor2d(space, 4, [&](size_t i, Range1d r) {
    for (auto j = r.begin(); j < r.end(); ++j) {
      working_space[i][j] += 1;
    }
  });

  for (size_t i = 0; i < kDim1; i++) {
    for (size_t j = 0; j < dim2[i]; j++) {
      ASSERT_EQ(working_space[i][j], 1);
    }
  }
}
} // namespace common
} // namespace xgboost
#include <cstddef>
#include <gtest/gtest.h>
#include "../../../src/common/column_matrix.h"
#include "../../../src/common/threading_utils.h"
namespace xgboost {
namespace common {
// Verifies that BlockedSpace2d enumerates a uniform (kDim1 x kDim2) space in
// row-major order, producing one unit-sized task per cell.
TEST(CreateBlockedSpace2d, Test) {
  constexpr size_t kDim1 = 5;
  constexpr size_t kDim2 = 3;
  constexpr size_t kGrainSize = 1;

  // Every row has the same width, so the size getter ignores its argument.
  BlockedSpace2d space(kDim1, [&](size_t) {
    return kDim2;
  }, kGrainSize);

  ASSERT_EQ(kDim1 * kDim2, space.Size());

  for (size_t row = 0; row < kDim1; ++row) {
    for (size_t col = 0; col < kDim2; ++col) {
      // Task `row * kDim2 + col` belongs to `row` and spans [col, col + 1).
      const size_t task = row * kDim2 + col;
      ASSERT_EQ(space.GetFirstDimension(task), row);
      ASSERT_EQ(col, space.GetRange(task).begin());
      ASSERT_EQ(col + kGrainSize, space.GetRange(task).end());
    }
  }
}
// Verifies that ParallelFor2d over a uniform space increments every cell of
// the flat working matrix exactly once.
TEST(ParallelFor2d, Test) {
  constexpr size_t kDim1 = 100;
  constexpr size_t kDim2 = 15;
  constexpr size_t kGrainSize = 2;

  // Flat row-major working matrix of size (kDim1 x kDim2), zero-initialized.
  std::vector<int> matrix(kDim1 * kDim2, 0);
  BlockedSpace2d space(kDim1, [&](size_t) {
    return kDim2;
  }, kGrainSize);

  ParallelFor2d(space, 4, [&](size_t row, Range1d r) {
    for (auto col = r.begin(); col < r.end(); ++col) {
      matrix[row * kDim2 + col] += 1;
    }
  });

  // Each cell must have been visited exactly once.
  for (const int cell : matrix) {
    ASSERT_EQ(cell, 1);
  }
}
// Verifies ParallelFor2d on rows of wildly different lengths: the space is
// split into blocks of at most kGrainSize per row, and every element must
// still be processed exactly once.
TEST(ParallelFor2dNonUniform, Test) {
  constexpr size_t kDim1 = 5;
  constexpr size_t kGrainSize = 256;

  // Per-row second-dimension sizes, deliberately very uneven so the balanced
  // splitting path is exercised.
  std::vector<size_t> dim2 { 1024, 500, 255, 5, 10000 };
  BlockedSpace2d space(kDim1, [&](size_t row) {
    return dim2[row];
  }, kGrainSize);

  // One zero-filled counter row per first-dimension index.
  std::vector<std::vector<int>> working_space(kDim1);
  for (size_t row = 0; row < kDim1; ++row) {
    working_space[row].resize(dim2[row], 0);
  }

  ParallelFor2d(space, 4, [&](size_t row, Range1d r) {
    for (auto col = r.begin(); col < r.end(); ++col) {
      working_space[row][col] += 1;
    }
  });

  for (const auto& row : working_space) {
    for (const int hits : row) {
      ASSERT_EQ(hits, 1);
    }
  }
}
} // namespace common
} // namespace xgboost

View File

@@ -7,7 +7,6 @@
#include "../helpers.h"
using namespace xgboost; // NOLINT
TEST(adapter, CSRAdapter) {
int m = 3;
int n = 2;
std::vector<float> data = {1, 2, 3, 4, 5};
std::vector<unsigned> feature_idx = {0, 1, 0, 1, 1};

View File

@@ -83,7 +83,7 @@ TEST(SimpleDMatrix, Empty) {
CHECK_EQ(batch.Size(), 0);
}
data::DenseAdapter dense_adapter(nullptr, 0, 0, 0);
data::DenseAdapter dense_adapter(nullptr, 0, 0);
dmat = data::SimpleDMatrix(&dense_adapter,
std::numeric_limits<float>::quiet_NaN(), 1);
CHECK_EQ(dmat.Info().num_nonzero_, 0);
@@ -136,7 +136,7 @@ TEST(SimpleDMatrix, FromDense) {
int m = 3;
int n = 2;
std::vector<float> data = {1, 2, 3, 4, 5, 6};
data::DenseAdapter adapter(data.data(), m, m * n, n);
data::DenseAdapter adapter(data.data(), m, n);
data::SimpleDMatrix dmat(&adapter, std::numeric_limits<float>::quiet_NaN(),
-1);
EXPECT_EQ(dmat.Info().num_col_, 2);

View File

@@ -106,7 +106,7 @@ TEST(SparsePageDMatrix, Empty) {
EXPECT_EQ(batch.Size(), 0);
}
data::DenseAdapter dense_adapter(nullptr, 0, 0, 0);
data::DenseAdapter dense_adapter(nullptr, 0, 0);
data::SparsePageDMatrix dmat2(&dense_adapter,
std::numeric_limits<float>::quiet_NaN(), 1,tmp_file);
EXPECT_EQ(dmat2.Info().num_nonzero_, 0);
@@ -163,7 +163,7 @@ TEST(SparsePageDMatrix, FromDense) {
int m = 3;
int n = 2;
std::vector<float> data = {1, 2, 3, 4, 5, 6};
data::DenseAdapter adapter(data.data(), m, m * n, n);
data::DenseAdapter adapter(data.data(), m, n);
data::SparsePageDMatrix dmat(
&adapter, std::numeric_limits<float>::quiet_NaN(), 1, tmp_file);
EXPECT_EQ(dmat.Info().num_col_, 2);