Fix clang-tidy warnings. (#4149)

* Upgrade gtest for clang-tidy.
* Use CMake to install GTest instead of mv.
* Don't require clang-tidy to return 0, due to errors in thrust.
* Add a small test for tidy itself.

* Reformat.
This commit is contained in:
Jiaming Yuan
2019-03-13 02:25:51 +08:00
committed by GitHub
parent 259fb809e9
commit 7b9043cf71
41 changed files with 775 additions and 628 deletions

View File

@@ -20,13 +20,13 @@ TEST(GPUExact, Update) {
auto* p_gpuexact_maker = TreeUpdater::Create("grow_gpu");
p_gpuexact_maker->Init(args);
size_t constexpr n_rows = 4;
size_t constexpr n_cols = 8;
bst_float constexpr sparsity = 0.0f;
size_t constexpr kNRows = 4;
size_t constexpr kNCols = 8;
bst_float constexpr kSparsity = 0.0f;
auto dmat = CreateDMatrix(n_rows, n_cols, sparsity, 3);
std::vector<GradientPair> h_gpair(n_rows);
for (size_t i = 0; i < n_rows; ++i) {
auto dmat = CreateDMatrix(kNRows, kNCols, kSparsity, 3);
std::vector<GradientPair> h_gpair(kNRows);
for (size_t i = 0; i < kNRows; ++i) {
h_gpair[i] = GradientPair(i % 2, 1);
}
HostDeviceVector<GradientPair> gpair (h_gpair);

View File

@@ -46,20 +46,20 @@ void BuildGidx(DeviceShard<GradientSumT>* shard, int n_rows, int n_cols,
}
TEST(GpuHist, BuildGidxDense) {
int const n_rows = 16, n_cols = 8;
int constexpr kNRows = 16, kNCols = 8;
TrainParam param;
param.max_depth = 1;
param.n_gpus = 1;
param.max_leaves = 0;
DeviceShard<GradientPairPrecise> shard(0, 0, n_rows, param);
BuildGidx(&shard, n_rows, n_cols);
DeviceShard<GradientPairPrecise> shard(0, 0, kNRows, param);
BuildGidx(&shard, kNRows, kNCols);
std::vector<common::CompressedByteT> h_gidx_buffer;
h_gidx_buffer = shard.gidx_buffer.AsVector();
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_EQ(shard.row_stride, n_cols);
ASSERT_EQ(shard.row_stride, kNCols);
std::vector<uint32_t> solution = {
0, 3, 8, 9, 14, 17, 20, 21,
@@ -79,20 +79,20 @@ TEST(GpuHist, BuildGidxDense) {
2, 4, 8, 10, 14, 15, 19, 22,
1, 4, 7, 10, 14, 16, 19, 21,
};
for (size_t i = 0; i < n_rows * n_cols; ++i) {
for (size_t i = 0; i < kNRows * kNCols; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
TEST(GpuHist, BuildGidxSparse) {
int const n_rows = 16, n_cols = 8;
int constexpr kNRows = 16, kNCols = 8;
TrainParam param;
param.max_depth = 1;
param.n_gpus = 1;
param.max_leaves = 0;
DeviceShard<GradientPairPrecise> shard(0, 0, n_rows, param);
BuildGidx(&shard, n_rows, n_cols, 0.9f);
DeviceShard<GradientPairPrecise> shard(0, 0, kNRows, param);
BuildGidx(&shard, kNRows, kNCols, 0.9f);
std::vector<common::CompressedByteT> h_gidx_buffer;
h_gidx_buffer = shard.gidx_buffer.AsVector();
@@ -106,7 +106,7 @@ TEST(GpuHist, BuildGidxSparse) {
24, 24, 24, 24, 24, 5, 24, 24, 0, 16, 24, 15, 24, 24, 24, 24,
24, 7, 14, 16, 4, 24, 24, 24, 24, 24, 9, 24, 24, 1, 24, 24
};
for (size_t i = 0; i < n_rows * shard.row_stride; ++i) {
for (size_t i = 0; i < kNRows * shard.row_stride; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
@@ -128,27 +128,27 @@ std::vector<GradientPairPrecise> GetHostHistGpair() {
template <typename GradientSumT>
void TestBuildHist(GPUHistBuilderBase<GradientSumT>& builder) {
int const n_rows = 16, n_cols = 8;
int const kNRows = 16, kNCols = 8;
TrainParam param;
param.max_depth = 6;
param.n_gpus = 1;
param.max_leaves = 0;
DeviceShard<GradientSumT> shard(0, 0, n_rows, param);
DeviceShard<GradientSumT> shard(0, 0, kNRows, param);
BuildGidx(&shard, n_rows, n_cols);
BuildGidx(&shard, kNRows, kNCols);
xgboost::SimpleLCG gen;
xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
std::vector<GradientPair> h_gpair(n_rows);
for (size_t i = 0; i < h_gpair.size(); ++i) {
std::vector<GradientPair> h_gpair(kNRows);
for (auto &gpair : h_gpair) {
bst_float grad = dist(&gen);
bst_float hess = dist(&gen);
h_gpair[i] = GradientPair(grad, hess);
gpair = GradientPair(grad, hess);
}
thrust::device_vector<GradientPair> gpair (n_rows);
thrust::device_vector<GradientPair> gpair (kNRows);
gpair = h_gpair;
int num_symbols = shard.n_bins + 1;
@@ -164,7 +164,7 @@ void TestBuildHist(GPUHistBuilderBase<GradientSumT>& builder) {
num_symbols);
shard.ridx_segments.resize(1);
shard.ridx_segments[0] = Segment(0, n_rows);
shard.ridx_segments[0] = Segment(0, kNRows);
shard.hist.AllocateHistogram(0);
shard.gpair.copy(gpair.begin(), gpair.end());
thrust::sequence(shard.ridx.CurrentDVec().tbegin(),
@@ -175,11 +175,11 @@ void TestBuildHist(GPUHistBuilderBase<GradientSumT>& builder) {
auto node_histogram = d_hist.GetNodeHistogram(0);
// d_hist.data stored in float, not gradient pair
thrust::host_vector<GradientSumT> h_result (d_hist.data.size()/2);
thrust::host_vector<GradientSumT> h_result (d_hist.Data().size() / 2);
size_t data_size =
sizeof(GradientSumT) /
(sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT));
data_size *= d_hist.data.size();
data_size *= d_hist.Data().size();
dh::safe_cuda(cudaMemcpy(h_result.data(), node_histogram.data(), data_size,
cudaMemcpyDeviceToHost));
@@ -224,8 +224,8 @@ common::HistCutMatrix GetHostCutMatrix () {
// TODO(trivialfis): This test is over simplified.
TEST(GpuHist, EvaluateSplits) {
constexpr int n_rows = 16;
constexpr int n_cols = 8;
constexpr int kNRows = 16;
constexpr int kNCols = 8;
TrainParam param;
param.max_depth = 1;
@@ -240,14 +240,15 @@ TEST(GpuHist, EvaluateSplits) {
param.reg_lambda = 0;
param.max_delta_step = 0.0;
for (size_t i = 0; i < n_cols; ++i) {
for (size_t i = 0; i < kNCols; ++i) {
param.monotone_constraints.emplace_back(0);
}
int max_bins = 4;
// Initialize DeviceShard
std::unique_ptr<DeviceShard<GradientPairPrecise>> shard {new DeviceShard<GradientPairPrecise>(0, 0, n_rows, param)};
std::unique_ptr<DeviceShard<GradientPairPrecise>> shard {
new DeviceShard<GradientPairPrecise>(0, 0, kNRows, param)};
// Initialize DeviceShard::node_sum_gradients
shard->node_sum_gradients = {{6.4f, 12.8f}};
@@ -257,17 +258,17 @@ TEST(GpuHist, EvaluateSplits) {
// Copy cut matrix to device.
DeviceShard<GradientPairPrecise>::DeviceHistCutMatrix cut;
shard->ba.Allocate(0,
&(shard->cut_.feature_segments), cmat.row_ptr.size(),
&(shard->cut_.min_fvalue), cmat.min_val.size(),
&(shard->cut_.gidx_fvalue_map), 24,
&(shard->monotone_constraints), n_cols);
shard->cut_.feature_segments.copy(cmat.row_ptr.begin(), cmat.row_ptr.end());
shard->cut_.gidx_fvalue_map.copy(cmat.cut.begin(), cmat.cut.end());
&(shard->d_cut.feature_segments), cmat.row_ptr.size(),
&(shard->d_cut.min_fvalue), cmat.min_val.size(),
&(shard->d_cut.gidx_fvalue_map), 24,
&(shard->monotone_constraints), kNCols);
shard->d_cut.feature_segments.copy(cmat.row_ptr.begin(), cmat.row_ptr.end());
shard->d_cut.gidx_fvalue_map.copy(cmat.cut.begin(), cmat.cut.end());
shard->monotone_constraints.copy(param.monotone_constraints.begin(),
param.monotone_constraints.end());
// Initialize DeviceShard::hist
shard->hist.Init(0, (max_bins - 1) * n_cols);
shard->hist.Init(0, (max_bins - 1) * kNCols);
shard->hist.AllocateHistogram(0);
// Each row of hist_gpair represents gpairs for one feature.
// Each entry represents a bin.
@@ -278,16 +279,16 @@ TEST(GpuHist, EvaluateSplits) {
hist.push_back(pair.GetHess());
}
ASSERT_EQ(shard->hist.data.size(), hist.size());
ASSERT_EQ(shard->hist.Data().size(), hist.size());
thrust::copy(hist.begin(), hist.end(),
shard->hist.data.begin());
shard->hist.Data().begin());
// Initialize GPUHistMaker
GPUHistMakerSpecialised<GradientPairPrecise> hist_maker =
GPUHistMakerSpecialised<GradientPairPrecise>();
hist_maker.param_ = param;
hist_maker.shards_.push_back(std::move(shard));
hist_maker.column_sampler_.Init(n_cols,
hist_maker.column_sampler_.Init(kNCols,
param.colsample_bynode,
param.colsample_bylevel,
param.colsample_bytree,
@@ -295,8 +296,8 @@ TEST(GpuHist, EvaluateSplits) {
RegTree tree;
MetaInfo info;
info.num_row_ = n_rows;
info.num_col_ = n_cols;
info.num_row_ = kNRows;
info.num_col_ = kNCols;
hist_maker.info_ = &info;
hist_maker.node_value_constraints_.resize(1);
@@ -313,30 +314,30 @@ TEST(GpuHist, EvaluateSplits) {
TEST(GpuHist, ApplySplit) {
GPUHistMakerSpecialised<GradientPairPrecise> hist_maker =
GPUHistMakerSpecialised<GradientPairPrecise>();
int constexpr nid = 0;
int constexpr n_rows = 16;
int constexpr n_cols = 8;
int constexpr kNId = 0;
int constexpr kNRows = 16;
int constexpr kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args = {};
param.InitAllowUnknown(args);
// Initialize shard
for (size_t i = 0; i < n_cols; ++i) {
for (size_t i = 0; i < kNCols; ++i) {
param.monotone_constraints.emplace_back(0);
}
hist_maker.shards_.resize(1);
hist_maker.shards_[0].reset(new DeviceShard<GradientPairPrecise>(0, 0, n_rows, param));
hist_maker.shards_[0].reset(new DeviceShard<GradientPairPrecise>(0, 0, kNRows, param));
auto& shard = hist_maker.shards_.at(0);
shard->ridx_segments.resize(3); // 3 nodes.
shard->node_sum_gradients.resize(3);
shard->ridx_segments[0] = Segment(0, n_rows);
shard->ba.Allocate(0, &(shard->ridx), n_rows,
&(shard->position), n_rows);
shard->row_stride = n_cols;
shard->ridx_segments[0] = Segment(0, kNRows);
shard->ba.Allocate(0, &(shard->ridx), kNRows,
&(shard->position), kNRows);
shard->row_stride = kNCols;
thrust::sequence(shard->ridx.CurrentDVec().tbegin(),
shard->ridx.CurrentDVec().tend());
// Initialize GPUHistMaker
@@ -349,31 +350,30 @@ TEST(GpuHist, ApplySplit) {
GradientPair(8.2, 2.8), GradientPair(6.3, 3.6),
GPUTrainingParam(param));
GPUHistMakerSpecialised<GradientPairPrecise>::ExpandEntry candidate_entry {0, 0, candidate, 0};
candidate_entry.nid = nid;
candidate_entry.nid = kNId;
auto const& nodes = tree.GetNodes();
size_t n_nodes = nodes.size();
// Used to get bin_id in update position.
common::HistCutMatrix cmat = GetHostCutMatrix();
hist_maker.hmat_ = cmat;
MetaInfo info;
info.num_row_ = n_rows;
info.num_col_ = n_cols;
info.num_nonzero_ = n_rows * n_cols; // Dense
info.num_row_ = kNRows;
info.num_col_ = kNCols;
info.num_nonzero_ = kNRows * kNCols; // Dense
// Initialize gidx
int n_bins = 24;
int row_stride = n_cols;
int row_stride = kNCols;
int num_symbols = n_bins + 1;
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(
row_stride * n_rows, num_symbols);
row_stride * kNRows, num_symbols);
shard->ba.Allocate(0, &(shard->gidx_buffer), compressed_size_bytes);
common::CompressedBufferWriter wr(num_symbols);
std::vector<int> h_gidx (n_rows * row_stride);
std::vector<int> h_gidx (kNRows * row_stride);
std::iota(h_gidx.begin(), h_gidx.end(), 0);
std::vector<common::CompressedByteT> h_gidx_compressed (compressed_size_bytes);
@@ -387,10 +387,10 @@ TEST(GpuHist, ApplySplit) {
hist_maker.ApplySplit(candidate_entry, &tree);
hist_maker.UpdatePosition(candidate_entry, &tree);
ASSERT_FALSE(tree[nid].IsLeaf());
ASSERT_FALSE(tree[kNId].IsLeaf());
int left_nidx = tree[nid].LeftChild();
int right_nidx = tree[nid].RightChild();
int left_nidx = tree[kNId].LeftChild();
int right_nidx = tree[kNId].RightChild();
ASSERT_EQ(shard->ridx_segments[left_nidx].begin, 0);
ASSERT_EQ(shard->ridx_segments[left_nidx].end, 6);

View File

@@ -13,14 +13,14 @@ namespace xgboost {
namespace tree {
TEST(Updater, Prune) {
int constexpr n_rows = 32, n_cols = 16;
int constexpr kNRows = 32, kNCols = 16;
std::vector<std::pair<std::string, std::string>> cfg;
cfg.push_back(std::pair<std::string, std::string>(
"num_feature", std::to_string(n_cols)));
cfg.push_back(std::pair<std::string, std::string>(
cfg.emplace_back(std::pair<std::string, std::string>(
"num_feature", std::to_string(kNCols)));
cfg.emplace_back(std::pair<std::string, std::string>(
"min_split_loss", "10"));
cfg.push_back(std::pair<std::string, std::string>(
cfg.emplace_back(std::pair<std::string, std::string>(
"silent", "1"));
// These data are just place holders.

View File

@@ -133,12 +133,12 @@ class QuantileHistMock : public QuantileHistMaker {
std::vector<GradientPair> row_gpairs =
{ {1.23f, 0.24f}, {0.24f, 0.25f}, {0.26f, 0.27f}, {2.27f, 0.28f},
{0.27f, 0.29f}, {0.37f, 0.39f}, {-0.47f, 0.49f}, {0.57f, 0.59f} };
size_t constexpr max_bins = 4;
auto dmat = CreateDMatrix(n_rows, n_cols, 0, 3);
size_t constexpr kMaxBins = 4;
auto dmat = CreateDMatrix(kNRows, kNCols, 0, 3);
// dense, no missing values
common::GHistIndexMatrix gmat;
gmat.Init((*dmat).get(), max_bins);
gmat.Init((*dmat).get(), kMaxBins);
RealImpl::InitData(gmat, row_gpairs, *(*dmat), tree);
hist_.AddHistRow(0);
@@ -167,7 +167,8 @@ class QuantileHistMock : public QuantileHistMaker {
// 2) no regularization, i.e. set min_child_weight, reg_lambda, reg_alpha,
// and max_delta_step to 0.
bst_float best_split_gain = 0.0f;
size_t best_split_threshold, best_split_feature;
size_t best_split_threshold = std::numeric_limits<size_t>::max();
size_t best_split_feature = std::numeric_limits<size_t>::max();
// Enumerate all features
for (size_t fid = 0; fid < num_feature; ++fid) {
const size_t bin_id_min = gmat.cut.row_ptr[fid];
@@ -213,56 +214,56 @@ class QuantileHistMock : public QuantileHistMaker {
}
};
int static constexpr n_rows = 8, n_cols = 16;
std::shared_ptr<xgboost::DMatrix> *dmat;
const std::vector<std::pair<std::string, std::string> > cfg;
int static constexpr kNRows = 8, kNCols = 16;
std::shared_ptr<xgboost::DMatrix> *dmat_;
const std::vector<std::pair<std::string, std::string> > cfg_;
std::shared_ptr<BuilderMock> builder_;
public:
explicit QuantileHistMock(
const std::vector<std::pair<std::string, std::string> >& args) :
cfg{args} {
cfg_{args} {
QuantileHistMaker::Init(args);
builder_.reset(
new BuilderMock(
param_,
std::move(pruner_),
std::unique_ptr<SplitEvaluator>(spliteval_->GetHostClone())));
dmat = CreateDMatrix(n_rows, n_cols, 0.8, 3);
dmat_ = CreateDMatrix(kNRows, kNCols, 0.8, 3);
}
~QuantileHistMock() { delete dmat; }
~QuantileHistMock() override { delete dmat_; }
static size_t GetNumColumns() { return n_cols; }
static size_t GetNumColumns() { return kNCols; }
void TestInitData() {
size_t constexpr max_bins = 4;
size_t constexpr kMaxBins = 4;
common::GHistIndexMatrix gmat;
gmat.Init((*dmat).get(), max_bins);
gmat.Init((*dmat_).get(), kMaxBins);
RegTree tree = RegTree();
tree.param.InitAllowUnknown(cfg);
tree.param.InitAllowUnknown(cfg_);
std::vector<GradientPair> gpair =
{ {0.23f, 0.24f}, {0.23f, 0.24f}, {0.23f, 0.24f}, {0.23f, 0.24f},
{0.27f, 0.29f}, {0.27f, 0.29f}, {0.27f, 0.29f}, {0.27f, 0.29f} };
builder_->TestInitData(gmat, gpair, dmat->get(), tree);
builder_->TestInitData(gmat, gpair, dmat_->get(), tree);
}
void TestBuildHist() {
RegTree tree = RegTree();
tree.param.InitAllowUnknown(cfg);
tree.param.InitAllowUnknown(cfg_);
size_t constexpr max_bins = 4;
size_t constexpr kMaxBins = 4;
common::GHistIndexMatrix gmat;
gmat.Init((*dmat).get(), max_bins);
gmat.Init((*dmat_).get(), kMaxBins);
builder_->TestBuildHist(0, gmat, *(*dmat).get(), tree);
builder_->TestBuildHist(0, gmat, *(*dmat_).get(), tree);
}
void TestEvaluateSplit() {
RegTree tree = RegTree();
tree.param.InitAllowUnknown(cfg);
tree.param.InitAllowUnknown(cfg_);
builder_->TestEvaluateSplit(gmatb_, tree);
}

View File

@@ -13,15 +13,15 @@ namespace xgboost {
namespace tree {
TEST(Updater, Refresh) {
int constexpr n_rows = 8, n_cols = 16;
int constexpr kNRows = 8, kNCols = 16;
HostDeviceVector<GradientPair> gpair =
{ {0.23f, 0.24f}, {0.23f, 0.24f}, {0.23f, 0.24f}, {0.23f, 0.24f},
{0.27f, 0.29f}, {0.27f, 0.29f}, {0.27f, 0.29f}, {0.27f, 0.29f} };
auto dmat = CreateDMatrix(n_rows, n_cols, 0.4, 3);
auto dmat = CreateDMatrix(kNRows, kNCols, 0.4, 3);
std::vector<std::pair<std::string, std::string>> cfg {
{"reg_alpha", "0.0"},
{"num_feature", std::to_string(n_cols)},
{"num_feature", std::to_string(kNCols)},
{"reg_lambda", "1"}};
RegTree tree = RegTree();