Upgrade clang-tidy on CI. (#5469)

* Correct all clang-tidy errors.
* Upgrade clang-tidy to 10 on CI.

Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
This commit is contained in:
Jiaming Yuan
2020-04-05 04:42:29 +08:00
committed by GitHub
parent 30e94ddd04
commit 0012f2ef93
107 changed files with 932 additions and 903 deletions

View File

@@ -69,7 +69,7 @@ TEST(GradientBasedSampler, NoSampling) {
}
// In external mode, when not sampling, we concatenate the pages together.
-TEST(GradientBasedSampler, NoSampling_ExternalMemory) {
+TEST(GradientBasedSampler, NoSamplingExternalMemory) {
constexpr size_t kRows = 2048;
constexpr size_t kCols = 1;
constexpr float kSubsample = 1.0f;
@@ -121,7 +121,7 @@ TEST(GradientBasedSampler, UniformSampling) {
VerifySampling(kPageSize, kSubsample, kSamplingMethod, kFixedSizeSampling, kCheckSum);
}
-TEST(GradientBasedSampler, UniformSampling_ExternalMemory) {
+TEST(GradientBasedSampler, UniformSamplingExternalMemory) {
constexpr size_t kPageSize = 1024;
constexpr float kSubsample = 0.5;
constexpr int kSamplingMethod = TrainParam::kUniform;
@@ -137,7 +137,7 @@ TEST(GradientBasedSampler, GradientBasedSampling) {
VerifySampling(kPageSize, kSubsample, kSamplingMethod);
}
-TEST(GradientBasedSampler, GradientBasedSampling_ExternalMemory) {
+TEST(GradientBasedSampler, GradientBasedSamplingExternalMemory) {
constexpr size_t kPageSize = 1024;
constexpr float kSubsample = 0.8;
constexpr int kSamplingMethod = TrainParam::kGradientBased;

View File

@@ -45,13 +45,13 @@ tree::TrainParam GetParameter() {
}
void CompareBitField(LBitField64 d_field, std::set<uint32_t> positions) {
-std::vector<LBitField64::value_type> h_field_storage(d_field.bits_.size());
-thrust::copy(thrust::device_ptr<LBitField64::value_type>(d_field.bits_.data()),
+std::vector<LBitField64::value_type> h_field_storage(d_field.Bits().size());
+thrust::copy(thrust::device_ptr<LBitField64::value_type>(d_field.Bits().data()),
 thrust::device_ptr<LBitField64::value_type>(
-d_field.bits_.data() + d_field.bits_.size()),
+d_field.Bits().data() + d_field.Bits().size()),
h_field_storage.data());
-LBitField64 h_field;
-h_field.bits_ = {h_field_storage.data(), h_field_storage.data() + h_field_storage.size()};
+LBitField64 h_field{ {h_field_storage.data(),
+h_field_storage.data() + h_field_storage.size()} };
for (size_t i = 0; i < h_field.Size(); ++i) {
if (positions.find(i) != positions.cend()) {
@@ -73,13 +73,14 @@ TEST(GPUFeatureInteractionConstraint, Init) {
ASSERT_EQ(constraints.Features(), kFeatures);
common::Span<LBitField64> s_nodes_constraints = constraints.GetNodeConstraints();
for (LBitField64 const& d_node : s_nodes_constraints) {
-std::vector<LBitField64::value_type> h_node_storage(d_node.bits_.size());
-thrust::copy(thrust::device_ptr<LBitField64::value_type>(d_node.bits_.data()),
-thrust::device_ptr<LBitField64::value_type>(
-d_node.bits_.data() + d_node.bits_.size()),
+std::vector<LBitField64::value_type> h_node_storage(d_node.Bits().size());
+thrust::copy(thrust::device_ptr<LBitField64::value_type const>(d_node.Bits().data()),
+thrust::device_ptr<LBitField64::value_type const>(
+d_node.Bits().data() + d_node.Bits().size()),
h_node_storage.data());
-LBitField64 h_node;
-h_node.bits_ = {h_node_storage.data(), h_node_storage.data() + h_node_storage.size()};
+LBitField64 h_node {
+{h_node_storage.data(), h_node_storage.data() + h_node_storage.size()}
+};
// no feature is attached to node.
for (size_t i = 0; i < h_node.Size(); ++i) {
ASSERT_FALSE(h_node.Check(i));
@@ -133,7 +134,7 @@ TEST(GPUFeatureInteractionConstraint, Split) {
constraints.Split(0, /*feature_id=*/1, 1, 2);
for (size_t nid = 0; nid < 3; ++nid) {
d_node[nid] = constraints.GetNodeConstraints()[nid];
-ASSERT_EQ(d_node[nid].bits_.size(), 1);
+ASSERT_EQ(d_node[nid].Bits().size(), 1);
CompareBitField(d_node[nid], {1, 2});
}
}

View File

@@ -193,7 +193,7 @@ TEST(GpuHist, EvaluateSplits) {
auto cmat = GetHostCutMatrix();
// Copy cut matrix to device.
-page->cuts_ = cmat;
+page->Cuts() = cmat;
maker.ba.Allocate(0, &(maker.monotone_constraints), kNCols);
dh::CopyVectorToDeviceSpan(maker.monotone_constraints,
param.monotone_constraints);
@@ -271,7 +271,7 @@ void TestHistogramIndexImpl() {
const auto &maker_ext = hist_maker_ext.maker;
std::vector<common::CompressedByteT> h_gidx_buffer_ext(maker_ext->page->gidx_buffer.HostVector());
-ASSERT_EQ(maker->page->cuts_.TotalBins(), maker_ext->page->cuts_.TotalBins());
+ASSERT_EQ(maker->page->Cuts().TotalBins(), maker_ext->page->Cuts().TotalBins());
ASSERT_EQ(maker->page->gidx_buffer.Size(), maker_ext->page->gidx_buffer.Size());
}
@@ -498,7 +498,7 @@ TEST(GpuHist, ExternalMemoryWithSampling) {
}
}
-TEST(GpuHist, Config_IO) {
+TEST(GpuHist, ConfigIO) {
GenericParameter generic_param(CreateEmptyGenericParam(0));
std::unique_ptr<TreeUpdater> updater {TreeUpdater::Create("grow_gpu_hist", &generic_param) };
updater->Configure(Args{});

View File

@@ -73,7 +73,7 @@ class QuantileHistMock : public QuantileHistMaker {
const size_t rid = batch.base_rowid + i;
ASSERT_LT(rid, num_row);
const size_t gmat_row_offset = gmat.row_ptr[rid];
-ASSERT_LT(gmat_row_offset, gmat.index.size());
+ASSERT_LT(gmat_row_offset, gmat.index.Size());
SparsePage::Inst inst = batch[i];
ASSERT_EQ(gmat.row_ptr[rid] + inst.size(), gmat.row_ptr[rid + 1]);
for (size_t j = 0; j < inst.size(); ++j) {
@@ -285,14 +285,14 @@ class QuantileHistMock : public QuantileHistMaker {
}
};
-TEST(Updater, QuantileHist_InitData) {
+TEST(QuantileHist, InitData) {
std::vector<std::pair<std::string, std::string>> cfg
{{"num_feature", std::to_string(QuantileHistMock::GetNumColumns())}};
QuantileHistMock maker(cfg);
maker.TestInitData();
}
-TEST(Updater, QuantileHist_BuildHist) {
+TEST(QuantileHist, BuildHist) {
// Don't enable feature grouping
std::vector<std::pair<std::string, std::string>> cfg
{{"num_feature", std::to_string(QuantileHistMock::GetNumColumns())},
@@ -301,7 +301,7 @@ TEST(Updater, QuantileHist_BuildHist) {
maker.TestBuildHist();
}
-TEST(Updater, QuantileHist_EvalSplits) {
+TEST(QuantileHist, EvalSplits) {
std::vector<std::pair<std::string, std::string>> cfg
{{"num_feature", std::to_string(QuantileHistMock::GetNumColumns())},
{"split_evaluator", "elastic_net"},