Upgrade clang-tidy on CI. (#5469)

* Correct all clang-tidy errors.
* Upgrade clang-tidy to 10 on CI.

Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
Author: Jiaming Yuan
Date: 2020-04-05 04:42:29 +08:00
Committed by: GitHub
Parent: 30e94ddd04
Commit: 0012f2ef93

107 changed files with 932 additions and 903 deletions
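
Most of the diff applies clang-tidy's identifier-naming rules: test suites and test names move to CamelCase without underscores (GoogleTest also advises against underscores in test names), constants gain a leading 'k', and accessor methods are CamelCase. A rough, self-contained illustration of the convention follows; it is not code from the diff, and the numeric enum values are only an assumption:

    #include <gtest/gtest.h>

    // Before the change, constants were SHOUTY_CASE and test suites snake_case:
    //   enum BinTypeSize { UINT8_BINS_TYPE_SIZE = 1, ... };
    //   TEST(c_api, XGDMatrixCreateFromMat_omp) { ... }
    //
    // After the change, constants are kCamelCase and test names CamelCase.
    enum BinTypeSize {
      kUint8BinsTypeSize = 1,
      kUint16BinsTypeSize = 2,
      kUint32BinsTypeSize = 4
    };

    TEST(CAPI, XGDMatrixCreateFromMatOmp) {
      EXPECT_EQ(kUint8BinsTypeSize, 1);
    }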

View File

@@ -11,7 +11,7 @@
#include "../../../src/common/io.h"
TEST(c_api, XGDMatrixCreateFromMatDT) {
TEST(CAPI, XGDMatrixCreateFromMatDT) {
std::vector<int> col0 = {0, -1, 3};
std::vector<float> col1 = {-4.0f, 2.0f, 0.0f};
const char *col0_type = "int32";
@@ -38,7 +38,7 @@ TEST(c_api, XGDMatrixCreateFromMatDT) {
delete dmat;
}
TEST(c_api, XGDMatrixCreateFromMat_omp) {
TEST(CAPI, XGDMatrixCreateFromMatOmp) {
std::vector<int> num_rows = {100, 11374, 15000};
for (auto row : num_rows) {
int num_cols = 50;
@@ -74,13 +74,13 @@ TEST(c_api, XGDMatrixCreateFromMat_omp) {
namespace xgboost {
TEST(c_api, Version) {
TEST(CAPI, Version) {
int patch {0};
XGBoostVersion(NULL, NULL, &patch); // NOLINT
ASSERT_EQ(patch, XGBOOST_VER_PATCH);
}
TEST(c_api, ConfigIO) {
TEST(CAPI, ConfigIO) {
size_t constexpr kRows = 10;
auto p_dmat = RandomDataGenerator(kRows, 10, 0).GenerateDMatix();
std::vector<std::shared_ptr<DMatrix>> mat {p_dmat};
@@ -111,7 +111,7 @@ TEST(c_api, ConfigIO) {
ASSERT_EQ(config_0, config_1);
}
TEST(c_api, JsonModelIO) {
TEST(CAPI, JsonModelIO) {
size_t constexpr kRows = 10;
dmlc::TemporaryDirectory tempdir;

View File

@@ -27,7 +27,7 @@ TEST(BitField, StorageSize) {
ASSERT_EQ(2, size);
}
TEST(BitField, GPU_Set) {
TEST(BitField, GPUSet) {
dh::device_vector<LBitField64::value_type> storage;
uint32_t constexpr kBits = 128;
storage.resize(128);
@@ -49,7 +49,7 @@ __global__ void TestOrKernel(LBitField64 lhs, LBitField64 rhs) {
lhs |= rhs;
}
TEST(BitField, GPU_And) {
TEST(BitField, GPUAnd) {
uint32_t constexpr kBits = 128;
dh::device_vector<LBitField64::value_type> lhs_storage(kBits);
dh::device_vector<LBitField64::value_type> rhs_storage(kBits);

View File

@@ -22,19 +22,19 @@ TEST(DenseColumn, Test) {
for (auto i = 0ull; i < dmat->Info().num_row_; i++) {
for (auto j = 0ull; j < dmat->Info().num_col_; j++) {
switch (column_matrix.GetTypeSize()) {
case UINT8_BINS_TYPE_SIZE: {
case kUint8BinsTypeSize: {
auto col = column_matrix.GetColumn<uint8_t>(j);
ASSERT_EQ(gmat.index[i * dmat->Info().num_col_ + j],
(*col.get()).GetGlobalBinIdx(i));
}
break;
case UINT16_BINS_TYPE_SIZE: {
case kUint16BinsTypeSize: {
auto col = column_matrix.GetColumn<uint16_t>(j);
ASSERT_EQ(gmat.index[i * dmat->Info().num_col_ + j],
(*col.get()).GetGlobalBinIdx(i));
}
break;
case UINT32_BINS_TYPE_SIZE: {
case kUint32BinsTypeSize: {
auto col = column_matrix.GetColumn<uint32_t>(j);
ASSERT_EQ(gmat.index[i * dmat->Info().num_col_ + j],
(*col.get()).GetGlobalBinIdx(i));
@@ -49,7 +49,7 @@ TEST(DenseColumn, Test) {
template<typename BinIdxType>
inline void CheckSparseColumn(const Column<BinIdxType>& col_input, const GHistIndexMatrix& gmat) {
const SparseColumn<BinIdxType>& col = static_cast<const SparseColumn<BinIdxType>& >(col_input);
ASSERT_EQ(col.Size(), gmat.index.size());
ASSERT_EQ(col.Size(), gmat.index.Size());
for (auto i = 0ull; i < col.Size(); i++) {
ASSERT_EQ(gmat.index[gmat.row_ptr[col.GetRowIdx(i)]],
col.GetGlobalBinIdx(i));
@@ -67,17 +67,17 @@ TEST(SparseColumn, Test) {
ColumnMatrix column_matrix;
column_matrix.Init(gmat, 0.5);
switch (column_matrix.GetTypeSize()) {
case UINT8_BINS_TYPE_SIZE: {
case kUint8BinsTypeSize: {
auto col = column_matrix.GetColumn<uint8_t>(0);
CheckSparseColumn(*col.get(), gmat);
}
break;
case UINT16_BINS_TYPE_SIZE: {
case kUint16BinsTypeSize: {
auto col = column_matrix.GetColumn<uint16_t>(0);
CheckSparseColumn(*col.get(), gmat);
}
break;
case UINT32_BINS_TYPE_SIZE: {
case kUint32BinsTypeSize: {
auto col = column_matrix.GetColumn<uint32_t>(0);
CheckSparseColumn(*col.get(), gmat);
}
@@ -108,17 +108,17 @@ TEST(DenseColumnWithMissing, Test) {
ColumnMatrix column_matrix;
column_matrix.Init(gmat, 0.2);
switch (column_matrix.GetTypeSize()) {
case UINT8_BINS_TYPE_SIZE: {
case kUint8BinsTypeSize: {
auto col = column_matrix.GetColumn<uint8_t>(0);
CheckColumWithMissingValue(*col.get(), gmat);
}
break;
case UINT16_BINS_TYPE_SIZE: {
case kUint16BinsTypeSize: {
auto col = column_matrix.GetColumn<uint16_t>(0);
CheckColumWithMissingValue(*col.get(), gmat);
}
break;
case UINT32_BINS_TYPE_SIZE: {
case kUint32BinsTypeSize: {
auto col = column_matrix.GetColumn<uint32_t>(0);
CheckColumWithMissingValue(*col.get(), gmat);
}

View File

@@ -55,14 +55,14 @@ void TestLbs() {
}
}
TEST(cub_lbs, Test) {
TEST(CubLBS, Test) {
TestLbs();
}
TEST(sumReduce, Test) {
TEST(SumReduce, Test) {
thrust::device_vector<float> data(100, 1.0f);
dh::CubMemory temp;
auto sum = dh::SumReduction(temp, dh::Raw(data), data.size());
auto sum = dh::SumReduction(&temp, dh::Raw(data), data.size());
ASSERT_NEAR(sum, 100.0f, 1e-5);
}
@@ -81,7 +81,7 @@ void TestAllocator() {
}
// Define the test in a function so we can use device lambda
TEST(bulkAllocator, Test) {
TEST(BulkAllocator, Test) {
TestAllocator();
}
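
Besides renames, a few helpers switch from taking non-const references to taking pointers (dh::SumReduction now receives &temp above), so mutation is visible at the call site; clang-tidy flags non-const reference parameters through checks such as google-runtime-references. A minimal sketch of the pattern with hypothetical names, not code from this commit:

    #include <vector>

    // Before: a non-const reference output parameter hides the mutation at the
    // call site:  void Accumulate(std::vector<float>& acc, float v);
    //
    // After: mutable outputs are passed by pointer, so callers write &acc.
    void Accumulate(std::vector<float>* acc, float v) {
      acc->push_back(v);
    }

    int main() {
      std::vector<float> acc;
      Accumulate(&acc, 1.0f);  // the '&' makes the side effect explicit
      return 0;
    }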

View File

@@ -8,7 +8,7 @@
namespace xgboost {
namespace common {
TEST(group_data, ParallelGroupBuilder) {
TEST(GroupData, ParallelGroupBuilder) {
std::vector<size_t> offsets;
std::vector<Entry> data;
ParallelGroupBuilder<Entry, size_t> builder(&offsets, &data);

View File

@@ -218,7 +218,7 @@ TEST(SparseCuts, MultiThreadedBuild) {
omp_set_num_threads(ori_nthreads);
}
TEST(hist_util, DenseCutsCategorical) {
TEST(HistUtil, DenseCutsCategorical) {
int categorical_sizes[] = {2, 6, 8, 12};
int num_bins = 256;
int sizes[] = {25, 100, 1000};
@@ -240,7 +240,7 @@ TEST(hist_util, DenseCutsCategorical) {
}
}
TEST(hist_util, DenseCutsAccuracyTest) {
TEST(HistUtil, DenseCutsAccuracyTest) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
@@ -256,7 +256,7 @@ TEST(hist_util, DenseCutsAccuracyTest) {
}
}
TEST(hist_util, DenseCutsAccuracyTestWeights) {
TEST(HistUtil, DenseCutsAccuracyTestWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
@@ -274,7 +274,7 @@ TEST(hist_util, DenseCutsAccuracyTestWeights) {
}
}
TEST(hist_util, DenseCutsExternalMemory) {
TEST(HistUtil, DenseCutsExternalMemory) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
@@ -292,7 +292,7 @@ TEST(hist_util, DenseCutsExternalMemory) {
}
}
TEST(hist_util, SparseCutsAccuracyTest) {
TEST(HistUtil, SparseCutsAccuracyTest) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
@@ -308,7 +308,7 @@ TEST(hist_util, SparseCutsAccuracyTest) {
}
}
TEST(hist_util, SparseCutsCategorical) {
TEST(HistUtil, SparseCutsCategorical) {
int categorical_sizes[] = {2, 6, 8, 12};
int num_bins = 256;
int sizes[] = {25, 100, 1000};
@@ -330,7 +330,7 @@ TEST(hist_util, SparseCutsCategorical) {
}
}
TEST(hist_util, SparseCutsExternalMemory) {
TEST(HistUtil, SparseCutsExternalMemory) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
@@ -348,13 +348,13 @@ TEST(hist_util, SparseCutsExternalMemory) {
}
}
TEST(hist_util, IndexBinBound) {
TEST(HistUtil, IndexBinBound) {
uint64_t bin_sizes[] = { static_cast<uint64_t>(std::numeric_limits<uint8_t>::max()) + 1,
static_cast<uint64_t>(std::numeric_limits<uint16_t>::max()) + 1,
static_cast<uint64_t>(std::numeric_limits<uint16_t>::max()) + 2 };
BinTypeSize expected_bin_type_sizes[] = {UINT8_BINS_TYPE_SIZE,
UINT16_BINS_TYPE_SIZE,
UINT32_BINS_TYPE_SIZE};
BinTypeSize expected_bin_type_sizes[] = {kUint8BinsTypeSize,
kUint16BinsTypeSize,
kUint32BinsTypeSize};
size_t constexpr kRows = 100;
size_t constexpr kCols = 10;
@@ -364,18 +364,18 @@ TEST(hist_util, IndexBinBound) {
common::GHistIndexMatrix hmat;
hmat.Init(p_fmat.get(), max_bin);
EXPECT_EQ(hmat.index.size(), kRows*kCols);
EXPECT_EQ(expected_bin_type_sizes[bin_id++], hmat.index.getBinTypeSize());
EXPECT_EQ(hmat.index.Size(), kRows*kCols);
EXPECT_EQ(expected_bin_type_sizes[bin_id++], hmat.index.GetBinTypeSize());
}
}
TEST(hist_util, SparseIndexBinBound) {
TEST(HistUtil, SparseIndexBinBound) {
uint64_t bin_sizes[] = { static_cast<uint64_t>(std::numeric_limits<uint8_t>::max()) + 1,
static_cast<uint64_t>(std::numeric_limits<uint16_t>::max()) + 1,
static_cast<uint64_t>(std::numeric_limits<uint16_t>::max()) + 2 };
BinTypeSize expected_bin_type_sizes[] = { UINT32_BINS_TYPE_SIZE,
UINT32_BINS_TYPE_SIZE,
UINT32_BINS_TYPE_SIZE };
BinTypeSize expected_bin_type_sizes[] = { kUint32BinsTypeSize,
kUint32BinsTypeSize,
kUint32BinsTypeSize };
size_t constexpr kRows = 100;
size_t constexpr kCols = 10;
@@ -384,19 +384,19 @@ TEST(hist_util, SparseIndexBinBound) {
auto p_fmat = RandomDataGenerator(kRows, kCols, 0.2).GenerateDMatix();
common::GHistIndexMatrix hmat;
hmat.Init(p_fmat.get(), max_bin);
EXPECT_EQ(expected_bin_type_sizes[bin_id++], hmat.index.getBinTypeSize());
EXPECT_EQ(expected_bin_type_sizes[bin_id++], hmat.index.GetBinTypeSize());
}
}
template <typename T>
void CheckIndexData(T* data_ptr, uint32_t* offsets,
const common::GHistIndexMatrix& hmat, size_t n_cols) {
for (size_t i = 0; i < hmat.index.size(); ++i) {
for (size_t i = 0; i < hmat.index.Size(); ++i) {
EXPECT_EQ(data_ptr[i] + offsets[i % n_cols], hmat.index[i]);
}
}
TEST(hist_util, IndexBinData) {
TEST(HistUtil, IndexBinData) {
uint64_t constexpr kBinSizes[] = { static_cast<uint64_t>(std::numeric_limits<uint8_t>::max()) + 1,
static_cast<uint64_t>(std::numeric_limits<uint16_t>::max()) + 1,
static_cast<uint64_t>(std::numeric_limits<uint16_t>::max()) + 2 };
@@ -407,8 +407,8 @@ TEST(hist_util, IndexBinData) {
auto p_fmat = RandomDataGenerator(kRows, kCols, 0).GenerateDMatix();
common::GHistIndexMatrix hmat;
hmat.Init(p_fmat.get(), max_bin);
uint32_t* offsets = hmat.index.offset();
EXPECT_EQ(hmat.index.size(), kRows*kCols);
uint32_t* offsets = hmat.index.Offset();
EXPECT_EQ(hmat.index.Size(), kRows*kCols);
switch (max_bin) {
case kBinSizes[0]:
CheckIndexData(hmat.index.data<uint8_t>(),
@@ -426,7 +426,7 @@ TEST(hist_util, IndexBinData) {
}
}
TEST(hist_util, SparseIndexBinData) {
TEST(HistUtil, SparseIndexBinData) {
uint64_t bin_sizes[] = { static_cast<uint64_t>(std::numeric_limits<uint8_t>::max()) + 1,
static_cast<uint64_t>(std::numeric_limits<uint16_t>::max()) + 1,
static_cast<uint64_t>(std::numeric_limits<uint16_t>::max()) + 2 };
@@ -437,10 +437,10 @@ TEST(hist_util, SparseIndexBinData) {
auto p_fmat = RandomDataGenerator(kRows, kCols, 0.2).GenerateDMatix();
common::GHistIndexMatrix hmat;
hmat.Init(p_fmat.get(), max_bin);
EXPECT_EQ(hmat.index.offset(), nullptr);
EXPECT_EQ(hmat.index.Offset(), nullptr);
uint32_t* data_ptr = hmat.index.data<uint32_t>();
for (size_t i = 0; i < hmat.index.size(); ++i) {
for (size_t i = 0; i < hmat.index.Size(); ++i) {
EXPECT_EQ(data_ptr[i], hmat.index[i]);
}
}

View File

@@ -32,7 +32,7 @@ HistogramCuts GetHostCuts(AdapterT *adapter, int num_bins, float missing) {
builder.Build(&dmat, num_bins);
return cuts;
}
TEST(hist_util, DeviceSketch) {
TEST(HistUtil, DeviceSketch) {
int num_rows = 5;
int num_columns = 1;
int num_bins = 4;
@@ -61,7 +61,7 @@ size_t RequiredSampleCutsTest(int max_bins, size_t num_rows) {
return std::min(num_cuts, num_rows);
}
TEST(hist_util, DeviceSketchMemory) {
TEST(HistUtil, DeviceSketchMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
@@ -81,7 +81,7 @@ TEST(hist_util, DeviceSketchMemory) {
bytes_num_elements + bytes_cuts + bytes_constant);
}
TEST(hist_util, DeviceSketchMemoryWeights) {
TEST(HistUtil, DeviceSketchMemoryWeights) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
@@ -102,7 +102,7 @@ TEST(hist_util, DeviceSketchMemoryWeights) {
size_t((bytes_num_elements + bytes_cuts) * 1.05));
}
TEST(hist_util, DeviceSketchDeterminism) {
TEST(HistUtil, DeviceSketchDeterminism) {
int num_rows = 500;
int num_columns = 5;
int num_bins = 256;
@@ -117,7 +117,7 @@ TEST(hist_util, DeviceSketchDeterminism) {
}
}
TEST(hist_util, DeviceSketchCategorical) {
TEST(HistUtil, DeviceSketchCategorical) {
int categorical_sizes[] = {2, 6, 8, 12};
int num_bins = 256;
int sizes[] = {25, 100, 1000};
@@ -131,7 +131,7 @@ TEST(hist_util, DeviceSketchDeterminism) {
}
}
TEST(hist_util, DeviceSketchMultipleColumns) {
TEST(HistUtil, DeviceSketchMultipleColumns) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
@@ -146,7 +146,7 @@ TEST(hist_util, DeviceSketchMultipleColumns) {
}
TEST(hist_util, DeviceSketchMultipleColumnsWeights) {
TEST(HistUtil, DeviceSketchMultipleColumnsWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
@@ -161,7 +161,7 @@ TEST(hist_util, DeviceSketchMultipleColumnsWeights) {
}
}
TEST(hist_util, DeviceSketchBatches) {
TEST(HistUtil, DeviceSketchBatches) {
int num_bins = 256;
int num_rows = 5000;
int batch_sizes[] = {0, 100, 1500, 6000};
@@ -174,7 +174,7 @@ TEST(hist_util, DeviceSketchBatches) {
}
}
TEST(hist_util, DeviceSketchMultipleColumnsExternal) {
TEST(HistUtil, DeviceSketchMultipleColumnsExternal) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns =5;
@@ -190,7 +190,7 @@ TEST(hist_util, DeviceSketchMultipleColumnsExternal) {
}
}
TEST(hist_util, AdapterDeviceSketch)
TEST(HistUtil, AdapterDeviceSketch)
{
int rows = 5;
int cols = 1;
@@ -212,7 +212,7 @@ TEST(hist_util, AdapterDeviceSketch)
EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues());
}
TEST(hist_util, AdapterDeviceSketchMemory) {
TEST(HistUtil, AdapterDeviceSketchMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
@@ -235,7 +235,7 @@ TEST(hist_util, AdapterDeviceSketchMemory) {
bytes_num_elements + bytes_cuts + bytes_num_columns + bytes_constant);
}
TEST(hist_util, AdapterDeviceSketchCategorical) {
TEST(HistUtil, AdapterDeviceSketchCategorical) {
int categorical_sizes[] = {2, 6, 8, 12};
int num_bins = 256;
int sizes[] = {25, 100, 1000};
@@ -252,7 +252,7 @@ TEST(hist_util, AdapterDeviceSketchMemory) {
}
}
TEST(hist_util, AdapterDeviceSketchMultipleColumns) {
TEST(HistUtil, AdapterDeviceSketchMultipleColumns) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
@@ -268,7 +268,7 @@ TEST(hist_util, AdapterDeviceSketchMultipleColumns) {
}
}
}
TEST(hist_util, AdapterDeviceSketchBatches) {
TEST(HistUtil, AdapterDeviceSketchBatches) {
int num_bins = 256;
int num_rows = 5000;
int batch_sizes[] = {0, 100, 1500, 6000};
@@ -287,7 +287,7 @@ TEST(hist_util, AdapterDeviceSketchBatches) {
// Check sketching from adapter or DMatrix results in the same answer
// Consistency here is useful for testing and user experience
TEST(hist_util, SketchingEquivalent) {
TEST(HistUtil, SketchingEquivalent) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;

View File

@@ -176,7 +176,7 @@ TEST(HostDeviceVector, Span) {
ASSERT_TRUE(vec.HostCanWrite());
}
TEST(HostDeviceVector, MGPU_Basic) {
TEST(HostDeviceVector, MGPU_Basic) { // NOLINT
if (AllVisibleGPUs() < 2) {
LOG(WARNING) << "Not testing in multi-gpu environment.";
return;
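
Test names that keep an underscore on purpose (MGPU_Basic here, MGPU_SpecifiedGpuId and MGPU_InplacePredict further down, presumably so multi-GPU tests stay easy to filter on CI) are not renamed; instead the naming check is silenced for that one line with a trailing // NOLINT. A minimal sketch; naming an explicit check in the suppression is optional, and the check named in the comment is an assumption:

    #include <gtest/gtest.h>

    // The underscore is intentional, so suppress clang-tidy on this line only.
    // A specific check could also be named, e.g. NOLINT(readability-identifier-naming).
    TEST(HostDeviceVector, MGPU_Basic) {  // NOLINT
      SUCCEED();
    }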

View File

@@ -262,7 +262,7 @@ TEST(Json, Indexing) {
Json j {Json::Load(&reader)};
auto& value_1 = j["model_parameter"];
auto& value = value_1["base_score"];
std::string result = Cast<JsonString>(&value.GetValue())->getString();
std::string result = Cast<JsonString>(&value.GetValue())->GetString();
ASSERT_EQ(result, "0.5");
}
@@ -406,7 +406,7 @@ TEST(Json, WrongCasts) {
}
}
TEST(Json, Int_vs_Float) {
TEST(Json, IntVSFloat) {
// If integer is parsed as float, calling `get<Integer>()' will throw.
{
std::string str = R"json(

View File

@@ -5,7 +5,7 @@
namespace xgboost {
namespace common {
TEST(Transform, MGPU_SpecifiedGpuId) {
TEST(Transform, MGPU_SpecifiedGpuId) { // NOLINT
if (AllVisibleGPUs() < 2) {
LOG(WARNING) << "Not testing in multi-gpu environment.";
return;

View File

@@ -67,7 +67,7 @@ TEST(Adapter, CSCAdapterColsMoreThanRows) {
EXPECT_EQ(inst[3].index, 3);
}
TEST(c_api, DMatrixSliceAdapterFromSimpleDMatrix) {
TEST(CAPI, DMatrixSliceAdapterFromSimpleDMatrix) {
auto p_dmat = RandomDataGenerator(6, 2, 1.0).GenerateDMatix();
std::vector<int> ridx_set = {1, 3, 5};

View File

@@ -50,6 +50,6 @@ void TestCudfAdapter()
});
}
TEST(device_adapter, CudfAdapter) {
TEST(DeviceAdapter, CudfAdapter) {
TestCudfAdapter();
}

View File

@@ -32,7 +32,7 @@ TEST(DeviceDMatrix, RowMajor) {
for(auto i = 0ull; i < x.size(); i++)
{
int column_idx = i % num_columns;
EXPECT_EQ(impl->cuts_.SearchBin(x[i], column_idx), iterator[i]);
EXPECT_EQ(impl->Cuts().SearchBin(x[i], column_idx), iterator[i]);
}
EXPECT_EQ(dmat.Info().num_col_, num_columns);
EXPECT_EQ(dmat.Info().num_row_, num_rows);
@@ -93,9 +93,9 @@ TEST(DeviceDMatrix, ColumnMajor) {
for (auto i = 0ull; i < kRows; i++) {
for (auto j = 0ull; j < columns.size(); j++) {
if (j == 0) {
EXPECT_EQ(iterator[i * 2 + j], impl->cuts_.SearchBin(d_data_0[i], j));
EXPECT_EQ(iterator[i * 2 + j], impl->Cuts().SearchBin(d_data_0[i], j));
} else {
EXPECT_EQ(iterator[i * 2 + j], impl->cuts_.SearchBin(d_data_1[i], j));
EXPECT_EQ(iterator[i * 2 + j], impl->Cuts().SearchBin(d_data_1[i], j));
}
}
}
@@ -123,7 +123,7 @@ TEST(DeviceDMatrix, Equivalent) {
const auto &device_dmat_batch =
*device_dmat.GetBatches<EllpackPage>({0, num_bins}).begin();
ASSERT_EQ(batch.Impl()->cuts_.Values(), device_dmat_batch.Impl()->cuts_.Values());
ASSERT_EQ(batch.Impl()->Cuts().Values(), device_dmat_batch.Impl()->Cuts().Values());
ASSERT_EQ(batch.Impl()->gidx_buffer.HostVector(),
device_dmat_batch.Impl()->gidx_buffer.HostVector());
}

View File

@@ -21,7 +21,7 @@ TEST(EllpackPage, EmptyDMatrix) {
auto& page = *dmat->GetBatches<EllpackPage>({0, kMaxBin}).begin();
auto impl = page.Impl();
ASSERT_EQ(impl->row_stride, 0);
ASSERT_EQ(impl->cuts_.TotalBins(), 0);
ASSERT_EQ(impl->Cuts().TotalBins(), 0);
ASSERT_EQ(impl->gidx_buffer.Size(), 4);
}
@@ -106,7 +106,7 @@ TEST(EllpackPage, Copy) {
auto page = (*dmat->GetBatches<EllpackPage>(param).begin()).Impl();
// Create an empty result page.
EllpackPageImpl result(0, page->cuts_, page->is_dense, page->row_stride,
EllpackPageImpl result(0, page->Cuts(), page->is_dense, page->row_stride,
kRows);
// Copy batch pages into the result page.
@@ -152,7 +152,7 @@ TEST(EllpackPage, Compact) {
auto page = (*dmat->GetBatches<EllpackPage>(param).begin()).Impl();
// Create an empty result page.
EllpackPageImpl result(0, page->cuts_, page->is_dense, page->row_stride,
EllpackPageImpl result(0, page->Cuts(), page->is_dense, page->row_stride,
kCompactedRows);
// Compact batch pages into the result page.

View File

@@ -63,14 +63,14 @@ TEST(SparsePageDMatrix, EllpackPageContent) {
EXPECT_EQ(impl->n_rows, kRows);
EXPECT_FALSE(impl->is_dense);
EXPECT_EQ(impl->row_stride, 2);
EXPECT_EQ(impl->cuts_.TotalBins(), 4);
EXPECT_EQ(impl->Cuts().TotalBins(), 4);
auto impl_ext = (*dmat_ext->GetBatches<EllpackPage>(param).begin()).Impl();
EXPECT_EQ(impl_ext->base_rowid, 0);
EXPECT_EQ(impl_ext->n_rows, kRows);
EXPECT_FALSE(impl_ext->is_dense);
EXPECT_EQ(impl_ext->row_stride, 2);
EXPECT_EQ(impl_ext->cuts_.TotalBins(), 4);
EXPECT_EQ(impl_ext->Cuts().TotalBins(), 4);
std::vector<common::CompressedByteT> buffer(impl->gidx_buffer.HostVector());
std::vector<common::CompressedByteT> buffer_ext(impl_ext->gidx_buffer.HostVector());
@@ -149,7 +149,6 @@ TEST(SparsePageDMatrix, EllpackPageMultipleLoops) {
dmat_ext(CreateSparsePageDMatrixWithRC(kRows, kCols, kPageSize, true, tmpdir));
BatchParam param{0, kMaxBins, kPageSize};
auto impl = (*dmat->GetBatches<EllpackPage>(param).begin()).Impl();
size_t current_row = 0;
for (auto& page : dmat_ext->GetBatches<EllpackPage>(param)) {

View File

@@ -33,7 +33,7 @@ TEST(Linear, Shotgun) {
model.LazyInitModel();
updater->Update(&gpair, p_fmat.get(), &model, gpair.Size());
ASSERT_EQ(model.bias()[0], 5.0f);
ASSERT_EQ(model.Bias()[0], 5.0f);
}
{
@@ -68,7 +68,7 @@ TEST(Linear, coordinate) {
model.LazyInitModel();
updater->Update(&gpair, p_fmat.get(), &model, gpair.Size());
ASSERT_EQ(model.bias()[0], 5.0f);
ASSERT_EQ(model.Bias()[0], 5.0f);
}
TEST(Coordinate, JsonIO){

View File

@@ -30,7 +30,7 @@ TEST(Linear, GPUCoordinate) {
model.LazyInitModel();
updater->Update(&gpair, mat.get(), &model, gpair.Size());
ASSERT_EQ(model.bias()[0], 5.0f);
ASSERT_EQ(model.Bias()[0], 5.0f);
}
TEST(GPUCoordinate, JsonIO) {

View File

@@ -126,7 +126,7 @@ TEST(GPUPredictor, InplacePredictCuDF) {
TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0);
}
TEST(GPUPredictor, MGPU_InplacePredict) {
TEST(GPUPredictor, MGPU_InplacePredict) { // NOLINT
int32_t n_gpus = xgboost::common::AllVisibleGPUs();
if (n_gpus <= 1) {
LOG(WARNING) << "GPUPredictor.MGPU_InplacePredict is skipped.";

View File

@@ -86,7 +86,7 @@ TEST(Learner, CheckGroup) {
EXPECT_ANY_THROW(learner->UpdateOneIter(0, p_mat));
}
TEST(Learner, SLOW_CheckMultiBatch) {
TEST(Learner, SLOW_CheckMultiBatch) { // NOLINT
// Create sufficiently large data to make two row pages
dmlc::TemporaryDirectory tempdir;
const std::string tmp_file = tempdir.path + "/big.libsvm";

View File

@@ -254,7 +254,7 @@ TEST_F(SerializationTest, Hist) {
fmap_, p_dmat_);
}
TEST_F(SerializationTest, CPU_CoordDescent) {
TEST_F(SerializationTest, CPUCoordDescent) {
TestLearnerSerialization({{"booster", "gblinear"},
{"seed", "0"},
{"nthread", "1"},
@@ -264,7 +264,7 @@ TEST_F(SerializationTest, CPU_CoordDescent) {
}
#if defined(XGBOOST_USE_CUDA)
TEST_F(SerializationTest, GPU_Hist) {
TEST_F(SerializationTest, GPUHist) {
TestLearnerSerialization({{"booster", "gbtree"},
{"seed", "0"},
{"enable_experimental_json_serialization", "1"},
@@ -338,7 +338,7 @@ TEST_F(SerializationTest, ConfigurationCount) {
xgboost::ConsoleLogger::Configure({{"verbosity", "2"}});
}
TEST_F(SerializationTest, GPU_CoordDescent) {
TEST_F(SerializationTest, GPUCoordDescent) {
TestLearnerSerialization({{"booster", "gblinear"},
{"seed", "0"},
{"nthread", "1"},
@@ -431,7 +431,7 @@ TEST_F(LogitSerializationTest, Hist) {
fmap_, p_dmat_);
}
TEST_F(LogitSerializationTest, CPU_CoordDescent) {
TEST_F(LogitSerializationTest, CPUCoordDescent) {
TestLearnerSerialization({{"booster", "gblinear"},
{"seed", "0"},
{"nthread", "1"},
@@ -441,7 +441,7 @@ TEST_F(LogitSerializationTest, CPU_CoordDescent) {
}
#if defined(XGBOOST_USE_CUDA)
TEST_F(LogitSerializationTest, GPU_Hist) {
TEST_F(LogitSerializationTest, GPUHist) {
TestLearnerSerialization({{"booster", "gbtree"},
{"objective", "binary:logistic"},
{"seed", "0"},
@@ -471,7 +471,7 @@ TEST_F(LogitSerializationTest, GPU_Hist) {
fmap_, p_dmat_);
}
TEST_F(LogitSerializationTest, GPU_CoordDescent) {
TEST_F(LogitSerializationTest, GPUCoordDescent) {
TestLearnerSerialization({{"booster", "gblinear"},
{"objective", "binary:logistic"},
{"seed", "0"},
@@ -586,7 +586,7 @@ TEST_F(MultiClassesSerializationTest, Hist) {
fmap_, p_dmat_);
}
TEST_F(MultiClassesSerializationTest, CPU_CoordDescent) {
TEST_F(MultiClassesSerializationTest, CPUCoordDescent) {
TestLearnerSerialization({{"booster", "gblinear"},
{"seed", "0"},
{"nthread", "1"},
@@ -596,7 +596,7 @@ TEST_F(MultiClassesSerializationTest, CPU_CoordDescent) {
}
#if defined(XGBOOST_USE_CUDA)
TEST_F(MultiClassesSerializationTest, GPU_Hist) {
TEST_F(MultiClassesSerializationTest, GPUHist) {
TestLearnerSerialization({{"booster", "gbtree"},
{"num_class", std::to_string(kClasses)},
{"seed", "0"},
@@ -632,7 +632,7 @@ TEST_F(MultiClassesSerializationTest, GPU_Hist) {
fmap_, p_dmat_);
}
TEST_F(MultiClassesSerializationTest, GPU_CoordDescent) {
TEST_F(MultiClassesSerializationTest, GPUCoordDescent) {
TestLearnerSerialization({{"booster", "gblinear"},
{"num_class", std::to_string(kClasses)},
{"seed", "0"},

View File

@@ -69,7 +69,7 @@ TEST(GradientBasedSampler, NoSampling) {
}
// In external mode, when not sampling, we concatenate the pages together.
TEST(GradientBasedSampler, NoSampling_ExternalMemory) {
TEST(GradientBasedSampler, NoSamplingExternalMemory) {
constexpr size_t kRows = 2048;
constexpr size_t kCols = 1;
constexpr float kSubsample = 1.0f;
@@ -121,7 +121,7 @@ TEST(GradientBasedSampler, UniformSampling) {
VerifySampling(kPageSize, kSubsample, kSamplingMethod, kFixedSizeSampling, kCheckSum);
}
TEST(GradientBasedSampler, UniformSampling_ExternalMemory) {
TEST(GradientBasedSampler, UniformSamplingExternalMemory) {
constexpr size_t kPageSize = 1024;
constexpr float kSubsample = 0.5;
constexpr int kSamplingMethod = TrainParam::kUniform;
@@ -137,7 +137,7 @@ TEST(GradientBasedSampler, GradientBasedSampling) {
VerifySampling(kPageSize, kSubsample, kSamplingMethod);
}
TEST(GradientBasedSampler, GradientBasedSampling_ExternalMemory) {
TEST(GradientBasedSampler, GradientBasedSamplingExternalMemory) {
constexpr size_t kPageSize = 1024;
constexpr float kSubsample = 0.8;
constexpr int kSamplingMethod = TrainParam::kGradientBased;

View File

@@ -45,13 +45,13 @@ tree::TrainParam GetParameter() {
}
void CompareBitField(LBitField64 d_field, std::set<uint32_t> positions) {
std::vector<LBitField64::value_type> h_field_storage(d_field.bits_.size());
thrust::copy(thrust::device_ptr<LBitField64::value_type>(d_field.bits_.data()),
std::vector<LBitField64::value_type> h_field_storage(d_field.Bits().size());
thrust::copy(thrust::device_ptr<LBitField64::value_type>(d_field.Bits().data()),
thrust::device_ptr<LBitField64::value_type>(
d_field.bits_.data() + d_field.bits_.size()),
d_field.Bits().data() + d_field.Bits().size()),
h_field_storage.data());
LBitField64 h_field;
h_field.bits_ = {h_field_storage.data(), h_field_storage.data() + h_field_storage.size()};
LBitField64 h_field{ {h_field_storage.data(),
h_field_storage.data() + h_field_storage.size()} };
for (size_t i = 0; i < h_field.Size(); ++i) {
if (positions.find(i) != positions.cend()) {
@@ -73,13 +73,14 @@ TEST(GPUFeatureInteractionConstraint, Init) {
ASSERT_EQ(constraints.Features(), kFeatures);
common::Span<LBitField64> s_nodes_constraints = constraints.GetNodeConstraints();
for (LBitField64 const& d_node : s_nodes_constraints) {
std::vector<LBitField64::value_type> h_node_storage(d_node.bits_.size());
thrust::copy(thrust::device_ptr<LBitField64::value_type>(d_node.bits_.data()),
thrust::device_ptr<LBitField64::value_type>(
d_node.bits_.data() + d_node.bits_.size()),
std::vector<LBitField64::value_type> h_node_storage(d_node.Bits().size());
thrust::copy(thrust::device_ptr<LBitField64::value_type const>(d_node.Bits().data()),
thrust::device_ptr<LBitField64::value_type const>(
d_node.Bits().data() + d_node.Bits().size()),
h_node_storage.data());
LBitField64 h_node;
h_node.bits_ = {h_node_storage.data(), h_node_storage.data() + h_node_storage.size()};
LBitField64 h_node {
{h_node_storage.data(), h_node_storage.data() + h_node_storage.size()}
};
// no feature is attached to node.
for (size_t i = 0; i < h_node.Size(); ++i) {
ASSERT_FALSE(h_node.Check(i));
@@ -133,7 +134,7 @@ TEST(GPUFeatureInteractionConstraint, Split) {
constraints.Split(0, /*feature_id=*/1, 1, 2);
for (size_t nid = 0; nid < 3; ++nid) {
d_node[nid] = constraints.GetNodeConstraints()[nid];
ASSERT_EQ(d_node[nid].bits_.size(), 1);
ASSERT_EQ(d_node[nid].Bits().size(), 1);
CompareBitField(d_node[nid], {1, 2});
}
}
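
Another recurring change replaces direct access to members such as cuts_ and bits_ with accessors (Cuts(), Bits()) and moves post-construction assignment into constructor initialization, as in the LBitField64 usage above. A minimal sketch of that encapsulation pattern with a hypothetical type, not an interface from the library:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    // Storage is private, read through Bits()/Size(), and supplied at
    // construction rather than assigned afterwards.
    class BitFieldView {
     public:
      BitFieldView(const std::uint64_t* data, std::size_t size)
          : data_{data}, size_{size} {}
      const std::uint64_t* Bits() const { return data_; }
      std::size_t Size() const { return size_; }

     private:
      const std::uint64_t* data_;
      std::size_t size_;
    };

    int main() {
      std::uint64_t storage[2] = {0, 0};
      BitFieldView field{storage, 2};  // was: field.bits_ = {...} before the change
      std::cout << field.Size() << "\n";
      return 0;
    }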

View File

@@ -193,7 +193,7 @@ TEST(GpuHist, EvaluateSplits) {
auto cmat = GetHostCutMatrix();
// Copy cut matrix to device.
page->cuts_ = cmat;
page->Cuts() = cmat;
maker.ba.Allocate(0, &(maker.monotone_constraints), kNCols);
dh::CopyVectorToDeviceSpan(maker.monotone_constraints,
param.monotone_constraints);
@@ -271,7 +271,7 @@ void TestHistogramIndexImpl() {
const auto &maker_ext = hist_maker_ext.maker;
std::vector<common::CompressedByteT> h_gidx_buffer_ext(maker_ext->page->gidx_buffer.HostVector());
ASSERT_EQ(maker->page->cuts_.TotalBins(), maker_ext->page->cuts_.TotalBins());
ASSERT_EQ(maker->page->Cuts().TotalBins(), maker_ext->page->Cuts().TotalBins());
ASSERT_EQ(maker->page->gidx_buffer.Size(), maker_ext->page->gidx_buffer.Size());
}
@@ -498,7 +498,7 @@ TEST(GpuHist, ExternalMemoryWithSampling) {
}
}
TEST(GpuHist, Config_IO) {
TEST(GpuHist, ConfigIO) {
GenericParameter generic_param(CreateEmptyGenericParam(0));
std::unique_ptr<TreeUpdater> updater {TreeUpdater::Create("grow_gpu_hist", &generic_param) };
updater->Configure(Args{});

View File

@@ -73,7 +73,7 @@ class QuantileHistMock : public QuantileHistMaker {
const size_t rid = batch.base_rowid + i;
ASSERT_LT(rid, num_row);
const size_t gmat_row_offset = gmat.row_ptr[rid];
ASSERT_LT(gmat_row_offset, gmat.index.size());
ASSERT_LT(gmat_row_offset, gmat.index.Size());
SparsePage::Inst inst = batch[i];
ASSERT_EQ(gmat.row_ptr[rid] + inst.size(), gmat.row_ptr[rid + 1]);
for (size_t j = 0; j < inst.size(); ++j) {
@@ -285,14 +285,14 @@ class QuantileHistMock : public QuantileHistMaker {
}
};
TEST(Updater, QuantileHist_InitData) {
TEST(QuantileHist, InitData) {
std::vector<std::pair<std::string, std::string>> cfg
{{"num_feature", std::to_string(QuantileHistMock::GetNumColumns())}};
QuantileHistMock maker(cfg);
maker.TestInitData();
}
TEST(Updater, QuantileHist_BuildHist) {
TEST(QuantileHist, BuildHist) {
// Don't enable feature grouping
std::vector<std::pair<std::string, std::string>> cfg
{{"num_feature", std::to_string(QuantileHistMock::GetNumColumns())},
@@ -301,7 +301,7 @@ TEST(Updater, QuantileHist_BuildHist) {
maker.TestBuildHist();
}
TEST(Updater, QuantileHist_EvalSplits) {
TEST(QuantileHist, EvalSplits) {
std::vector<std::pair<std::string, std::string>> cfg
{{"num_feature", std::to_string(QuantileHistMock::GetNumColumns())},
{"split_evaluator", "elastic_net"},