Fix clang-tidy warnings. (#4149)

* Upgrade gtest for clang-tidy.
* Use CMake to install GTest instead of mv.
* Don't require clang-tidy to return 0, due to pre-existing errors in thrust.
* Add a small test for tidy itself.

* Reformat.
This commit is contained in:
Jiaming Yuan
2019-03-13 02:25:51 +08:00
committed by GitHub
parent 259fb809e9
commit 7b9043cf71
41 changed files with 775 additions and 628 deletions

View File

@@ -172,7 +172,7 @@ struct BaseClass {
virtual void operator()() {}
};
struct DerivedClass : public BaseClass {
virtual void operator()() {}
void operator()() override {}
};
TEST(Span, FromOther) {

View File

@@ -15,6 +15,7 @@ namespace xgboost {
namespace common {
struct TestStatus {
private:
int *status_;
public:
@@ -28,32 +29,34 @@ struct TestStatus {
dh::safe_cuda(cudaFree(status_));
}
int get() {
int Get() {
int h_status;
dh::safe_cuda(cudaMemcpy(&h_status, status_,
sizeof(int), cudaMemcpyDeviceToHost));
return h_status;
}
int* data() {
int* Data() {
return status_;
}
};
__global__ void test_from_other_kernel(Span<float> span) {
__global__ void TestFromOtherKernel(Span<float> span) {
// don't get optimized out
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= span.size())
if (idx >= span.size()) {
return;
}
}
// Test converting different T
__global__ void test_from_other_kernel_const(Span<float const, 16> span) {
__global__ void TestFromOtherKernelConst(Span<float const, 16> span) {
// don't get optimized out
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= span.size())
if (idx >= span.size()) {
return;
}
}
/*!
@@ -68,42 +71,44 @@ TEST(GPUSpan, FromOther) {
// dynamic extent
{
Span<float> span (d_vec.data().get(), d_vec.size());
test_from_other_kernel<<<1, 16>>>(span);
TestFromOtherKernel<<<1, 16>>>(span);
}
{
Span<float> span (d_vec.data().get(), d_vec.size());
test_from_other_kernel_const<<<1, 16>>>(span);
TestFromOtherKernelConst<<<1, 16>>>(span);
}
// static extent
{
Span<float, 16> span(d_vec.data().get(), d_vec.data().get() + 16);
test_from_other_kernel<<<1, 16>>>(span);
TestFromOtherKernel<<<1, 16>>>(span);
}
{
Span<float, 16> span(d_vec.data().get(), d_vec.data().get() + 16);
test_from_other_kernel_const<<<1, 16>>>(span);
TestFromOtherKernelConst<<<1, 16>>>(span);
}
}
TEST(GPUSpan, Assignment) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(0, 16, TestAssignment{status.data()});
ASSERT_EQ(status.get(), 1);
dh::LaunchN(0, 16, TestAssignment{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpan, TestStatus) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(0, 16, TestTestStatus{status.data()});
ASSERT_EQ(status.get(), -1);
dh::LaunchN(0, 16, TestTestStatus{status.Data()});
ASSERT_EQ(status.Get(), -1);
}
template <typename T>
struct TestEqual {
private:
T *lhs_, *rhs_;
int *status_;
public:
TestEqual(T* _lhs, T* _rhs, int * _status) :
lhs_(_lhs), rhs_(_rhs), status_(_status) {}
@@ -140,10 +145,10 @@ TEST(GPUSpan, WithTrust) {
dh::LaunchN(0, 16, TestEqual<float>{
thrust::raw_pointer_cast(d_vec1.data()),
s.data(), status.data()});
ASSERT_EQ(status.get(), 1);
s.data(), status.Data()});
ASSERT_EQ(status.Get(), 1);
// FIXME: memory error!
// FIXME(trivialfis): memory error!
// bool res = thrust::equal(thrust::device,
// d_vec.begin(), d_vec.end(),
// s.begin());
@@ -153,23 +158,23 @@ TEST(GPUSpan, WithTrust) {
TEST(GPUSpan, BeginEnd) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(0, 16, TestBeginEnd{status.data()});
ASSERT_EQ(status.get(), 1);
dh::LaunchN(0, 16, TestBeginEnd{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpan, RBeginREnd) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(0, 16, TestRBeginREnd{status.data()});
ASSERT_EQ(status.get(), 1);
dh::LaunchN(0, 16, TestRBeginREnd{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
__global__ void test_modify_kernel(Span<float> span) {
__global__ void TestModifyKernel(Span<float> span) {
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= span.size())
if (idx >= span.size()) {
return;
}
span[idx] = span.size() - idx;
}
@@ -182,7 +187,7 @@ TEST(GPUSpan, Modify) {
Span<float> span (d_vec.data().get(), d_vec.size());
test_modify_kernel<<<1, 16>>>(span);
TestModifyKernel<<<1, 16>>>(span);
for (size_t i = 0; i < d_vec.size(); ++i) {
ASSERT_EQ(d_vec[i], d_vec.size() - i);
@@ -192,21 +197,23 @@ TEST(GPUSpan, Modify) {
TEST(GPUSpan, Observers) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(0, 16, TestObservers{status.data()});
ASSERT_EQ(status.get(), 1);
dh::LaunchN(0, 16, TestObservers{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpan, Compare) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(0, 16, TestIterCompare{status.data()});
ASSERT_EQ(status.get(), 1);
dh::LaunchN(0, 16, TestIterCompare{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
struct TestElementAccess {
private:
Span<float> span_;
XGBOOST_DEVICE TestElementAccess (Span<float> _span) : span_(_span) {}
public:
XGBOOST_DEVICE explicit TestElementAccess (Span<float> _span) : span_(_span) {}
XGBOOST_DEVICE float operator()(size_t _idx) {
float tmp = span_[_idx];
@@ -232,16 +239,16 @@ TEST(GPUSpan, ElementAccess) {
std::string output = testing::internal::GetCapturedStdout();
}
__global__ void test_first_dynamic_kernel(Span<float> _span) {
__global__ void TestFirstDynamicKernel(Span<float> _span) {
_span.first<-1>();
}
__global__ void test_first_static_kernel(Span<float> _span) {
__global__ void TestFirstStaticKernel(Span<float> _span) {
_span.first(-1);
}
__global__ void test_last_dynamic_kernel(Span<float> _span) {
__global__ void TestLastDynamicKernel(Span<float> _span) {
_span.last<-1>();
}
__global__ void test_last_static_kernel(Span<float> _span) {
__global__ void TestLastStaticKernel(Span<float> _span) {
_span.last(-1);
}
@@ -256,7 +263,7 @@ TEST(GPUSpan, FirstLast) {
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
Span<float> span (d_vec.data().get(), d_vec.size());
test_first_dynamic_kernel<<<1, 1>>>(span);
TestFirstDynamicKernel<<<1, 1>>>(span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_first_dy(), "");
@@ -270,7 +277,7 @@ TEST(GPUSpan, FirstLast) {
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
Span<float> span (d_vec.data().get(), d_vec.size());
test_first_static_kernel<<<1, 1>>>(span);
TestFirstStaticKernel<<<1, 1>>>(span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_first_static(), "");
@@ -284,7 +291,7 @@ TEST(GPUSpan, FirstLast) {
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
Span<float> span (d_vec.data().get(), d_vec.size());
test_last_dynamic_kernel<<<1, 1>>>(span);
TestLastDynamicKernel<<<1, 1>>>(span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_last_dy(), "");
@@ -298,7 +305,7 @@ TEST(GPUSpan, FirstLast) {
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
Span<float> span (d_vec.data().get(), d_vec.size());
test_last_static_kernel<<<1, 1>>>(span);
TestLastStaticKernel<<<1, 1>>>(span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_last_static(), "");
@@ -306,10 +313,10 @@ TEST(GPUSpan, FirstLast) {
}
__global__ void test_subspan_dynamic_kernel(Span<float> _span) {
__global__ void TestSubspanDynamicKernel(Span<float> _span) {
_span.subspan(16, 0);
}
__global__ void test_subspan_static_kernel(Span<float> _span) {
__global__ void TestSubspanStaticKernel(Span<float> _span) {
_span.subspan<16>();
}
TEST(GPUSpan, Subspan) {
@@ -321,7 +328,7 @@ TEST(GPUSpan, Subspan) {
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
Span<float> span (d_vec.data().get(), d_vec.size());
test_subspan_dynamic_kernel<<<1, 1>>>(span);
TestSubspanDynamicKernel<<<1, 1>>>(span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_subspan_dynamic(), "");
@@ -335,7 +342,7 @@ TEST(GPUSpan, Subspan) {
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
Span<float> span (d_vec.data().get(), d_vec.size());
test_subspan_static_kernel<<<1, 1>>>(span);
TestSubspanStaticKernel<<<1, 1>>>(span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_subspan_static(), "");
@@ -345,43 +352,43 @@ TEST(GPUSpan, Subspan) {
TEST(GPUSpanIter, Construct) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(0, 16, TestIterConstruct{status.data()});
ASSERT_EQ(status.get(), 1);
dh::LaunchN(0, 16, TestIterConstruct{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpanIter, Ref) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(0, 16, TestIterRef{status.data()});
ASSERT_EQ(status.get(), 1);
dh::LaunchN(0, 16, TestIterRef{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpanIter, Calculate) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(0, 16, TestIterCalculate{status.data()});
ASSERT_EQ(status.get(), 1);
dh::LaunchN(0, 16, TestIterCalculate{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpanIter, Compare) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(0, 16, TestIterCompare{status.data()});
ASSERT_EQ(status.get(), 1);
dh::LaunchN(0, 16, TestIterCompare{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpan, AsBytes) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(0, 16, TestAsBytes{status.data()});
ASSERT_EQ(status.get(), 1);
dh::LaunchN(0, 16, TestAsBytes{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpan, AsWritableBytes) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(0, 16, TestAsWritableBytes{status.data()});
ASSERT_EQ(status.get(), 1);
dh::LaunchN(0, 16, TestAsWritableBytes{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
} // namespace common

View File

@@ -13,7 +13,7 @@ TEST(SparsePage, PushCSC) {
offset = {0, 1, 4};
for (size_t i = 0; i < offset.back(); ++i) {
data.push_back(Entry(i, 0.1f));
data.emplace_back(Entry(i, 0.1f));
}
SparsePage other;
@@ -52,4 +52,4 @@ TEST(SparsePage, PushCSC) {
ASSERT_EQ(inst[i].index, indices_sol[i % 3]);
}
}
}
} // namespace xgboost

View File

@@ -27,7 +27,7 @@ TEST(SimpleDMatrix, RowAccess) {
xgboost::DMatrix * dmat = xgboost::DMatrix::Load(tmp_file, false, false);
// Loop over the batches and count the records
long row_count = 0;
int64_t row_count = 0;
for (auto &batch : dmat->GetRowBatches()) {
row_count += batch.Size();
}
@@ -54,7 +54,7 @@ TEST(SimpleDMatrix, ColAccessWithoutBatches) {
ASSERT_TRUE(dmat->SingleColBlock());
// Loop over the batches and assert the data is as expected
long num_col_batch = 0;
int64_t num_col_batch = 0;
for (const auto &batch : dmat->GetSortedColumnBatches()) {
num_col_batch += 1;
EXPECT_EQ(batch.Size(), dmat->Info().num_col_)

View File

@@ -1,6 +1,8 @@
// Copyright by Contributors
#include <xgboost/data.h>
#include <dmlc/filesystem.h>
#include <cinttypes>
#include "../../../src/data/sparse_page_dmatrix.h"
#include "../helpers.h"
@@ -33,7 +35,7 @@ TEST(SparsePageDMatrix, RowAccess) {
EXPECT_TRUE(FileExists(tmp_file + ".cache.row.page"));
// Loop over the batches and count the records
long row_count = 0;
int64_t row_count = 0;
for (auto &batch : dmat->GetRowBatches()) {
row_count += batch.Size();
}

View File

@@ -4,13 +4,14 @@
#include "./helpers.h"
#include "xgboost/c_api.h"
#include <random>
#include <cinttypes>
bool FileExists(const std::string& filename) {
struct stat st;
return stat(filename.c_str(), &st) == 0;
}
long GetFileSize(const std::string& filename) {
int64_t GetFileSize(const std::string& filename) {
struct stat st;
stat(filename.c_str(), &st);
return st.st_size;
@@ -30,13 +31,13 @@ void CreateBigTestData(const std::string& filename, size_t n_entries) {
}
}
void _CheckObjFunction(xgboost::ObjFunction * obj,
std::vector<xgboost::bst_float> preds,
std::vector<xgboost::bst_float> labels,
std::vector<xgboost::bst_float> weights,
xgboost::MetaInfo info,
std::vector<xgboost::bst_float> out_grad,
std::vector<xgboost::bst_float> out_hess) {
void CheckObjFunctionImpl(xgboost::ObjFunction * obj,
std::vector<xgboost::bst_float> preds,
std::vector<xgboost::bst_float> labels,
std::vector<xgboost::bst_float> weights,
xgboost::MetaInfo info,
std::vector<xgboost::bst_float> out_grad,
std::vector<xgboost::bst_float> out_hess) {
xgboost::HostDeviceVector<xgboost::bst_float> in_preds(preds);
xgboost::HostDeviceVector<xgboost::GradientPair> out_gpair;
obj->GetGradient(in_preds, info, 1, &out_gpair);
@@ -64,7 +65,7 @@ void CheckObjFunction(xgboost::ObjFunction * obj,
info.labels_.HostVector() = labels;
info.weights_.HostVector() = weights;
_CheckObjFunction(obj, preds, labels, weights, info, out_grad, out_hess);
CheckObjFunctionImpl(obj, preds, labels, weights, info, out_grad, out_hess);
}
void CheckRankingObjFunction(xgboost::ObjFunction * obj,
@@ -80,7 +81,7 @@ void CheckRankingObjFunction(xgboost::ObjFunction * obj,
info.weights_.HostVector() = weights;
info.group_ptr_ = groups;
_CheckObjFunction(obj, preds, labels, weights, info, out_grad, out_hess);
CheckObjFunctionImpl(obj, preds, labels, weights, info, out_grad, out_hess);
}

View File

@@ -4,11 +4,16 @@
#include "../helpers.h"
TEST(Metric, UnknownMetric) {
xgboost::Metric * metric;
xgboost::Metric * metric = nullptr;
EXPECT_ANY_THROW(metric = xgboost::Metric::Create("unknown_name"));
EXPECT_NO_THROW(metric = xgboost::Metric::Create("rmse"));
delete metric;
if (metric) {
delete metric;
}
metric = nullptr;
EXPECT_ANY_THROW(metric = xgboost::Metric::Create("unknown_name@1"));
EXPECT_NO_THROW(metric = xgboost::Metric::Create("error@0.5f"));
delete metric;
if (metric) {
delete metric;
}
}

View File

@@ -4,8 +4,10 @@
#include "../helpers.h"
TEST(Objective, UnknownFunction) {
xgboost::ObjFunction* obj;
xgboost::ObjFunction* obj = nullptr;
EXPECT_ANY_THROW(obj = xgboost::ObjFunction::Create("unknown_name"));
EXPECT_NO_THROW(obj = xgboost::ObjFunction::Create("reg:linear"));
delete obj;
if (obj) {
delete obj;
}
}

View File

@@ -85,7 +85,7 @@ TEST(Objective, DeclareUnifiedTest(LogisticRawGPair)) {
TEST(Objective, DeclareUnifiedTest(PoissonRegressionGPair)) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("count:poisson");
std::vector<std::pair<std::string, std::string> > args;
args.push_back(std::make_pair("max_delta_step", "0.1f"));
args.emplace_back(std::make_pair("max_delta_step", "0.1f"));
obj->Configure(args);
CheckObjFunction(obj,
{ 0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},
@@ -176,7 +176,7 @@ TEST(Objective, DeclareUnifiedTest(GammaRegressionBasic)) {
TEST(Objective, DeclareUnifiedTest(TweedieRegressionGPair)) {
xgboost::ObjFunction * obj = xgboost::ObjFunction::Create("reg:tweedie");
std::vector<std::pair<std::string, std::string> > args;
args.push_back(std::make_pair("tweedie_variance_power", "1.1f"));
args.emplace_back(std::make_pair("tweedie_variance_power", "1.1f"));
obj->Configure(args);
CheckObjFunction(obj,
{ 0, 0.1f, 0.9f, 1, 0, 0.1f, 0.9f, 1},

View File

@@ -41,21 +41,20 @@ TEST(cpu_predictor, Test) {
// Test predict leaf
std::vector<float> leaf_out_predictions;
cpu_predictor->PredictLeaf((*dmat).get(), &leaf_out_predictions, model);
for (int i = 0; i < leaf_out_predictions.size(); i++) {
ASSERT_EQ(leaf_out_predictions[i], 0);
for (auto v : leaf_out_predictions) {
ASSERT_EQ(v, 0);
}
// Test predict contribution
std::vector<float> out_contribution;
cpu_predictor->PredictContribution((*dmat).get(), &out_contribution, model);
for (int i = 0; i < out_contribution.size(); i++) {
ASSERT_EQ(out_contribution[i], 1.5);
for (auto const& contri : out_contribution) {
ASSERT_EQ(contri, 1.5);
}
// Test predict contribution (approximate method)
cpu_predictor->PredictContribution((*dmat).get(), &out_contribution, model, true);
for (int i = 0; i < out_contribution.size(); i++) {
ASSERT_EQ(out_contribution[i], 1.5);
for (auto const& contri : out_contribution) {
ASSERT_EQ(contri, 1.5);
}
delete dmat;

View File

@@ -8,7 +8,7 @@
namespace xgboost {
TEST(Learner, Basic) {
typedef std::pair<std::string, std::string> Arg;
using Arg = std::pair<std::string, std::string>;
auto args = {Arg("tree_method", "exact")};
auto mat_ptr = CreateDMatrix(10, 10, 0);
std::vector<std::shared_ptr<xgboost::DMatrix>> mat = {*mat_ptr};

View File

@@ -20,13 +20,13 @@ TEST(GPUExact, Update) {
auto* p_gpuexact_maker = TreeUpdater::Create("grow_gpu");
p_gpuexact_maker->Init(args);
size_t constexpr n_rows = 4;
size_t constexpr n_cols = 8;
bst_float constexpr sparsity = 0.0f;
size_t constexpr kNRows = 4;
size_t constexpr kNCols = 8;
bst_float constexpr kSparsity = 0.0f;
auto dmat = CreateDMatrix(n_rows, n_cols, sparsity, 3);
std::vector<GradientPair> h_gpair(n_rows);
for (size_t i = 0; i < n_rows; ++i) {
auto dmat = CreateDMatrix(kNRows, kNCols, kSparsity, 3);
std::vector<GradientPair> h_gpair(kNRows);
for (size_t i = 0; i < kNRows; ++i) {
h_gpair[i] = GradientPair(i % 2, 1);
}
HostDeviceVector<GradientPair> gpair (h_gpair);

View File

@@ -46,20 +46,20 @@ void BuildGidx(DeviceShard<GradientSumT>* shard, int n_rows, int n_cols,
}
TEST(GpuHist, BuildGidxDense) {
int const n_rows = 16, n_cols = 8;
int constexpr kNRows = 16, kNCols = 8;
TrainParam param;
param.max_depth = 1;
param.n_gpus = 1;
param.max_leaves = 0;
DeviceShard<GradientPairPrecise> shard(0, 0, n_rows, param);
BuildGidx(&shard, n_rows, n_cols);
DeviceShard<GradientPairPrecise> shard(0, 0, kNRows, param);
BuildGidx(&shard, kNRows, kNCols);
std::vector<common::CompressedByteT> h_gidx_buffer;
h_gidx_buffer = shard.gidx_buffer.AsVector();
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_EQ(shard.row_stride, n_cols);
ASSERT_EQ(shard.row_stride, kNCols);
std::vector<uint32_t> solution = {
0, 3, 8, 9, 14, 17, 20, 21,
@@ -79,20 +79,20 @@ TEST(GpuHist, BuildGidxDense) {
2, 4, 8, 10, 14, 15, 19, 22,
1, 4, 7, 10, 14, 16, 19, 21,
};
for (size_t i = 0; i < n_rows * n_cols; ++i) {
for (size_t i = 0; i < kNRows * kNCols; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
TEST(GpuHist, BuildGidxSparse) {
int const n_rows = 16, n_cols = 8;
int constexpr kNRows = 16, kNCols = 8;
TrainParam param;
param.max_depth = 1;
param.n_gpus = 1;
param.max_leaves = 0;
DeviceShard<GradientPairPrecise> shard(0, 0, n_rows, param);
BuildGidx(&shard, n_rows, n_cols, 0.9f);
DeviceShard<GradientPairPrecise> shard(0, 0, kNRows, param);
BuildGidx(&shard, kNRows, kNCols, 0.9f);
std::vector<common::CompressedByteT> h_gidx_buffer;
h_gidx_buffer = shard.gidx_buffer.AsVector();
@@ -106,7 +106,7 @@ TEST(GpuHist, BuildGidxSparse) {
24, 24, 24, 24, 24, 5, 24, 24, 0, 16, 24, 15, 24, 24, 24, 24,
24, 7, 14, 16, 4, 24, 24, 24, 24, 24, 9, 24, 24, 1, 24, 24
};
for (size_t i = 0; i < n_rows * shard.row_stride; ++i) {
for (size_t i = 0; i < kNRows * shard.row_stride; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
@@ -128,27 +128,27 @@ std::vector<GradientPairPrecise> GetHostHistGpair() {
template <typename GradientSumT>
void TestBuildHist(GPUHistBuilderBase<GradientSumT>& builder) {
int const n_rows = 16, n_cols = 8;
int const kNRows = 16, kNCols = 8;
TrainParam param;
param.max_depth = 6;
param.n_gpus = 1;
param.max_leaves = 0;
DeviceShard<GradientSumT> shard(0, 0, n_rows, param);
DeviceShard<GradientSumT> shard(0, 0, kNRows, param);
BuildGidx(&shard, n_rows, n_cols);
BuildGidx(&shard, kNRows, kNCols);
xgboost::SimpleLCG gen;
xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
std::vector<GradientPair> h_gpair(n_rows);
for (size_t i = 0; i < h_gpair.size(); ++i) {
std::vector<GradientPair> h_gpair(kNRows);
for (auto &gpair : h_gpair) {
bst_float grad = dist(&gen);
bst_float hess = dist(&gen);
h_gpair[i] = GradientPair(grad, hess);
gpair = GradientPair(grad, hess);
}
thrust::device_vector<GradientPair> gpair (n_rows);
thrust::device_vector<GradientPair> gpair (kNRows);
gpair = h_gpair;
int num_symbols = shard.n_bins + 1;
@@ -164,7 +164,7 @@ void TestBuildHist(GPUHistBuilderBase<GradientSumT>& builder) {
num_symbols);
shard.ridx_segments.resize(1);
shard.ridx_segments[0] = Segment(0, n_rows);
shard.ridx_segments[0] = Segment(0, kNRows);
shard.hist.AllocateHistogram(0);
shard.gpair.copy(gpair.begin(), gpair.end());
thrust::sequence(shard.ridx.CurrentDVec().tbegin(),
@@ -175,11 +175,11 @@ void TestBuildHist(GPUHistBuilderBase<GradientSumT>& builder) {
auto node_histogram = d_hist.GetNodeHistogram(0);
// d_hist.data stored in float, not gradient pair
thrust::host_vector<GradientSumT> h_result (d_hist.data.size()/2);
thrust::host_vector<GradientSumT> h_result (d_hist.Data().size() / 2);
size_t data_size =
sizeof(GradientSumT) /
(sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT));
data_size *= d_hist.data.size();
data_size *= d_hist.Data().size();
dh::safe_cuda(cudaMemcpy(h_result.data(), node_histogram.data(), data_size,
cudaMemcpyDeviceToHost));
@@ -224,8 +224,8 @@ common::HistCutMatrix GetHostCutMatrix () {
// TODO(trivialfis): This test is over simplified.
TEST(GpuHist, EvaluateSplits) {
constexpr int n_rows = 16;
constexpr int n_cols = 8;
constexpr int kNRows = 16;
constexpr int kNCols = 8;
TrainParam param;
param.max_depth = 1;
@@ -240,14 +240,15 @@ TEST(GpuHist, EvaluateSplits) {
param.reg_lambda = 0;
param.max_delta_step = 0.0;
for (size_t i = 0; i < n_cols; ++i) {
for (size_t i = 0; i < kNCols; ++i) {
param.monotone_constraints.emplace_back(0);
}
int max_bins = 4;
// Initialize DeviceShard
std::unique_ptr<DeviceShard<GradientPairPrecise>> shard {new DeviceShard<GradientPairPrecise>(0, 0, n_rows, param)};
std::unique_ptr<DeviceShard<GradientPairPrecise>> shard {
new DeviceShard<GradientPairPrecise>(0, 0, kNRows, param)};
// Initialize DeviceShard::node_sum_gradients
shard->node_sum_gradients = {{6.4f, 12.8f}};
@@ -257,17 +258,17 @@ TEST(GpuHist, EvaluateSplits) {
// Copy cut matrix to device.
DeviceShard<GradientPairPrecise>::DeviceHistCutMatrix cut;
shard->ba.Allocate(0,
&(shard->cut_.feature_segments), cmat.row_ptr.size(),
&(shard->cut_.min_fvalue), cmat.min_val.size(),
&(shard->cut_.gidx_fvalue_map), 24,
&(shard->monotone_constraints), n_cols);
shard->cut_.feature_segments.copy(cmat.row_ptr.begin(), cmat.row_ptr.end());
shard->cut_.gidx_fvalue_map.copy(cmat.cut.begin(), cmat.cut.end());
&(shard->d_cut.feature_segments), cmat.row_ptr.size(),
&(shard->d_cut.min_fvalue), cmat.min_val.size(),
&(shard->d_cut.gidx_fvalue_map), 24,
&(shard->monotone_constraints), kNCols);
shard->d_cut.feature_segments.copy(cmat.row_ptr.begin(), cmat.row_ptr.end());
shard->d_cut.gidx_fvalue_map.copy(cmat.cut.begin(), cmat.cut.end());
shard->monotone_constraints.copy(param.monotone_constraints.begin(),
param.monotone_constraints.end());
// Initialize DeviceShard::hist
shard->hist.Init(0, (max_bins - 1) * n_cols);
shard->hist.Init(0, (max_bins - 1) * kNCols);
shard->hist.AllocateHistogram(0);
// Each row of hist_gpair represents gpairs for one feature.
// Each entry represents a bin.
@@ -278,16 +279,16 @@ TEST(GpuHist, EvaluateSplits) {
hist.push_back(pair.GetHess());
}
ASSERT_EQ(shard->hist.data.size(), hist.size());
ASSERT_EQ(shard->hist.Data().size(), hist.size());
thrust::copy(hist.begin(), hist.end(),
shard->hist.data.begin());
shard->hist.Data().begin());
// Initialize GPUHistMaker
GPUHistMakerSpecialised<GradientPairPrecise> hist_maker =
GPUHistMakerSpecialised<GradientPairPrecise>();
hist_maker.param_ = param;
hist_maker.shards_.push_back(std::move(shard));
hist_maker.column_sampler_.Init(n_cols,
hist_maker.column_sampler_.Init(kNCols,
param.colsample_bynode,
param.colsample_bylevel,
param.colsample_bytree,
@@ -295,8 +296,8 @@ TEST(GpuHist, EvaluateSplits) {
RegTree tree;
MetaInfo info;
info.num_row_ = n_rows;
info.num_col_ = n_cols;
info.num_row_ = kNRows;
info.num_col_ = kNCols;
hist_maker.info_ = &info;
hist_maker.node_value_constraints_.resize(1);
@@ -313,30 +314,30 @@ TEST(GpuHist, EvaluateSplits) {
TEST(GpuHist, ApplySplit) {
GPUHistMakerSpecialised<GradientPairPrecise> hist_maker =
GPUHistMakerSpecialised<GradientPairPrecise>();
int constexpr nid = 0;
int constexpr n_rows = 16;
int constexpr n_cols = 8;
int constexpr kNId = 0;
int constexpr kNRows = 16;
int constexpr kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args = {};
param.InitAllowUnknown(args);
// Initialize shard
for (size_t i = 0; i < n_cols; ++i) {
for (size_t i = 0; i < kNCols; ++i) {
param.monotone_constraints.emplace_back(0);
}
hist_maker.shards_.resize(1);
hist_maker.shards_[0].reset(new DeviceShard<GradientPairPrecise>(0, 0, n_rows, param));
hist_maker.shards_[0].reset(new DeviceShard<GradientPairPrecise>(0, 0, kNRows, param));
auto& shard = hist_maker.shards_.at(0);
shard->ridx_segments.resize(3); // 3 nodes.
shard->node_sum_gradients.resize(3);
shard->ridx_segments[0] = Segment(0, n_rows);
shard->ba.Allocate(0, &(shard->ridx), n_rows,
&(shard->position), n_rows);
shard->row_stride = n_cols;
shard->ridx_segments[0] = Segment(0, kNRows);
shard->ba.Allocate(0, &(shard->ridx), kNRows,
&(shard->position), kNRows);
shard->row_stride = kNCols;
thrust::sequence(shard->ridx.CurrentDVec().tbegin(),
shard->ridx.CurrentDVec().tend());
// Initialize GPUHistMaker
@@ -349,31 +350,30 @@ TEST(GpuHist, ApplySplit) {
GradientPair(8.2, 2.8), GradientPair(6.3, 3.6),
GPUTrainingParam(param));
GPUHistMakerSpecialised<GradientPairPrecise>::ExpandEntry candidate_entry {0, 0, candidate, 0};
candidate_entry.nid = nid;
candidate_entry.nid = kNId;
auto const& nodes = tree.GetNodes();
size_t n_nodes = nodes.size();
// Used to get bin_id in update position.
common::HistCutMatrix cmat = GetHostCutMatrix();
hist_maker.hmat_ = cmat;
MetaInfo info;
info.num_row_ = n_rows;
info.num_col_ = n_cols;
info.num_nonzero_ = n_rows * n_cols; // Dense
info.num_row_ = kNRows;
info.num_col_ = kNCols;
info.num_nonzero_ = kNRows * kNCols; // Dense
// Initialize gidx
int n_bins = 24;
int row_stride = n_cols;
int row_stride = kNCols;
int num_symbols = n_bins + 1;
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(
row_stride * n_rows, num_symbols);
row_stride * kNRows, num_symbols);
shard->ba.Allocate(0, &(shard->gidx_buffer), compressed_size_bytes);
common::CompressedBufferWriter wr(num_symbols);
std::vector<int> h_gidx (n_rows * row_stride);
std::vector<int> h_gidx (kNRows * row_stride);
std::iota(h_gidx.begin(), h_gidx.end(), 0);
std::vector<common::CompressedByteT> h_gidx_compressed (compressed_size_bytes);
@@ -387,10 +387,10 @@ TEST(GpuHist, ApplySplit) {
hist_maker.ApplySplit(candidate_entry, &tree);
hist_maker.UpdatePosition(candidate_entry, &tree);
ASSERT_FALSE(tree[nid].IsLeaf());
ASSERT_FALSE(tree[kNId].IsLeaf());
int left_nidx = tree[nid].LeftChild();
int right_nidx = tree[nid].RightChild();
int left_nidx = tree[kNId].LeftChild();
int right_nidx = tree[kNId].RightChild();
ASSERT_EQ(shard->ridx_segments[left_nidx].begin, 0);
ASSERT_EQ(shard->ridx_segments[left_nidx].end, 6);

View File

@@ -13,14 +13,14 @@ namespace xgboost {
namespace tree {
TEST(Updater, Prune) {
int constexpr n_rows = 32, n_cols = 16;
int constexpr kNRows = 32, kNCols = 16;
std::vector<std::pair<std::string, std::string>> cfg;
cfg.push_back(std::pair<std::string, std::string>(
"num_feature", std::to_string(n_cols)));
cfg.push_back(std::pair<std::string, std::string>(
cfg.emplace_back(std::pair<std::string, std::string>(
"num_feature", std::to_string(kNCols)));
cfg.emplace_back(std::pair<std::string, std::string>(
"min_split_loss", "10"));
cfg.push_back(std::pair<std::string, std::string>(
cfg.emplace_back(std::pair<std::string, std::string>(
"silent", "1"));
// These data are just place holders.

View File

@@ -133,12 +133,12 @@ class QuantileHistMock : public QuantileHistMaker {
std::vector<GradientPair> row_gpairs =
{ {1.23f, 0.24f}, {0.24f, 0.25f}, {0.26f, 0.27f}, {2.27f, 0.28f},
{0.27f, 0.29f}, {0.37f, 0.39f}, {-0.47f, 0.49f}, {0.57f, 0.59f} };
size_t constexpr max_bins = 4;
auto dmat = CreateDMatrix(n_rows, n_cols, 0, 3);
size_t constexpr kMaxBins = 4;
auto dmat = CreateDMatrix(kNRows, kNCols, 0, 3);
// dense, no missing values
common::GHistIndexMatrix gmat;
gmat.Init((*dmat).get(), max_bins);
gmat.Init((*dmat).get(), kMaxBins);
RealImpl::InitData(gmat, row_gpairs, *(*dmat), tree);
hist_.AddHistRow(0);
@@ -167,7 +167,8 @@ class QuantileHistMock : public QuantileHistMaker {
// 2) no regularization, i.e. set min_child_weight, reg_lambda, reg_alpha,
// and max_delta_step to 0.
bst_float best_split_gain = 0.0f;
size_t best_split_threshold, best_split_feature;
size_t best_split_threshold = std::numeric_limits<size_t>::max();
size_t best_split_feature = std::numeric_limits<size_t>::max();
// Enumerate all features
for (size_t fid = 0; fid < num_feature; ++fid) {
const size_t bin_id_min = gmat.cut.row_ptr[fid];
@@ -213,56 +214,56 @@ class QuantileHistMock : public QuantileHistMaker {
}
};
int static constexpr n_rows = 8, n_cols = 16;
std::shared_ptr<xgboost::DMatrix> *dmat;
const std::vector<std::pair<std::string, std::string> > cfg;
int static constexpr kNRows = 8, kNCols = 16;
std::shared_ptr<xgboost::DMatrix> *dmat_;
const std::vector<std::pair<std::string, std::string> > cfg_;
std::shared_ptr<BuilderMock> builder_;
public:
explicit QuantileHistMock(
const std::vector<std::pair<std::string, std::string> >& args) :
cfg{args} {
cfg_{args} {
QuantileHistMaker::Init(args);
builder_.reset(
new BuilderMock(
param_,
std::move(pruner_),
std::unique_ptr<SplitEvaluator>(spliteval_->GetHostClone())));
dmat = CreateDMatrix(n_rows, n_cols, 0.8, 3);
dmat_ = CreateDMatrix(kNRows, kNCols, 0.8, 3);
}
~QuantileHistMock() { delete dmat; }
~QuantileHistMock() override { delete dmat_; }
static size_t GetNumColumns() { return n_cols; }
static size_t GetNumColumns() { return kNCols; }
void TestInitData() {
size_t constexpr max_bins = 4;
size_t constexpr kMaxBins = 4;
common::GHistIndexMatrix gmat;
gmat.Init((*dmat).get(), max_bins);
gmat.Init((*dmat_).get(), kMaxBins);
RegTree tree = RegTree();
tree.param.InitAllowUnknown(cfg);
tree.param.InitAllowUnknown(cfg_);
std::vector<GradientPair> gpair =
{ {0.23f, 0.24f}, {0.23f, 0.24f}, {0.23f, 0.24f}, {0.23f, 0.24f},
{0.27f, 0.29f}, {0.27f, 0.29f}, {0.27f, 0.29f}, {0.27f, 0.29f} };
builder_->TestInitData(gmat, gpair, dmat->get(), tree);
builder_->TestInitData(gmat, gpair, dmat_->get(), tree);
}
void TestBuildHist() {
RegTree tree = RegTree();
tree.param.InitAllowUnknown(cfg);
tree.param.InitAllowUnknown(cfg_);
size_t constexpr max_bins = 4;
size_t constexpr kMaxBins = 4;
common::GHistIndexMatrix gmat;
gmat.Init((*dmat).get(), max_bins);
gmat.Init((*dmat_).get(), kMaxBins);
builder_->TestBuildHist(0, gmat, *(*dmat).get(), tree);
builder_->TestBuildHist(0, gmat, *(*dmat_).get(), tree);
}
void TestEvaluateSplit() {
RegTree tree = RegTree();
tree.param.InitAllowUnknown(cfg);
tree.param.InitAllowUnknown(cfg_);
builder_->TestEvaluateSplit(gmatb_, tree);
}

View File

@@ -13,15 +13,15 @@ namespace xgboost {
namespace tree {
TEST(Updater, Refresh) {
int constexpr n_rows = 8, n_cols = 16;
int constexpr kNRows = 8, kNCols = 16;
HostDeviceVector<GradientPair> gpair =
{ {0.23f, 0.24f}, {0.23f, 0.24f}, {0.23f, 0.24f}, {0.23f, 0.24f},
{0.27f, 0.29f}, {0.27f, 0.29f}, {0.27f, 0.29f}, {0.27f, 0.29f} };
auto dmat = CreateDMatrix(n_rows, n_cols, 0.4, 3);
auto dmat = CreateDMatrix(kNRows, kNCols, 0.4, 3);
std::vector<std::pair<std::string, std::string>> cfg {
{"reg_alpha", "0.0"},
{"num_feature", std::to_string(n_cols)},
{"num_feature", std::to_string(kNCols)},
{"reg_lambda", "1"}};
RegTree tree = RegTree();