Use quantised gradients in gpu_hist histograms (#8246)

This commit is contained in:
Rory Mitchell
2022-09-26 17:35:35 +02:00
committed by GitHub
parent 4056974e37
commit 8f77677193
14 changed files with 394 additions and 336 deletions

View File

@@ -7,6 +7,7 @@
#include "../../helpers.h"
#include "../../histogram_helpers.h"
#include "../test_evaluate_splits.h" // TestPartitionBasedSplit
#include <thrust/host_vector.h>
namespace xgboost {
namespace tree {
@@ -21,13 +22,29 @@ auto ZeroParam() {
} // anonymous namespace
// Build a quantizer from a single large gradient pair; tests using this
// helper must keep their gradient sums at or below 1000.
inline GradientQuantizer DummyRoundingFactor() {
  thrust::device_vector<GradientPair> gpair(1, GradientPair{1000.f, 1000.f});
  return GradientQuantizer(dh::ToSpan(gpair));
}
// Quantise a host histogram into the fixed-point (int64) representation used
// by the GPU evaluator, applying the shared dummy rounding factor.
//
// \param x Host histogram in floating-point precision (taken by const
//          reference; the old signature copied the vector on every call).
// \return  Device vector holding the fixed-point equivalent.
thrust::device_vector<GradientPairInt64> ConvertToInteger(const std::vector<GradientPairPrecise>& x) {
  auto r = DummyRoundingFactor();
  std::vector<GradientPairInt64> y(x.size());
  // size_t index avoids the signed/unsigned comparison of the old `int` loop.
  for (size_t i = 0; i < x.size(); i++) {
    y[i] = r.ToFixedPoint(GradientPair(x[i]));
  }
  return y;
}
TEST_F(TestCategoricalSplitWithMissing, GPUHistEvaluator) {
thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{0};
GPUTrainingParam param{param_};
cuts_.cut_ptrs_.SetDevice(0);
cuts_.cut_values_.SetDevice(0);
cuts_.min_vals_.SetDevice(0);
thrust::device_vector<GradientPairPrecise> feature_histogram{feature_histogram_};
thrust::device_vector<GradientPairInt64> feature_histogram{ConvertToInteger(feature_histogram_)};
dh::device_vector<FeatureType> feature_types(feature_set.size(), FeatureType::kCategorical);
auto d_feature_types = dh::ToSpan(feature_types);
@@ -36,6 +53,7 @@ TEST_F(TestCategoricalSplitWithMissing, GPUHistEvaluator) {
dh::ToSpan(feature_histogram)};
EvaluateSplitSharedInputs shared_inputs{
param,
DummyRoundingFactor(),
d_feature_types,
cuts_.cut_ptrs_.ConstDeviceSpan(),
cuts_.cut_values_.ConstDeviceSpan(),
@@ -76,6 +94,7 @@ TEST(GpuHist, PartitionBasic) {
EvaluateSplitSharedInputs shared_inputs{
param,
DummyRoundingFactor(),
d_feature_types,
cuts.cut_ptrs_.ConstDeviceSpan(),
cuts.cut_values_.ConstDeviceSpan(),
@@ -89,8 +108,7 @@ TEST(GpuHist, PartitionBasic) {
// -1.0s go right
// -3.0s go left
GradientPairPrecise parent_sum(-5.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-1.0, 1.0}, {-3.0, 1.0}};
auto feature_histogram = ConvertToInteger({{-1.0, 1.0}, {-1.0, 1.0}, {-3.0, 1.0}});
EvaluateSplitInputs input{0, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
@@ -105,8 +123,7 @@ TEST(GpuHist, PartitionBasic) {
// -1.0s go right
// -3.0s go left
GradientPairPrecise parent_sum(-7.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-3.0, 1.0}, {-3.0, 1.0}};
auto feature_histogram = ConvertToInteger({{-1.0, 1.0}, {-3.0, 1.0}, {-3.0, 1.0}});
EvaluateSplitInputs input{1, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
@@ -119,8 +136,7 @@ TEST(GpuHist, PartitionBasic) {
{
// All -1.0, gain from splitting should be 0.0
GradientPairPrecise parent_sum(-3.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}};
auto feature_histogram = ConvertToInteger({{-1.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}});
EvaluateSplitInputs input{2, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
@@ -133,8 +149,7 @@ TEST(GpuHist, PartitionBasic) {
// Forward, first 2 categories are selected, while the last one go to left along with missing value
{
GradientPairPrecise parent_sum(0.0, 6.0);
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}};
auto feature_histogram = ConvertToInteger({{-1.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}});
EvaluateSplitInputs input{3, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
@@ -148,8 +163,7 @@ TEST(GpuHist, PartitionBasic) {
// -1.0s go right
// -3.0s go left
GradientPairPrecise parent_sum(-5.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-3.0, 1.0}, {-1.0, 1.0}};
auto feature_histogram = ConvertToInteger({{-1.0, 1.0}, {-3.0, 1.0}, {-1.0, 1.0}});
EvaluateSplitInputs input{4, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
@@ -163,8 +177,7 @@ TEST(GpuHist, PartitionBasic) {
// -1.0s go right
// -3.0s go left
GradientPairPrecise parent_sum(-5.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-3.0, 1.0}, {-1.0, 1.0}, {-3.0, 1.0}};
auto feature_histogram = ConvertToInteger({{-3.0, 1.0}, {-1.0, 1.0}, {-3.0, 1.0}});
EvaluateSplitInputs input{5, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
@@ -198,6 +211,7 @@ TEST(GpuHist, PartitionTwoFeatures) {
EvaluateSplitSharedInputs shared_inputs{
param,
DummyRoundingFactor(),
d_feature_types,
cuts.cut_ptrs_.ConstDeviceSpan(),
cuts.cut_values_.ConstDeviceSpan(),
@@ -209,8 +223,7 @@ TEST(GpuHist, PartitionTwoFeatures) {
{
GradientPairPrecise parent_sum(-6.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram = std::vector<GradientPairPrecise>{
{-2.0, 1.0}, {-2.0, 1.0}, {-2.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}, {-4.0, 1.0}};
auto feature_histogram = ConvertToInteger({ {-2.0, 1.0}, {-2.0, 1.0}, {-2.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}, {-4.0, 1.0}});
EvaluateSplitInputs input{0, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
@@ -223,8 +236,7 @@ TEST(GpuHist, PartitionTwoFeatures) {
{
GradientPairPrecise parent_sum(-6.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram = std::vector<GradientPairPrecise>{
{-2.0, 1.0}, {-2.0, 1.0}, {-2.0, 1.0}, {-1.0, 1.0}, {-2.5, 1.0}, {-2.5, 1.0}};
auto feature_histogram = ConvertToInteger({ {-2.0, 1.0}, {-2.0, 1.0}, {-2.0, 1.0}, {-1.0, 1.0}, {-2.5, 1.0}, {-2.5, 1.0}});
EvaluateSplitInputs input{1, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
@@ -259,6 +271,7 @@ TEST(GpuHist, PartitionTwoNodes) {
EvaluateSplitSharedInputs shared_inputs{
param,
DummyRoundingFactor(),
d_feature_types,
cuts.cut_ptrs_.ConstDeviceSpan(),
cuts.cut_values_.ConstDeviceSpan(),
@@ -270,14 +283,12 @@ TEST(GpuHist, PartitionTwoNodes) {
{
GradientPairPrecise parent_sum(-6.0, 3.0);
thrust::device_vector<GradientPairPrecise> feature_histogram_a =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-2.5, 1.0}, {-2.5, 1.0},
{-1.0, 1.0}, {-1.0, 1.0}, {-4.0, 1.0}};
auto feature_histogram_a = ConvertToInteger({{-1.0, 1.0}, {-2.5, 1.0}, {-2.5, 1.0},
{-1.0, 1.0}, {-1.0, 1.0}, {-4.0, 1.0}});
thrust::device_vector<EvaluateSplitInputs> inputs(2);
inputs[0] = EvaluateSplitInputs{0, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram_a)};
thrust::device_vector<GradientPairPrecise> feature_histogram_b =
std::vector<GradientPairPrecise>{{-1.0, 1.0}, {-1.0, 1.0}, {-4.0, 1.0}};
auto feature_histogram_b = ConvertToInteger({{-1.0, 1.0}, {-1.0, 1.0}, {-4.0, 1.0}});
inputs[1] = EvaluateSplitInputs{1, 0, parent_sum, dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram_b)};
thrust::device_vector<GPUExpandEntry> results(2);
@@ -300,9 +311,7 @@ void TestEvaluateSingleSplit(bool is_categorical) {
thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{0, 1};
// Setup gradients so that second feature gets higher gain
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{
{-0.5, 0.5}, {0.5, 0.5}, {-1.0, 0.5}, {1.0, 0.5}};
auto feature_histogram = ConvertToInteger({ {-0.5, 0.5}, {0.5, 0.5}, {-1.0, 0.5}, {1.0, 0.5}});
dh::device_vector<FeatureType> feature_types(feature_set.size(),
FeatureType::kCategorical);
@@ -318,6 +327,7 @@ void TestEvaluateSingleSplit(bool is_categorical) {
dh::ToSpan(feature_histogram)};
EvaluateSplitSharedInputs shared_inputs{
param,
DummyRoundingFactor(),
d_feature_types,
cuts.cut_ptrs_.ConstDeviceSpan(),
cuts.cut_values_.ConstDeviceSpan(),
@@ -360,14 +370,14 @@ TEST(GpuHist, EvaluateSingleSplitMissing) {
std::vector<bst_row_t>{0, 2};
thrust::device_vector<float> feature_values = std::vector<float>{1.0, 2.0};
thrust::device_vector<float> feature_min_values = std::vector<float>{0.0};
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{{-0.5, 0.5}, {0.5, 0.5}};
auto feature_histogram = ConvertToInteger({{-0.5, 0.5}, {0.5, 0.5}});
EvaluateSplitInputs input{1,0,
parent_sum,
dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
EvaluateSplitSharedInputs shared_inputs{
param,
DummyRoundingFactor(),
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
@@ -388,7 +398,11 @@ TEST(GpuHist, EvaluateSingleSplitEmpty) {
TrainParam tparam = ZeroParam();
GPUHistEvaluator evaluator(tparam, 1, 0);
DeviceSplitCandidate result =
evaluator.EvaluateSingleSplit(EvaluateSplitInputs{}, EvaluateSplitSharedInputs{}).split;
evaluator
.EvaluateSingleSplit(
EvaluateSplitInputs{},
EvaluateSplitSharedInputs{GPUTrainingParam(tparam), DummyRoundingFactor()})
.split;
EXPECT_EQ(result.findex, -1);
EXPECT_LT(result.loss_chg, 0.0f);
}
@@ -408,15 +422,14 @@ TEST(GpuHist, EvaluateSingleSplitFeatureSampling) {
std::vector<float>{1.0, 2.0, 11.0, 12.0};
thrust::device_vector<float> feature_min_values =
std::vector<float>{0.0, 10.0};
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{
{-10.0, 0.5}, {10.0, 0.5}, {-0.5, 0.5}, {0.5, 0.5}};
auto feature_histogram = ConvertToInteger({ {-10.0, 0.5}, {10.0, 0.5}, {-0.5, 0.5}, {0.5, 0.5}});
EvaluateSplitInputs input{1,0,
parent_sum,
dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
EvaluateSplitSharedInputs shared_inputs{
param,
DummyRoundingFactor(),
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
@@ -447,15 +460,14 @@ TEST(GpuHist, EvaluateSingleSplitBreakTies) {
std::vector<float>{1.0, 2.0, 11.0, 12.0};
thrust::device_vector<float> feature_min_values =
std::vector<float>{0.0, 10.0};
thrust::device_vector<GradientPairPrecise> feature_histogram =
std::vector<GradientPairPrecise>{
{-0.5, 0.5}, {0.5, 0.5}, {-0.5, 0.5}, {0.5, 0.5}};
auto feature_histogram = ConvertToInteger({ {-0.5, 0.5}, {0.5, 0.5}, {-0.5, 0.5}, {0.5, 0.5}});
EvaluateSplitInputs input{1,0,
parent_sum,
dh::ToSpan(feature_set),
dh::ToSpan(feature_histogram)};
EvaluateSplitSharedInputs shared_inputs{
param,
DummyRoundingFactor(),
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
@@ -484,12 +496,8 @@ TEST(GpuHist, EvaluateSplits) {
std::vector<float>{1.0, 2.0, 11.0, 12.0};
thrust::device_vector<float> feature_min_values =
std::vector<float>{0.0, 0.0};
thrust::device_vector<GradientPairPrecise> feature_histogram_left =
std::vector<GradientPairPrecise>{
{-0.5, 0.5}, {0.5, 0.5}, {-1.0, 0.5}, {1.0, 0.5}};
thrust::device_vector<GradientPairPrecise> feature_histogram_right =
std::vector<GradientPairPrecise>{
{-1.0, 0.5}, {1.0, 0.5}, {-0.5, 0.5}, {0.5, 0.5}};
auto feature_histogram_left = ConvertToInteger({ {-0.5, 0.5}, {0.5, 0.5}, {-1.0, 0.5}, {1.0, 0.5}});
auto feature_histogram_right = ConvertToInteger({ {-1.0, 0.5}, {1.0, 0.5}, {-0.5, 0.5}, {0.5, 0.5}});
EvaluateSplitInputs input_left{
1,0,
parent_sum,
@@ -502,6 +510,7 @@ TEST(GpuHist, EvaluateSplits) {
dh::ToSpan(feature_histogram_right)};
EvaluateSplitSharedInputs shared_inputs{
param,
DummyRoundingFactor(),
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
@@ -533,20 +542,26 @@ TEST_F(TestPartitionBasedSplit, GpuHist) {
evaluator.Reset(cuts_, dh::ToSpan(ft), info_.num_col_, param_, 0);
dh::device_vector<GradientPairPrecise> d_hist(hist_[0].size());
auto node_hist = hist_[0];
dh::safe_cuda(cudaMemcpy(d_hist.data().get(), node_hist.data(), node_hist.size_bytes(),
cudaMemcpyHostToDevice));
// Convert the sample histogram to fixed point
auto rounding = DummyRoundingFactor();
thrust::host_vector<GradientPairInt64> h_hist;
for(auto e: hist_[0]){
h_hist.push_back(rounding.ToFixedPoint({float(e.GetGrad()),float(e.GetHess())}));
}
dh::device_vector<GradientPairInt64> d_hist = h_hist;
dh::device_vector<bst_feature_t> feature_set{std::vector<bst_feature_t>{0}};
EvaluateSplitInputs input{0, 0, total_gpair_, dh::ToSpan(feature_set), dh::ToSpan(d_hist)};
EvaluateSplitSharedInputs shared_inputs{
GPUTrainingParam{param_}, dh::ToSpan(ft),
cuts_.cut_ptrs_.ConstDeviceSpan(), cuts_.cut_values_.ConstDeviceSpan(),
GPUTrainingParam{param_},
rounding,
dh::ToSpan(ft),
cuts_.cut_ptrs_.ConstDeviceSpan(),
cuts_.cut_values_.ConstDeviceSpan(),
cuts_.min_vals_.ConstDeviceSpan(),
};
auto split = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
ASSERT_NEAR(split.loss_chg, best_score_, 1e-16);
ASSERT_NEAR(split.loss_chg, best_score_, 1e-2);
}
} // namespace tree
} // namespace xgboost

View File

@@ -10,7 +10,6 @@
namespace xgboost {
namespace tree {
template <typename Gradient>
void TestDeterministicHistogram(bool is_dense, int shm_size) {
size_t constexpr kBins = 256, kCols = 120, kRows = 16384, kRounds = 16;
float constexpr kLower = -1e-2, kUpper = 1e2;
@@ -26,41 +25,41 @@ void TestDeterministicHistogram(bool is_dense, int shm_size) {
auto ridx = row_partitioner.GetRows(0);
int num_bins = kBins * kCols;
dh::device_vector<Gradient> histogram(num_bins);
dh::device_vector<GradientPairInt64> histogram(num_bins);
auto d_histogram = dh::ToSpan(histogram);
auto gpair = GenerateRandomGradients(kRows, kLower, kUpper);
gpair.SetDevice(0);
FeatureGroups feature_groups(page->Cuts(), page->is_dense, shm_size,
sizeof(Gradient));
sizeof(GradientPairInt64));
auto rounding = CreateRoundingFactor<Gradient>(gpair.DeviceSpan());
auto rounding = GradientQuantizer(gpair.DeviceSpan());
BuildGradientHistogram(page->GetDeviceAccessor(0),
feature_groups.DeviceAccessor(0), gpair.DeviceSpan(),
ridx, d_histogram, rounding);
std::vector<Gradient> histogram_h(num_bins);
std::vector<GradientPairInt64> histogram_h(num_bins);
dh::safe_cuda(cudaMemcpy(histogram_h.data(), d_histogram.data(),
num_bins * sizeof(Gradient),
num_bins * sizeof(GradientPairInt64),
cudaMemcpyDeviceToHost));
for (size_t i = 0; i < kRounds; ++i) {
dh::device_vector<Gradient> new_histogram(num_bins);
dh::device_vector<GradientPairInt64> new_histogram(num_bins);
auto d_new_histogram = dh::ToSpan(new_histogram);
auto rounding = CreateRoundingFactor<Gradient>(gpair.DeviceSpan());
auto rounding = GradientQuantizer(gpair.DeviceSpan());
BuildGradientHistogram(page->GetDeviceAccessor(0),
feature_groups.DeviceAccessor(0),
gpair.DeviceSpan(), ridx, d_new_histogram,
rounding);
std::vector<Gradient> new_histogram_h(num_bins);
std::vector<GradientPairInt64> new_histogram_h(num_bins);
dh::safe_cuda(cudaMemcpy(new_histogram_h.data(), d_new_histogram.data(),
num_bins * sizeof(Gradient),
num_bins * sizeof(GradientPairInt64),
cudaMemcpyDeviceToHost));
for (size_t j = 0; j < new_histogram_h.size(); ++j) {
ASSERT_EQ(new_histogram_h[j].GetGrad(), histogram_h[j].GetGrad());
ASSERT_EQ(new_histogram_h[j].GetHess(), histogram_h[j].GetHess());
ASSERT_EQ(new_histogram_h[j].GetQuantisedGrad(), histogram_h[j].GetQuantisedGrad());
ASSERT_EQ(new_histogram_h[j].GetQuantisedHess(), histogram_h[j].GetQuantisedHess());
}
}
@@ -71,20 +70,20 @@ void TestDeterministicHistogram(bool is_dense, int shm_size) {
// Use a single feature group to compute the baseline.
FeatureGroups single_group(page->Cuts());
dh::device_vector<Gradient> baseline(num_bins);
dh::device_vector<GradientPairInt64> baseline(num_bins);
BuildGradientHistogram(page->GetDeviceAccessor(0),
single_group.DeviceAccessor(0),
gpair.DeviceSpan(), ridx, dh::ToSpan(baseline),
rounding);
std::vector<Gradient> baseline_h(num_bins);
std::vector<GradientPairInt64> baseline_h(num_bins);
dh::safe_cuda(cudaMemcpy(baseline_h.data(), baseline.data().get(),
num_bins * sizeof(Gradient),
num_bins * sizeof(GradientPairInt64),
cudaMemcpyDeviceToHost));
for (size_t i = 0; i < baseline.size(); ++i) {
EXPECT_NEAR(baseline_h[i].GetGrad(), histogram_h[i].GetGrad(),
baseline_h[i].GetGrad() * 1e-3);
EXPECT_NEAR(baseline_h[i].GetQuantisedGrad(), histogram_h[i].GetQuantisedGrad(),
baseline_h[i].GetQuantisedGrad() * 1e-3);
}
}
}
@@ -95,11 +94,25 @@ TEST(Histogram, GPUDeterministic) {
std::vector<int> shm_sizes{48 * 1024, 64 * 1024, 160 * 1024};
for (bool is_dense : is_dense_array) {
for (int shm_size : shm_sizes) {
TestDeterministicHistogram<GradientPairPrecise>(is_dense, shm_size);
TestDeterministicHistogram(is_dense, shm_size);
}
}
}
void ValidateCategoricalHistogram(size_t n_categories, common::Span<GradientPairInt64> onehot,
common::Span<GradientPairInt64> cat) {
auto cat_sum = std::accumulate(cat.cbegin(), cat.cend(), GradientPairInt64{});
for (size_t c = 0; c < n_categories; ++c) {
auto zero = onehot[c * 2];
auto one = onehot[c * 2 + 1];
auto chosen = cat[c];
auto not_chosen = cat_sum - chosen;
ASSERT_EQ(zero, not_chosen);
ASSERT_EQ(one, chosen);
}
}
// Test 1 vs rest categorical histogram is equivalent to one hot encoded data.
void TestGPUHistogramCategorical(size_t num_categories) {
size_t constexpr kRows = 340;
@@ -110,10 +123,10 @@ void TestGPUHistogramCategorical(size_t num_categories) {
BatchParam batch_param{0, static_cast<int32_t>(kBins)};
tree::RowPartitioner row_partitioner(0, kRows);
auto ridx = row_partitioner.GetRows(0);
dh::device_vector<GradientPairPrecise> cat_hist(num_categories);
dh::device_vector<GradientPairInt64> cat_hist(num_categories);
auto gpair = GenerateRandomGradients(kRows, 0, 2);
gpair.SetDevice(0);
auto rounding = CreateRoundingFactor<GradientPairPrecise>(gpair.DeviceSpan());
auto rounding = GradientQuantizer(gpair.DeviceSpan());
/**
* Generate hist with cat data.
*/
@@ -131,7 +144,7 @@ void TestGPUHistogramCategorical(size_t num_categories) {
*/
auto x_encoded = OneHotEncodeFeature(x, num_categories);
auto encode_m = GetDMatrixFromData(x_encoded, kRows, num_categories);
dh::device_vector<GradientPairPrecise> encode_hist(2 * num_categories);
dh::device_vector<GradientPairInt64> encode_hist(2 * num_categories);
for (auto const &batch : encode_m->GetBatches<EllpackPage>(batch_param)) {
auto* page = batch.Impl();
FeatureGroups single_group(page->Cuts());
@@ -141,14 +154,14 @@ void TestGPUHistogramCategorical(size_t num_categories) {
rounding);
}
std::vector<GradientPairPrecise> h_cat_hist(cat_hist.size());
std::vector<GradientPairInt64> h_cat_hist(cat_hist.size());
thrust::copy(cat_hist.begin(), cat_hist.end(), h_cat_hist.begin());
std::vector<GradientPairPrecise> h_encode_hist(encode_hist.size());
std::vector<GradientPairInt64> h_encode_hist(encode_hist.size());
thrust::copy(encode_hist.begin(), encode_hist.end(), h_encode_hist.begin());
ValidateCategoricalHistogram(num_categories,
common::Span<GradientPairPrecise>{h_encode_hist},
common::Span<GradientPairPrecise>{h_cat_hist});
common::Span<GradientPairInt64>{h_encode_hist},
common::Span<GradientPairInt64>{h_cat_hist});
}
TEST(Histogram, GPUHistCategorical) {
@@ -156,5 +169,74 @@ TEST(Histogram, GPUHistCategorical) {
TestGPUHistogramCategorical(num_categories);
}
}
namespace {
// Atomic add as type cast for test.
// Reference overload of CUDA's ::atomicAdd for signed 64-bit integers:
// reinterprets both operands as unsigned 64-bit values (two's-complement
// addition is bit-identical for signed and unsigned) and forwards to the
// native unsigned 64-bit atomicAdd. Used by TestAtomicAdd below to check
// AtomicAdd64As32 against hardware behaviour.
XGBOOST_DEV_INLINE int64_t atomicAdd(int64_t *dst, int64_t src) { // NOLINT
uint64_t* u_dst = reinterpret_cast<uint64_t*>(dst);
uint64_t u_src = *reinterpret_cast<uint64_t*>(&src);
uint64_t ret = ::atomicAdd(u_dst, u_src);
// Bit-cast the previous value back to signed, matching atomicAdd semantics
// of returning the value stored before the addition.
return *reinterpret_cast<int64_t*>(&ret);
}
}
// Compares AtomicAdd64As32 against the reference 64-bit atomicAdd overload
// defined above, for mixed-sign inputs and for totals that do not fit in a
// 32-bit integer in either direction.
void TestAtomicAdd() {
size_t n_elements = 1024;
// result_a accumulates via AtomicAdd64As32, result_b via the reference add;
// the two must agree after every launch.
dh::device_vector<int64_t> result_a(1, 0);
auto d_result_a = result_a.data().get();
dh::device_vector<int64_t> result_b(1, 0);
auto d_result_b = result_b.data().get();
/**
* Test for simple inputs
*/
// Alternating positive/negative values exercise both add directions.
std::vector<int64_t> h_inputs(n_elements);
for (size_t i = 0; i < h_inputs.size(); ++i) {
h_inputs[i] = (i % 2 == 0) ? i : -i;
}
dh::device_vector<int64_t> inputs(h_inputs);
auto d_inputs = inputs.data().get();
dh::LaunchN(n_elements, [=] __device__(size_t i) {
AtomicAdd64As32(d_result_a, d_inputs[i]);
atomicAdd(d_result_b, d_inputs[i]);
});
ASSERT_EQ(result_a[0], result_b[0]);
/**
* Test for positive values that don't fit into 32 bit integer.
*/
thrust::fill(inputs.begin(), inputs.end(),
(std::numeric_limits<uint32_t>::max() / 2));
thrust::fill(result_a.begin(), result_a.end(), 0);
thrust::fill(result_b.begin(), result_b.end(), 0);
dh::LaunchN(n_elements, [=] __device__(size_t i) {
AtomicAdd64As32(d_result_a, d_inputs[i]);
atomicAdd(d_result_b, d_inputs[i]);
});
ASSERT_EQ(result_a[0], result_b[0]);
// The total must exceed UINT32_MAX, proving the sum really spans 64 bits.
ASSERT_GT(result_a[0], std::numeric_limits<uint32_t>::max());
CHECK_EQ(thrust::reduce(inputs.begin(), inputs.end(), int64_t(0)), result_a[0]);
/**
* Test for negative values that don't fit into 32 bit integer.
*/
thrust::fill(inputs.begin(), inputs.end(),
(std::numeric_limits<int32_t>::min() / 2));
thrust::fill(result_a.begin(), result_a.end(), 0);
thrust::fill(result_b.begin(), result_b.end(), 0);
dh::LaunchN(n_elements, [=] __device__(size_t i) {
AtomicAdd64As32(d_result_a, d_inputs[i]);
atomicAdd(d_result_b, d_inputs[i]);
});
ASSERT_EQ(result_a[0], result_b[0]);
// Symmetric check: the total must fall below INT32_MIN.
ASSERT_LT(result_a[0], std::numeric_limits<int32_t>::min());
CHECK_EQ(thrust::reduce(inputs.begin(), inputs.end(), int64_t(0)), result_a[0]);
}
// gtest entry point for the 64-bit atomic-add equivalence checks above.
TEST(Histogram, AtomicAddInt64) {
TestAtomicAdd();
}
} // namespace tree
} // namespace xgboost

View File

@@ -291,6 +291,26 @@ TEST(CPUHistogram, BuildHist) {
}
namespace {
// Compare a categorical histogram against its one-hot encoded equivalent:
// for each category c, one-hot bin (c*2+1) should match the category's own
// gradient/hessian sum (within kRtEps relative error) and bin (c*2) the sum
// over all other categories.
// NOTE(review): the accumulate seed is GradientPairPrecise{} rather than
// GradientSumT{} — presumably deliberate so the reduction runs in double
// precision regardless of GradientSumT; confirm this is intended.
template <typename GradientSumT>
void ValidateCategoricalHistogram(size_t n_categories,
common::Span<GradientSumT> onehot,
common::Span<GradientSumT> cat) {
auto cat_sum = std::accumulate(cat.cbegin(), cat.cend(), GradientPairPrecise{});
for (size_t c = 0; c < n_categories; ++c) {
auto zero = onehot[c * 2];
auto one = onehot[c * 2 + 1];
auto chosen = cat[c];
auto not_chosen = cat_sum - chosen;
// Relative-error comparison since floating-point accumulation order differs
// between the categorical and one-hot histograms.
ASSERT_LE(RelError(zero.GetGrad(), not_chosen.GetGrad()), kRtEps);
ASSERT_LE(RelError(zero.GetHess(), not_chosen.GetHess()), kRtEps);
ASSERT_LE(RelError(one.GetGrad(), chosen.GetGrad()), kRtEps);
ASSERT_LE(RelError(one.GetHess(), chosen.GetHess()), kRtEps);
}
}
void TestHistogramCategorical(size_t n_categories, bool force_read_by_column) {
size_t constexpr kRows = 340;
int32_t constexpr kBins = 256;

View File

@@ -29,7 +29,7 @@ TEST(GpuHist, DeviceHistogram) {
constexpr size_t kNBins = 128;
constexpr int kNNodes = 4;
constexpr size_t kStopGrowing = kNNodes * kNBins * 2u;
DeviceHistogramStorage<GradientPairPrecise, kStopGrowing> histogram;
DeviceHistogramStorage<kStopGrowing> histogram;
histogram.Init(0, kNBins);
for (int i = 0; i < kNNodes; ++i) {
histogram.AllocateHistograms({i});
@@ -107,32 +107,27 @@ void TestBuildHist(bool use_shared_memory_histograms) {
maker.row_partitioner.reset(new RowPartitioner(0, kNRows));
maker.hist.AllocateHistograms({0});
maker.gpair = gpair.DeviceSpan();
maker.histogram_rounding = CreateRoundingFactor<GradientSumT>(maker.gpair);
maker.histogram_rounding.reset(new GradientQuantizer(maker.gpair));
BuildGradientHistogram(
page->GetDeviceAccessor(0), maker.feature_groups->DeviceAccessor(0),
gpair.DeviceSpan(), maker.row_partitioner->GetRows(0),
maker.hist.GetNodeHistogram(0), maker.histogram_rounding,
maker.hist.GetNodeHistogram(0), *maker.histogram_rounding,
!use_shared_memory_histograms);
DeviceHistogramStorage<GradientSumT>& d_hist = maker.hist;
DeviceHistogramStorage<>& d_hist = maker.hist;
auto node_histogram = d_hist.GetNodeHistogram(0);
// d_hist.data stored in float, not gradient pair
thrust::host_vector<GradientSumT> h_result (d_hist.Data().size() / 2);
size_t data_size =
sizeof(GradientSumT) /
(sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT));
data_size *= d_hist.Data().size();
dh::safe_cuda(cudaMemcpy(h_result.data(), node_histogram.data(), data_size,
thrust::host_vector<GradientPairInt64> h_result (node_histogram.size());
dh::safe_cuda(cudaMemcpy(h_result.data(), node_histogram.data(), node_histogram.size_bytes(),
cudaMemcpyDeviceToHost));
std::vector<GradientPairPrecise> solution = GetHostHistGpair();
std::cout << std::fixed;
for (size_t i = 0; i < h_result.size(); ++i) {
ASSERT_FALSE(std::isnan(h_result[i].GetGrad()));
EXPECT_NEAR(h_result[i].GetGrad(), solution[i].GetGrad(), 0.01f);
EXPECT_NEAR(h_result[i].GetHess(), solution[i].GetHess(), 0.01f);
auto result = maker.histogram_rounding->ToFloatingPoint(h_result[i]);
EXPECT_NEAR(result.GetGrad(), solution[i].GetGrad(), 0.01f);
EXPECT_NEAR(result.GetHess(), solution[i].GetHess(), 0.01f);
}
}
@@ -161,6 +156,12 @@ HistogramCutsWrapper GetHostCutMatrix () {
return cmat;
}
// Build a quantizer from a single large gradient pair; tests using this
// helper must keep their gradient sums at or below 1000.
inline GradientQuantizer DummyRoundingFactor() {
  thrust::device_vector<GradientPair> gpair(1, GradientPair{1000.f, 1000.f});
  return GradientQuantizer(dh::ToSpan(gpair));
}
// TODO(trivialfis): This test is over simplified.
TEST(GpuHist, EvaluateRootSplit) {
constexpr int kNRows = 16;
@@ -209,10 +210,12 @@ TEST(GpuHist, EvaluateRootSplit) {
// Each row of hist_gpair represents gpairs for one feature.
// Each entry represents a bin.
std::vector<GradientPairPrecise> hist_gpair = GetHostHistGpair();
std::vector<bst_float> hist;
maker.histogram_rounding.reset(new GradientQuantizer(DummyRoundingFactor()));
std::vector<int64_t> hist;
for (auto pair : hist_gpair) {
hist.push_back(pair.GetGrad());
hist.push_back(pair.GetHess());
auto grad = maker.histogram_rounding->ToFixedPoint({float(pair.GetGrad()),float(pair.GetHess())});
hist.push_back(grad.GetQuantisedGrad());
hist.push_back(grad.GetQuantisedHess());
}
ASSERT_EQ(maker.hist.Data().size(), hist.size());