Support cpu quantile sketch with column-wise data split (#8742)

This commit is contained in:
Rong Ou
2023-02-04 22:26:24 -08:00
committed by GitHub
parent c1786849e3
commit 66191e9926
15 changed files with 250 additions and 118 deletions

View File

@@ -6,7 +6,6 @@
#include <gtest/gtest.h>
#include "../../../src/common/hist_util.h"
#include "../../../src/common/quantile.h"
#include "../../../src/data/adapter.h"
#include "xgboost/context.h"
@@ -74,7 +73,7 @@ void DoTestDistributedQuantile(size_t rows, size_t cols) {
auto hess = Span<float const>{hessian};
ContainerType<use_column> sketch_distributed(n_bins, m->Info().feature_types.ConstHostSpan(),
column_size, false, AllThreadsForTest());
column_size, false, false, AllThreadsForTest());
if (use_column) {
for (auto const& page : m->GetBatches<SortedCSCPage>()) {
@@ -95,7 +94,7 @@ void DoTestDistributedQuantile(size_t rows, size_t cols) {
std::for_each(column_size.begin(), column_size.end(), [=](auto& size) { size *= world; });
m->Info().num_row_ = world * rows;
ContainerType<use_column> sketch_on_single_node(n_bins, m->Info().feature_types.ConstHostSpan(),
column_size, false, AllThreadsForTest());
column_size, false, false, AllThreadsForTest());
m->Info().num_row_ = rows;
for (auto rank = 0; rank < world; ++rank) {
@@ -170,6 +169,132 @@ TEST(Quantile, SortedDistributed) {
TestDistributedQuantile<true>(kRows, kCols);
}
namespace {
template <bool use_column>
void DoTestColSplitQuantile(size_t rows, size_t cols) {
auto const world = collective::GetWorldSize();
auto const rank = collective::GetRank();
auto m = std::unique_ptr<DMatrix>{[=]() {
auto sparsity = 0.5f;
std::vector<FeatureType> ft(cols);
for (size_t i = 0; i < ft.size(); ++i) {
ft[i] = (i % 2 == 0) ? FeatureType::kNumerical : FeatureType::kCategorical;
}
auto dmat = RandomDataGenerator{rows, cols, sparsity}
.Seed(0)
.Lower(.0f)
.Upper(1.0f)
.Type(ft)
.MaxCategory(13)
.GenerateDMatrix();
return dmat->SliceCol(world, rank);
}()};
std::vector<bst_row_t> column_size(cols, 0);
auto const slice_size = cols / world;
auto const slice_start = slice_size * rank;
auto const slice_end = (rank == world - 1) ? cols : slice_start + slice_size;
for (auto i = slice_start; i < slice_end; i++) {
column_size[i] = rows;
}
auto const n_bins = 64;
// Generate cuts for distributed environment.
HistogramCuts distributed_cuts;
{
ContainerType<use_column> sketch_distributed(n_bins, m->Info().feature_types.ConstHostSpan(),
column_size, false, true, AllThreadsForTest());
std::vector<float> hessian(rows, 1.0);
auto hess = Span<float const>{hessian};
if (use_column) {
for (auto const& page : m->GetBatches<SortedCSCPage>()) {
PushPage(&sketch_distributed, page, m->Info(), hess);
}
} else {
for (auto const& page : m->GetBatches<SparsePage>()) {
PushPage(&sketch_distributed, page, m->Info(), hess);
}
}
sketch_distributed.MakeCuts(&distributed_cuts);
}
// Generate cuts for single node environment
collective::Finalize();
CHECK_EQ(collective::GetWorldSize(), 1);
HistogramCuts single_node_cuts;
{
ContainerType<use_column> sketch_on_single_node(n_bins, m->Info().feature_types.ConstHostSpan(),
column_size, false, false, AllThreadsForTest());
std::vector<float> hessian(rows, 1.0);
auto hess = Span<float const>{hessian};
if (use_column) {
for (auto const& page : m->GetBatches<SortedCSCPage>()) {
PushPage(&sketch_on_single_node, page, m->Info(), hess);
}
} else {
for (auto const& page : m->GetBatches<SparsePage>()) {
PushPage(&sketch_on_single_node, page, m->Info(), hess);
}
}
sketch_on_single_node.MakeCuts(&single_node_cuts);
}
auto const& sptrs = single_node_cuts.Ptrs();
auto const& dptrs = distributed_cuts.Ptrs();
auto const& svals = single_node_cuts.Values();
auto const& dvals = distributed_cuts.Values();
auto const& smins = single_node_cuts.MinValues();
auto const& dmins = distributed_cuts.MinValues();
EXPECT_EQ(sptrs.size(), dptrs.size());
for (size_t i = 0; i < sptrs.size(); ++i) {
EXPECT_EQ(sptrs[i], dptrs[i]) << "rank: " << rank << ", i: " << i;
}
EXPECT_EQ(svals.size(), dvals.size());
for (size_t i = 0; i < svals.size(); ++i) {
EXPECT_NEAR(svals[i], dvals[i], 2e-2f) << "rank: " << rank << ", i: " << i;
}
EXPECT_EQ(smins.size(), dmins.size());
for (size_t i = 0; i < smins.size(); ++i) {
EXPECT_FLOAT_EQ(smins[i], dmins[i]) << "rank: " << rank << ", i: " << i;
}
}
template <bool use_column>
// Spins up an in-memory communicator with a fixed world size and runs the
// column-split quantile test on every rank.
void TestColSplitQuantile(size_t rows, size_t cols) {
  int32_t constexpr kWorldSize = 4;
  RunWithInMemoryCommunicator(kWorldSize, DoTestColSplitQuantile<use_column>, rows, cols);
}
} // anonymous namespace
// Sanity check on a tiny matrix, sketching from SparsePage batches.
TEST(Quantile, ColSplitBasic) {
  size_t constexpr kNumRows{10}, kNumCols{10};
  TestColSplitQuantile<false>(kNumRows, kNumCols);
}
// Larger matrix, sketching from SparsePage batches.
TEST(Quantile, ColSplit) {
  size_t constexpr kNumRows{4000}, kNumCols{200};
  TestColSplitQuantile<false>(kNumRows, kNumCols);
}
// Sanity check on a tiny matrix, sketching from SortedCSCPage batches.
TEST(Quantile, ColSplitSortedBasic) {
  size_t constexpr kNumRows{10}, kNumCols{10};
  TestColSplitQuantile<true>(kNumRows, kNumCols);
}
// Larger matrix, sketching from SortedCSCPage batches.
TEST(Quantile, ColSplitSorted) {
  size_t constexpr kNumRows{4000}, kNumCols{200};
  TestColSplitQuantile<true>(kNumRows, kNumCols);
}
namespace {
void TestSameOnAllWorkers() {
auto const world = collective::GetWorldSize();
@@ -222,17 +347,17 @@ void TestSameOnAllWorkers() {
for (int32_t i = 0; i < world; i++) {
for (size_t j = 0; j < value_size; ++j) {
size_t idx = i * value_size + j;
ASSERT_NEAR(cuts.Values().at(j), cut_values.at(idx), kRtEps);
EXPECT_NEAR(cuts.Values().at(j), cut_values.at(idx), kRtEps);
}
for (size_t j = 0; j < ptr_size; ++j) {
size_t idx = i * ptr_size + j;
ASSERT_EQ(cuts.Ptrs().at(j), cut_ptrs.at(idx));
EXPECT_EQ(cuts.Ptrs().at(j), cut_ptrs.at(idx));
}
for (size_t j = 0; j < min_value_size; ++j) {
size_t idx = i * min_value_size + j;
ASSERT_EQ(cuts.MinValues().at(j), cut_min_values.at(idx));
EXPECT_EQ(cuts.MinValues().at(j), cut_min_values.at(idx));
}
}
});

View File

@@ -6,7 +6,6 @@
#include <vector>
#include "../helpers.h"
#include "../../src/collective/communicator-inl.h"
namespace xgboost {
namespace common {

View File

@@ -338,10 +338,10 @@ TEST(SimpleDMatrix, SliceCol) {
auto& margin = p_m->Info().base_margin_;
margin = decltype(p_m->Info().base_margin_){{kRows, kClasses}, Context::kCpuId};
size_t constexpr kSlicCols {4};
for (auto slice = 0; slice < 2; slice++) {
auto const slice_start = slice * kSlicCols;
std::unique_ptr<DMatrix> out { p_m->SliceCol(slice_start, kSlicCols) };
auto constexpr kSlices {2};
auto constexpr kSliceSize {4};
for (auto slice = 0; slice < kSlices; slice++) {
std::unique_ptr<DMatrix> out { p_m->SliceCol(kSlices, slice) };
ASSERT_EQ(out->Info().labels.Size(), kRows);
ASSERT_EQ(out->Info().labels_lower_bound_.Size(), kRows);
ASSERT_EQ(out->Info().labels_upper_bound_.Size(), kRows);
@@ -355,7 +355,8 @@ TEST(SimpleDMatrix, SliceCol) {
auto out_inst = out_page[i];
auto in_inst = in_page[i];
ASSERT_EQ(out_inst.size() * 2, in_inst.size()) << i;
for (size_t j = 0; j < kSlicCols; ++j) {
for (size_t j = 0; j < kSliceSize; ++j) {
auto const slice_start = kSliceSize * slice;
ASSERT_EQ(in_inst[slice_start + j].fvalue, out_inst[j].fvalue);
ASSERT_EQ(in_inst[slice_start + j].index, out_inst[j].index);
}
@@ -377,7 +378,7 @@ TEST(SimpleDMatrix, SliceCol) {
ASSERT_EQ(out->Info().num_col_, out->Info().num_col_);
ASSERT_EQ(out->Info().num_row_, kRows);
ASSERT_EQ(out->Info().num_nonzero_, kRows * kSlicCols); // dense
ASSERT_EQ(out->Info().num_nonzero_, kRows * kSliceSize); // dense
ASSERT_EQ(out->Info().data_split_mode, DataSplitMode::kCol);
}
}

View File

@@ -97,7 +97,6 @@ void TestColumnSplitPredictBatch() {
auto dmat = RandomDataGenerator(kRows, kCols, 0).GenerateDMatrix();
auto const world_size = collective::GetWorldSize();
auto const rank = collective::GetRank();
auto const kSliceSize = (kCols + 1) / world_size;
auto lparam = CreateEmptyGenericParam(GPUIDX);
std::unique_ptr<Predictor> cpu_predictor =
@@ -112,7 +111,7 @@ void TestColumnSplitPredictBatch() {
// Test predict batch
PredictionCacheEntry out_predictions;
cpu_predictor->InitOutPredictions(dmat->Info(), &out_predictions.predictions, model);
auto sliced = std::unique_ptr<DMatrix>{dmat->SliceCol(rank * kSliceSize, kSliceSize)};
auto sliced = std::unique_ptr<DMatrix>{dmat->SliceCol(world_size, rank)};
cpu_predictor->PredictBatch(sliced.get(), &out_predictions, model, 0);
std::vector<float>& out_predictions_h = out_predictions.predictions.HostVector();