Optimize cpu sketch allreduce for sparse data. (#6009)

* Bypass the RABIT serialization reducer and use custom allgather-based merging.
Jiaming Yuan 2020-08-19 10:03:45 +08:00 committed by GitHub
parent 90355b4f00
commit 29b7fea572
10 changed files with 357 additions and 87 deletions
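
The merging scheme the new code implements (see GatherSketchInfo and AllReduce in the diffs below) can be illustrated outside of XGBoost: build an allgather for variable-sized per-worker payloads out of two sum-allreduces. The following is a minimal, self-contained simulation of that idea, not the PR's code; SimulatedAllreduceSum stands in for rabit::Allreduce<rabit::op::Sum>, and the integer payloads stand in for pruned per-column sketches.

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <numeric>
    #include <vector>

    // Element-wise sum across all simulated workers' buffers; every worker
    // ends up with the same summed buffer, like a rabit sum-allreduce.
    void SimulatedAllreduceSum(std::vector<std::vector<size_t>> *buffers) {
      auto &bufs = *buffers;
      for (size_t i = 0; i < bufs[0].size(); ++i) {
        size_t sum = 0;
        for (auto const &b : bufs) { sum += b[i]; }
        for (auto &b : bufs) { b[i] = sum; }
      }
    }

    int main() {
      // Variable-sized payloads, one per worker (standing in for sketches).
      std::vector<std::vector<size_t>> payloads{{1, 2}, {3}, {4, 5, 6}};
      size_t const world = payloads.size();

      // Step 1: gather payload sizes.  Each worker fills only its own slot,
      // so a sum-allreduce leaves every worker holding all sizes.
      std::vector<std::vector<size_t>> sizes(world,
                                             std::vector<size_t>(world, 0));
      for (size_t r = 0; r < world; ++r) { sizes[r][r] = payloads[r].size(); }
      SimulatedAllreduceSum(&sizes);

      // Step 2: compute each worker's segment offset, copy its payload into
      // its own zero-filled segment, and sum-allreduce the whole buffer.
      // Zeros from the other workers leave each segment intact, emulating an
      // allgather.
      std::vector<size_t> segments(world + 1, 0);
      std::partial_sum(sizes[0].begin(), sizes[0].end(), segments.begin() + 1);
      std::vector<std::vector<size_t>> global(
          world, std::vector<size_t>(segments.back(), 0));
      for (size_t r = 0; r < world; ++r) {
        std::copy(payloads[r].begin(), payloads[r].end(),
                  global[r].begin() + segments[r]);
      }
      SimulatedAllreduceSum(&global);

      // Every worker now holds the concatenation {1, 2, 3, 4, 5, 6}.
      for (auto v : global[0]) { std::cout << v << " "; }
      std::cout << "\n";
      return 0;
    }

Only entries that actually exist are transferred and stored, so the peak buffer size is the sum of the real per-worker sketch sizes rather than the dense worst case (nbytes * n_columns) that the removed rabit::SerializeReducer path had to reserve.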

View File

@@ -116,26 +116,14 @@ inline HistogramCuts SketchOnDMatrix(DMatrix *m, int32_t max_bins) {
   for (auto& column : column_sizes) {
     column.resize(info.num_col_, 0);
   }
-  for (auto const& page : m->GetBatches<SparsePage>()) {
-    page.data.HostVector();
-    page.offset.HostVector();
-    ParallelFor(page.Size(), threads, [&](size_t i) {
-      auto &local_column_sizes = column_sizes.at(omp_get_thread_num());
-      auto row = page[i];
-      auto const *p_row = row.data();
-      for (size_t j = 0; j < row.size(); ++j) {
-        local_column_sizes.at(p_row[j].index)++;
-      }
-    });
-  }
 
   std::vector<bst_row_t> reduced(info.num_col_, 0);
-  ParallelFor(info.num_col_, threads, [&](size_t i) {
-    for (auto const &thread : column_sizes) {
-      reduced[i] += thread[i];
-    }
-  });
+  for (auto const& page : m->GetBatches<SparsePage>()) {
+    auto const &entries_per_column =
+        HostSketchContainer::CalcColumnSize(page, info.num_col_, threads);
+    for (size_t i = 0; i < entries_per_column.size(); ++i) {
+      reduced[i] += entries_per_column[i];
+    }
+  }
 
   HostSketchContainer container(reduced, max_bins,
                                 HostSketchContainer::UseGroup(info));
   for (auto const &page : m->GetBatches<SparsePage>()) {
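
CalcColumnSize, called above, is defined in the next file below. The pattern it relies on, per-thread counter buffers merged after the parallel loop, avoids atomics on the shared counters and can be shown standalone. A minimal sketch with a toy CSR matrix (illustrative data and types, not the XGBoost ones):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    #include <omp.h>  // compile with -fopenmp

    int main() {
      // Toy CSR matrix: row offsets plus the column index of each nonzero.
      std::vector<size_t> offset{0, 2, 3, 6};
      std::vector<size_t> index{0, 2, 1, 0, 1, 2};
      size_t const n_columns = 3;
      int const nthreads = omp_get_max_threads();

      // One counter buffer per thread: no synchronization while counting.
      std::vector<std::vector<size_t>> column_sizes(
          nthreads, std::vector<size_t>(n_columns, 0));
    #pragma omp parallel for num_threads(nthreads)
      for (size_t i = 0; i < offset.size() - 1; ++i) {
        auto &local = column_sizes[omp_get_thread_num()];
        for (size_t j = offset[i]; j < offset[i + 1]; ++j) {
          local[index[j]]++;
        }
      }

      // Merge the thread-local buffers into the final per-column counts.
      std::vector<size_t> reduced(n_columns, 0);
      for (auto const &local : column_sizes) {
        for (size_t c = 0; c < n_columns; ++c) { reduced[c] += local[c]; }
      }
      for (auto c : reduced) { std::cout << c << " "; }  // prints: 2 2 2
      std::cout << "\n";
      return 0;
    }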

View File

@@ -25,34 +25,67 @@ HostSketchContainer::HostSketchContainer(std::vector<bst_row_t> columns_size,
   }
 }
 
-std::vector<bst_feature_t> LoadBalance(SparsePage const &page,
-                                       std::vector<size_t> columns_size,
-                                       size_t const nthreads) {
-  /* Some sparse datasets have their mass concentrated on a small
-   * number of features.  To avoid waiting for a few threads running
-   * forever, we here distribute different numbers of columns to
-   * different threads according to number of entries. */
-  size_t const total_entries = page.data.Size();
+std::vector<bst_row_t>
+HostSketchContainer::CalcColumnSize(SparsePage const &batch,
+                                    bst_feature_t const n_columns,
+                                    size_t const nthreads) {
+  auto page = batch.GetView();
+  std::vector<std::vector<bst_row_t>> column_sizes(nthreads);
+  for (auto &column : column_sizes) {
+    column.resize(n_columns, 0);
+  }
+
+  ParallelFor(page.Size(), nthreads, [&](size_t i) {
+    auto &local_column_sizes = column_sizes.at(omp_get_thread_num());
+    auto row = page[i];
+    auto const *p_row = row.data();
+    for (size_t j = 0; j < row.size(); ++j) {
+      local_column_sizes.at(p_row[j].index)++;
+    }
+  });
+  std::vector<bst_row_t> entries_per_columns(n_columns, 0);
+  ParallelFor(n_columns, nthreads, [&](size_t i) {
+    for (auto const &thread : column_sizes) {
+      entries_per_columns[i] += thread[i];
+    }
+  });
+  return entries_per_columns;
+}
+
+std::vector<bst_feature_t> HostSketchContainer::LoadBalance(
+    SparsePage const &batch, bst_feature_t n_columns, size_t const nthreads) {
+  /* Some sparse datasets have their mass concentrated on a small number of features.  To
+   * avoid waiting for a few threads running forever, we here distribute different numbers
+   * of columns to different threads according to number of entries.
+   */
+  auto page = batch.GetView();
+  size_t const total_entries = page.data.size();
   size_t const entries_per_thread = common::DivRoundUp(total_entries, nthreads);
 
-  std::vector<bst_feature_t> cols_ptr(nthreads+1, 0);
+  std::vector<std::vector<bst_row_t>> column_sizes(nthreads);
+  for (auto& column : column_sizes) {
+    column.resize(n_columns, 0);
+  }
+  std::vector<bst_row_t> entries_per_columns =
+      CalcColumnSize(batch, n_columns, nthreads);
+  std::vector<bst_feature_t> cols_ptr(nthreads + 1, 0);
   size_t count {0};
   size_t current_thread {1};
 
-  for (auto col : columns_size) {
-    cols_ptr[current_thread]++;  // add one column to thread
+  for (auto col : entries_per_columns) {
+    cols_ptr.at(current_thread)++;  // add one column to thread
     count += col;
-    if (count > entries_per_thread + 1) {
+    CHECK_LE(count, total_entries);
+    if (count > entries_per_thread) {
       current_thread++;
       count = 0;
-      cols_ptr[current_thread] = cols_ptr[current_thread-1];
+      cols_ptr.at(current_thread) = cols_ptr[current_thread-1];
     }
   }
   // Idle threads.
   for (; current_thread < cols_ptr.size() - 1; ++current_thread) {
     cols_ptr[current_thread+1] = cols_ptr[current_thread];
   }
 
   return cols_ptr;
 }
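
A rough standalone rendering of the greedy partitioning above (plain C++; the `current < nthreads` guard here takes the place of the CHECK_LE in the real code):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Assign whole columns to threads so that each thread gets roughly the
    // same number of entries, not the same number of columns.
    std::vector<size_t> GreedyColumnPartition(
        std::vector<size_t> const &entries_per_column, size_t nthreads) {
      size_t total = 0;
      for (auto e : entries_per_column) { total += e; }
      size_t const quota = (total + nthreads - 1) / nthreads;  // DivRoundUp
      std::vector<size_t> cols_ptr(nthreads + 1, 0);
      size_t count = 0;
      size_t current = 1;
      for (auto e : entries_per_column) {
        cols_ptr[current]++;  // add one more column to the current thread
        count += e;
        if (count > quota && current < nthreads) {
          ++current;
          count = 0;
          cols_ptr[current] = cols_ptr[current - 1];
        }
      }
      // Idle threads get empty ranges.
      for (; current < nthreads; ++current) {
        cols_ptr[current + 1] = cols_ptr[current];
      }
      return cols_ptr;
    }

    int main() {
      // One heavy column followed by five light ones, over three threads.
      auto ptr = GreedyColumnPartition({100, 1, 1, 1, 1, 1}, 3);
      for (auto p : ptr) { std::cout << p << " "; }  // prints: 0 1 6 6
      std::cout << "\n";
      return 0;
    }

Thread 1 takes only the heavy column, thread 2 takes the five light ones, and thread 3 idles with an empty range; a plain split by column count would instead pin roughly 100 entries plus a share of the light columns on one thread.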
@@ -67,11 +100,10 @@ void HostSketchContainer::PushRowPage(SparsePage const &page,
   // Use group index for weights?
   auto batch = page.GetView();
   dmlc::OMPException exec;
-  // Parallel over columns.  Assuming the data is dense, each thread owns a set of
-  // consecutive columns.
+  // Parallel over columns.  Each thread owns a set of consecutive columns.
   auto const ncol = static_cast<uint32_t>(info.num_col_);
   auto const is_dense = info.num_nonzero_ == info.num_col_ * info.num_row_;
-  auto thread_columns_ptr = LoadBalance(page, columns_size_, nthread);
+  auto thread_columns_ptr = LoadBalance(page, info.num_col_, nthread);
 #pragma omp parallel num_threads(nthread)
   {
@@ -112,6 +144,132 @@ void HostSketchContainer::PushRowPage(SparsePage const &page,
   monitor_.Stop(__func__);
 }
 
+void HostSketchContainer::GatherSketchInfo(
+    std::vector<WQSketch::SummaryContainer> const &reduced,
+    std::vector<size_t> *p_worker_segments,
+    std::vector<bst_row_t> *p_sketches_scan,
+    std::vector<WQSketch::Entry> *p_global_sketches) {
+  auto& worker_segments = *p_worker_segments;
+  worker_segments.resize(1, 0);
+  auto world = rabit::GetWorldSize();
+  auto rank = rabit::GetRank();
+  auto n_columns = sketches_.size();
+
+  std::vector<bst_row_t> sketch_size;
+  for (auto const& sketch : reduced) {
+    sketch_size.push_back(sketch.size);
+  }
+  std::vector<bst_row_t>& sketches_scan = *p_sketches_scan;
+  sketches_scan.resize((n_columns + 1) * world, 0);
+  size_t beg_scan = rank * (n_columns + 1);
+  std::partial_sum(sketch_size.cbegin(), sketch_size.cend(),
+                   sketches_scan.begin() + beg_scan + 1);
+  // Gather all column pointers
+  rabit::Allreduce<rabit::op::Sum>(sketches_scan.data(), sketches_scan.size());
+
+  for (int32_t i = 0; i < world; ++i) {
+    size_t back = (i + 1) * (n_columns + 1) - 1;
+    auto n_entries = sketches_scan.at(back);
+    worker_segments.push_back(n_entries);
+  }
+  // Offset of sketch from each worker.
+  std::partial_sum(worker_segments.begin(), worker_segments.end(),
+                   worker_segments.begin());
+  CHECK_GE(worker_segments.size(), 1);
+  auto total = worker_segments.back();
+
+  auto& global_sketches = *p_global_sketches;
+  global_sketches.resize(total, WQSketch::Entry{0, 0, 0, 0});
+  auto worker_sketch = Span<WQSketch::Entry>{global_sketches}.subspan(
+      worker_segments[rank], worker_segments[rank + 1] - worker_segments[rank]);
+  size_t cursor = 0;
+  for (auto const &sketch : reduced) {
+    std::copy(sketch.data, sketch.data + sketch.size,
+              worker_sketch.begin() + cursor);
+    cursor += sketch.size;
+  }
+
+  static_assert(sizeof(WQSketch::Entry) / 4 == sizeof(float), "");
+  rabit::Allreduce<rabit::op::Sum>(
+      reinterpret_cast<float *>(global_sketches.data()),
+      global_sketches.size() * sizeof(WQSketch::Entry) / sizeof(float));
+}
+
+void HostSketchContainer::AllReduce(
+    std::vector<WQSketch::SummaryContainer> *p_reduced,
+    std::vector<int32_t>* p_num_cuts) {
+  monitor_.Start(__func__);
+  auto& num_cuts = *p_num_cuts;
+  CHECK_EQ(num_cuts.size(), 0);
+  auto &reduced = *p_reduced;
+  reduced.resize(sketches_.size());
+
+  size_t n_columns = sketches_.size();
+  rabit::Allreduce<rabit::op::Max>(&n_columns, 1);
+  CHECK_EQ(n_columns, sketches_.size()) << "Number of columns differs across workers";
+
+  // Prune the intermediate num cuts for synchronization.
+  std::vector<bst_row_t> global_column_size(columns_size_);
+  rabit::Allreduce<rabit::op::Sum>(global_column_size.data(), global_column_size.size());
+
+  size_t nbytes = 0;
+  for (size_t i = 0; i < sketches_.size(); ++i) {
+    int32_t intermediate_num_cuts = static_cast<int32_t>(std::min(
+        global_column_size[i], static_cast<size_t>(max_bins_ * WQSketch::kFactor)));
+    if (global_column_size[i] != 0) {
+      WQSketch::SummaryContainer out;
+      sketches_[i].GetSummary(&out);
+      reduced[i].Reserve(intermediate_num_cuts);
+      CHECK(reduced[i].data);
+      reduced[i].SetPrune(out, intermediate_num_cuts);
+      nbytes = std::max(
+          WQSketch::SummaryContainer::CalcMemCost(intermediate_num_cuts),
+          nbytes);
+    }
+    num_cuts.push_back(intermediate_num_cuts);
+  }
+
+  auto world = rabit::GetWorldSize();
+  if (world == 1) {
+    return;
+  }
+
+  std::vector<size_t> worker_segments(1, 0);  // CSC pointer to sketches.
+  std::vector<bst_row_t> sketches_scan((n_columns + 1) * world, 0);
+  std::vector<WQSketch::Entry> global_sketches;
+  this->GatherSketchInfo(reduced, &worker_segments, &sketches_scan,
+                         &global_sketches);
+
+  std::vector<WQSketch::SummaryContainer> final_sketches(n_columns);
+  ParallelFor(n_columns, omp_get_max_threads(), [&](size_t fidx) {
+    int32_t intermediate_num_cuts = num_cuts[fidx];
+    auto nbytes =
+        WQSketch::SummaryContainer::CalcMemCost(intermediate_num_cuts);
+
+    for (int32_t i = 1; i < world + 1; ++i) {
+      auto size = worker_segments.at(i) - worker_segments[i - 1];
+      auto worker_sketches = Span<WQSketch::Entry>{global_sketches}.subspan(
+          worker_segments[i - 1], size);
+      auto worker_scan =
+          Span<bst_row_t>(sketches_scan)
+              .subspan((i - 1) * (n_columns + 1), (n_columns + 1));
+
+      auto worker_feature = worker_sketches.subspan(
+          worker_scan[fidx], worker_scan[fidx + 1] - worker_scan[fidx]);
+      CHECK(worker_feature.data());
+      WQSummary<float, float> summary(worker_feature.data(),
+                                      worker_feature.size());
+      auto &out = final_sketches.at(fidx);
+      out.Reduce(summary, nbytes);
+    }
+
+    reduced.at(fidx).Reserve(intermediate_num_cuts);
+    reduced.at(fidx).SetPrune(final_sketches.at(fidx), intermediate_num_cuts);
+  });
+  monitor_.Stop(__func__);
+}
+
 void AddCutPoint(WQuantileSketch<float, float>::SummaryContainer const &summary,
                  int max_bin, HistogramCuts *cuts) {
   size_t required_cuts = std::min(summary.size, static_cast<size_t>(max_bin));
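
The index arithmetic in GatherSketchInfo is dense, so here is a worked example with made-up sketch sizes (plain C++ mirroring the layout, not the XGBoost types). Worker i's sketch for feature f occupies global_sketches[worker_segments[i] + scan_i[f], worker_segments[i] + scan_i[f + 1]), where scan_i is worker i's slice of sketches_scan:

    #include <cstddef>
    #include <iostream>
    #include <numeric>
    #include <vector>

    int main() {
      size_t const n_columns = 2, world = 2;
      // Per-worker, per-column sketch sizes: worker 0 has {3, 1}, worker 1 {2, 4}.
      std::vector<std::vector<size_t>> sizes{{3, 1}, {2, 4}};

      // sketches_scan: one (n_columns + 1)-long offset scan per worker, laid
      // out back to back, as the sum-allreduce in GatherSketchInfo produces.
      std::vector<size_t> sketches_scan((n_columns + 1) * world, 0);
      for (size_t r = 0; r < world; ++r) {
        size_t beg = r * (n_columns + 1);
        std::partial_sum(sizes[r].begin(), sizes[r].end(),
                         sketches_scan.begin() + beg + 1);
      }
      // sketches_scan == {0, 3, 4,  0, 2, 6}

      // worker_segments: offset of each worker's block in global_sketches.
      std::vector<size_t> worker_segments(1, 0);
      for (size_t r = 0; r < world; ++r) {
        worker_segments.push_back(sketches_scan[(r + 1) * (n_columns + 1) - 1]);
      }
      std::partial_sum(worker_segments.begin(), worker_segments.end(),
                       worker_segments.begin());
      // worker_segments == {0, 4, 10}

      // Worker 1's sketch for feature 1 occupies [4 + 2, 4 + 6) == [6, 10).
      size_t rank = 1, fidx = 1;
      size_t beg = worker_segments[rank] +
                   sketches_scan[rank * (n_columns + 1) + fidx];
      size_t end = worker_segments[rank] +
                   sketches_scan[rank * (n_columns + 1) + fidx + 1];
      std::cout << "[" << beg << ", " << end << ")\n";  // prints: [6, 10)
      return 0;
    }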
@@ -126,44 +284,18 @@ void AddCutPoint(WQuantileSketch<float, float>::SummaryContainer const &summary,
 
 void HostSketchContainer::MakeCuts(HistogramCuts* cuts) {
   monitor_.Start(__func__);
-  rabit::Allreduce<rabit::op::Sum>(columns_size_.data(), columns_size_.size());
-  std::vector<WQSketch::SummaryContainer> reduced(sketches_.size());
+  std::vector<WQSketch::SummaryContainer> reduced;
   std::vector<int32_t> num_cuts;
-  size_t nbytes = 0;
-  for (size_t i = 0; i < sketches_.size(); ++i) {
-    int32_t intermediate_num_cuts = static_cast<int32_t>(std::min(
-        columns_size_[i], static_cast<size_t>(max_bins_ * WQSketch::kFactor)));
-    if (columns_size_[i] != 0) {
-      WQSketch::SummaryContainer out;
-      sketches_[i].GetSummary(&out);
-      reduced[i].Reserve(intermediate_num_cuts);
-      CHECK(reduced[i].data);
-      reduced[i].SetPrune(out, intermediate_num_cuts);
-    }
-    num_cuts.push_back(intermediate_num_cuts);
-    nbytes = std::max(
-        WQSketch::SummaryContainer::CalcMemCost(intermediate_num_cuts), nbytes);
-  }
-
-  if (rabit::IsDistributed()) {
-    // FIXME(trivialfis): This call will allocate nbytes * num_columns on rabit, which
-    // may generate oom error when data is sparse.  To fix it, we need to:
-    // - gather the column offsets over all workers.
-    // - run rabit::allgather on sketch data to collect all data.
-    // - merge all gathered sketches based on worker offsets and column offsets of data
-    //   from each worker.
-    // See GPU implementation for details.
-    rabit::SerializeReducer<WQSketch::SummaryContainer> sreducer;
-    sreducer.Allreduce(dmlc::BeginPtr(reduced), nbytes, reduced.size());
-  }
+  this->AllReduce(&reduced, &num_cuts);
 
   cuts->min_vals_.HostVector().resize(sketches_.size(), 0.0f);
   for (size_t fid = 0; fid < reduced.size(); ++fid) {
     WQSketch::SummaryContainer a;
     size_t max_num_bins = std::min(num_cuts[fid], max_bins_);
     a.Reserve(max_num_bins + 1);
     CHECK(a.data);
-    if (columns_size_[fid] != 0) {
+    if (num_cuts[fid] != 0) {
       a.SetPrune(reduced[fid], max_num_bins + 1);
       CHECK(a.data && reduced[fid].data);
       const bst_float mval = a.data[0].value;
@@ -173,6 +305,7 @@ void HostSketchContainer::MakeCuts(HistogramCuts* cuts) {
       const float mval = 1e-5f;
       cuts->min_vals_.HostVector()[fid] = mval;
     }
+
     AddCutPoint(a, max_num_bins, cuts);
     // push a value that is greater than anything
     const bst_float cpt
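
After the merge, MakeCuts prunes each per-feature summary down to max_num_bins + 1 entries. A simplified standalone sketch of rank-based pruning in the spirit of WQSketch's SetPrune (the real routine also tracks rmin/rmax error bounds, which this toy version with exact ranks omits):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // One summary entry: a value and its (exact, for this toy) rank.
    struct Entry {
      double rank;
      double value;
    };

    // Keep roughly maxsize entries at evenly spaced ranks, always retaining
    // the first and last entries so the value range survives the prune.
    std::vector<Entry> Prune(std::vector<Entry> const &in, size_t maxsize) {
      if (in.size() <= maxsize || maxsize < 2) { return in; }
      std::vector<Entry> out;
      double const total = in.back().rank;
      size_t src = 0;
      out.push_back(in.front());
      for (size_t i = 1; i + 1 < maxsize; ++i) {
        double const target = total * static_cast<double>(i) / (maxsize - 1);
        while (src + 1 < in.size() && in[src + 1].rank <= target) { ++src; }
        if (in[src].value != out.back().value) { out.push_back(in[src]); }
      }
      if (in.back().value != out.back().value) { out.push_back(in.back()); }
      return out;
    }

    int main() {
      std::vector<Entry> summary;
      for (int i = 0; i < 100; ++i) {
        summary.push_back({static_cast<double>(i + 1),
                           static_cast<double>(i)});
      }
      for (auto const &e : Prune(summary, 5)) { std::cout << e.value << " "; }
      std::cout << "\n";  // prints: 0 24 49 74 99
      return 0;
    }

Picking entries at evenly spaced ranks keeps the resulting cut points approximately equi-depth regardless of how skewed the raw value distribution is.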

View File

@@ -166,6 +166,16 @@ struct WQSummary {
    * \param src source sketch
    */
   inline void CopyFrom(const WQSummary &src) {
+    if (!src.data) {
+      CHECK_EQ(src.size, 0);
+      size = 0;
+      return;
+    }
+    if (!data) {
+      CHECK_EQ(this->size, 0);
+      CHECK_EQ(src.size, 0);
+      return;
+    }
     size = src.size;
     std::memcpy(data, src.data, sizeof(Entry) * size);
   }
@@ -721,6 +731,14 @@ class HostSketchContainer {
     return use_group_ind;
   }
 
+  static std::vector<bst_row_t> CalcColumnSize(SparsePage const &page,
+                                               bst_feature_t const n_columns,
+                                               size_t const nthreads);
+
+  static std::vector<bst_feature_t> LoadBalance(SparsePage const &page,
+                                                bst_feature_t n_columns,
+                                                size_t const nthreads);
+
   static uint32_t SearchGroupIndFromRow(std::vector<bst_uint> const &group_ptr,
                                         size_t const base_rowid) {
     CHECK_LT(base_rowid, group_ptr.back())
@@ -730,6 +748,14 @@ class HostSketchContainer {
                     group_ptr.cbegin() - 1;
     return group_ind;
   }
+  // Gather sketches from all workers.
+  void GatherSketchInfo(std::vector<WQSketch::SummaryContainer> const &reduced,
+                        std::vector<bst_row_t> *p_worker_segments,
+                        std::vector<bst_row_t> *p_sketches_scan,
+                        std::vector<WQSketch::Entry> *p_global_sketches);
+  // Merge sketches from all workers.
+  void AllReduce(std::vector<WQSketch::SummaryContainer> *p_reduced,
+                 std::vector<int32_t>* p_num_cuts);
   /* \brief Push a CSR matrix. */
   void PushRowPage(SparsePage const& page, MetaInfo const& info);

View File

@@ -23,9 +23,9 @@ TEST(CAPI, XGDMatrixCreateFromMatDT) {
   std::shared_ptr<xgboost::DMatrix> *dmat =
       static_cast<std::shared_ptr<xgboost::DMatrix> *>(handle);
   xgboost::MetaInfo &info = (*dmat)->Info();
-  ASSERT_EQ(info.num_col_, 2);
-  ASSERT_EQ(info.num_row_, 3);
-  ASSERT_EQ(info.num_nonzero_, 6);
+  ASSERT_EQ(info.num_col_, 2ul);
+  ASSERT_EQ(info.num_row_, 3ul);
+  ASSERT_EQ(info.num_nonzero_, 6ul);
 
   for (const auto &batch : (*dmat)->GetBatches<xgboost::SparsePage>()) {
     ASSERT_EQ(batch[0][0].fvalue, 0.0f);
@@ -38,9 +38,9 @@ TEST(CAPI, XGDMatrixCreateFromMatDT) {
 }
 
 TEST(CAPI, XGDMatrixCreateFromMatOmp) {
-  std::vector<int> num_rows = {100, 11374, 15000};
+  std::vector<bst_ulong> num_rows = {100, 11374, 15000};
   for (auto row : num_rows) {
-    int num_cols = 50;
+    bst_ulong num_cols = 50;
     int num_missing = 5;
     DMatrixHandle handle;
     std::vector<float> data(num_cols * row, 1.5);

View File

@@ -159,10 +159,10 @@ TEST(CutsBuilder, SearchGroupInd) {
   HistogramCuts hmat;
 
   size_t group_ind = HostSketchContainer::SearchGroupIndFromRow(p_mat->Info().group_ptr_, 0);
-  ASSERT_EQ(group_ind, 0);
+  ASSERT_EQ(group_ind, 0ul);
 
   group_ind = HostSketchContainer::SearchGroupIndFromRow(p_mat->Info().group_ptr_, 5);
-  ASSERT_EQ(group_ind, 2);
+  ASSERT_EQ(group_ind, 2ul);
 
   EXPECT_ANY_THROW(HostSketchContainer::SearchGroupIndFromRow(p_mat->Info().group_ptr_, 17));
@@ -189,7 +189,7 @@ TEST(HistUtil, DenseCutsCategorical) {
     EXPECT_LT(cuts.MinValues()[0], x_sorted.front());
     EXPECT_GT(cuts_from_sketch.front(), x_sorted.front());
     EXPECT_GE(cuts_from_sketch.back(), x_sorted.back());
-    EXPECT_EQ(cuts_from_sketch.size(), num_categories);
+    EXPECT_EQ(cuts_from_sketch.size(), static_cast<size_t>(num_categories));
   }
 }

View File

@@ -162,7 +162,7 @@ inline void ValidateColumn(const HistogramCuts& cuts, int column_idx,
   // Check all cut points are unique
   EXPECT_EQ(std::set<float>(cuts_begin, cuts_end).size(),
-            cuts_end - cuts_begin);
+            static_cast<size_t>(cuts_end - cuts_begin));
 
   auto unique = std::set<float>(sorted_column.begin(), sorted_column.end());
   if (unique.size() <= num_bins) {
@@ -189,7 +189,7 @@ inline void ValidateCuts(const HistogramCuts& cuts, DMatrix* dmat,
   // Collect data into columns
   std::vector<std::vector<float>> columns(dmat->Info().num_col_);
   for (auto& batch : dmat->GetBatches<SparsePage>()) {
-    ASSERT_GT(batch.Size(), 0);
+    ASSERT_GT(batch.Size(), 0ul);
     for (auto i = 0ull; i < batch.Size(); i++) {
       for (auto e : batch[i]) {
         columns[e.index].push_back(e.fvalue);

View File

@@ -222,7 +222,7 @@ TEST(Json, ParseArray) {
   auto json = Json::Load(StringView{str.c_str(), str.size()});
   json = json["nodes"];
   std::vector<Json> arr = get<JsonArray>(json);
-  ASSERT_EQ(arr.size(), 3);
+  ASSERT_EQ(arr.size(), 3ul);
   Json v0 = arr[0];
   ASSERT_EQ(get<Integer>(v0["depth"]), 3);
   ASSERT_NEAR(get<Number>(v0["gain"]), 10.4866, kRtEps);
@@ -284,7 +284,7 @@ TEST(Json, EmptyArray) {
   std::istringstream iss(str);
   auto json = Json::Load(StringView{str.c_str(), str.size()});
   auto arr = get<JsonArray>(json["leaf_vector"]);
-  ASSERT_EQ(arr.size(), 0);
+  ASSERT_EQ(arr.size(), 0ul);
 }
 
 TEST(Json, Boolean) {
@@ -315,7 +315,7 @@ TEST(Json, AssigningObjects) {
     Json json;
     json = JsonObject();
     json["Okay"] = JsonArray();
-    ASSERT_EQ(get<JsonArray>(json["Okay"]).size(), 0);
+    ASSERT_EQ(get<JsonArray>(json["Okay"]).size(), 0ul);
   }
 
   {

View File

@@ -5,14 +5,122 @@
 namespace xgboost {
 namespace common {
 
+TEST(Quantile, LoadBalance) {
+  size_t constexpr kRows = 1000, kCols = 100;
+  auto m = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix();
+  std::vector<bst_feature_t> cols_ptr;
+  for (auto const &page : m->GetBatches<SparsePage>()) {
+    cols_ptr = HostSketchContainer::LoadBalance(page, kCols, 13);
+  }
+  size_t n_cols = 0;
+  for (size_t i = 1; i < cols_ptr.size(); ++i) {
+    n_cols += cols_ptr[i] - cols_ptr[i - 1];
+  }
+  CHECK_EQ(n_cols, kCols);
+}
+
+void TestDistributedQuantile(size_t rows, size_t cols) {
+  std::string msg {"Skipping AllReduce test"};
+  int32_t constexpr kWorkers = 4;
+  InitRabitContext(msg, kWorkers);
+  auto world = rabit::GetWorldSize();
+  if (world != 1) {
+    ASSERT_EQ(world, kWorkers);
+  } else {
+    return;
+  }
+
+  std::vector<MetaInfo> infos(2);
+  auto& h_weights = infos.front().weights_.HostVector();
+  h_weights.resize(rows);
+  SimpleLCG lcg;
+  SimpleRealUniformDistribution<float> dist(3, 1000);
+  std::generate(h_weights.begin(), h_weights.end(), [&]() { return dist(&lcg); });
+  std::vector<bst_row_t> column_size(cols, rows);
+  size_t n_bins = 64;
+
+  // Generate cuts for distributed environment.
+  auto sparsity = 0.5f;
+  auto rank = rabit::GetRank();
+  HostSketchContainer sketch_distributed(column_size, n_bins, false);
+  auto m = RandomDataGenerator{rows, cols, sparsity}
+               .Seed(rank)
+               .Lower(.0f)
+               .Upper(1.0f)
+               .GenerateDMatrix();
+  for (auto const &page : m->GetBatches<SparsePage>()) {
+    sketch_distributed.PushRowPage(page, m->Info());
+  }
+  HistogramCuts distributed_cuts;
+  sketch_distributed.MakeCuts(&distributed_cuts);
+
+  // Generate cuts for single node environment
+  rabit::Finalize();
+  CHECK_EQ(rabit::GetWorldSize(), 1);
+  std::for_each(column_size.begin(), column_size.end(), [=](auto& size) { size *= world; });
+  HostSketchContainer sketch_on_single_node(column_size, n_bins, false);
+
+  for (auto rank = 0; rank < world; ++rank) {
+    auto m = RandomDataGenerator{rows, cols, sparsity}
+                 .Seed(rank)
+                 .Lower(.0f)
+                 .Upper(1.0f)
+                 .GenerateDMatrix();
+    for (auto const &page : m->GetBatches<SparsePage>()) {
+      sketch_on_single_node.PushRowPage(page, m->Info());
+    }
+  }
+  HistogramCuts single_node_cuts;
+  sketch_on_single_node.MakeCuts(&single_node_cuts);
+
+  auto const& sptrs = single_node_cuts.Ptrs();
+  auto const& dptrs = distributed_cuts.Ptrs();
+  auto const& svals = single_node_cuts.Values();
+  auto const& dvals = distributed_cuts.Values();
+  auto const& smins = single_node_cuts.MinValues();
+  auto const& dmins = distributed_cuts.MinValues();
+
+  ASSERT_EQ(sptrs.size(), dptrs.size());
+  for (size_t i = 0; i < sptrs.size(); ++i) {
+    ASSERT_EQ(sptrs[i], dptrs[i]);
+  }
+  ASSERT_EQ(svals.size(), dvals.size());
+  for (size_t i = 0; i < svals.size(); ++i) {
+    ASSERT_NEAR(svals[i], dvals[i], 2e-2f);
+  }
+  ASSERT_EQ(smins.size(), dmins.size());
+  for (size_t i = 0; i < smins.size(); ++i) {
+    ASSERT_FLOAT_EQ(smins[i], dmins[i]);
+  }
+}
+
+TEST(Quantile, DistributedBasic) {
+#if defined(__unix__)
+  constexpr size_t kRows = 10, kCols = 10;
+  TestDistributedQuantile(kRows, kCols);
+#endif
+}
+
+TEST(Quantile, Distributed) {
+#if defined(__unix__)
+  constexpr size_t kRows = 1000, kCols = 200;
+  TestDistributedQuantile(kRows, kCols);
+#endif
+}
 
 TEST(Quantile, SameOnAllWorkers) {
+#if defined(__unix__)
   std::string msg{"Skipping Quantile AllreduceBasic test"};
-  size_t constexpr kWorkers = 4;
+  int32_t constexpr kWorkers = 4;
   InitRabitContext(msg, kWorkers);
   auto world = rabit::GetWorldSize();
   if (world != 1) {
     CHECK_EQ(world, kWorkers);
   } else {
+    LOG(WARNING) << msg;
     return;
   }
@@ -72,6 +180,8 @@ TEST(Quantile, SameOnAllWorkers) {
       }
     }
   });
+  rabit::Finalize();
+#endif  // defined(__unix__)
 }
 }  // namespace common
 }  // namespace xgboost

View File

@@ -7,7 +7,7 @@
 namespace xgboost {
 namespace common {
 
-inline void InitRabitContext(std::string msg, size_t n_workers) {
+inline void InitRabitContext(std::string msg, int32_t n_workers) {
   auto port = std::getenv("DMLC_TRACKER_PORT");
   std::string port_str;
   if (port) {
@@ -35,7 +35,7 @@ template <typename Fn> void RunWithSeedsAndBins(size_t rows, Fn fn) {
   for (size_t i = 0; i < bins.size() - 1; ++i) {
     bins[i] = i * 35 + 2;
   }
-  bins.back() = rows + 80;  // provide a bin number greater than rows.
+  bins.back() = rows + 160;  // provide a bin number greater than rows.
 
   std::vector<MetaInfo> infos(2);
   auto& h_weights = infos.front().weights_.HostVector();

View File

@@ -501,17 +501,20 @@ class TestWithDask:
                                       num_boost_round=num_rounds,
                                       evals=[(m, 'train')])['history']
         note(history)
-        assert tm.non_increasing(history['train'][dataset.metric])
+        history = history['train'][dataset.metric]
+        assert tm.non_increasing(history)
+        # Make sure that it's decreasing
+        assert history[-1] < history[0]
 
     @given(params=hist_parameter_strategy,
-           num_rounds=strategies.integers(10, 20),
+           num_rounds=strategies.integers(20, 30),
            dataset=tm.dataset_strategy)
     @settings(deadline=None)
     def test_hist(self, params, num_rounds, dataset, client):
         self.run_updater_test(client, params, num_rounds, dataset, 'hist')
 
     @given(params=exact_parameter_strategy,
-           num_rounds=strategies.integers(10, 20),
+           num_rounds=strategies.integers(20, 30),
            dataset=tm.dataset_strategy)
     @settings(deadline=None)
     def test_approx(self, client, params, num_rounds, dataset):
@@ -524,8 +527,7 @@ class TestWithDask:
         exe = None
         for possible_path in {'./testxgboost', './build/testxgboost',
                               '../build/testxgboost',
-                              '../cpu-build/testxgboost',
-                              '../gpu-build/testxgboost'}:
+                              '../cpu-build/testxgboost'}:
             if os.path.exists(possible_path):
                 exe = possible_path
         if exe is None:
@@ -542,7 +544,7 @@ class TestWithDask:
             port = port.split('=')
             env = os.environ.copy()
             env[port[0]] = port[1]
-            return subprocess.run([exe, test], env=env, stdout=subprocess.PIPE)
+            return subprocess.run([exe, test], env=env, capture_output=True)
 
         with LocalCluster(n_workers=4) as cluster:
             with Client(cluster) as client:
@@ -555,6 +557,7 @@ class TestWithDask:
                                             workers=workers,
                                             rabit_args=rabit_args)
                 results = client.gather(futures)
+
             for ret in results:
                 msg = ret.stdout.decode('utf-8')
                 assert msg.find('1 test from Quantile') != -1, msg
@@ -563,4 +566,14 @@ class TestWithDask:
     @pytest.mark.skipif(**tm.no_dask())
     @pytest.mark.gtest
     def test_quantile_basic(self):
+        self.run_quantile('DistributedBasic')
+
+    @pytest.mark.skipif(**tm.no_dask())
+    @pytest.mark.gtest
+    def test_quantile(self):
+        self.run_quantile('Distributed')
+
+    @pytest.mark.skipif(**tm.no_dask())
+    @pytest.mark.gtest
+    def test_quantile_same_on_all_workers(self):
         self.run_quantile('SameOnAllWorkers')