Optimize CPU sketch allreduce for sparse data. (#6009)

* Bypass RABIT serialization reducer and use custom allgather-based merging.
This commit is contained in:
Jiaming Yuan
2020-08-19 10:03:45 +08:00
committed by GitHub
parent 90355b4f00
commit 29b7fea572
10 changed files with 357 additions and 87 deletions

View File

@@ -23,9 +23,9 @@ TEST(CAPI, XGDMatrixCreateFromMatDT) {
std::shared_ptr<xgboost::DMatrix> *dmat =
static_cast<std::shared_ptr<xgboost::DMatrix> *>(handle);
xgboost::MetaInfo &info = (*dmat)->Info();
ASSERT_EQ(info.num_col_, 2);
ASSERT_EQ(info.num_row_, 3);
ASSERT_EQ(info.num_nonzero_, 6);
ASSERT_EQ(info.num_col_, 2ul);
ASSERT_EQ(info.num_row_, 3ul);
ASSERT_EQ(info.num_nonzero_, 6ul);
for (const auto &batch : (*dmat)->GetBatches<xgboost::SparsePage>()) {
ASSERT_EQ(batch[0][0].fvalue, 0.0f);
@@ -38,9 +38,9 @@ TEST(CAPI, XGDMatrixCreateFromMatDT) {
}
TEST(CAPI, XGDMatrixCreateFromMatOmp) {
std::vector<int> num_rows = {100, 11374, 15000};
std::vector<bst_ulong> num_rows = {100, 11374, 15000};
for (auto row : num_rows) {
int num_cols = 50;
bst_ulong num_cols = 50;
int num_missing = 5;
DMatrixHandle handle;
std::vector<float> data(num_cols * row, 1.5);

View File

@@ -159,10 +159,10 @@ TEST(CutsBuilder, SearchGroupInd) {
HistogramCuts hmat;
size_t group_ind = HostSketchContainer::SearchGroupIndFromRow(p_mat->Info().group_ptr_, 0);
ASSERT_EQ(group_ind, 0);
ASSERT_EQ(group_ind, 0ul);
group_ind = HostSketchContainer::SearchGroupIndFromRow(p_mat->Info().group_ptr_, 5);
ASSERT_EQ(group_ind, 2);
ASSERT_EQ(group_ind, 2ul);
EXPECT_ANY_THROW(HostSketchContainer::SearchGroupIndFromRow(p_mat->Info().group_ptr_, 17));
@@ -189,7 +189,7 @@ TEST(HistUtil, DenseCutsCategorical) {
EXPECT_LT(cuts.MinValues()[0], x_sorted.front());
EXPECT_GT(cuts_from_sketch.front(), x_sorted.front());
EXPECT_GE(cuts_from_sketch.back(), x_sorted.back());
EXPECT_EQ(cuts_from_sketch.size(), num_categories);
EXPECT_EQ(cuts_from_sketch.size(), static_cast<size_t>(num_categories));
}
}
}

View File

@@ -162,7 +162,7 @@ inline void ValidateColumn(const HistogramCuts& cuts, int column_idx,
// Check all cut points are unique
EXPECT_EQ(std::set<float>(cuts_begin, cuts_end).size(),
cuts_end - cuts_begin);
static_cast<size_t>(cuts_end - cuts_begin));
auto unique = std::set<float>(sorted_column.begin(), sorted_column.end());
if (unique.size() <= num_bins) {
@@ -189,7 +189,7 @@ inline void ValidateCuts(const HistogramCuts& cuts, DMatrix* dmat,
// Collect data into columns
std::vector<std::vector<float>> columns(dmat->Info().num_col_);
for (auto& batch : dmat->GetBatches<SparsePage>()) {
ASSERT_GT(batch.Size(), 0);
ASSERT_GT(batch.Size(), 0ul);
for (auto i = 0ull; i < batch.Size(); i++) {
for (auto e : batch[i]) {
columns[e.index].push_back(e.fvalue);

View File

@@ -222,7 +222,7 @@ TEST(Json, ParseArray) {
auto json = Json::Load(StringView{str.c_str(), str.size()});
json = json["nodes"];
std::vector<Json> arr = get<JsonArray>(json);
ASSERT_EQ(arr.size(), 3);
ASSERT_EQ(arr.size(), 3ul);
Json v0 = arr[0];
ASSERT_EQ(get<Integer>(v0["depth"]), 3);
ASSERT_NEAR(get<Number>(v0["gain"]), 10.4866, kRtEps);
@@ -284,7 +284,7 @@ TEST(Json, EmptyArray) {
std::istringstream iss(str);
auto json = Json::Load(StringView{str.c_str(), str.size()});
auto arr = get<JsonArray>(json["leaf_vector"]);
ASSERT_EQ(arr.size(), 0);
ASSERT_EQ(arr.size(), 0ul);
}
TEST(Json, Boolean) {
@@ -315,7 +315,7 @@ TEST(Json, AssigningObjects) {
Json json;
json = JsonObject();
json["Okay"] = JsonArray();
ASSERT_EQ(get<JsonArray>(json["Okay"]).size(), 0);
ASSERT_EQ(get<JsonArray>(json["Okay"]).size(), 0ul);
}
{

View File

@@ -5,14 +5,122 @@
namespace xgboost {
namespace common {
// Verify LoadBalance produces a complete partition of the columns: summing
// the width of every partition span must recover the total column count.
TEST(Quantile, LoadBalance) {
  constexpr size_t kRows = 1000;
  constexpr size_t kCols = 100;
  auto dmat = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix();
  std::vector<bst_feature_t> partition_ptr;
  for (auto const& page : dmat->GetBatches<SparsePage>()) {
    partition_ptr = HostSketchContainer::LoadBalance(page, kCols, 13);
  }
  // partition_ptr holds offsets; the difference of adjacent entries is the
  // number of columns assigned to one worker thread.
  size_t total_cols = 0;
  for (size_t i = 1; i < partition_ptr.size(); ++i) {
    total_cols += partition_ptr[i] - partition_ptr[i - 1];
  }
  CHECK_EQ(total_cols, kCols);
}
// Compares quantile cuts built in a simulated distributed setting (kWorkers
// rabit workers, each pushing its own data shard) against cuts built on a
// single node over the union of all shards.  Cut pointers must match exactly;
// cut values get a small absolute tolerance; min values must match as floats.
// Requires a rabit tracker (DMLC_TRACKER_PORT); silently returns when the
// world size stays 1, i.e. no tracker is available.
void TestDistributedQuantile(size_t rows, size_t cols) {
std::string msg {"Skipping AllReduce test"};
int32_t constexpr kWorkers = 4;
InitRabitContext(msg, kWorkers);
auto world = rabit::GetWorldSize();
if (world != 1) {
ASSERT_EQ(world, kWorkers);
} else {
// Not running under a tracker; nothing to test.
return;
}
// Weights for the first MetaInfo.  The LCG is not seeded per rank, so every
// worker presumably generates the same weight vector — TODO confirm.
std::vector<MetaInfo> infos(2);
auto& h_weights = infos.front().weights_.HostVector();
h_weights.resize(rows);
SimpleLCG lcg;
SimpleRealUniformDistribution<float> dist(3, 1000);
std::generate(h_weights.begin(), h_weights.end(), [&]() { return dist(&lcg); });
// Assume every column holds `rows` entries on this worker.
std::vector<bst_row_t> column_size(cols, rows);
size_t n_bins = 64;
// Generate cuts for distributed environment.
auto sparsity = 0.5f;
auto rank = rabit::GetRank();
HostSketchContainer sketch_distributed(column_size, n_bins, false);
// Seeding by rank gives each worker a distinct shard that can be
// regenerated below for the single-node reference.
auto m = RandomDataGenerator{rows, cols, sparsity}
.Seed(rank)
.Lower(.0f)
.Upper(1.0f)
.GenerateDMatrix();
for (auto const &page : m->GetBatches<SparsePage>()) {
sketch_distributed.PushRowPage(page, m->Info());
}
HistogramCuts distributed_cuts;
sketch_distributed.MakeCuts(&distributed_cuts);
// Generate cuts for single node environment
rabit::Finalize();
CHECK_EQ(rabit::GetWorldSize(), 1);
// The single node sees all shards, so each column is `world` times larger.
std::for_each(column_size.begin(), column_size.end(), [=](auto& size) { size *= world; });
HostSketchContainer sketch_on_single_node(column_size, n_bins, false);
// Replay every worker's shard locally by reusing rank as the seed.
for (auto rank = 0; rank < world; ++rank) {
auto m = RandomDataGenerator{rows, cols, sparsity}
.Seed(rank)
.Lower(.0f)
.Upper(1.0f)
.GenerateDMatrix();
for (auto const &page : m->GetBatches<SparsePage>()) {
sketch_on_single_node.PushRowPage(page, m->Info());
}
}
HistogramCuts single_node_cuts;
sketch_on_single_node.MakeCuts(&single_node_cuts);
auto const& sptrs = single_node_cuts.Ptrs();
auto const& dptrs = distributed_cuts.Ptrs();
auto const& svals = single_node_cuts.Values();
auto const& dvals = distributed_cuts.Values();
auto const& smins = single_node_cuts.MinValues();
auto const& dmins = distributed_cuts.MinValues();
// Cut boundaries must agree exactly.
ASSERT_EQ(sptrs.size(), dptrs.size());
for (size_t i = 0; i < sptrs.size(); ++i) {
ASSERT_EQ(sptrs[i], dptrs[i]);
}
// Cut values may differ slightly due to sketch merging order.
ASSERT_EQ(svals.size(), dvals.size());
for (size_t i = 0; i < svals.size(); ++i) {
ASSERT_NEAR(svals[i], dvals[i], 2e-2f);
}
ASSERT_EQ(smins.size(), dmins.size());
for (size_t i = 0; i < smins.size(); ++i) {
ASSERT_FLOAT_EQ(smins[i], dmins[i]);
}
}
// Smoke test: distributed sketching on a tiny dataset.  The rabit tracker
// path is only exercised on unix platforms.
TEST(Quantile, DistributedBasic) {
#if defined(__unix__)
  size_t constexpr kRows = 10;
  size_t constexpr kCols = 10;
  TestDistributedQuantile(kRows, kCols);
#endif  // defined(__unix__)
}
// Full-size variant of DistributedBasic: same distributed-vs-single-node
// comparison on a larger dataset.  Unix-only, like the other rabit tests.
TEST(Quantile, Distributed) {
#if defined(__unix__)
  size_t constexpr kRows = 1000;
  size_t constexpr kCols = 200;
  TestDistributedQuantile(kRows, kCols);
#endif  // defined(__unix__)
}
TEST(Quantile, SameOnAllWorkers) {
#if defined(__unix__)
std::string msg{"Skipping Quantile AllreduceBasic test"};
size_t constexpr kWorkers = 4;
int32_t constexpr kWorkers = 4;
InitRabitContext(msg, kWorkers);
auto world = rabit::GetWorldSize();
if (world != 1) {
CHECK_EQ(world, kWorkers);
} else {
LOG(WARNING) << msg;
return;
}
@@ -72,6 +180,8 @@ TEST(Quantile, SameOnAllWorkers) {
}
}
});
rabit::Finalize();
#endif // defined(__unix__)
}
} // namespace common
} // namespace xgboost

View File

@@ -7,7 +7,7 @@
namespace xgboost {
namespace common {
inline void InitRabitContext(std::string msg, size_t n_workers) {
inline void InitRabitContext(std::string msg, int32_t n_workers) {
auto port = std::getenv("DMLC_TRACKER_PORT");
std::string port_str;
if (port) {
@@ -35,7 +35,7 @@ template <typename Fn> void RunWithSeedsAndBins(size_t rows, Fn fn) {
for (size_t i = 0; i < bins.size() - 1; ++i) {
bins[i] = i * 35 + 2;
}
bins.back() = rows + 80; // provide a bin number greater than rows.
bins.back() = rows + 160; // provide a bin number greater than rows.
std::vector<MetaInfo> infos(2);
auto& h_weights = infos.front().weights_.HostVector();