More refactoring to take advantage of collective aggregators (#9081)

This commit is contained in:
Rong Ou committed 2023-04-25 12:36:09 -07:00 (committed by GitHub)
parent 49ccae7fb9
commit a320b402a5
10 changed files with 81 additions and 81 deletions

View File

@@ -196,6 +196,14 @@ class MetaInfo {
    */
   bool IsVerticalFederated() const;
+
+  /*!
+   * \brief A convenient method to check if the MetaInfo should contain labels.
+   *
+   * Normally we assume labels are available everywhere. The only exception is in vertical federated
+   * learning where labels are only available on worker 0.
+   */
+  bool ShouldHaveLabels() const;

  private:
   void SetInfoFromHost(Context const& ctx, StringView key, Json arr);
   void SetInfoFromCUDA(Context const& ctx, StringView key, Json arr);

View File

@@ -31,18 +31,16 @@ namespace collective {
  * @param buffer The buffer storing the results.
  * @param size The size of the buffer.
  * @param function The function used to calculate the results.
- * @param args Arguments to the function.
  */
-template <typename Function, typename T, typename... Args>
-void ApplyWithLabels(MetaInfo const& info, T* buffer, size_t size, Function&& function,
-                     Args&&... args) {
+template <typename Function>
+void ApplyWithLabels(MetaInfo const& info, void* buffer, size_t size, Function&& function) {
   if (info.IsVerticalFederated()) {
     // We assume labels are only available on worker 0, so the calculation is done there and result
     // broadcast to other workers.
     std::string message;
     if (collective::GetRank() == 0) {
       try {
-        std::forward<Function>(function)(std::forward<Args>(args)...);
+        std::forward<Function>(function)();
       } catch (dmlc::Error& e) {
         message = e.what();
       }
@@ -55,7 +53,7 @@ void ApplyWithLabels(MetaInfo const& info, T* buffer, size_t size, Function&& fu
       LOG(FATAL) << &message[0];
     }
   } else {
-    std::forward<Function>(function)(std::forward<Args>(args)...);
+    std::forward<Function>(function)();
   }
 }
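
Note on the refactor above: dropping the variadic Args&&... means call sites now pass a
nullary callable and capture whatever they need instead. A minimal sketch of the resulting
call pattern, assuming the callable fills the buffer in place (the helper name
FillFromLabels and the surrounding setup are hypothetical, not from this commit):

    std::vector<float> results(n, 0.0f);  // n is illustrative
    collective::ApplyWithLabels(info, results.data(), results.size() * sizeof(float), [&] {
      // Under vertical federated learning this body runs on worker 0 only, and the
      // filled buffer is then broadcast to the other workers; otherwise it runs locally.
      FillFromLabels(info, &results);  // hypothetical label-dependent computation
    });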

View File

@@ -45,20 +45,18 @@ HistogramCuts SketchOnDMatrix(DMatrix *m, int32_t max_bins, int32_t n_threads, b
   if (!use_sorted) {
     HostSketchContainer container(max_bins, m->Info().feature_types.ConstHostSpan(), reduced,
-                                  HostSketchContainer::UseGroup(info),
-                                  m->Info().IsColumnSplit(), n_threads);
+                                  HostSketchContainer::UseGroup(info), n_threads);
     for (auto const& page : m->GetBatches<SparsePage>()) {
       container.PushRowPage(page, info, hessian);
     }
-    container.MakeCuts(&out);
+    container.MakeCuts(m->Info(), &out);
   } else {
     SortedSketchContainer container{max_bins, m->Info().feature_types.ConstHostSpan(), reduced,
-                                    HostSketchContainer::UseGroup(info),
-                                    m->Info().IsColumnSplit(), n_threads};
+                                    HostSketchContainer::UseGroup(info), n_threads};
     for (auto const& page : m->GetBatches<SortedCSCPage>()) {
       container.PushColPage(page, info, hessian);
     }
-    container.MakeCuts(&out);
+    container.MakeCuts(m->Info(), &out);
   }

   return out;

View File

@@ -6,6 +6,7 @@
 #include <limits>
 #include <utility>

+#include "../collective/aggregator.h"
 #include "../collective/communicator-inl.h"
 #include "../data/adapter.h"
 #include "categorical.h"
@@ -18,13 +19,12 @@ template <typename WQSketch>
 SketchContainerImpl<WQSketch>::SketchContainerImpl(std::vector<bst_row_t> columns_size,
                                                    int32_t max_bins,
                                                    Span<FeatureType const> feature_types,
-                                                   bool use_group, bool col_split,
+                                                   bool use_group,
                                                    int32_t n_threads)
     : feature_types_(feature_types.cbegin(), feature_types.cend()),
       columns_size_{std::move(columns_size)},
       max_bins_{max_bins},
       use_group_ind_{use_group},
-      col_split_{col_split},
       n_threads_{n_threads} {
   monitor_.Init(__func__);
   CHECK_NE(columns_size_.size(), 0);
@@ -202,10 +202,10 @@ void SketchContainerImpl<WQSketch>::GatherSketchInfo(
 }

 template <typename WQSketch>
-void SketchContainerImpl<WQSketch>::AllreduceCategories() {
+void SketchContainerImpl<WQSketch>::AllreduceCategories(MetaInfo const& info) {
   auto world_size = collective::GetWorldSize();
   auto rank = collective::GetRank();
-  if (world_size == 1 || col_split_) {
+  if (world_size == 1 || info.IsColumnSplit()) {
     return;
   }
@@ -273,6 +273,7 @@ void SketchContainerImpl<WQSketch>::AllreduceCategories() {
 template <typename WQSketch>
 void SketchContainerImpl<WQSketch>::AllReduce(
+    MetaInfo const& info,
     std::vector<typename WQSketch::SummaryContainer> *p_reduced,
     std::vector<int32_t>* p_num_cuts) {
   monitor_.Start(__func__);
@@ -281,7 +282,7 @@ void SketchContainerImpl<WQSketch>::AllReduce(
   collective::Allreduce<collective::Operation::kMax>(&n_columns, 1);
   CHECK_EQ(n_columns, sketches_.size()) << "Number of columns differs across workers";

-  AllreduceCategories();
+  AllreduceCategories(info);

   auto& num_cuts = *p_num_cuts;
   CHECK_EQ(num_cuts.size(), 0);
@@ -292,10 +293,7 @@ void SketchContainerImpl<WQSketch>::AllReduce(
   // Prune the intermediate num cuts for synchronization.
   std::vector<bst_row_t> global_column_size(columns_size_);
-  if (!col_split_) {
-    collective::Allreduce<collective::Operation::kSum>(global_column_size.data(),
-                                                       global_column_size.size());
-  }
+  collective::GlobalSum(info, &global_column_size);

   ParallelFor(sketches_.size(), n_threads_, [&](size_t i) {
     int32_t intermediate_num_cuts = static_cast<int32_t>(
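
The collective::GlobalSum call above absorbs the open-coded "skip the sum under a column
split" guard. A minimal sketch of the aggregator's assumed semantics, consistent with how
this hunk uses it (the real helper lives in the aggregator header included above; the exact
signature here is an assumption):

    // Sketch only: sum `result` across workers, unless the data is split by column,
    // in which case every worker already holds all rows for its feature slice and a
    // global sum would double-count.
    template <typename T>
    void GlobalSum(MetaInfo const& info, std::vector<T>* result) {
      if (!info.IsColumnSplit()) {
        collective::Allreduce<collective::Operation::kSum>(result->data(), result->size());
      }
    }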
@@ -316,7 +314,7 @@ void SketchContainerImpl<WQSketch>::AllReduce(
   });

   auto world = collective::GetWorldSize();
-  if (world == 1 || col_split_) {
+  if (world == 1 || info.IsColumnSplit()) {
     monitor_.Stop(__func__);
     return;
   }
@@ -382,11 +380,11 @@ auto AddCategories(std::set<float> const &categories, HistogramCuts *cuts) {
 }

 template <typename WQSketch>
-void SketchContainerImpl<WQSketch>::MakeCuts(HistogramCuts* cuts) {
+void SketchContainerImpl<WQSketch>::MakeCuts(MetaInfo const& info, HistogramCuts* cuts) {
   monitor_.Start(__func__);
   std::vector<typename WQSketch::SummaryContainer> reduced;
   std::vector<int32_t> num_cuts;
-  this->AllReduce(&reduced, &num_cuts);
+  this->AllReduce(info, &reduced, &num_cuts);

   cuts->min_vals_.HostVector().resize(sketches_.size(), 0.0f);
   std::vector<typename WQSketch::SummaryContainer> final_summaries(reduced.size());
@@ -443,8 +441,8 @@ template class SketchContainerImpl<WXQuantileSketch<float, float>>;

 HostSketchContainer::HostSketchContainer(int32_t max_bins, common::Span<FeatureType const> ft,
                                          std::vector<size_t> columns_size, bool use_group,
-                                         bool col_split, int32_t n_threads)
-    : SketchContainerImpl{columns_size, max_bins, ft, use_group, col_split, n_threads} {
+                                         int32_t n_threads)
+    : SketchContainerImpl{columns_size, max_bins, ft, use_group, n_threads} {
   monitor_.Init(__func__);
   ParallelFor(sketches_.size(), n_threads_, Sched::Auto(), [&](auto i) {
     auto n_bins = std::min(static_cast<size_t>(max_bins_), columns_size_[i]);

View File

@@ -789,7 +789,6 @@ class SketchContainerImpl {
   std::vector<bst_row_t> columns_size_;
   int32_t max_bins_;
   bool use_group_ind_{false};
-  bool col_split_;
   int32_t n_threads_;
   bool has_categorical_{false};
   Monitor monitor_;
@@ -802,7 +801,7 @@ class SketchContainerImpl {
   * \param use_group whether is assigned to group to data instance.
   */
  SketchContainerImpl(std::vector<bst_row_t> columns_size, int32_t max_bins,
-                     common::Span<FeatureType const> feature_types, bool use_group, bool col_split,
+                     common::Span<FeatureType const> feature_types, bool use_group,
                      int32_t n_threads);

  static bool UseGroup(MetaInfo const &info) {
@@ -829,7 +828,7 @@ class SketchContainerImpl {
                          std::vector<bst_row_t> *p_sketches_scan,
                          std::vector<typename WQSketch::Entry> *p_global_sketches);
   // Merge sketches from all workers.
-  void AllReduce(std::vector<typename WQSketch::SummaryContainer> *p_reduced,
+  void AllReduce(MetaInfo const& info, std::vector<typename WQSketch::SummaryContainer> *p_reduced,
                  std::vector<int32_t> *p_num_cuts);

   template <typename Batch, typename IsValid>
@@ -883,11 +882,11 @@ class SketchContainerImpl {
   /* \brief Push a CSR matrix. */
   void PushRowPage(SparsePage const &page, MetaInfo const &info, Span<float const> hessian = {});

-  void MakeCuts(HistogramCuts* cuts);
+  void MakeCuts(MetaInfo const& info, HistogramCuts* cuts);

  private:
   // Merge all categories from other workers.
-  void AllreduceCategories();
+  void AllreduceCategories(MetaInfo const& info);
 };

 class HostSketchContainer : public SketchContainerImpl<WQuantileSketch<float, float>> {
@@ -896,8 +895,7 @@ class HostSketchContainer : public SketchContainerImpl<WQuantileSketch<float, fl
  public:
   HostSketchContainer(int32_t max_bins, common::Span<FeatureType const> ft,
-                      std::vector<size_t> columns_size, bool use_group, bool col_split,
-                      int32_t n_threads);
+                      std::vector<size_t> columns_size, bool use_group, int32_t n_threads);

   template <typename Batch>
   void PushAdapterBatch(Batch const &batch, size_t base_rowid, MetaInfo const &info, float missing);
@@ -993,9 +991,9 @@ class SortedSketchContainer : public SketchContainerImpl<WXQuantileSketch<float,
  public:
   explicit SortedSketchContainer(int32_t max_bins, common::Span<FeatureType const> ft,
-                                 std::vector<size_t> columns_size, bool use_group, bool col_split,
+                                 std::vector<size_t> columns_size, bool use_group,
                                  int32_t n_threads)
-      : SketchContainerImpl{columns_size, max_bins, ft, use_group, col_split, n_threads} {
+      : SketchContainerImpl{columns_size, max_bins, ft, use_group, n_threads} {
     monitor_.Init(__func__);
     sketches_.resize(columns_size.size());
     size_t i = 0;

View File

@@ -774,6 +774,10 @@ bool MetaInfo::IsVerticalFederated() const {
   return collective::IsFederated() && IsColumnSplit();
 }

+bool MetaInfo::ShouldHaveLabels() const {
+  return !IsVerticalFederated() || collective::GetRank() == 0;
+}
+
 using DMatrixThreadLocal =
     dmlc::ThreadLocalStore<std::map<DMatrix const *, XGBAPIThreadLocalEntry>>;
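
ShouldHaveLabels() centralizes the recurring !IsVerticalFederated() || GetRank() == 0
check; the quantile objective hunk further down switches to it. A sketch of the intended
call pattern (the validation body is illustrative, not part of this commit):

    if (info.ShouldHaveLabels()) {
      // Only workers that actually hold labels validate or consume them; under
      // vertical federated learning that is worker 0 alone.
      CHECK_EQ(info.labels.Shape(0), info.num_row_) << "Invalid shape of labels.";
    }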

View File

@@ -213,7 +213,7 @@ void IterativeDMatrix::InitFromCPU(DataIterHandle iter_handle, float missing,
       SyncFeatureType(&h_ft);
       p_sketch.reset(new common::HostSketchContainer{
           batch_param_.max_bin, h_ft, column_sizes, !proxy->Info().group_ptr_.empty(),
-          proxy->Info().IsColumnSplit(), ctx_.Threads()});
+          ctx_.Threads()});
     }
     HostAdapterDispatch(proxy, [&](auto const& batch) {
       proxy->Info().num_nonzero_ = batch_nnz[i];
@@ -228,7 +228,7 @@ void IterativeDMatrix::InitFromCPU(DataIterHandle iter_handle, float missing,
   CHECK_EQ(accumulated_rows, Info().num_row_);
   CHECK(p_sketch);
-  p_sketch->MakeCuts(&cuts);
+  p_sketch->MakeCuts(Info(), &cuts);
 }
 if (!h_ft.empty()) {
   CHECK_EQ(h_ft.size(), n_features);

View File

@@ -99,7 +99,8 @@ void UpdateTreeLeafHost(Context const* ctx, std::vector<bst_node_t> const& posit
   auto h_predt = linalg::MakeTensorView(ctx, predt.ConstHostSpan(), info.num_row_,
                                         predt.Size() / info.num_row_);

-  if (!info.IsVerticalFederated() || collective::GetRank() == 0) {
+  collective::ApplyWithLabels(
+      info, static_cast<void*>(quantiles.data()), quantiles.size() * sizeof(float), [&] {
     // loop over each leaf
     common::ParallelFor(quantiles.size(), ctx->Threads(), [&](size_t k) {
       auto nidx = h_node_idx[k];
@@ -131,12 +132,7 @@ void UpdateTreeLeafHost(Context const* ctx, std::vector<bst_node_t> const& posit
       }
       quantiles.at(k) = q;
     });
-  }
-
-  if (info.IsVerticalFederated()) {
-    collective::Broadcast(static_cast<void*>(quantiles.data()), quantiles.size() * sizeof(float),
-                          0);
-  }
+      });

   UpdateLeafValues(&quantiles, nidx, info, learning_rate, p_tree);
 }

View File

@@ -36,7 +36,7 @@ class QuantileRegression : public ObjFunction {
   bst_target_t Targets(MetaInfo const& info) const override {
     auto const& alpha = param_.quantile_alpha.Get();
     CHECK_EQ(alpha.size(), alpha_.Size()) << "The objective is not yet configured.";
-    if (!info.IsVerticalFederated() || collective::GetRank() == 0) {
+    if (info.ShouldHaveLabels()) {
       CHECK_EQ(info.labels.Shape(1), 1)
           << "Multi-target is not yet supported by the quantile loss.";
     }

View File

@@ -73,7 +73,7 @@ void DoTestDistributedQuantile(size_t rows, size_t cols) {
   auto hess = Span<float const>{hessian};

   ContainerType<use_column> sketch_distributed(n_bins, m->Info().feature_types.ConstHostSpan(),
-                                               column_size, false, false, AllThreadsForTest());
+                                               column_size, false, AllThreadsForTest());

   if (use_column) {
     for (auto const& page : m->GetBatches<SortedCSCPage>()) {
@@ -86,7 +86,7 @@ void DoTestDistributedQuantile(size_t rows, size_t cols) {
   }

   HistogramCuts distributed_cuts;
-  sketch_distributed.MakeCuts(&distributed_cuts);
+  sketch_distributed.MakeCuts(m->Info(), &distributed_cuts);

   // Generate cuts for single node environment
   collective::Finalize();
@@ -94,7 +94,7 @@ void DoTestDistributedQuantile(size_t rows, size_t cols) {
   std::for_each(column_size.begin(), column_size.end(), [=](auto& size) { size *= world; });
   m->Info().num_row_ = world * rows;
   ContainerType<use_column> sketch_on_single_node(n_bins, m->Info().feature_types.ConstHostSpan(),
-                                                  column_size, false, false, AllThreadsForTest());
+                                                  column_size, false, AllThreadsForTest());
   m->Info().num_row_ = rows;

   for (auto rank = 0; rank < world; ++rank) {
@@ -117,7 +117,7 @@ void DoTestDistributedQuantile(size_t rows, size_t cols) {
   }

   HistogramCuts single_node_cuts;
-  sketch_on_single_node.MakeCuts(&single_node_cuts);
+  sketch_on_single_node.MakeCuts(m->Info(), &single_node_cuts);

   auto const& sptrs = single_node_cuts.Ptrs();
   auto const& dptrs = distributed_cuts.Ptrs();
@@ -205,7 +205,7 @@ void DoTestColSplitQuantile(size_t rows, size_t cols) {
   HistogramCuts distributed_cuts;
   {
     ContainerType<use_column> sketch_distributed(n_bins, m->Info().feature_types.ConstHostSpan(),
-                                                 column_size, false, true, AllThreadsForTest());
+                                                 column_size, false, AllThreadsForTest());

     std::vector<float> hessian(rows, 1.0);
     auto hess = Span<float const>{hessian};
@@ -219,7 +219,7 @@ void DoTestColSplitQuantile(size_t rows, size_t cols) {
       }
     }

-    sketch_distributed.MakeCuts(&distributed_cuts);
+    sketch_distributed.MakeCuts(m->Info(), &distributed_cuts);
   }

   // Generate cuts for single node environment
@@ -228,7 +228,7 @@ void DoTestColSplitQuantile(size_t rows, size_t cols) {
   HistogramCuts single_node_cuts;
   {
     ContainerType<use_column> sketch_on_single_node(n_bins, m->Info().feature_types.ConstHostSpan(),
-                                                    column_size, false, false, AllThreadsForTest());
+                                                    column_size, false, AllThreadsForTest());

     std::vector<float> hessian(rows, 1.0);
     auto hess = Span<float const>{hessian};
@@ -242,7 +242,7 @@ void DoTestColSplitQuantile(size_t rows, size_t cols) {
       }
     }

-    sketch_on_single_node.MakeCuts(&single_node_cuts);
+    sketch_on_single_node.MakeCuts(m->Info(), &single_node_cuts);
   }

   auto const& sptrs = single_node_cuts.Ptrs();