Make objectives work with vertical distributed and federated learning (#9002)
@@ -85,7 +85,7 @@ void UpdateTreeLeafHost(Context const* ctx, std::vector<bst_node_t> const& posit
   size_t n_leaf = nidx.size();
   if (nptr.empty()) {
     std::vector<float> quantiles;
-    UpdateLeafValues(&quantiles, nidx, learning_rate, p_tree);
+    UpdateLeafValues(&quantiles, nidx, info, learning_rate, p_tree);
     return;
   }
 
@@ -99,39 +99,46 @@ void UpdateTreeLeafHost(Context const* ctx, std::vector<bst_node_t> const& posit
   auto h_predt = linalg::MakeTensorView(ctx, predt.ConstHostSpan(), info.num_row_,
                                         predt.Size() / info.num_row_);
 
-  // loop over each leaf
-  common::ParallelFor(quantiles.size(), ctx->Threads(), [&](size_t k) {
-    auto nidx = h_node_idx[k];
-    CHECK(tree[nidx].IsLeaf());
-    CHECK_LT(k + 1, h_node_ptr.size());
-    size_t n = h_node_ptr[k + 1] - h_node_ptr[k];
-    auto h_row_set = common::Span<size_t const>{ridx}.subspan(h_node_ptr[k], n);
+  if (!info.IsVerticalFederated() || collective::GetRank() == 0) {
+    // loop over each leaf
+    common::ParallelFor(quantiles.size(), ctx->Threads(), [&](size_t k) {
+      auto nidx = h_node_idx[k];
+      CHECK(tree[nidx].IsLeaf());
+      CHECK_LT(k + 1, h_node_ptr.size());
+      size_t n = h_node_ptr[k + 1] - h_node_ptr[k];
+      auto h_row_set = common::Span<size_t const>{ridx}.subspan(h_node_ptr[k], n);
 
-    auto h_labels = info.labels.HostView().Slice(linalg::All(), IdxY(info, group_idx));
-    auto h_weights = linalg::MakeVec(&info.weights_);
+      auto h_labels = info.labels.HostView().Slice(linalg::All(), IdxY(info, group_idx));
+      auto h_weights = linalg::MakeVec(&info.weights_);
 
-    auto iter = common::MakeIndexTransformIter([&](size_t i) -> float {
-      auto row_idx = h_row_set[i];
-      return h_labels(row_idx) - h_predt(row_idx, group_idx);
-    });
-    auto w_it = common::MakeIndexTransformIter([&](size_t i) -> float {
-      auto row_idx = h_row_set[i];
-      return h_weights(row_idx);
-    });
+      auto iter = common::MakeIndexTransformIter([&](size_t i) -> float {
+        auto row_idx = h_row_set[i];
+        return h_labels(row_idx) - h_predt(row_idx, group_idx);
+      });
+      auto w_it = common::MakeIndexTransformIter([&](size_t i) -> float {
+        auto row_idx = h_row_set[i];
+        return h_weights(row_idx);
+      });
 
-    float q{0};
-    if (info.weights_.Empty()) {
-      q = common::Quantile(ctx, alpha, iter, iter + h_row_set.size());
-    } else {
-      q = common::WeightedQuantile(ctx, alpha, iter, iter + h_row_set.size(), w_it);
-    }
-    if (std::isnan(q)) {
-      CHECK(h_row_set.empty());
-    }
-    quantiles.at(k) = q;
-  });
+      float q{0};
+      if (info.weights_.Empty()) {
+        q = common::Quantile(ctx, alpha, iter, iter + h_row_set.size());
+      } else {
+        q = common::WeightedQuantile(ctx, alpha, iter, iter + h_row_set.size(), w_it);
+      }
+      if (std::isnan(q)) {
+        CHECK(h_row_set.empty());
+      }
+      quantiles.at(k) = q;
+    });
+  }
 
-  UpdateLeafValues(&quantiles, nidx, learning_rate, p_tree);
+  if (info.IsVerticalFederated()) {
+    collective::Broadcast(static_cast<void*>(quantiles.data()), quantiles.size() * sizeof(float),
+                          0);
+  }
+
+  UpdateLeafValues(&quantiles, nidx, info, learning_rate, p_tree);
 }
 
 #if !defined(XGBOOST_USE_CUDA)
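Note: the host path above follows a compute-on-rank-0-then-broadcast pattern. Under a vertical federated split only rank 0 holds the labels, so only it can compute the per-leaf quantiles; the vector is then broadcast so every worker assigns the same leaf values. Below is a minimal, self-contained sketch of that pattern; GetRank and Broadcast are single-process stand-ins, not the XGBoost collective API.

#include <cstddef>
#include <limits>
#include <vector>

// Stand-ins for collective::GetRank() / collective::Broadcast(); a real run
// would use the communicator of the distributed or federated cluster.
static int GetRank() { return 0; }
static void Broadcast(void* /*data*/, std::size_t /*bytes*/, int /*root*/) {}

// Only the label-holding worker (rank 0 in a vertical federated split)
// computes the per-leaf quantiles; the result is broadcast so all workers
// apply identical leaf values.
std::vector<float> LeafQuantiles(bool vertical_federated, std::size_t n_leaf) {
  std::vector<float> quantiles(n_leaf, std::numeric_limits<float>::quiet_NaN());
  if (!vertical_federated || GetRank() == 0) {
    for (std::size_t k = 0; k < n_leaf; ++k) {
      quantiles[k] = 0.5f;  // placeholder for the real quantile of leaf k
    }
  }
  if (vertical_federated) {
    Broadcast(quantiles.data(), quantiles.size() * sizeof(float), /*root=*/0);
  }
  return quantiles;
}

int main() { return LeafQuantiles(true, 4).size() == 4 ? 0 : 1; }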
@@ -151,7 +151,7 @@ void UpdateTreeLeafDevice(Context const* ctx, common::Span<bst_node_t const> pos
 
   if (nptr.Empty()) {
     std::vector<float> quantiles;
-    UpdateLeafValues(&quantiles, nidx.ConstHostVector(), learning_rate, p_tree);
+    UpdateLeafValues(&quantiles, nidx.ConstHostVector(), info, learning_rate, p_tree);
   }
 
   HostDeviceVector<float> quantiles;
@@ -186,7 +186,7 @@ void UpdateTreeLeafDevice(Context const* ctx, common::Span<bst_node_t const> pos
                              w_it + d_weights.size(), &quantiles);
   }
 
-  UpdateLeafValues(&quantiles.HostVector(), nidx.ConstHostVector(), learning_rate, p_tree);
+  UpdateLeafValues(&quantiles.HostVector(), nidx.ConstHostVector(), info, learning_rate, p_tree);
 }
 }  // namespace detail
 }  // namespace obj
@@ -36,13 +36,15 @@ inline void FillMissingLeaf(std::vector<bst_node_t> const& maybe_missing,
 }
 
 inline void UpdateLeafValues(std::vector<float>* p_quantiles, std::vector<bst_node_t> const& nidx,
-                             float learning_rate, RegTree* p_tree) {
+                             MetaInfo const& info, float learning_rate, RegTree* p_tree) {
   auto& tree = *p_tree;
   auto& quantiles = *p_quantiles;
   auto const& h_node_idx = nidx;
 
   size_t n_leaf{h_node_idx.size()};
-  collective::Allreduce<collective::Operation::kMax>(&n_leaf, 1);
+  if (info.IsRowSplit()) {
+    collective::Allreduce<collective::Operation::kMax>(&n_leaf, 1);
+  }
   CHECK(quantiles.empty() || quantiles.size() == n_leaf);
   if (quantiles.empty()) {
     quantiles.resize(n_leaf, std::numeric_limits<float>::quiet_NaN());
@@ -52,12 +54,16 @@ inline void UpdateLeafValues(std::vector<float>* p_quantiles, std::vector<bst_no
   std::vector<int32_t> n_valids(quantiles.size());
   std::transform(quantiles.cbegin(), quantiles.cend(), n_valids.begin(),
                  [](float q) { return static_cast<int32_t>(!std::isnan(q)); });
-  collective::Allreduce<collective::Operation::kSum>(n_valids.data(), n_valids.size());
+  if (info.IsRowSplit()) {
+    collective::Allreduce<collective::Operation::kSum>(n_valids.data(), n_valids.size());
+  }
   // convert to 0 for all reduce
   std::replace_if(
       quantiles.begin(), quantiles.end(), [](float q) { return std::isnan(q); }, 0.f);
   // use the mean value
-  collective::Allreduce<collective::Operation::kSum>(quantiles.data(), quantiles.size());
+  if (info.IsRowSplit()) {
+    collective::Allreduce<collective::Operation::kSum>(quantiles.data(), quantiles.size());
+  }
   for (size_t i = 0; i < n_leaf; ++i) {
     if (n_valids[i] > 0) {
       quantiles[i] /= static_cast<float>(n_valids[i]);
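Note: the IsRowSplit() guards above reflect where the data actually lives. Under a row (horizontal) split each worker holds a disjoint subset of rows, so per-leaf partial statistics must be summed across workers; under a column or vertical split every worker already sees all rows, and summing again would scale the result by the number of workers. A small illustrative sketch of that distinction, with toy stand-ins rather than the XGBoost collective API:

#include <cstddef>
#include <vector>

// Toy stand-in for collective::Allreduce<kSum> between two workers.
static void AllreduceSum(std::vector<float>* local, std::vector<float> const& peer) {
  for (std::size_t i = 0; i < local->size(); ++i) (*local)[i] += peer[i];
}

// Row split: partial per-leaf sums from disjoint row sets must be merged.
// Column split: each worker already has the full sums; reducing would double them.
std::vector<float> CombineLeafSums(bool row_split, std::vector<float> mine,
                                   std::vector<float> const& peer) {
  if (row_split) {
    AllreduceSum(&mine, peer);
  }
  return mine;
}

int main() {
  // Worker 0 sums rows {0, 1}, worker 1 sums rows {2, 3}: merged totals {4, 6}.
  auto merged = CombineLeafSums(true, {1.f, 2.f}, {3.f, 4.f});
  return (merged[0] == 4.f && merged[1] == 6.f) ? 0 : 1;
}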
@@ -35,7 +35,10 @@ class QuantileRegression : public ObjFunction {
   bst_target_t Targets(MetaInfo const& info) const override {
     auto const& alpha = param_.quantile_alpha.Get();
     CHECK_EQ(alpha.size(), alpha_.Size()) << "The objective is not yet configured.";
-    CHECK_EQ(info.labels.Shape(1), 1) << "Multi-target is not yet supported by the quantile loss.";
+    if (!info.IsVerticalFederated() || collective::GetRank() == 0) {
+      CHECK_EQ(info.labels.Shape(1), 1)
+          << "Multi-target is not yet supported by the quantile loss.";
+    }
     CHECK(!alpha.empty());
     // We have some placeholders for multi-target in the quantile loss. But it's not
     // supported as the gbtree doesn't know how to slice the gradient and there's no 3-dim
@@ -167,8 +170,10 @@ class QuantileRegression : public ObjFunction {
     common::Mean(ctx_, *base_score, &temp);
     double meanq = temp(0) * sw;
 
-    collective::Allreduce<collective::Operation::kSum>(&meanq, 1);
-    collective::Allreduce<collective::Operation::kSum>(&sw, 1);
+    if (info.IsRowSplit()) {
+      collective::Allreduce<collective::Operation::kSum>(&meanq, 1);
+      collective::Allreduce<collective::Operation::kSum>(&sw, 1);
+    }
     meanq /= (sw + kRtEps);
     base_score->Reshape(1);
     base_score->Data()->Fill(meanq);
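Note: for the intercept above, each worker contributes its local mean scaled by its local weight sum; after the (row-split-only) kSum all-reduces, dividing the reduced meanq by the reduced sw yields the global weighted mean used as the base score. A tiny worked sketch with made-up numbers, not XGBoost code:

#include <cstdio>

int main() {
  // Hypothetical two-worker row split:
  //   worker 0: local mean 2.0 over weight sum 3.0
  //   worker 1: local mean 4.0 over weight sum 1.0
  double meanq = 2.0 * 3.0 + 4.0 * 1.0;  // value after Allreduce<kSum> on meanq
  double sw = 3.0 + 1.0;                 // value after Allreduce<kSum> on sw
  double const kRtEps = 1e-6;            // small epsilon, as in the hunk above
  double base_score = meanq / (sw + kRtEps);  // ~2.5, the global weighted mean
  std::printf("base_score = %f\n", base_score);
  return 0;
}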
@@ -728,8 +728,10 @@ class MeanAbsoluteError : public ObjFunction {
     std::transform(linalg::cbegin(out), linalg::cend(out), linalg::begin(out),
                    [w](float v) { return v * w; });
 
-    collective::Allreduce<collective::Operation::kSum>(out.Values().data(), out.Values().size());
-    collective::Allreduce<collective::Operation::kSum>(&w, 1);
+    if (info.IsRowSplit()) {
+      collective::Allreduce<collective::Operation::kSum>(out.Values().data(), out.Values().size());
+      collective::Allreduce<collective::Operation::kSum>(&w, 1);
+    }
 
     if (common::CloseTo(w, 0.0)) {
       // Mostly for handling empty dataset test.