Serialize expand entry for allgather. (#9702)

This commit is contained in:
Jiaming Yuan
2023-10-24 14:33:28 +08:00
committed by GitHub
parent ee8b29c843
commit 7a02facc9d
14 changed files with 336 additions and 76 deletions

View File

@@ -1,31 +1,36 @@
/*!
* Copyright 2020 by XGBoost Contributors
/**
* Copyright 2020-2023, XGBoost Contributors
*/
#ifndef EXPAND_ENTRY_CUH_
#define EXPAND_ENTRY_CUH_
#include <xgboost/span.h>
#include <limits> // for numeric_limits
#include <utility> // for move
#include "../param.h"
#include "../updater_gpu_common.cuh"
#include "xgboost/base.h" // for bst_node_t
namespace xgboost {
namespace tree {
namespace xgboost::tree {
struct GPUExpandEntry {
int nid;
int depth;
bst_node_t nid;
bst_node_t depth;
DeviceSplitCandidate split;
float base_weight { std::numeric_limits<float>::quiet_NaN() };
float left_weight { std::numeric_limits<float>::quiet_NaN() };
float right_weight { std::numeric_limits<float>::quiet_NaN() };
float base_weight{std::numeric_limits<float>::quiet_NaN()};
float left_weight{std::numeric_limits<float>::quiet_NaN()};
float right_weight{std::numeric_limits<float>::quiet_NaN()};
GPUExpandEntry() = default;
XGBOOST_DEVICE GPUExpandEntry(int nid, int depth, DeviceSplitCandidate split,
float base, float left, float right)
: nid(nid), depth(depth), split(std::move(split)), base_weight{base},
left_weight{left}, right_weight{right} {}
bool IsValid(const TrainParam& param, int num_leaves) const {
XGBOOST_DEVICE GPUExpandEntry(bst_node_t nid, bst_node_t depth, DeviceSplitCandidate split,
float base, float left, float right)
: nid(nid),
depth(depth),
split(std::move(split)),
base_weight{base},
left_weight{left},
right_weight{right} {}
[[nodiscard]] bool IsValid(TrainParam const& param, bst_node_t num_leaves) const {
if (split.loss_chg <= kRtEps) return false;
if (split.left_sum.GetQuantisedHess() == 0 || split.right_sum.GetQuantisedHess() == 0) {
return false;
@@ -42,17 +47,11 @@ struct GPUExpandEntry {
return true;
}
bst_float GetLossChange() const {
return split.loss_chg;
}
[[nodiscard]] float GetLossChange() const { return split.loss_chg; }
int GetNodeId() const {
return nid;
}
[[nodiscard]] bst_node_t GetNodeId() const { return nid; }
int GetDepth() const {
return depth;
}
[[nodiscard]] bst_node_t GetDepth() const { return depth; }
friend std::ostream& operator<<(std::ostream& os, const GPUExpandEntry& e) {
os << "GPUExpandEntry: \n";
@@ -63,9 +62,69 @@ struct GPUExpandEntry {
os << "right_sum: " << e.split.right_sum << "\n";
return os;
}
};
} // namespace tree
} // namespace xgboost
/**
 * @brief Serialize this expand entry into a JSON object.
 *
 * Writes the node id, depth, the GPU-specific leaf weights, and the device
 * split candidate (including the quantised gradient sums) into *p_out.
 * The layout mirrors exactly what Load() reads back.
 */
void Save(Json* p_out) const {
  auto& out = *p_out;
  // Node identity.
  out["nid"] = Integer{this->nid};
  out["depth"] = Integer{this->depth};
  // GPU specific: leaf weights for this node and its children.
  out["base_weight"] = this->base_weight;
  out["left_weight"] = this->left_weight;
  out["right_weight"] = this->right_weight;
  /**
   * Handle split
   */
  out["split"] = Object{};
  auto& j_split = out["split"];
  j_split["loss_chg"] = this->split.loss_chg;
  j_split["sindex"] = Integer{this->split.findex};
  j_split["split_value"] = this->split.fvalue;
  // Categorical split threshold and flag.
  j_split["thresh"] = Integer{this->split.thresh};
  j_split["is_cat"] = Boolean{this->split.is_cat};
  /**
   * Gradients
   */
  // Quantised gradient statistics, each stored as a [grad, hess] pair.
  // Note these live at the top level of `out`, not under "split".
  auto save_sum = [&out](std::string const& key, GradientPairInt64 const& sum) {
    out[key] = I64Array{2};
    auto& arr = get<I64Array>(out[key]);
    arr[0] = sum.GetQuantisedGrad();
    arr[1] = sum.GetQuantisedHess();
  };
  save_sum("left_sum", this->split.left_sum);
  save_sum("right_sum", this->split.right_sum);
}
/**
 * @brief Restore this expand entry from JSON produced by Save().
 */
void Load(Json const& in) {
  // Node identity.
  this->nid = get<Integer const>(in["nid"]);
  this->depth = get<Integer const>(in["depth"]);
  // GPU specific: leaf weights.
  this->base_weight = get<Number const>(in["base_weight"]);
  this->left_weight = get<Number const>(in["left_weight"]);
  this->right_weight = get<Number const>(in["right_weight"]);
  /**
   * Handle split
   */
  auto const& j_split = in["split"];
  this->split.loss_chg = get<Number const>(j_split["loss_chg"]);
  this->split.findex = get<Integer const>(j_split["sindex"]);
  this->split.fvalue = get<Number const>(j_split["split_value"]);
  // Categorical split threshold and flag.
  this->split.thresh = get<Integer const>(j_split["thresh"]);
  this->split.is_cat = get<Boolean const>(j_split["is_cat"]);
  /**
   * Gradients
   */
  // Quantised gradient sums are stored at the top level as [grad, hess].
  auto load_sum = [&in](std::string const& key) {
    auto const& arr = get<I64Array const>(in[key]);
    return GradientPairInt64{arr[0], arr[1]};
  };
  this->split.left_sum = load_sum("left_sum");
  this->split.right_sum = load_sum("right_sum");
}
};
} // namespace xgboost::tree
#endif // EXPAND_ENTRY_CUH_

View File

@@ -1,16 +1,20 @@
/**
* Copyright 2021-2023 XGBoost contributors
* Copyright 2021-2023, XGBoost Contributors
*/
#ifndef XGBOOST_TREE_HIST_EXPAND_ENTRY_H_
#define XGBOOST_TREE_HIST_EXPAND_ENTRY_H_
#include <algorithm> // for all_of
#include <ostream> // for ostream
#include <utility> // for move
#include <vector> // for vector
#include <algorithm> // for all_of
#include <ostream> // for ostream
#include <string> // for string
#include <type_traits> // for add_const_t
#include <utility> // for move
#include <vector> // for vector
#include "../param.h" // for SplitEntry, SplitEntryContainer, TrainParam
#include "xgboost/base.h" // for GradientPairPrecise, bst_node_t
#include "../../common/type.h" // for EraseType
#include "../param.h" // for SplitEntry, SplitEntryContainer, TrainParam
#include "xgboost/base.h" // for GradientPairPrecise, bst_node_t
#include "xgboost/json.h" // for Json
namespace xgboost::tree {
/**
@@ -29,6 +33,66 @@ struct ExpandEntryImpl {
[[nodiscard]] bool IsValid(TrainParam const& param, bst_node_t num_leaves) const {
return static_cast<Impl const*>(this)->IsValidImpl(param, num_leaves);
}
// Serialize the implementation-independent part of an expand entry (node id,
// depth, and the split candidate) into JSON.  CRTP: gradient statistics are
// delegated to Impl::SaveGrad, since their representation differs between the
// CPU and multi-target entries.  The layout matches what Load() expects.
void Save(Json* p_out) const {
auto& out = *p_out;
auto self = static_cast<Impl const*>(this);
out["nid"] = Integer{this->nid};
out["depth"] = Integer{this->depth};
/**
 * Handle split
 */
out["split"] = Object{};
auto& split = out["split"];
split["loss_chg"] = self->split.loss_chg;
split["sindex"] = Integer{self->split.sindex};
split["split_value"] = self->split.split_value;
// Categorical bitset: view the u32 word storage as raw bytes so it can be
// stored in a JSON u8 array, then copy byte-by-byte.
// NOTE(review): presumably this round-trips only between same-endian hosts --
// confirm against common::EraseType/RestoreType's contract.
auto const& cat_bits = self->split.cat_bits;
auto s_cat_bits = common::Span{cat_bits.data(), cat_bits.size()};
split["cat_bits"] = U8Array{s_cat_bits.size_bytes()};
auto& j_cat_bits = get<U8Array>(split["cat_bits"]);
using T = typename decltype(self->split.cat_bits)::value_type;
auto erased =
common::EraseType<std::add_const_t<T>, std::add_const_t<std::uint8_t>>(s_cat_bits);
for (std::size_t i = 0; i < erased.size(); ++i) {
j_cat_bits[i] = erased[i];
}
split["is_cat"] = Boolean{self->split.is_cat};
// Gradient sums are implementation-specific; dispatch to the derived type.
self->SaveGrad(&split);
}
// Restore the implementation-independent part of an expand entry from JSON
// produced by Save().  CRTP counterpart of Save(): gradient statistics are
// delegated to Impl::LoadGrad.
void Load(Json const& in) {
auto self = static_cast<Impl*>(this);
this->nid = get<Integer const>(in["nid"]);
this->depth = get<Integer const>(in["depth"]);
/**
 * Handle split
 */
auto const& split = in["split"];
self->split.loss_chg = get<Number const>(split["loss_chg"]);
self->split.sindex = get<Integer const>(split["sindex"]);
self->split.split_value = get<Number const>(split["split_value"]);
// Reverse of the byte-erasure done in Save(): reinterpret the serialized u8
// array back as u32 words, then copy into the bitset storage.
auto const& j_cat_bits = get<U8Array const>(split["cat_bits"]);
using T = typename decltype(self->split.cat_bits)::value_type;
auto restored = common::RestoreType<std::add_const_t<T>>(
common::Span{j_cat_bits.data(), j_cat_bits.size()});
self->split.cat_bits.resize(restored.size());
for (std::size_t i = 0; i < restored.size(); ++i) {
self->split.cat_bits[i] = restored[i];
}
self->split.is_cat = get<Boolean const>(split["is_cat"]);
// Gradient sums are implementation-specific; dispatch to the derived type.
self->LoadGrad(split);
}
};
struct CPUExpandEntry : public ExpandEntryImpl<CPUExpandEntry> {
@@ -39,6 +103,24 @@ struct CPUExpandEntry : public ExpandEntryImpl<CPUExpandEntry> {
: ExpandEntryImpl{nidx, depth}, split(std::move(split)) {}
CPUExpandEntry(bst_node_t nidx, bst_node_t depth) : ExpandEntryImpl{nidx, depth} {}
// Serialize the left/right gradient statistics as two-element f32 arrays,
// each stored as [grad, hess].  Counterpart of LoadGrad.
void SaveGrad(Json* p_out) const {
  auto& out = *p_out;
  auto write_pair = [&out](std::string const& key, GradStats const& stats) {
    out[key] = F32Array{2};
    auto& values = get<F32Array>(out[key]);
    values[0] = stats.GetGrad();
    values[1] = stats.GetHess();
  };
  write_pair("left_sum", this->split.left_sum);
  write_pair("right_sum", this->split.right_sum);
}
// Restore the left/right gradient statistics written by SaveGrad.
void LoadGrad(Json const& in) {
  auto read_pair = [&in](std::string const& key) {
    auto const& values = get<F32Array const>(in[key]);
    return GradStats{values[0], values[1]};
  };
  this->split.left_sum = read_pair("left_sum");
  this->split.right_sum = read_pair("right_sum");
}
[[nodiscard]] bool IsValidImpl(TrainParam const& param, bst_node_t num_leaves) const {
if (split.loss_chg <= kRtEps) return false;
if (split.left_sum.GetHess() == 0 || split.right_sum.GetHess() == 0) {
@@ -88,6 +170,32 @@ struct MultiExpandEntry : public ExpandEntryImpl<MultiExpandEntry> {
MultiExpandEntry() = default;
MultiExpandEntry(bst_node_t nidx, bst_node_t depth) : ExpandEntryImpl{nidx, depth} {}
// Serialize the per-target gradient sums as flat f32 arrays laid out as
// [g0, h0, g1, h1, ...].  Counterpart of LoadGrad.
void SaveGrad(Json* p_out) const {
  auto& out = *p_out;
  auto flatten = [&out](std::string const& key,
                        std::vector<GradientPairPrecise> const& sums) {
    out[key] = F32Array{sums.size() * 2};
    auto& flat = get<F32Array>(out[key]);
    std::size_t j = 0;
    for (auto const& pair : sums) {
      flat[j++] = pair.GetGrad();
      flat[j++] = pair.GetHess();
    }
  };
  flatten("left_sum", this->split.left_sum);
  flatten("right_sum", this->split.right_sum);
}
// Restore the per-target gradient sums from the flat [g0, h0, g1, h1, ...]
// arrays written by SaveGrad.
void LoadGrad(Json const& in) {
  auto unflatten = [&in](std::string const& key,
                         std::vector<GradientPairPrecise>* p_sums) {
    auto const& flat = get<F32Array const>(in[key]);
    auto& sums = *p_sums;
    sums.resize(flat.size() / 2);
    for (std::size_t i = 0; i < sums.size(); ++i) {
      sums[i] = GradientPairPrecise{flat[2 * i], flat[2 * i + 1]};
    }
  };
  unflatten("left_sum", &this->split.left_sum);
  unflatten("right_sum", &this->split.right_sum);
}
[[nodiscard]] bool IsValidImpl(TrainParam const& param, bst_node_t num_leaves) const {
if (split.loss_chg <= kRtEps) return false;
auto is_zero = [](auto const& sum) {

View File

@@ -401,7 +401,7 @@ struct SplitEntryContainer {
/*! \brief split index */
bst_feature_t sindex{0};
bst_float split_value{0.0f};
std::vector<uint32_t> cat_bits;
std::vector<std::uint32_t> cat_bits;
bool is_cat{false};
GradientT left_sum;

View File

@@ -14,9 +14,7 @@
#include "gpu_hist/histogram.cuh"
#include "param.h"
namespace xgboost {
namespace tree {
namespace xgboost::tree {
struct GPUTrainingParam {
// minimum amount of hessian(weight) allowed in a child
float min_child_weight;
@@ -136,5 +134,4 @@ struct SumCallbackOp {
return old_prefix;
}
};
} // namespace tree
} // namespace xgboost
} // namespace xgboost::tree