[breaking] Drop single precision histogram (#7892)

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>

Author:       Jiaming Yuan
Date:         2022-05-13 19:54:55 +08:00
Committed by: GitHub
Parent:       c8f9d4b6e6
Commit:       1b6538b4e5
18 changed files with 171 additions and 407 deletions
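
In brief, the change removes the GradientSumT template parameter from the CPU
hist/approx code path: histograms are now always accumulated in double
precision (GradientPairPrecise), the single_precision_histogram parameter and
its CPUHistMakerTrainParam struct are deleted, and the "hist_param" /
"cpu_hist_train_param" entries disappear from the updaters' serialized
configuration. A minimal sketch of the type-level effect; the alias bodies
are reconstructions for illustration, not lines copied from the tree:

    // Before: every histogram type carried the accumulation type as a
    // template parameter, chosen at runtime (float or double).
    template <typename GradientSumT>
    using GHistRowT =
        common::Span<xgboost::detail::GradientPairInternal<GradientSumT>>;

    // After: one non-template type per histogram concept, fixed to double.
    using GHistRow = common::Span<GradientPairPrecise>;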

View File

@@ -22,7 +22,8 @@
namespace xgboost {
namespace tree {
template <typename GradientSumT, typename ExpandEntry> class HistEvaluator {
template <typename ExpandEntry>
class HistEvaluator {
private:
struct NodeEntry {
/*! \brief statistics for node entry */
@@ -57,7 +58,7 @@ template <typename GradientSumT, typename ExpandEntry> class HistEvaluator {
// a non-missing value for the particular feature fid.
template <int d_step, SplitType split_type>
GradStats EnumerateSplit(common::HistogramCuts const &cut, common::Span<size_t const> sorted_idx,
const common::GHistRow<GradientSumT> &hist, bst_feature_t fidx,
const common::GHistRow &hist, bst_feature_t fidx,
bst_node_t nidx,
TreeEvaluator::SplitEvaluator<TrainParam> const &evaluator,
SplitEntry *p_best) const {
@@ -197,10 +198,8 @@ template <typename GradientSumT, typename ExpandEntry> class HistEvaluator {
}
public:
void EvaluateSplits(const common::HistCollection<GradientSumT> &hist,
common::HistogramCuts const &cut,
common::Span<FeatureType const> feature_types,
const RegTree &tree,
void EvaluateSplits(const common::HistCollection &hist, common::HistogramCuts const &cut,
common::Span<FeatureType const> feature_types, const RegTree &tree,
std::vector<ExpandEntry> *p_entries) {
auto& entries = *p_entries;
// All nodes are on the same level, so we can store the shared ptr.
@@ -377,10 +376,10 @@ template <typename GradientSumT, typename ExpandEntry> class HistEvaluator {
*
* \param p_last_tree The last tree being updated by tree updater
*/
template <typename Partitioner, typename GradientSumT, typename ExpandEntry>
template <typename Partitioner, typename ExpandEntry>
void UpdatePredictionCacheImpl(GenericParameter const *ctx, RegTree const *p_last_tree,
std::vector<Partitioner> const &partitioner,
HistEvaluator<GradientSumT, ExpandEntry> const &hist_evaluator,
HistEvaluator<ExpandEntry> const &hist_evaluator,
TrainParam const &param, linalg::VectorView<float> out_preds) {
CHECK_GT(out_preds.Size(), 0U);

View File

@@ -16,17 +16,15 @@
namespace xgboost {
namespace tree {
template <typename GradientSumT, typename ExpandEntry> class HistogramBuilder {
using GradientPairT = xgboost::detail::GradientPairInternal<GradientSumT>;
using GHistRowT = common::GHistRow<GradientSumT>;
template <typename ExpandEntry>
class HistogramBuilder {
/*! \brief cumulative histogram of gradients. */
common::HistCollection<GradientSumT> hist_;
common::HistCollection hist_;
/*! \brief cumulative local parent histogram of gradients. */
common::HistCollection<GradientSumT> hist_local_worker_;
common::GHistBuilder<GradientSumT> builder_;
common::ParallelGHistBuilder<GradientSumT> buffer_;
rabit::Reducer<GradientPairT, GradientPairT::Reduce> reducer_;
common::HistCollection hist_local_worker_;
common::GHistBuilder builder_;
common::ParallelGHistBuilder buffer_;
rabit::Reducer<GradientPairPrecise, GradientPairPrecise::Reduce> reducer_;
BatchParam param_;
int32_t n_threads_{-1};
size_t n_batches_{0};
@@ -51,8 +49,10 @@ template <typename GradientSumT, typename ExpandEntry> class HistogramBuilder {
hist_.Init(total_bins);
hist_local_worker_.Init(total_bins);
buffer_.Init(total_bins);
builder_ = common::GHistBuilder<GradientSumT>(total_bins);
builder_ = common::GHistBuilder(total_bins);
is_distributed_ = is_distributed;
// Workaround s390x gcc 7.5.0
auto DMLC_ATTRIBUTE_UNUSED __force_instantiation = &GradientPairPrecise::Reduce;
}
template <bool any_missing>
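
The __force_instantiation line above works around an s390x gcc 7.5.0 issue by
odr-using GradientPairPrecise::Reduce: taking a function's address forces the
compiler to instantiate and emit it even though nothing in the translation
unit calls it directly. The idiom in miniature, with hypothetical names:

    template <typename T>
    T Accumulate(T a, T b) { return a + b; }

    void ForceInstantiation() {
      // Taking the address odr-uses Accumulate<double>, so the specialization
      // must be instantiated and emitted; [[maybe_unused]] plays the role of
      // DMLC_ATTRIBUTE_UNUSED in the hunk above.
      [[maybe_unused]] auto force = &Accumulate<double>;
    }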
@@ -64,7 +64,7 @@ template <typename GradientSumT, typename ExpandEntry> class HistogramBuilder {
const size_t n_nodes = nodes_for_explicit_hist_build.size();
CHECK_GT(n_nodes, 0);
std::vector<GHistRowT> target_hists(n_nodes);
std::vector<common::GHistRow> target_hists(n_nodes);
for (size_t i = 0; i < n_nodes; ++i) {
const int32_t nid = nodes_for_explicit_hist_build[i].nid;
target_hists[i] = hist_[nid];
@@ -243,9 +243,7 @@ template <typename GradientSumT, typename ExpandEntry> class HistogramBuilder {
public:
/* Getters for tests. */
common::HistCollection<GradientSumT> const& Histogram() {
return hist_;
}
common::HistCollection const &Histogram() { return hist_; }
auto& Buffer() { return buffer_; }
private:

View File

@@ -1,10 +0,0 @@
/*!
* Copyright 2022 XGBoost contributors
*/
#include "param.h"
namespace xgboost {
namespace tree {
DMLC_REGISTER_PARAMETER(CPUHistMakerTrainParam);
} // namespace tree
} // namespace xgboost

View File

@@ -1,23 +0,0 @@
/*!
* Copyright 2021 XGBoost contributors
*/
#ifndef XGBOOST_TREE_HIST_PARAM_H_
#define XGBOOST_TREE_HIST_PARAM_H_
#include "xgboost/parameter.h"
namespace xgboost {
namespace tree {
// training parameters specific to this algorithm
struct CPUHistMakerTrainParam
: public XGBoostParameter<CPUHistMakerTrainParam> {
bool single_precision_histogram;
// declare parameters
DMLC_DECLARE_PARAMETER(CPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
}
};
} // namespace tree
} // namespace xgboost
#endif // XGBOOST_TREE_HIST_PARAM_H_
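
With this header gone, single_precision_histogram is no longer a recognized
training parameter anywhere in the tree. A sketch of the user-visible effect
through the C API — the call is real, but the post-commit behaviour described
in the comment (the key being ignored and reported as unused) is an assumption
based on how XGBoost generally treats unknown parameters:

    #include <xgboost/c_api.h>

    // Before this commit: switched CPU histogram accumulation to float.
    // After: the key matches no parameter, histograms are always built in
    // double precision, and the entry is surfaced as unused. Error handling
    // omitted for brevity.
    void RequestSinglePrecisionHist(BoosterHandle booster) {
      XGBoosterSetParam(booster, "single_precision_histogram", "1");
    }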

View File

@@ -15,7 +15,6 @@
#include "driver.h"
#include "hist/evaluate_splits.h"
#include "hist/histogram.h"
#include "hist/param.h"
#include "param.h"
#include "xgboost/base.h"
#include "xgboost/json.h"
@@ -38,13 +37,12 @@ auto BatchSpec(TrainParam const &p, common::Span<float> hess) {
}
} // anonymous namespace
template <typename GradientSumT>
class GloablApproxBuilder {
protected:
TrainParam param_;
std::shared_ptr<common::ColumnSampler> col_sampler_;
HistEvaluator<GradientSumT, CPUExpandEntry> evaluator_;
HistogramBuilder<GradientSumT, CPUExpandEntry> histogram_builder_;
HistEvaluator<CPUExpandEntry> evaluator_;
HistogramBuilder<CPUExpandEntry> histogram_builder_;
Context const *ctx_;
ObjInfo const task_;
@@ -166,7 +164,7 @@ class GloablApproxBuilder {
}
public:
explicit GloablApproxBuilder(TrainParam param, MetaInfo const &info, GenericParameter const *ctx,
explicit GloablApproxBuilder(TrainParam param, MetaInfo const &info, Context const *ctx,
std::shared_ptr<common::ColumnSampler> column_sampler, ObjInfo task,
common::Monitor *monitor)
: param_{std::move(param)},
@@ -256,10 +254,8 @@ class GloablApproxBuilder {
class GlobalApproxUpdater : public TreeUpdater {
TrainParam param_;
common::Monitor monitor_;
CPUHistMakerTrainParam hist_param_;
// specializations for different histogram precision.
std::unique_ptr<GloablApproxBuilder<float>> f32_impl_;
std::unique_ptr<GloablApproxBuilder<double>> f64_impl_;
std::unique_ptr<GloablApproxBuilder> pimpl_;
// pointer to the last DMatrix, used for update prediction cache.
DMatrix *cached_{nullptr};
std::shared_ptr<common::ColumnSampler> column_sampler_ =
@@ -272,19 +268,14 @@ class GlobalApproxUpdater : public TreeUpdater {
monitor_.Init(__func__);
}
void Configure(const Args &args) override {
param_.UpdateAllowUnknown(args);
hist_param_.UpdateAllowUnknown(args);
}
void Configure(const Args &args) override { param_.UpdateAllowUnknown(args); }
void LoadConfig(Json const &in) override {
auto const &config = get<Object const>(in);
FromJson(config.at("train_param"), &this->param_);
FromJson(config.at("hist_param"), &this->hist_param_);
}
void SaveConfig(Json *p_out) const override {
auto &out = *p_out;
out["train_param"] = ToJson(param_);
out["hist_param"] = ToJson(hist_param_);
}
void InitData(TrainParam const &param, HostDeviceVector<GradientPair> const *gpair,
@@ -316,13 +307,8 @@ class GlobalApproxUpdater : public TreeUpdater {
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
if (hist_param_.single_precision_histogram) {
f32_impl_ = std::make_unique<GloablApproxBuilder<float>>(param_, m->Info(), ctx_,
column_sampler_, task_, &monitor_);
} else {
f64_impl_ = std::make_unique<GloablApproxBuilder<double>>(param_, m->Info(), ctx_,
column_sampler_, task_, &monitor_);
}
pimpl_ = std::make_unique<GloablApproxBuilder>(param_, m->Info(), ctx_, column_sampler_, task_,
&monitor_);
std::vector<GradientPair> h_gpair;
InitData(param_, gpair, &h_gpair);
@@ -335,26 +321,17 @@ class GlobalApproxUpdater : public TreeUpdater {
size_t t_idx = 0;
for (auto p_tree : trees) {
if (hist_param_.single_precision_histogram) {
this->f32_impl_->UpdateTree(m, h_gpair, hess, p_tree, &out_position[t_idx]);
} else {
this->f64_impl_->UpdateTree(m, h_gpair, hess, p_tree, &out_position[t_idx]);
}
this->pimpl_->UpdateTree(m, h_gpair, hess, p_tree, &out_position[t_idx]);
++t_idx;
}
param_.learning_rate = lr;
}
bool UpdatePredictionCache(const DMatrix *data, linalg::VectorView<float> out_preds) override {
if (data != cached_ || (!this->f32_impl_ && !this->f64_impl_)) {
if (data != cached_ || !pimpl_) {
return false;
}
if (hist_param_.single_precision_histogram) {
this->f32_impl_->UpdatePredictionCache(data, out_preds);
} else {
this->f64_impl_->UpdatePredictionCache(data, out_preds);
}
this->pimpl_->UpdatePredictionCache(data, out_preds);
return true;
}
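
Taken together, these hunks delete the runtime dispatch between two
instantiations of the same builder. The removed pattern, reduced to a
standalone sketch (hypothetical names; the real members were f32_impl_ and
f64_impl_):

    #include <memory>

    template <typename FloatT>
    struct BuilderSketch { /* histogram entries accumulated in FloatT */ };

    struct BeforeThisCommit {
      bool single_precision{false};  // came from CPUHistMakerTrainParam
      std::unique_ptr<BuilderSketch<float>> f32;
      std::unique_ptr<BuilderSketch<double>> f64;
      void Init() {
        if (single_precision) {
          f32 = std::make_unique<BuilderSketch<float>>();
        } else {
          f64 = std::make_unique<BuilderSketch<double>>();
        }
      }
    };

    struct AfterThisCommit {
      std::unique_ptr<BuilderSketch<double>> pimpl;  // only the double path remains
      void Init() { pimpl = std::make_unique<BuilderSketch<double>>(); }
    };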

View File

@@ -16,7 +16,6 @@
#include "driver.h"
#include "hist/evaluate_splits.h"
#include "hist/expand_entry.h"
#include "hist/param.h"
#include "param.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/json.h"

View File

@@ -32,7 +32,6 @@ DMLC_REGISTRY_FILE_TAG(updater_quantile_hist);
void QuantileHistMaker::Configure(const Args &args) {
param_.UpdateAllowUnknown(args);
hist_maker_param_.UpdateAllowUnknown(args);
}
void QuantileHistMaker::Update(HostDeviceVector<GradientPair> *gpair, DMatrix *dmat,
@@ -44,24 +43,14 @@ void QuantileHistMaker::Update(HostDeviceVector<GradientPair> *gpair, DMatrix *d
// build tree
const size_t n_trees = trees.size();
if (hist_maker_param_.single_precision_histogram) {
if (!float_builder_) {
float_builder_.reset(new Builder<float>(n_trees, param_, dmat, task_, ctx_));
}
} else {
if (!double_builder_) {
double_builder_.reset(new Builder<double>(n_trees, param_, dmat, task_, ctx_));
}
if (!pimpl_) {
pimpl_.reset(new Builder(n_trees, param_, dmat, task_, ctx_));
}
size_t t_idx{0};
for (auto p_tree : trees) {
auto &t_row_position = out_position[t_idx];
if (hist_maker_param_.single_precision_histogram) {
this->float_builder_->UpdateTree(gpair, dmat, p_tree, &t_row_position);
} else {
this->double_builder_->UpdateTree(gpair, dmat, p_tree, &t_row_position);
}
this->pimpl_->UpdateTree(gpair, dmat, p_tree, &t_row_position);
++t_idx;
}
@@ -70,17 +59,14 @@ void QuantileHistMaker::Update(HostDeviceVector<GradientPair> *gpair, DMatrix *d
bool QuantileHistMaker::UpdatePredictionCache(const DMatrix *data,
linalg::VectorView<float> out_preds) {
if (hist_maker_param_.single_precision_histogram && float_builder_) {
return float_builder_->UpdatePredictionCache(data, out_preds);
} else if (double_builder_) {
return double_builder_->UpdatePredictionCache(data, out_preds);
if (pimpl_) {
return pimpl_->UpdatePredictionCache(data, out_preds);
} else {
return false;
}
}
template <typename GradientSumT>
CPUExpandEntry QuantileHistMaker::Builder<GradientSumT>::InitRoot(
CPUExpandEntry QuantileHistMaker::Builder::InitRoot(
DMatrix *p_fmat, RegTree *p_tree, const std::vector<GradientPair> &gpair_h) {
CPUExpandEntry node(RegTree::kRoot, p_tree->GetDepth(0), 0.0f);
@@ -96,7 +82,7 @@ CPUExpandEntry QuantileHistMaker::Builder<GradientSumT>::InitRoot(
}
{
GradientPairT grad_stat;
GradientPairPrecise grad_stat;
if (p_fmat->IsDense()) {
/**
* Specialized code for dense data: For dense data (with no missing value), the sum
@@ -110,15 +96,14 @@ CPUExpandEntry QuantileHistMaker::Builder<GradientSumT>::InitRoot(
auto hist = this->histogram_builder_->Histogram()[RegTree::kRoot];
auto begin = hist.data();
for (uint32_t i = ibegin; i < iend; ++i) {
GradientPairT const &et = begin[i];
GradientPairPrecise const &et = begin[i];
grad_stat.Add(et.GetGrad(), et.GetHess());
}
} else {
for (auto const &grad : gpair_h) {
grad_stat.Add(grad.GetGrad(), grad.GetHess());
}
rabit::Allreduce<rabit::op::Sum, GradientSumT>(reinterpret_cast<GradientSumT *>(&grad_stat),
2);
rabit::Allreduce<rabit::op::Sum, double>(reinterpret_cast<double *>(&grad_stat), 2);
}
auto weight = evaluator_->InitRoot(GradStats{grad_stat});
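
The new Allreduce call treats grad_stat as a two-element double array via
reinterpret_cast. That is only sound if GradientPairPrecise is laid out as
exactly two contiguous doubles (grad, then hess). The assumption, written as
compile-time checks against a stand-in struct (not the real class):

    #include <type_traits>

    struct GradientPairPreciseLike {  // stand-in for GradientPairPrecise
      double grad_;
      double hess_;
    };

    static_assert(std::is_standard_layout<GradientPairPreciseLike>::value,
                  "reinterpret_cast to double* needs standard layout");
    static_assert(sizeof(GradientPairPreciseLike) == 2 * sizeof(double),
                  "an allreduce count of 2 assumes grad + hess and no padding");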
@@ -140,10 +125,9 @@ CPUExpandEntry QuantileHistMaker::Builder<GradientSumT>::InitRoot(
return node;
}
template <typename GradientSumT>
void QuantileHistMaker::Builder<GradientSumT>::BuildHistogram(
DMatrix *p_fmat, RegTree *p_tree, std::vector<CPUExpandEntry> const &valid_candidates,
std::vector<GradientPair> const &gpair) {
void QuantileHistMaker::Builder::BuildHistogram(DMatrix *p_fmat, RegTree *p_tree,
std::vector<CPUExpandEntry> const &valid_candidates,
std::vector<GradientPair> const &gpair) {
std::vector<CPUExpandEntry> nodes_to_build(valid_candidates.size());
std::vector<CPUExpandEntry> nodes_to_sub(valid_candidates.size());
@@ -173,10 +157,9 @@ void QuantileHistMaker::Builder<GradientSumT>::BuildHistogram(
}
}
template <typename GradientSumT>
void QuantileHistMaker::Builder<GradientSumT>::LeafPartition(
RegTree const &tree, common::Span<GradientPair const> gpair,
std::vector<bst_node_t> *p_out_position) {
void QuantileHistMaker::Builder::LeafPartition(RegTree const &tree,
common::Span<GradientPair const> gpair,
std::vector<bst_node_t> *p_out_position) {
monitor_->Start(__func__);
if (!task_.UpdateTreeLeaf()) {
return;
@@ -187,10 +170,9 @@ void QuantileHistMaker::Builder<GradientSumT>::LeafPartition(
monitor_->Stop(__func__);
}
template <typename GradientSumT>
void QuantileHistMaker::Builder<GradientSumT>::ExpandTree(
DMatrix *p_fmat, RegTree *p_tree, const std::vector<GradientPair> &gpair_h,
HostDeviceVector<bst_node_t> *p_out_position) {
void QuantileHistMaker::Builder::ExpandTree(DMatrix *p_fmat, RegTree *p_tree,
const std::vector<GradientPair> &gpair_h,
HostDeviceVector<bst_node_t> *p_out_position) {
monitor_->Start(__func__);
Driver<CPUExpandEntry> driver(static_cast<TrainParam::TreeGrowPolicy>(param_.grow_policy));
@@ -252,10 +234,9 @@ void QuantileHistMaker::Builder<GradientSumT>::ExpandTree(
monitor_->Stop(__func__);
}
template <typename GradientSumT>
void QuantileHistMaker::Builder<GradientSumT>::UpdateTree(
HostDeviceVector<GradientPair> *gpair, DMatrix *p_fmat, RegTree *p_tree,
HostDeviceVector<bst_node_t> *p_out_position) {
void QuantileHistMaker::Builder::UpdateTree(HostDeviceVector<GradientPair> *gpair, DMatrix *p_fmat,
RegTree *p_tree,
HostDeviceVector<bst_node_t> *p_out_position) {
monitor_->Start(__func__);
std::vector<GradientPair> *gpair_ptr = &(gpair->HostVector());
@@ -272,9 +253,8 @@ void QuantileHistMaker::Builder<GradientSumT>::UpdateTree(
monitor_->Stop(__func__);
}
template <typename GradientSumT>
bool QuantileHistMaker::Builder<GradientSumT>::UpdatePredictionCache(
DMatrix const *data, linalg::VectorView<float> out_preds) const {
bool QuantileHistMaker::Builder::UpdatePredictionCache(DMatrix const *data,
linalg::VectorView<float> out_preds) const {
// p_last_fmat_ is a valid pointer as long as UpdatePredictionCache() is called in
// conjunction with Update().
if (!p_last_fmat_ || !p_last_tree_ || data != p_last_fmat_) {
@@ -287,9 +267,8 @@ bool QuantileHistMaker::Builder<GradientSumT>::UpdatePredictionCache(
return true;
}
template <typename GradientSumT>
void QuantileHistMaker::Builder<GradientSumT>::InitSampling(const DMatrix &fmat,
std::vector<GradientPair> *gpair) {
void QuantileHistMaker::Builder::InitSampling(const DMatrix &fmat,
std::vector<GradientPair> *gpair) {
monitor_->Start(__func__);
const auto &info = fmat.Info();
auto& rnd = common::GlobalRandom();
@@ -325,14 +304,10 @@ void QuantileHistMaker::Builder<GradientSumT>::InitSampling(const DMatrix &fmat,
#endif // XGBOOST_CUSTOMIZE_GLOBAL_PRNG
monitor_->Stop(__func__);
}
template<typename GradientSumT>
size_t QuantileHistMaker::Builder<GradientSumT>::GetNumberOfTrees() {
return n_trees_;
}
size_t QuantileHistMaker::Builder::GetNumberOfTrees() { return n_trees_; }
template <typename GradientSumT>
void QuantileHistMaker::Builder<GradientSumT>::InitData(DMatrix *fmat, const RegTree &tree,
std::vector<GradientPair> *gpair) {
void QuantileHistMaker::Builder::InitData(DMatrix *fmat, const RegTree &tree,
std::vector<GradientPair> *gpair) {
monitor_->Start(__func__);
const auto& info = fmat->Info();
@@ -362,8 +337,8 @@ void QuantileHistMaker::Builder<GradientSumT>::InitData(DMatrix *fmat, const Reg
// store a pointer to the tree
p_last_tree_ = &tree;
evaluator_.reset(new HistEvaluator<GradientSumT, CPUExpandEntry>{
param_, info, this->ctx_->Threads(), column_sampler_});
evaluator_.reset(
new HistEvaluator<CPUExpandEntry>{param_, info, this->ctx_->Threads(), column_sampler_});
monitor_->Stop(__func__);
}
@@ -406,9 +381,6 @@ void HistRowPartitioner::AddSplitsToRowSet(const std::vector<CPUExpandEntry> &no
}
}
template struct QuantileHistMaker::Builder<float>;
template struct QuantileHistMaker::Builder<double>;
XGBOOST_REGISTER_TREE_UPDATER(QuantileHistMaker, "grow_quantile_histmaker")
.describe("Grow tree using quantized histogram.")
.set_body([](GenericParameter const *ctx, ObjInfo task) {

View File

@@ -24,7 +24,6 @@
#include "hist/evaluate_splits.h"
#include "hist/histogram.h"
#include "hist/expand_entry.h"
#include "hist/param.h"
#include "constraints.h"
#include "./param.h"
@@ -236,7 +235,7 @@ inline BatchParam HistBatch(TrainParam const& param) {
class QuantileHistMaker: public TreeUpdater {
public:
explicit QuantileHistMaker(GenericParameter const* ctx, ObjInfo task)
: task_{task}, TreeUpdater(ctx) {}
: TreeUpdater(ctx), task_{task} {}
void Configure(const Args& args) override;
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
@@ -249,12 +248,10 @@ class QuantileHistMaker: public TreeUpdater {
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("train_param"), &this->param_);
FromJson(config.at("cpu_hist_train_param"), &this->hist_maker_param_);
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["train_param"] = ToJson(param_);
out["cpu_hist_train_param"] = ToJson(hist_maker_param_);
}
char const* Name() const override {
@@ -264,22 +261,19 @@ class QuantileHistMaker: public TreeUpdater {
bool HasNodePosition() const override { return true; }
protected:
CPUHistMakerTrainParam hist_maker_param_;
// training parameter
TrainParam param_;
// actual builder that runs the algorithm
template<typename GradientSumT>
struct Builder {
public:
using GradientPairT = xgboost::detail::GradientPairInternal<GradientSumT>;
// constructor
explicit Builder(const size_t n_trees, const TrainParam& param, DMatrix const* fmat,
ObjInfo task, GenericParameter const* ctx)
: n_trees_(n_trees),
param_(param),
p_last_fmat_(fmat),
histogram_builder_{new HistogramBuilder<GradientSumT, CPUExpandEntry>},
histogram_builder_{new HistogramBuilder<CPUExpandEntry>},
task_{task},
ctx_{ctx},
monitor_{std::make_unique<common::Monitor>()} {
@@ -320,14 +314,14 @@ class QuantileHistMaker: public TreeUpdater {
std::vector<GradientPair> gpair_local_;
std::unique_ptr<HistEvaluator<GradientSumT, CPUExpandEntry>> evaluator_;
std::unique_ptr<HistEvaluator<CPUExpandEntry>> evaluator_;
std::vector<HistRowPartitioner> partitioner_;
// back pointers to tree and data matrix
const RegTree* p_last_tree_{nullptr};
DMatrix const* const p_last_fmat_;
std::unique_ptr<HistogramBuilder<GradientSumT, CPUExpandEntry>> histogram_builder_;
std::unique_ptr<HistogramBuilder<CPUExpandEntry>> histogram_builder_;
ObjInfo task_;
// Context for number of threads
GenericParameter const* ctx_;
@@ -336,8 +330,7 @@ class QuantileHistMaker: public TreeUpdater {
};
protected:
std::unique_ptr<Builder<float>> float_builder_;
std::unique_ptr<Builder<double>> double_builder_;
std::unique_ptr<Builder> pimpl_;
ObjInfo task_;
};
} // namespace tree
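
A closing note on the two "template struct QuantileHistMaker::Builder<...>;"
lines removed from updater_quantile_hist.cc above: they were explicit
instantiation definitions, required so that Builder's out-of-line member
definitions in the .cc file were emitted for both float and double. With the
template parameter dropped, ordinary non-template definitions suffice and the
directives go away. The pattern in miniature (hypothetical names):

    // sketch.cc -- member defined out of line in a single translation unit...
    template <typename T>
    struct TemplatedBuilder {
      T Sum(T a, T b);
    };
    template <typename T>
    T TemplatedBuilder<T>::Sum(T a, T b) { return a + b; }

    // ...so explicit instantiation definitions emit the code that other
    // translation units link against.
    template struct TemplatedBuilder<float>;
    template struct TemplatedBuilder<double>;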