[breaking] Drop single precision histogram (#7892)

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
This commit is contained in:
Jiaming Yuan
2022-05-13 19:54:55 +08:00
committed by GitHub
parent c8f9d4b6e6
commit 1b6538b4e5
18 changed files with 171 additions and 407 deletions

View File

@@ -22,7 +22,8 @@
namespace xgboost {
namespace tree {
template <typename GradientSumT, typename ExpandEntry> class HistEvaluator {
template <typename ExpandEntry>
class HistEvaluator {
private:
struct NodeEntry {
/*! \brief statistics for node entry */
@@ -57,7 +58,7 @@ template <typename GradientSumT, typename ExpandEntry> class HistEvaluator {
// a non-missing value for the particular feature fid.
template <int d_step, SplitType split_type>
GradStats EnumerateSplit(common::HistogramCuts const &cut, common::Span<size_t const> sorted_idx,
const common::GHistRow<GradientSumT> &hist, bst_feature_t fidx,
const common::GHistRow &hist, bst_feature_t fidx,
bst_node_t nidx,
TreeEvaluator::SplitEvaluator<TrainParam> const &evaluator,
SplitEntry *p_best) const {
@@ -197,10 +198,8 @@ template <typename GradientSumT, typename ExpandEntry> class HistEvaluator {
}
public:
void EvaluateSplits(const common::HistCollection<GradientSumT> &hist,
common::HistogramCuts const &cut,
common::Span<FeatureType const> feature_types,
const RegTree &tree,
void EvaluateSplits(const common::HistCollection &hist, common::HistogramCuts const &cut,
common::Span<FeatureType const> feature_types, const RegTree &tree,
std::vector<ExpandEntry> *p_entries) {
auto& entries = *p_entries;
// All nodes are on the same level, so we can store the shared ptr.
@@ -377,10 +376,10 @@ template <typename GradientSumT, typename ExpandEntry> class HistEvaluator {
*
* \param p_last_tree The last tree being updated by tree updater
*/
template <typename Partitioner, typename GradientSumT, typename ExpandEntry>
template <typename Partitioner, typename ExpandEntry>
void UpdatePredictionCacheImpl(GenericParameter const *ctx, RegTree const *p_last_tree,
std::vector<Partitioner> const &partitioner,
HistEvaluator<GradientSumT, ExpandEntry> const &hist_evaluator,
HistEvaluator<ExpandEntry> const &hist_evaluator,
TrainParam const &param, linalg::VectorView<float> out_preds) {
CHECK_GT(out_preds.Size(), 0U);

View File

@@ -16,17 +16,15 @@
namespace xgboost {
namespace tree {
template <typename GradientSumT, typename ExpandEntry> class HistogramBuilder {
using GradientPairT = xgboost::detail::GradientPairInternal<GradientSumT>;
using GHistRowT = common::GHistRow<GradientSumT>;
template <typename ExpandEntry>
class HistogramBuilder {
/*! \brief cumulative histogram of gradients. */
common::HistCollection<GradientSumT> hist_;
common::HistCollection hist_;
/*! \brief cumulative local parent histogram of gradients. */
common::HistCollection<GradientSumT> hist_local_worker_;
common::GHistBuilder<GradientSumT> builder_;
common::ParallelGHistBuilder<GradientSumT> buffer_;
rabit::Reducer<GradientPairT, GradientPairT::Reduce> reducer_;
common::HistCollection hist_local_worker_;
common::GHistBuilder builder_;
common::ParallelGHistBuilder buffer_;
rabit::Reducer<GradientPairPrecise, GradientPairPrecise::Reduce> reducer_;
BatchParam param_;
int32_t n_threads_{-1};
size_t n_batches_{0};
@@ -51,8 +49,10 @@ template <typename GradientSumT, typename ExpandEntry> class HistogramBuilder {
hist_.Init(total_bins);
hist_local_worker_.Init(total_bins);
buffer_.Init(total_bins);
builder_ = common::GHistBuilder<GradientSumT>(total_bins);
builder_ = common::GHistBuilder(total_bins);
is_distributed_ = is_distributed;
// Workaround s390x gcc 7.5.0
auto DMLC_ATTRIBUTE_UNUSED __force_instantiation = &GradientPairPrecise::Reduce;
}
template <bool any_missing>
@@ -64,7 +64,7 @@ template <typename GradientSumT, typename ExpandEntry> class HistogramBuilder {
const size_t n_nodes = nodes_for_explicit_hist_build.size();
CHECK_GT(n_nodes, 0);
std::vector<GHistRowT> target_hists(n_nodes);
std::vector<common::GHistRow> target_hists(n_nodes);
for (size_t i = 0; i < n_nodes; ++i) {
const int32_t nid = nodes_for_explicit_hist_build[i].nid;
target_hists[i] = hist_[nid];
@@ -243,9 +243,7 @@ template <typename GradientSumT, typename ExpandEntry> class HistogramBuilder {
public:
/* Getters for tests. */
common::HistCollection<GradientSumT> const& Histogram() {
return hist_;
}
common::HistCollection const &Histogram() { return hist_; }
auto& Buffer() { return buffer_; }
private:

View File

@@ -1,10 +0,0 @@
/*!
* Copyright 2022 XGBoost contributors
*/
#include "param.h"
namespace xgboost {
namespace tree {
DMLC_REGISTER_PARAMETER(CPUHistMakerTrainParam);
} // namespace tree
} // namespace xgboost

View File

@@ -1,23 +0,0 @@
/*!
* Copyright 2021 XGBoost contributors
*/
#ifndef XGBOOST_TREE_HIST_PARAM_H_
#define XGBOOST_TREE_HIST_PARAM_H_
#include "xgboost/parameter.h"
namespace xgboost {
namespace tree {
// training parameters specific to this algorithm
struct CPUHistMakerTrainParam
: public XGBoostParameter<CPUHistMakerTrainParam> {
bool single_precision_histogram;
// declare parameters
DMLC_DECLARE_PARAMETER(CPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
}
};
} // namespace tree
} // namespace xgboost
#endif // XGBOOST_TREE_HIST_PARAM_H_