Fix compiler warnings. (#7974)

- Remove unused parameters. Many warnings are still unaddressed; currently, the warnings coming from dmlc-core dominate the build log.
- Remove the `distributed` parameter from the metric interface; metrics now query `rabit::IsDistributed()` directly where needed.
- Fix some warnings about signed/unsigned comparison.
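For downstream code that implements custom metrics, the interface change is the most visible part of this commit. Below is a minimal, hypothetical sketch (stub types standing in for `HostDeviceVector<float>` and `MetaInfo`; `MetricStub`/`ExampleMetric` are not real XGBoost classes) of how an `Eval` override changes: the `distributed` flag disappears from the signature, and the metric either calls `rabit::Allreduce` unconditionally or guards it with `rabit::IsDistributed()`, exactly as the hunks below do.

```cpp
// Hypothetical sketch of the Metric::Eval change. Only the rabit calls mirror
// the real diff; all other names are illustrative stand-ins.
#include <rabit/rabit.h>

struct PredsStub {};     // stand-in for HostDeviceVector<bst_float>
struct MetaInfoStub {};  // stand-in for MetaInfo

struct MetricStub {
  // Old shape: virtual double Eval(PredsStub const&, MetaInfoStub const&,
  //                                bool distributed) = 0;
  // New shape: the distributed flag is gone.
  virtual double Eval(PredsStub const& preds, MetaInfoStub const& info) = 0;
  virtual ~MetricStub() = default;
};

struct ExampleMetric : MetricStub {
  double Eval(PredsStub const&, MetaInfoStub const&) override {
    double dat[2]{0.0, 0.0};  // residue and weight, as in the element-wise metrics
    if (rabit::IsDistributed()) {  // or call Allreduce unconditionally
      rabit::Allreduce<rabit::op::Sum>(dat, 2);
    }
    return dat[1] > 0.0 ? dat[0] / dat[1] : 0.0;
  }
};
```

Calling `Allreduce` unconditionally, as several hunks below now do, is also fine: the reduction degenerates to a no-op when only one worker is running. The explicit `rabit::IsDistributed()` check is used where the guard is kept (e.g. the pseudo-Huber and ranking metrics) and, negated, where a metric must refuse distributed evaluation outright (AMS, Cox).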
Author: Jiaming Yuan (committed by GitHub)
Date: 2022-06-06 22:56:25 +08:00
Parent: d48123d23b
Commit: 1a33b50a0d
46 changed files with 149 additions and 189 deletions

View File

@@ -171,14 +171,14 @@ inline HistogramCuts SketchOnDMatrix(DMatrix* m, int32_t max_bins, int32_t n_thr
if (!use_sorted) {
HostSketchContainer container(max_bins, m->Info(), reduced, HostSketchContainer::UseGroup(info),
-hessian, n_threads);
+n_threads);
for (auto const& page : m->GetBatches<SparsePage>()) {
container.PushRowPage(page, info, hessian);
}
container.MakeCuts(&out);
} else {
-SortedSketchContainer container{
-max_bins, m->Info(), reduced, HostSketchContainer::UseGroup(info), hessian, n_threads};
+SortedSketchContainer container{max_bins, m->Info(), reduced,
+HostSketchContainer::UseGroup(info), n_threads};
for (auto const& page : m->GetBatches<SortedCSCPage>()) {
container.PushColPage(page, info, hessian);
}

View File

@@ -168,8 +168,8 @@ class PartitionBuilder {
const size_t n_left = child_nodes_sizes.first;
const size_t n_right = child_nodes_sizes.second;
-SetNLeftElems(node_in_set, range.begin(), range.end(), n_left);
-SetNRightElems(node_in_set, range.begin(), range.end(), n_right);
+SetNLeftElems(node_in_set, range.begin(), n_left);
+SetNRightElems(node_in_set, range.begin(), n_right);
}
/**
@@ -188,8 +188,7 @@ class PartitionBuilder {
*/
template <typename Pred>
void PartitionRange(const size_t node_in_set, const size_t nid, common::Range1d range,
-bst_feature_t fidx, common::RowSetCollection* p_row_set_collection,
-Pred pred) {
+common::RowSetCollection* p_row_set_collection, Pred pred) {
auto& row_set_collection = *p_row_set_collection;
const size_t* p_ridx = row_set_collection[nid].begin;
common::Span<const size_t> ridx(p_ridx + range.begin(), p_ridx + range.end());
@@ -200,8 +199,8 @@ class PartitionBuilder {
const size_t n_left = child_nodes_sizes.first;
const size_t n_right = child_nodes_sizes.second;
-this->SetNLeftElems(node_in_set, range.begin(), range.end(), n_left);
-this->SetNRightElems(node_in_set, range.begin(), range.end(), n_right);
+this->SetNLeftElems(node_in_set, range.begin(), n_left);
+this->SetNRightElems(node_in_set, range.begin(), n_right);
}
// allocate thread local memory, should be called for each specific task
@@ -223,12 +222,12 @@ class PartitionBuilder {
return { mem_blocks_.at(task_idx)->Right(), end - begin };
}
-void SetNLeftElems(int nid, size_t begin, size_t end, size_t n_left) {
+void SetNLeftElems(int nid, size_t begin, size_t n_left) {
size_t task_idx = GetTaskIdx(nid, begin);
mem_blocks_.at(task_idx)->n_left = n_left;
}
-void SetNRightElems(int nid, size_t begin, size_t end, size_t n_right) {
+void SetNRightElems(int nid, size_t begin, size_t n_right) {
size_t task_idx = GetTaskIdx(nid, begin);
mem_blocks_.at(task_idx)->n_right = n_right;
}

View File

@@ -543,7 +543,7 @@ template class SketchContainerImpl<WXQuantileSketch<float, float>>;
HostSketchContainer::HostSketchContainer(int32_t max_bins, MetaInfo const &info,
std::vector<size_t> columns_size, bool use_group,
-Span<float const> hessian, int32_t n_threads)
+int32_t n_threads)
: SketchContainerImpl{columns_size, max_bins, info.feature_types.ConstHostSpan(), use_group,
n_threads} {
monitor_.Init(__func__);

View File

@@ -774,7 +774,7 @@ class HostSketchContainer : public SketchContainerImpl<WQuantileSketch<float, fl
public:
HostSketchContainer(int32_t max_bins, MetaInfo const &info, std::vector<size_t> columns_size,
-bool use_group, Span<float const> hessian, int32_t n_threads);
+bool use_group, int32_t n_threads);
};
/**
@@ -868,7 +868,7 @@ class SortedSketchContainer : public SketchContainerImpl<WXQuantileSketch<float,
public:
explicit SortedSketchContainer(int32_t max_bins, MetaInfo const &info,
std::vector<size_t> columns_size, bool use_group,
-Span<float const> hessian, int32_t n_threads)
+int32_t n_threads)
: SketchContainerImpl{columns_size, max_bins, info.feature_types.ConstHostSpan(), use_group,
n_threads} {
monitor_.Init(__func__);

View File

@@ -163,8 +163,7 @@ void IterativeDeviceDMatrix::Initialize(DataIterHandle iter_handle, float missin
BatchSet<EllpackPage> IterativeDeviceDMatrix::GetEllpackBatches(const BatchParam& param) {
CHECK(page_);
-auto begin_iter =
-BatchIterator<EllpackPage>(new SimpleBatchIteratorImpl<EllpackPage>(page_));
+auto begin_iter = BatchIterator<EllpackPage>(new SimpleBatchIteratorImpl<EllpackPage>(page_));
return BatchSet<EllpackPage>(begin_iter);
}
} // namespace data

View File

@@ -45,8 +45,8 @@ class IterativeDeviceDMatrix : public DMatrix {
bool EllpackExists() const override { return true; }
bool SparsePageExists() const override { return false; }
-DMatrix *Slice(common::Span<int32_t const> ridxs) override {
-LOG(FATAL) << "Slicing DMatrix is not supported for Device DMatrix.";
+DMatrix *Slice(common::Span<int32_t const>) override {
+LOG(FATAL) << "Slicing DMatrix is not supported for Quantile DMatrix.";
return nullptr;
}
BatchSet<SparsePage> GetRowBatches() override {

View File

@@ -84,7 +84,7 @@ class DMatrixProxy : public DMatrix {
bool SingleColBlock() const override { return true; }
bool EllpackExists() const override { return true; }
bool SparsePageExists() const override { return false; }
-DMatrix *Slice(common::Span<int32_t const> ridxs) override {
+DMatrix* Slice(common::Span<int32_t const> /*ridxs*/) override {
LOG(FATAL) << "Slicing DMatrix is not supported for Proxy DMatrix.";
return nullptr;
}
@@ -100,7 +100,7 @@ class DMatrixProxy : public DMatrix {
LOG(FATAL) << "Not implemented.";
return BatchSet<SortedCSCPage>(BatchIterator<SortedCSCPage>(nullptr));
}
-BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) override {
+BatchSet<EllpackPage> GetEllpackBatches(const BatchParam&) override {
LOG(FATAL) << "Not implemented.";
return BatchSet<EllpackPage>(BatchIterator<EllpackPage>(nullptr));
}
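The proxy and device DMatrix hunks above show two of the three spellings this commit relies on for silencing `-Wunused-parameter`. A tiny standalone illustration of all three follows; the function names are hypothetical, only the patterns come from the diff.

```cpp
// Standalone illustration of the unused-parameter fixes used in this commit.
// None of these functions exist in XGBoost; only the patterns do.
#include <cstddef>

// 1. Leave the parameter unnamed, as in `GetEllpackBatches(const BatchParam&)`.
void SilenceUnnamed(std::size_t) {}

// 2. Comment out the name so the intent survives, as in `/*ridxs*/`.
void SilenceCommented(std::size_t /*ridxs*/) {}

// 3. Remove the parameter entirely when no caller needs it, as done for
//    `hessian`, `gidx`, `tparam`, and the redundant `end` offsets elsewhere.
void SilenceRemoved() {}

int main() {
  SilenceUnnamed(0);
  SilenceCommented(0);
  SilenceRemoved();
  return 0;
}
```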

View File

@@ -218,7 +218,7 @@ void CopyGradient(HostDeviceVector<GradientPair> const* in_gpair, int32_t n_thre
}
void GBTree::UpdateTreeLeaf(DMatrix const* p_fmat, HostDeviceVector<float> const& predictions,
-ObjFunction const* obj, size_t gidx,
+ObjFunction const* obj,
std::vector<std::unique_ptr<RegTree>>* p_trees) {
CHECK(!updaters_.empty());
if (!updaters_.back()->HasNodePosition()) {
@@ -257,7 +257,7 @@ void GBTree::DoBoost(DMatrix* p_fmat, HostDeviceVector<GradientPair>* in_gpair,
if (ngroup == 1) {
std::vector<std::unique_ptr<RegTree>> ret;
BoostNewTrees(in_gpair, p_fmat, 0, &ret);
-UpdateTreeLeaf(p_fmat, predt->predictions, obj, 0, &ret);
+UpdateTreeLeaf(p_fmat, predt->predictions, obj, &ret);
const size_t num_new_trees = ret.size();
new_trees.push_back(std::move(ret));
auto v_predt = out.Slice(linalg::All(), 0);
@@ -274,7 +274,7 @@ void GBTree::DoBoost(DMatrix* p_fmat, HostDeviceVector<GradientPair>* in_gpair,
CopyGradient(in_gpair, ctx_->Threads(), ngroup, gid, &tmp);
std::vector<std::unique_ptr<RegTree>> ret;
BoostNewTrees(&tmp, p_fmat, gid, &ret);
-UpdateTreeLeaf(p_fmat, predt->predictions, obj, gid, &ret);
+UpdateTreeLeaf(p_fmat, predt->predictions, obj, &ret);
const size_t num_new_trees = ret.size();
new_trees.push_back(std::move(ret));
auto v_predt = out.Slice(linalg::All(), gid);
@@ -289,7 +289,7 @@ void GBTree::DoBoost(DMatrix* p_fmat, HostDeviceVector<GradientPair>* in_gpair,
}
monitor_.Stop("BoostNewTrees");
-this->CommitModel(std::move(new_trees), p_fmat, predt);
+this->CommitModel(std::move(new_trees));
}
void GBTree::InitUpdater(Args const& cfg) {
@@ -378,9 +378,7 @@ void GBTree::BoostNewTrees(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fma
}
}
-void GBTree::CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees,
-DMatrix* m,
-PredictionCacheEntry* predts) {
+void GBTree::CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees) {
monitor_.Start("CommitModel");
for (uint32_t gid = 0; gid < model_.learner_model_param->num_output_group; ++gid) {
model_.CommitModel(std::move(new_trees[gid]), gid);
@@ -490,15 +488,14 @@ void GBTree::Slice(int32_t layer_begin, int32_t layer_end, int32_t step,
"want to update a portion of trees.";
}
-*out_of_bound = detail::SliceTrees(
-layer_begin, layer_end, step, this->model_, tparam_, layer_trees,
-[&](auto const &in_it, auto const &out_it) {
-auto new_tree =
-std::make_unique<RegTree>(*this->model_.trees.at(in_it));
-bst_group_t group = this->model_.tree_info[in_it];
-out_trees.at(out_it) = std::move(new_tree);
-out_trees_info.at(out_it) = group;
-});
+*out_of_bound = detail::SliceTrees(layer_begin, layer_end, step, this->model_, layer_trees,
+[&](auto const& in_it, auto const& out_it) {
+auto new_tree =
+std::make_unique<RegTree>(*this->model_.trees.at(in_it));
+bst_group_t group = this->model_.tree_info[in_it];
+out_trees.at(out_it) = std::move(new_tree);
+out_trees_info.at(out_it) = group;
+});
}
void GBTree::PredictBatch(DMatrix* p_fmat,
@@ -674,11 +671,10 @@ class Dart : public GBTree {
auto p_dart = dynamic_cast<Dart*>(out);
CHECK(p_dart);
CHECK(p_dart->weight_drop_.empty());
-detail::SliceTrees(
-layer_begin, layer_end, step, model_, tparam_, this->LayerTrees(),
-[&](auto const& in_it, auto const&) {
-p_dart->weight_drop_.push_back(this->weight_drop_.at(in_it));
-});
+detail::SliceTrees(layer_begin, layer_end, step, model_, this->LayerTrees(),
+[&](auto const& in_it, auto const&) {
+p_dart->weight_drop_.push_back(this->weight_drop_.at(in_it));
+});
}
void SaveModel(Json *p_out) const override {
@@ -901,9 +897,7 @@ class Dart : public GBTree {
protected:
// commit new trees all at once
-void
-CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees,
-DMatrix*, PredictionCacheEntry*) override {
+void CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees) override {
int num_new_trees = 0;
for (uint32_t gid = 0; gid < model_.learner_model_param->num_output_group; ++gid) {
num_new_trees += new_trees[gid].size();

View File

@@ -162,9 +162,8 @@ inline std::pair<uint32_t, uint32_t> LayerToTree(gbm::GBTreeModel const &model,
// Call fn for each pair of input output tree. Return true if index is out of bound.
template <typename Func>
-inline bool SliceTrees(int32_t layer_begin, int32_t layer_end, int32_t step,
-GBTreeModel const &model, GBTreeTrainParam const &tparam,
-uint32_t layer_trees, Func fn) {
+bool SliceTrees(int32_t layer_begin, int32_t layer_end, int32_t step, GBTreeModel const& model,
+uint32_t layer_trees, Func fn) {
uint32_t tree_begin, tree_end;
std::tie(tree_begin, tree_end) = detail::LayerToTree(model, layer_begin, layer_end);
if (tree_end > model.trees.size()) {
@@ -206,8 +205,7 @@ class GBTree : public GradientBooster {
* \brief Optionally update the leaf value.
*/
void UpdateTreeLeaf(DMatrix const* p_fmat, HostDeviceVector<float> const& predictions,
-ObjFunction const* obj, size_t gidx,
-std::vector<std::unique_ptr<RegTree>>* p_trees);
+ObjFunction const* obj, std::vector<std::unique_ptr<RegTree>>* p_trees);
/*! \brief Carry out one iteration of boosting */
void DoBoost(DMatrix* p_fmat, HostDeviceVector<GradientPair>* in_gpair,
@@ -325,7 +323,7 @@ class GBTree : public GradientBooster {
};
if (importance_type == "weight") {
-add_score([&](auto const &p_tree, bst_node_t, bst_feature_t split) {
+add_score([&](auto const&, bst_node_t, bst_feature_t split) {
gain_map[split] = split_counts[split];
});
} else if (importance_type == "gain" || importance_type == "total_gain") {
@@ -423,9 +421,7 @@ class GBTree : public GradientBooster {
DMatrix* f_dmat = nullptr) const;
// commit new trees all at once
-virtual void CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees,
-DMatrix* m,
-PredictionCacheEntry* predts);
+virtual void CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees);
// --- data structure ---
GBTreeModel model_;

View File

@@ -1234,8 +1234,7 @@ class LearnerImpl : public LearnerIO {
obj_->EvalTransform(&out);
for (auto& ev : metrics_) {
-os << '\t' << data_names[i] << '-' << ev->Name() << ':'
-<< ev->Eval(out, m->Info(), tparam_.dsplit == DataSplitMode::kRow);
+os << '\t' << data_names[i] << '-' << ev->Name() << ':' << ev->Eval(out, m->Info());
}
}

View File

@@ -254,8 +254,7 @@ std::pair<double, uint32_t> RankingAUC(std::vector<float> const &predts,
template <typename Curve>
class EvalAUC : public Metric {
-double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info,
-bool distributed) override {
+double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info) override {
double auc {0};
if (tparam_->gpu_id != GenericParameter::kCpuId) {
preds.SetDevice(tparam_->gpu_id);

View File

@@ -312,10 +312,8 @@ void SegmentedReduceAUC(common::Span<size_t const> d_unique_idx,
* up each class in all kernels.
*/
template <bool scale, typename Fn>
-double GPUMultiClassAUCOVR(common::Span<float const> predts,
-MetaInfo const &info, int32_t device,
-common::Span<uint32_t> d_class_ptr, size_t n_classes,
-std::shared_ptr<DeviceAUCCache> cache, Fn area_fn) {
+double GPUMultiClassAUCOVR(MetaInfo const &info, int32_t device, common::Span<uint32_t> d_class_ptr,
+size_t n_classes, std::shared_ptr<DeviceAUCCache> cache, Fn area_fn) {
dh::safe_cuda(cudaSetDevice(device));
/**
* Sorted idx
@@ -478,8 +476,7 @@ double GPUMultiClassROCAUC(common::Span<float const> predts,
double tp, size_t /*class_id*/) {
return TrapezoidArea(fp_prev, fp, tp_prev, tp);
};
-return GPUMultiClassAUCOVR<true>(predts, info, device, dh::ToSpan(class_ptr),
-n_classes, cache, fn);
+return GPUMultiClassAUCOVR<true>(info, device, dh::ToSpan(class_ptr), n_classes, cache, fn);
}
namespace {
@@ -704,8 +701,7 @@ double GPUMultiClassPRAUC(common::Span<float const> predts,
return detail::CalcDeltaPRAUC(fp_prev, fp, tp_prev, tp,
d_totals[class_id].first);
};
-return GPUMultiClassAUCOVR<false>(predts, info, device, d_class_ptr,
-n_classes, cache, fn);
+return GPUMultiClassAUCOVR<false>(info, device, d_class_ptr, n_classes, cache, fn);
}
template <typename Fn>

View File

@@ -178,8 +178,7 @@ class PseudoErrorLoss : public Metric {
out["pseudo_huber_param"] = ToJson(param_);
}
-double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info,
-bool distributed) override {
+double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) override {
CHECK_EQ(info.labels.Shape(0), info.num_row_);
auto labels = info.labels.View(tparam_->gpu_id);
preds.SetDevice(tparam_->gpu_id);
@@ -197,7 +196,7 @@ class PseudoErrorLoss : public Metric {
return std::make_tuple(v, wt);
});
double dat[2]{result.Residue(), result.Weights()};
-if (distributed) {
+if (rabit::IsDistributed()) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
return EvalRowMAPE::GetFinal(dat[0], dat[1]);
@@ -342,8 +341,7 @@ struct EvalEWiseBase : public Metric {
EvalEWiseBase() = default;
explicit EvalEWiseBase(char const* policy_param) : policy_{policy_param} {}
-double Eval(HostDeviceVector<bst_float> const& preds, const MetaInfo& info,
-bool distributed) override {
+double Eval(HostDeviceVector<bst_float> const& preds, const MetaInfo& info) override {
CHECK_EQ(preds.Size(), info.labels.Size())
<< "label and prediction size not match, "
<< "hint: use merror or mlogloss for multi-class classification";
@@ -367,10 +365,7 @@ struct EvalEWiseBase : public Metric {
});
double dat[2]{result.Residue(), result.Weights()};
-if (distributed) {
-rabit::Allreduce<rabit::op::Sum>(dat, 2);
-}
+rabit::Allreduce<rabit::op::Sum>(dat, 2);
return Policy::GetFinal(dat[0], dat[1]);
}

View File

@@ -167,8 +167,7 @@ class MultiClassMetricsReduction {
*/
template<typename Derived>
struct EvalMClassBase : public Metric {
-double Eval(const HostDeviceVector<float> &preds, const MetaInfo &info,
-bool distributed) override {
+double Eval(const HostDeviceVector<float> &preds, const MetaInfo &info) override {
if (info.labels.Size() == 0) {
CHECK_EQ(preds.Size(), 0);
} else {
@@ -186,9 +185,7 @@ struct EvalMClassBase : public Metric {
dat[0] = result.Residue();
dat[1] = result.Weights();
}
-if (distributed) {
-rabit::Allreduce<rabit::op::Sum>(dat, 2);
-}
+rabit::Allreduce<rabit::op::Sum>(dat, 2);
return Derived::GetFinal(dat[0], dat[1]);
}
/*!

View File

@@ -102,9 +102,8 @@ struct EvalAMS : public Metric {
name_ = os.str();
}
-double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info,
-bool distributed) override {
-CHECK(!distributed) << "metric AMS do not support distributed evaluation";
+double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) override {
+CHECK(!rabit::IsDistributed()) << "metric AMS do not support distributed evaluation";
using namespace std; // NOLINT(*)
const auto ndata = static_cast<bst_omp_uint>(info.labels.Size());
@@ -161,8 +160,7 @@ struct EvalRank : public Metric, public EvalRankConfig {
std::unique_ptr<xgboost::Metric> rank_gpu_;
public:
-double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info,
-bool distributed) override {
+double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) override {
CHECK_EQ(preds.Size(), info.labels.Size())
<< "label size predict size not match";
@@ -185,7 +183,7 @@ struct EvalRank : public Metric, public EvalRankConfig {
rank_gpu_.reset(GPUMetric::CreateGPUMetric(this->Name(), tparam_));
}
if (rank_gpu_) {
-sum_metric = rank_gpu_->Eval(preds, info, distributed);
+sum_metric = rank_gpu_->Eval(preds, info);
}
}
@@ -218,7 +216,7 @@ struct EvalRank : public Metric, public EvalRankConfig {
exc.Rethrow();
}
-if (distributed) {
+if (rabit::IsDistributed()) {
double dat[2]{sum_metric, static_cast<double>(ngroups)};
// approximately estimate the metric using mean
rabit::Allreduce<rabit::op::Sum>(dat, 2);
@@ -342,9 +340,8 @@ struct EvalMAP : public EvalRank {
struct EvalCox : public Metric {
public:
EvalCox() = default;
-double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info,
-bool distributed) override {
-CHECK(!distributed) << "Cox metric does not support distributed evaluation";
+double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) override {
+CHECK(!rabit::IsDistributed()) << "Cox metric does not support distributed evaluation";
using namespace std; // NOLINT(*)
const auto ndata = static_cast<bst_omp_uint>(info.labels.Size());

View File

@@ -29,8 +29,7 @@ DMLC_REGISTRY_FILE_TAG(rank_metric_gpu);
template <typename EvalMetricT>
struct EvalRankGpu : public GPUMetric, public EvalRankConfig {
public:
-double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info,
-bool distributed) override {
+double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info) override {
// Sanity check is done by the caller
std::vector<unsigned> tgptr(2, 0);
tgptr[1] = static_cast<unsigned>(preds.Size());

View File

@@ -206,20 +206,15 @@ template <typename Policy> struct EvalEWiseSurvivalBase : public Metric {
CHECK(tparam_);
}
-double Eval(const HostDeviceVector<float> &preds, const MetaInfo &info,
-bool distributed) override {
+double Eval(const HostDeviceVector<float>& preds, const MetaInfo& info) override {
CHECK_EQ(preds.Size(), info.labels_lower_bound_.Size());
CHECK_EQ(preds.Size(), info.labels_upper_bound_.Size());
CHECK(tparam_);
-auto result =
-reducer_.Reduce(*tparam_, info.weights_, info.labels_lower_bound_,
-info.labels_upper_bound_, preds);
+auto result = reducer_.Reduce(*tparam_, info.weights_, info.labels_lower_bound_,
+info.labels_upper_bound_, preds);
-double dat[2] {result.Residue(), result.Weights()};
-if (distributed) {
-rabit::Allreduce<rabit::op::Sum>(dat, 2);
-}
+double dat[2]{result.Residue(), result.Weights()};
+rabit::Allreduce<rabit::op::Sum>(dat, 2);
return Policy::GetFinal(dat[0], dat[1]);
}
@@ -240,10 +235,9 @@ struct AFTNLogLikDispatcher : public Metric {
return "aft-nloglik";
}
-double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info,
-bool distributed) override {
+double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) override {
CHECK(metric_) << "AFT metric must be configured first, with distribution type and scale";
-return metric_->Eval(preds, info, distributed);
+return metric_->Eval(preds, info);
}
void Configure(const Args& args) override {

View File

@@ -116,7 +116,7 @@ class RowPartitioner {
Segment segment = ridx_segments_.at(nidx); // rows belongs to node nidx
auto d_ridx = ridx_.CurrentSpan();
auto d_position = position_.CurrentSpan();
-if (left_counts_.size() <= nidx) {
+if (left_counts_.size() <= static_cast<size_t>(nidx)) {
left_counts_.resize((nidx * 2) + 1);
thrust::fill(left_counts_.begin(), left_counts_.end(), 0);
}

View File

@@ -203,8 +203,8 @@ class HistEvaluator {
// Returns the sum of gradients corresponding to the data points that contains
// a non-missing value for the particular feature fid.
template <int d_step>
-GradStats EnumerateSplit(common::HistogramCuts const &cut, common::Span<size_t const> sorted_idx,
-const common::GHistRow &hist, bst_feature_t fidx, bst_node_t nidx,
+GradStats EnumerateSplit(common::HistogramCuts const &cut, const common::GHistRow &hist,
+bst_feature_t fidx, bst_node_t nidx,
TreeEvaluator::SplitEvaluator<TrainParam> const &evaluator,
SplitEntry *p_best) const {
static_assert(d_step == +1 || d_step == -1, "Invalid step.");
@@ -333,9 +333,9 @@ class HistEvaluator {
EnumeratePart<-1>(cut, sorted_idx, histogram, fidx, nidx, evaluator, best);
}
} else {
-auto grad_stats = EnumerateSplit<+1>(cut, {}, histogram, fidx, nidx, evaluator, best);
+auto grad_stats = EnumerateSplit<+1>(cut, histogram, fidx, nidx, evaluator, best);
if (SplitContainsMissingValues(grad_stats, snode_[nidx])) {
-EnumerateSplit<-1>(cut, {}, histogram, fidx, nidx, evaluator, best);
+EnumerateSplit<-1>(cut, histogram, fidx, nidx, evaluator, best);
}
}
}
@@ -440,7 +440,7 @@ template <typename Partitioner, typename ExpandEntry>
void UpdatePredictionCacheImpl(GenericParameter const *ctx, RegTree const *p_last_tree,
std::vector<Partitioner> const &partitioner,
HistEvaluator<ExpandEntry> const &hist_evaluator,
-TrainParam const &param, linalg::VectorView<float> out_preds) {
+linalg::VectorView<float> out_preds) {
CHECK_GT(out_preds.Size(), 0U);
CHECK(p_last_tree);

View File

@@ -116,7 +116,7 @@ class GloablApproxBuilder {
// Caching prediction seems redundant for approx tree method, as sketching takes up
// majority of training time.
CHECK_EQ(out_preds.Size(), data->Info().num_row_);
-UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, evaluator_, param_, out_preds);
+UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, evaluator_, out_preds);
monitor_->Stop(__func__);
}

View File

@@ -83,7 +83,7 @@ class ApproxRowPartitioner {
const size_t task_id = partition_builder_.GetTaskIdx(node_in_set, r.begin());
partition_builder_.AllocateForTask(task_id);
partition_builder_.PartitionRange(
-node_in_set, nid, r, fidx, &row_set_collection_, [&](size_t row_id) {
+node_in_set, nid, r, &row_set_collection_, [&](size_t row_id) {
auto cut_value = SearchCutValue(row_id, fidx, index, cut_ptrs, cut_values);
if (std::isnan(cut_value)) {
return candidate.split.DefaultLeft();

View File

@@ -563,7 +563,7 @@ struct GPUHistMakerDevice {
// when processing a large batch
this->AllReduceHist(hist_nidx.at(0), reducer, hist_nidx.size());
-for (int i = 0; i < subtraction_nidx.size(); i++) {
+for (size_t i = 0; i < subtraction_nidx.size(); i++) {
auto build_hist_nidx = hist_nidx.at(i);
auto subtraction_trick_nidx = subtraction_nidx.at(i);
auto parent_nidx = candidates.at(i).nid;
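This loop and the `left_counts_.size() <= static_cast<size_t>(nidx)` change in the RowPartitioner hunk above are the two `-Wsign-compare` idioms the commit uses: make the index unsigned, or cast the signed side once before comparing against a container size. A minimal, self-contained sketch of both, with a hypothetical function that is not XGBoost code:

```cpp
// Minimal sketch of the two -Wsign-compare fixes in this commit;
// the function and names are illustrative only.
#include <cstddef>
#include <vector>

int SumUpToNode(std::vector<int> const& left_counts, int nidx) {
  // Fix 1: a signed index (`nidx`) compared against an unsigned size() is
  // cast explicitly, mirroring the RowPartitioner change above.
  if (left_counts.size() <= static_cast<std::size_t>(nidx)) {
    return 0;
  }
  int sum = 0;
  // Fix 2: the loop index is size_t rather than int, mirroring the
  // subtraction_nidx loop in this hunk.
  for (std::size_t i = 0; i < left_counts.size(); ++i) {
    sum += left_counts[i];
  }
  return sum;
}
```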

View File

@@ -257,7 +257,7 @@ bool QuantileHistMaker::Builder::UpdatePredictionCache(DMatrix const *data,
}
monitor_->Start(__func__);
CHECK_EQ(out_preds.Size(), data->Info().num_row_);
-UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, *evaluator_, param_, out_preds);
+UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, *evaluator_, out_preds);
monitor_->Stop(__func__);
return true;
}