Fix compiler warnings. (#7974)
- Remove unused parameters. There are still many warnings that are not yet addressed; currently, the warnings in dmlc-core dominate the error log.
- Remove the `distributed` parameter from `Metric::Eval`.
- Fix some warnings about signed comparison.
This commit is contained in:
parent d48123d23b
commit 1a33b50a0d
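For reference, the unused-parameter fixes in this patch follow three idioms; below is a minimal sketch (illustrative code, not part of the patch — `ATTRIBUTE_UNUSED` here is a stand-in for dmlc-core's `DMLC_ATTRIBUTE_UNUSED`):

    #include <cstddef>

    #if defined(__GNUC__)
    #define ATTRIBUTE_UNUSED __attribute__((unused))
    #else
    #define ATTRIBUTE_UNUSED
    #endif

    struct Demo {
      virtual ~Demo() = default;
      // 1. Comment out the name, keeping it as documentation.
      virtual void Slice(int /*layer_begin*/, int /*layer_end*/) {}
      // 2. Drop the name entirely when it adds nothing.
      virtual void Inspect(std::size_t) {}
      // 3. Attach an "unused" attribute when the name has to stay.
      void Touch(ATTRIBUTE_UNUSED std::size_t scratch[4]) {}
    };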
@@ -144,6 +144,15 @@ function(xgboost_set_cuda_flags target)
     set_property(TARGET ${target} PROPERTY CUDA_ARCHITECTURES ${CMAKE_CUDA_ARCHITECTURES})
   endif (CMAKE_VERSION VERSION_GREATER_EQUAL "3.18")

-  if (FORCE_COLORED_OUTPUT)
+  if (FORCE_COLORED_OUTPUT AND (CMAKE_GENERATOR STREQUAL "Ninja") AND
+      ((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") OR
+       (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")))
     target_compile_options(${target} PRIVATE
       $<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler=-fdiagnostics-color=always>)
-  endif()
+  endif (FORCE_COLORED_OUTPUT)

   if (USE_DEVICE_DEBUG)
     target_compile_options(${target} PRIVATE
       $<$<AND:$<CONFIG:DEBUG>,$<COMPILE_LANGUAGE:CUDA>>:-G;-src-in-ptx>)
@@ -68,8 +68,8 @@ class GradientBooster : public Model, public Configurable {
    * \param layer_end End of booster layer. 0 means do not limit trees.
    * \param out Output gradient booster
    */
-  virtual void Slice(int32_t layer_begin, int32_t layer_end, int32_t step,
-                     GradientBooster *out, bool* out_of_bound) const {
+  virtual void Slice(int32_t /*layer_begin*/, int32_t /*layer_end*/, int32_t /*step*/,
+                     GradientBooster* /*out*/, bool* /*out_of_bound*/) const {
     LOG(FATAL) << "Slice is not supported by current booster.";
   }
   /*!

@@ -89,7 +89,7 @@ class JsonReader {
     } else if (got == 0) {
       msg += "\\0\"";
     } else {
-      msg += (got <= 127 ? std::string{got} : std::to_string(got)) + " \"";  // NOLINT
+      msg += (got <= static_cast<char>(127) ? std::string{got} : std::to_string(got)) + " \"";
     }
     Error(msg);
   }

@@ -317,7 +317,8 @@ class TensorView {
   }

   template <size_t old_dim, size_t new_dim, int32_t D, typename Index>
-  LINALG_HD size_t MakeSliceDim(size_t new_shape[D], size_t new_stride[D], Index i) const {
+  LINALG_HD size_t MakeSliceDim(DMLC_ATTRIBUTE_UNUSED size_t new_shape[D],
+                                DMLC_ATTRIBUTE_UNUSED size_t new_stride[D], Index i) const {
     static_assert(old_dim < kDim, "");
     return stride_[old_dim] * i;
   }

@@ -57,12 +57,8 @@ class Metric : public Configurable {
    * \brief evaluate a specific metric
    * \param preds prediction
    * \param info information, including label etc.
-   * \param distributed whether a call to Allreduce is needed to gather
-   *        the average statistics across all the node,
-   *        this is only supported by some metrics
    */
-  virtual double Eval(const HostDeviceVector<bst_float> &preds,
-                      const MetaInfo &info, bool distributed) = 0;
+  virtual double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) = 0;
   /*! \return name of metric */
   virtual const char* Name() const = 0;
   /*! \brief virtual destructor */
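With the signature change above, callers no longer thread a `distributed` flag through `Eval`; metrics that need an allreduce query the communicator themselves. A toy sketch of the new convention (hypothetical `ToyMeanError`, not from the patch):

    #include <rabit/rabit.h>

    class ToyMeanError {
     public:
      // Old: Eval(preds, info, bool distributed). New: no flag; ask rabit.
      double Finalize(double residue, double weight) const {
        double dat[2]{residue, weight};
        if (rabit::IsDistributed()) {
          // Sum the partial statistics across workers.
          rabit::Allreduce<rabit::op::Sum>(dat, 2);
        }
        return dat[0] / dat[1];
      }
    };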
@@ -103,8 +103,10 @@ class ObjFunction : public Configurable {
    * \param prediction Model prediction after transformation.
    * \param p_tree Tree that needs to be updated.
    */
-  virtual void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& position, MetaInfo const& info,
-                              HostDeviceVector<float> const& prediction, RegTree* p_tree) const {}
+  virtual void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& /*position*/,
+                              MetaInfo const& /*info*/,
+                              HostDeviceVector<float> const& /*prediction*/,
+                              RegTree* /*p_tree*/) const {}

   /*!
    * \brief Create an objective function according to name.

@@ -171,14 +171,14 @@ inline HistogramCuts SketchOnDMatrix(DMatrix* m, int32_t max_bins, int32_t n_thr

   if (!use_sorted) {
     HostSketchContainer container(max_bins, m->Info(), reduced, HostSketchContainer::UseGroup(info),
-                                  hessian, n_threads);
+                                  n_threads);
     for (auto const& page : m->GetBatches<SparsePage>()) {
       container.PushRowPage(page, info, hessian);
     }
     container.MakeCuts(&out);
   } else {
-    SortedSketchContainer container{
-        max_bins, m->Info(), reduced, HostSketchContainer::UseGroup(info), hessian, n_threads};
+    SortedSketchContainer container{max_bins, m->Info(), reduced,
+                                    HostSketchContainer::UseGroup(info), n_threads};
     for (auto const& page : m->GetBatches<SortedCSCPage>()) {
       container.PushColPage(page, info, hessian);
     }
@@ -168,8 +168,8 @@ class PartitionBuilder {
     const size_t n_left = child_nodes_sizes.first;
     const size_t n_right = child_nodes_sizes.second;

-    SetNLeftElems(node_in_set, range.begin(), range.end(), n_left);
-    SetNRightElems(node_in_set, range.begin(), range.end(), n_right);
+    SetNLeftElems(node_in_set, range.begin(), n_left);
+    SetNRightElems(node_in_set, range.begin(), n_right);
   }

   /**

@@ -188,8 +188,7 @@ class PartitionBuilder {
    */
   template <typename Pred>
   void PartitionRange(const size_t node_in_set, const size_t nid, common::Range1d range,
-                      bst_feature_t fidx, common::RowSetCollection* p_row_set_collection,
-                      Pred pred) {
+                      common::RowSetCollection* p_row_set_collection, Pred pred) {
     auto& row_set_collection = *p_row_set_collection;
     const size_t* p_ridx = row_set_collection[nid].begin;
     common::Span<const size_t> ridx(p_ridx + range.begin(), p_ridx + range.end());

@@ -200,8 +199,8 @@ class PartitionBuilder {
     const size_t n_left = child_nodes_sizes.first;
     const size_t n_right = child_nodes_sizes.second;

-    this->SetNLeftElems(node_in_set, range.begin(), range.end(), n_left);
-    this->SetNRightElems(node_in_set, range.begin(), range.end(), n_right);
+    this->SetNLeftElems(node_in_set, range.begin(), n_left);
+    this->SetNRightElems(node_in_set, range.begin(), n_right);
   }

   // allocate thread local memory, should be called for each specific task

@@ -223,12 +222,12 @@ class PartitionBuilder {
     return { mem_blocks_.at(task_idx)->Right(), end - begin };
   }

-  void SetNLeftElems(int nid, size_t begin, size_t end, size_t n_left) {
+  void SetNLeftElems(int nid, size_t begin, size_t n_left) {
     size_t task_idx = GetTaskIdx(nid, begin);
     mem_blocks_.at(task_idx)->n_left = n_left;
   }

-  void SetNRightElems(int nid, size_t begin, size_t end, size_t n_right) {
+  void SetNRightElems(int nid, size_t begin, size_t n_right) {
     size_t task_idx = GetTaskIdx(nid, begin);
     mem_blocks_.at(task_idx)->n_right = n_right;
   }
@@ -543,7 +543,7 @@ template class SketchContainerImpl<WXQuantileSketch<float, float>>;

 HostSketchContainer::HostSketchContainer(int32_t max_bins, MetaInfo const &info,
                                          std::vector<size_t> columns_size, bool use_group,
-                                         Span<float const> hessian, int32_t n_threads)
+                                         int32_t n_threads)
     : SketchContainerImpl{columns_size, max_bins, info.feature_types.ConstHostSpan(), use_group,
                           n_threads} {
   monitor_.Init(__func__);

@@ -774,7 +774,7 @@ class HostSketchContainer : public SketchContainerImpl<WQuantileSketch<float, fl

  public:
   HostSketchContainer(int32_t max_bins, MetaInfo const &info, std::vector<size_t> columns_size,
-                      bool use_group, Span<float const> hessian, int32_t n_threads);
+                      bool use_group, int32_t n_threads);
 };

 /**

@@ -868,7 +868,7 @@ class SortedSketchContainer : public SketchContainerImpl<WXQuantileSketch<float,
  public:
   explicit SortedSketchContainer(int32_t max_bins, MetaInfo const &info,
                                  std::vector<size_t> columns_size, bool use_group,
-                                 Span<float const> hessian, int32_t n_threads)
+                                 int32_t n_threads)
       : SketchContainerImpl{columns_size, max_bins, info.feature_types.ConstHostSpan(), use_group,
                             n_threads} {
     monitor_.Init(__func__);
@@ -163,8 +163,7 @@ void IterativeDeviceDMatrix::Initialize(DataIterHandle iter_handle, float missin

 BatchSet<EllpackPage> IterativeDeviceDMatrix::GetEllpackBatches(const BatchParam& param) {
   CHECK(page_);
-  auto begin_iter =
-      BatchIterator<EllpackPage>(new SimpleBatchIteratorImpl<EllpackPage>(page_));
+  auto begin_iter = BatchIterator<EllpackPage>(new SimpleBatchIteratorImpl<EllpackPage>(page_));
   return BatchSet<EllpackPage>(begin_iter);
 }
 }  // namespace data

@@ -45,8 +45,8 @@ class IterativeDeviceDMatrix : public DMatrix {

   bool EllpackExists() const override { return true; }
   bool SparsePageExists() const override { return false; }
-  DMatrix *Slice(common::Span<int32_t const> ridxs) override {
-    LOG(FATAL) << "Slicing DMatrix is not supported for Device DMatrix.";
+  DMatrix *Slice(common::Span<int32_t const>) override {
+    LOG(FATAL) << "Slicing DMatrix is not supported for Quantile DMatrix.";
     return nullptr;
   }
   BatchSet<SparsePage> GetRowBatches() override {

@@ -84,7 +84,7 @@ class DMatrixProxy : public DMatrix {
   bool SingleColBlock() const override { return true; }
   bool EllpackExists() const override { return true; }
   bool SparsePageExists() const override { return false; }
-  DMatrix *Slice(common::Span<int32_t const> ridxs) override {
+  DMatrix* Slice(common::Span<int32_t const> /*ridxs*/) override {
     LOG(FATAL) << "Slicing DMatrix is not supported for Proxy DMatrix.";
     return nullptr;
   }

@@ -100,7 +100,7 @@ class DMatrixProxy : public DMatrix {
     LOG(FATAL) << "Not implemented.";
     return BatchSet<SortedCSCPage>(BatchIterator<SortedCSCPage>(nullptr));
   }
-  BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) override {
+  BatchSet<EllpackPage> GetEllpackBatches(const BatchParam&) override {
     LOG(FATAL) << "Not implemented.";
     return BatchSet<EllpackPage>(BatchIterator<EllpackPage>(nullptr));
   }
@@ -218,7 +218,7 @@ void CopyGradient(HostDeviceVector<GradientPair> const* in_gpair, int32_t n_thre
 }

 void GBTree::UpdateTreeLeaf(DMatrix const* p_fmat, HostDeviceVector<float> const& predictions,
-                            ObjFunction const* obj, size_t gidx,
+                            ObjFunction const* obj,
                             std::vector<std::unique_ptr<RegTree>>* p_trees) {
   CHECK(!updaters_.empty());
   if (!updaters_.back()->HasNodePosition()) {

@@ -257,7 +257,7 @@ void GBTree::DoBoost(DMatrix* p_fmat, HostDeviceVector<GradientPair>* in_gpair,
   if (ngroup == 1) {
     std::vector<std::unique_ptr<RegTree>> ret;
     BoostNewTrees(in_gpair, p_fmat, 0, &ret);
-    UpdateTreeLeaf(p_fmat, predt->predictions, obj, 0, &ret);
+    UpdateTreeLeaf(p_fmat, predt->predictions, obj, &ret);
     const size_t num_new_trees = ret.size();
     new_trees.push_back(std::move(ret));
     auto v_predt = out.Slice(linalg::All(), 0);

@@ -274,7 +274,7 @@ void GBTree::DoBoost(DMatrix* p_fmat, HostDeviceVector<GradientPair>* in_gpair,
       CopyGradient(in_gpair, ctx_->Threads(), ngroup, gid, &tmp);
       std::vector<std::unique_ptr<RegTree>> ret;
       BoostNewTrees(&tmp, p_fmat, gid, &ret);
-      UpdateTreeLeaf(p_fmat, predt->predictions, obj, gid, &ret);
+      UpdateTreeLeaf(p_fmat, predt->predictions, obj, &ret);
       const size_t num_new_trees = ret.size();
       new_trees.push_back(std::move(ret));
       auto v_predt = out.Slice(linalg::All(), gid);

@@ -289,7 +289,7 @@ void GBTree::DoBoost(DMatrix* p_fmat, HostDeviceVector<GradientPair>* in_gpair,
   }

   monitor_.Stop("BoostNewTrees");
-  this->CommitModel(std::move(new_trees), p_fmat, predt);
+  this->CommitModel(std::move(new_trees));
 }

 void GBTree::InitUpdater(Args const& cfg) {

@@ -378,9 +378,7 @@ void GBTree::BoostNewTrees(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fma
   }
 }

-void GBTree::CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees,
-                         DMatrix* m,
-                         PredictionCacheEntry* predts) {
+void GBTree::CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees) {
   monitor_.Start("CommitModel");
   for (uint32_t gid = 0; gid < model_.learner_model_param->num_output_group; ++gid) {
     model_.CommitModel(std::move(new_trees[gid]), gid);

@@ -490,9 +488,8 @@ void GBTree::Slice(int32_t layer_begin, int32_t layer_end, int32_t step,
         "want to update a portion of trees.";
   }

-  *out_of_bound = detail::SliceTrees(
-      layer_begin, layer_end, step, this->model_, tparam_, layer_trees,
-      [&](auto const &in_it, auto const &out_it) {
+  *out_of_bound = detail::SliceTrees(layer_begin, layer_end, step, this->model_, layer_trees,
+                                     [&](auto const& in_it, auto const& out_it) {
     auto new_tree =
         std::make_unique<RegTree>(*this->model_.trees.at(in_it));
     bst_group_t group = this->model_.tree_info[in_it];

@@ -674,8 +671,7 @@ class Dart : public GBTree {
     auto p_dart = dynamic_cast<Dart*>(out);
     CHECK(p_dart);
     CHECK(p_dart->weight_drop_.empty());
-    detail::SliceTrees(
-        layer_begin, layer_end, step, model_, tparam_, this->LayerTrees(),
+    detail::SliceTrees(layer_begin, layer_end, step, model_, this->LayerTrees(),
                        [&](auto const& in_it, auto const&) {
                          p_dart->weight_drop_.push_back(this->weight_drop_.at(in_it));
                        });

@@ -901,9 +897,7 @@ class Dart : public GBTree {

  protected:
   // commit new trees all at once
-  void
-  CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees,
-              DMatrix*, PredictionCacheEntry*) override {
+  void CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees) override {
     int num_new_trees = 0;
     for (uint32_t gid = 0; gid < model_.learner_model_param->num_output_group; ++gid) {
       num_new_trees += new_trees[gid].size();
@@ -162,8 +162,7 @@ inline std::pair<uint32_t, uint32_t> LayerToTree(gbm::GBTreeModel const &model,

 // Call fn for each pair of input output tree. Return true if index is out of bound.
 template <typename Func>
-inline bool SliceTrees(int32_t layer_begin, int32_t layer_end, int32_t step,
-                       GBTreeModel const &model, GBTreeTrainParam const &tparam,
+bool SliceTrees(int32_t layer_begin, int32_t layer_end, int32_t step, GBTreeModel const& model,
                 uint32_t layer_trees, Func fn) {
   uint32_t tree_begin, tree_end;
   std::tie(tree_begin, tree_end) = detail::LayerToTree(model, layer_begin, layer_end);

@@ -206,8 +205,7 @@ class GBTree : public GradientBooster {
    * \brief Optionally update the leaf value.
    */
   void UpdateTreeLeaf(DMatrix const* p_fmat, HostDeviceVector<float> const& predictions,
-                      ObjFunction const* obj, size_t gidx,
-                      std::vector<std::unique_ptr<RegTree>>* p_trees);
+                      ObjFunction const* obj, std::vector<std::unique_ptr<RegTree>>* p_trees);

   /*! \brief Carry out one iteration of boosting */
   void DoBoost(DMatrix* p_fmat, HostDeviceVector<GradientPair>* in_gpair,

@@ -325,7 +323,7 @@ class GBTree : public GradientBooster {
     };

     if (importance_type == "weight") {
-      add_score([&](auto const &p_tree, bst_node_t, bst_feature_t split) {
+      add_score([&](auto const&, bst_node_t, bst_feature_t split) {
         gain_map[split] = split_counts[split];
       });
     } else if (importance_type == "gain" || importance_type == "total_gain") {

@@ -423,9 +421,7 @@ class GBTree : public GradientBooster {
                       DMatrix* f_dmat = nullptr) const;

   // commit new trees all at once
-  virtual void CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees,
-                           DMatrix* m,
-                           PredictionCacheEntry* predts);
+  virtual void CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees);

   // --- data structure ---
   GBTreeModel model_;
@@ -1234,8 +1234,7 @@ class LearnerImpl : public LearnerIO {

     obj_->EvalTransform(&out);
     for (auto& ev : metrics_) {
-      os << '\t' << data_names[i] << '-' << ev->Name() << ':'
-         << ev->Eval(out, m->Info(), tparam_.dsplit == DataSplitMode::kRow);
+      os << '\t' << data_names[i] << '-' << ev->Name() << ':' << ev->Eval(out, m->Info());
     }
   }
@@ -254,8 +254,7 @@ std::pair<double, uint32_t> RankingAUC(std::vector<float> const &predts,

 template <typename Curve>
 class EvalAUC : public Metric {
-  double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info,
-              bool distributed) override {
+  double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info) override {
     double auc {0};
     if (tparam_->gpu_id != GenericParameter::kCpuId) {
       preds.SetDevice(tparam_->gpu_id);

@@ -312,10 +312,8 @@ void SegmentedReduceAUC(common::Span<size_t const> d_unique_idx,
  * up each class in all kernels.
  */
 template <bool scale, typename Fn>
-double GPUMultiClassAUCOVR(common::Span<float const> predts,
-                           MetaInfo const &info, int32_t device,
-                           common::Span<uint32_t> d_class_ptr, size_t n_classes,
-                           std::shared_ptr<DeviceAUCCache> cache, Fn area_fn) {
+double GPUMultiClassAUCOVR(MetaInfo const &info, int32_t device, common::Span<uint32_t> d_class_ptr,
+                           size_t n_classes, std::shared_ptr<DeviceAUCCache> cache, Fn area_fn) {
   dh::safe_cuda(cudaSetDevice(device));
   /**
    * Sorted idx

@@ -478,8 +476,7 @@ double GPUMultiClassROCAUC(common::Span<float const> predts,
                                 double tp, size_t /*class_id*/) {
     return TrapezoidArea(fp_prev, fp, tp_prev, tp);
   };
-  return GPUMultiClassAUCOVR<true>(predts, info, device, dh::ToSpan(class_ptr),
-                                   n_classes, cache, fn);
+  return GPUMultiClassAUCOVR<true>(info, device, dh::ToSpan(class_ptr), n_classes, cache, fn);
 }

 namespace {

@@ -704,8 +701,7 @@ double GPUMultiClassPRAUC(common::Span<float const> predts,
     return detail::CalcDeltaPRAUC(fp_prev, fp, tp_prev, tp,
                                   d_totals[class_id].first);
   };
-  return GPUMultiClassAUCOVR<false>(predts, info, device, d_class_ptr,
-                                    n_classes, cache, fn);
+  return GPUMultiClassAUCOVR<false>(info, device, d_class_ptr, n_classes, cache, fn);
 }

 template <typename Fn>
@@ -178,8 +178,7 @@ class PseudoErrorLoss : public Metric {
     out["pseudo_huber_param"] = ToJson(param_);
   }

-  double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info,
-              bool distributed) override {
+  double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) override {
     CHECK_EQ(info.labels.Shape(0), info.num_row_);
     auto labels = info.labels.View(tparam_->gpu_id);
     preds.SetDevice(tparam_->gpu_id);

@@ -197,7 +196,7 @@ class PseudoErrorLoss : public Metric {
       return std::make_tuple(v, wt);
     });
     double dat[2]{result.Residue(), result.Weights()};
-    if (distributed) {
+    if (rabit::IsDistributed()) {
       rabit::Allreduce<rabit::op::Sum>(dat, 2);
     }
     return EvalRowMAPE::GetFinal(dat[0], dat[1]);

@@ -342,8 +341,7 @@ struct EvalEWiseBase : public Metric {
   EvalEWiseBase() = default;
   explicit EvalEWiseBase(char const* policy_param) : policy_{policy_param} {}

-  double Eval(HostDeviceVector<bst_float> const& preds, const MetaInfo& info,
-              bool distributed) override {
+  double Eval(HostDeviceVector<bst_float> const& preds, const MetaInfo& info) override {
     CHECK_EQ(preds.Size(), info.labels.Size())
         << "label and prediction size not match, "
         << "hint: use merror or mlogloss for multi-class classification";

@@ -367,10 +365,7 @@ struct EvalEWiseBase : public Metric {
     });

     double dat[2]{result.Residue(), result.Weights()};
-
-    if (distributed) {
-      rabit::Allreduce<rabit::op::Sum>(dat, 2);
-    }
+    rabit::Allreduce<rabit::op::Sum>(dat, 2);
     return Policy::GetFinal(dat[0], dat[1]);
   }
@@ -167,8 +167,7 @@ class MultiClassMetricsReduction {
  */
 template<typename Derived>
 struct EvalMClassBase : public Metric {
-  double Eval(const HostDeviceVector<float> &preds, const MetaInfo &info,
-              bool distributed) override {
+  double Eval(const HostDeviceVector<float> &preds, const MetaInfo &info) override {
     if (info.labels.Size() == 0) {
       CHECK_EQ(preds.Size(), 0);
     } else {

@@ -186,9 +185,7 @@ struct EvalMClassBase : public Metric {
       dat[0] = result.Residue();
      dat[1] = result.Weights();
     }
-    if (distributed) {
-      rabit::Allreduce<rabit::op::Sum>(dat, 2);
-    }
+    rabit::Allreduce<rabit::op::Sum>(dat, 2);
     return Derived::GetFinal(dat[0], dat[1]);
   }
   /*!
@@ -102,9 +102,8 @@ struct EvalAMS : public Metric {
     name_ = os.str();
   }

-  double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info,
-              bool distributed) override {
-    CHECK(!distributed) << "metric AMS do not support distributed evaluation";
+  double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) override {
+    CHECK(!rabit::IsDistributed()) << "metric AMS do not support distributed evaluation";
     using namespace std;  // NOLINT(*)

     const auto ndata = static_cast<bst_omp_uint>(info.labels.Size());

@@ -161,8 +160,7 @@ struct EvalRank : public Metric, public EvalRankConfig {
   std::unique_ptr<xgboost::Metric> rank_gpu_;

  public:
-  double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info,
-              bool distributed) override {
+  double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) override {
     CHECK_EQ(preds.Size(), info.labels.Size())
         << "label size predict size not match";

@@ -185,7 +183,7 @@ struct EvalRank : public Metric, public EvalRankConfig {
         rank_gpu_.reset(GPUMetric::CreateGPUMetric(this->Name(), tparam_));
       }
       if (rank_gpu_) {
-        sum_metric = rank_gpu_->Eval(preds, info, distributed);
+        sum_metric = rank_gpu_->Eval(preds, info);
       }
     }

@@ -218,7 +216,7 @@ struct EvalRank : public Metric, public EvalRankConfig {
       exc.Rethrow();
     }

-    if (distributed) {
+    if (rabit::IsDistributed()) {
       double dat[2]{sum_metric, static_cast<double>(ngroups)};
       // approximately estimate the metric using mean
       rabit::Allreduce<rabit::op::Sum>(dat, 2);

@@ -342,9 +340,8 @@ struct EvalMAP : public EvalRank {
 struct EvalCox : public Metric {
  public:
   EvalCox() = default;
-  double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info,
-              bool distributed) override {
-    CHECK(!distributed) << "Cox metric does not support distributed evaluation";
+  double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) override {
+    CHECK(!rabit::IsDistributed()) << "Cox metric does not support distributed evaluation";
     using namespace std;  // NOLINT(*)

     const auto ndata = static_cast<bst_omp_uint>(info.labels.Size());
@@ -29,8 +29,7 @@ DMLC_REGISTRY_FILE_TAG(rank_metric_gpu);
 template <typename EvalMetricT>
 struct EvalRankGpu : public GPUMetric, public EvalRankConfig {
  public:
-  double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info,
-              bool distributed) override {
+  double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info) override {
     // Sanity check is done by the caller
     std::vector<unsigned> tgptr(2, 0);
     tgptr[1] = static_cast<unsigned>(preds.Size());
@@ -206,20 +206,15 @@ template <typename Policy> struct EvalEWiseSurvivalBase : public Metric {
     CHECK(tparam_);
   }

-  double Eval(const HostDeviceVector<float> &preds, const MetaInfo &info,
-              bool distributed) override {
+  double Eval(const HostDeviceVector<float>& preds, const MetaInfo& info) override {
     CHECK_EQ(preds.Size(), info.labels_lower_bound_.Size());
     CHECK_EQ(preds.Size(), info.labels_upper_bound_.Size());
     CHECK(tparam_);
-    auto result =
-        reducer_.Reduce(*tparam_, info.weights_, info.labels_lower_bound_,
+    auto result = reducer_.Reduce(*tparam_, info.weights_, info.labels_lower_bound_,
                                   info.labels_upper_bound_, preds);

-    double dat[2] {result.Residue(), result.Weights()};
-
-    if (distributed) {
-      rabit::Allreduce<rabit::op::Sum>(dat, 2);
-    }
+    double dat[2]{result.Residue(), result.Weights()};
+    rabit::Allreduce<rabit::op::Sum>(dat, 2);
     return Policy::GetFinal(dat[0], dat[1]);
   }

@@ -240,10 +235,9 @@ struct AFTNLogLikDispatcher : public Metric {
     return "aft-nloglik";
   }

-  double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info,
-              bool distributed) override {
+  double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) override {
     CHECK(metric_) << "AFT metric must be configured first, with distribution type and scale";
-    return metric_->Eval(preds, info, distributed);
+    return metric_->Eval(preds, info);
   }

   void Configure(const Args& args) override {
@@ -116,7 +116,7 @@ class RowPartitioner {
     Segment segment = ridx_segments_.at(nidx);  // rows belongs to node nidx
     auto d_ridx = ridx_.CurrentSpan();
     auto d_position = position_.CurrentSpan();
-    if (left_counts_.size() <= nidx) {
+    if (left_counts_.size() <= static_cast<size_t>(nidx)) {
       left_counts_.resize((nidx * 2) + 1);
       thrust::fill(left_counts_.begin(), left_counts_.end(), 0);
     }
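The signed-comparison fixes all follow the same pattern; a minimal sketch (illustrative code, not from the patch) of the `-Wsign-compare` warning and the two remedies used here — cast the signed value, or give the loop index the container's unsigned type:

    #include <cstddef>
    #include <vector>

    void Walk(const std::vector<int>& v, int nidx) {
      // Warning: `v.size()` is std::size_t (unsigned), `nidx` is int (signed):
      //   if (v.size() <= nidx) { ... }
      // Remedy 1: cast once at the comparison, as for left_counts_ above.
      if (v.size() <= static_cast<std::size_t>(nidx)) { /* grow storage */ }
      // Remedy 2: declare the index with the unsigned type, as in the
      // subtraction_nidx and test loops that follow.
      for (std::size_t i = 0; i < v.size(); ++i) { /* use v[i] */ }
    }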
@@ -203,8 +203,8 @@ class HistEvaluator {
   // Returns the sum of gradients corresponding to the data points that contains
   // a non-missing value for the particular feature fid.
   template <int d_step>
-  GradStats EnumerateSplit(common::HistogramCuts const &cut, common::Span<size_t const> sorted_idx,
-                           const common::GHistRow &hist, bst_feature_t fidx, bst_node_t nidx,
+  GradStats EnumerateSplit(common::HistogramCuts const &cut, const common::GHistRow &hist,
+                           bst_feature_t fidx, bst_node_t nidx,
                            TreeEvaluator::SplitEvaluator<TrainParam> const &evaluator,
                            SplitEntry *p_best) const {
     static_assert(d_step == +1 || d_step == -1, "Invalid step.");

@@ -333,9 +333,9 @@ class HistEvaluator {
         EnumeratePart<-1>(cut, sorted_idx, histogram, fidx, nidx, evaluator, best);
       }
     } else {
-      auto grad_stats = EnumerateSplit<+1>(cut, {}, histogram, fidx, nidx, evaluator, best);
+      auto grad_stats = EnumerateSplit<+1>(cut, histogram, fidx, nidx, evaluator, best);
       if (SplitContainsMissingValues(grad_stats, snode_[nidx])) {
-        EnumerateSplit<-1>(cut, {}, histogram, fidx, nidx, evaluator, best);
+        EnumerateSplit<-1>(cut, histogram, fidx, nidx, evaluator, best);
       }
     }
   }

@@ -440,7 +440,7 @@ template <typename Partitioner, typename ExpandEntry>
 void UpdatePredictionCacheImpl(GenericParameter const *ctx, RegTree const *p_last_tree,
                                std::vector<Partitioner> const &partitioner,
                                HistEvaluator<ExpandEntry> const &hist_evaluator,
-                               TrainParam const &param, linalg::VectorView<float> out_preds) {
+                               linalg::VectorView<float> out_preds) {
   CHECK_GT(out_preds.Size(), 0U);

   CHECK(p_last_tree);
@@ -116,7 +116,7 @@ class GloablApproxBuilder {
     // Caching prediction seems redundant for approx tree method, as sketching takes up
     // majority of training time.
     CHECK_EQ(out_preds.Size(), data->Info().num_row_);
-    UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, evaluator_, param_, out_preds);
+    UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, evaluator_, out_preds);
     monitor_->Stop(__func__);
   }

@@ -83,7 +83,7 @@ class ApproxRowPartitioner {
       const size_t task_id = partition_builder_.GetTaskIdx(node_in_set, r.begin());
       partition_builder_.AllocateForTask(task_id);
       partition_builder_.PartitionRange(
-          node_in_set, nid, r, fidx, &row_set_collection_, [&](size_t row_id) {
+          node_in_set, nid, r, &row_set_collection_, [&](size_t row_id) {
             auto cut_value = SearchCutValue(row_id, fidx, index, cut_ptrs, cut_values);
             if (std::isnan(cut_value)) {
               return candidate.split.DefaultLeft();
@@ -563,7 +563,7 @@ struct GPUHistMakerDevice {
     // when processing a large batch
     this->AllReduceHist(hist_nidx.at(0), reducer, hist_nidx.size());

-    for (int i = 0; i < subtraction_nidx.size(); i++) {
+    for (size_t i = 0; i < subtraction_nidx.size(); i++) {
       auto build_hist_nidx = hist_nidx.at(i);
       auto subtraction_trick_nidx = subtraction_nidx.at(i);
       auto parent_nidx = candidates.at(i).nid;

@@ -257,7 +257,7 @@ bool QuantileHistMaker::Builder::UpdatePredictionCache(DMatrix const *data,
   }
   monitor_->Start(__func__);
   CHECK_EQ(out_preds.Size(), data->Info().num_row_);
-  UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, *evaluator_, param_, out_preds);
+  UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, *evaluator_, out_preds);
   monitor_->Stop(__func__);
   return true;
 }
@@ -67,7 +67,7 @@ TEST(SegmentedUnique, Basic) {
   CHECK_EQ(n_uniques, 5);

   std::vector<float> values_sol{0.1f, 0.2f, 0.3f, 0.62448811531066895f, 0.4f};
-  for (auto i = 0 ; i < values_sol.size(); i ++) {
+  for (size_t i = 0 ; i < values_sol.size(); i ++) {
     ASSERT_EQ(d_vals_out[i], values_sol[i]);
   }

@@ -84,7 +84,7 @@ TEST(SegmentedUnique, Basic) {
       d_segs_out.data().get(), d_vals_out.data().get(),
       thrust::equal_to<float>{});
   ASSERT_EQ(n_uniques, values.size());
-  for (auto i = 0 ; i < values.size(); i ++) {
+  for (size_t i = 0 ; i < values.size(); i ++) {
     ASSERT_EQ(d_vals_out[i], values[i]);
   }
 }

@@ -315,10 +315,10 @@ TEST(Linalg, Popc) {
 TEST(Linalg, Stack) {
   Tensor<float, 3> l{{2, 3, 4}, kCpuId};
   ElementWiseTransformHost(l.View(kCpuId), omp_get_max_threads(),
-                           [=](size_t i, float v) { return i; });
+                           [=](size_t i, float) { return i; });
   Tensor<float, 3> r_0{{2, 3, 4}, kCpuId};
   ElementWiseTransformHost(r_0.View(kCpuId), omp_get_max_threads(),
-                           [=](size_t i, float v) { return i; });
+                           [=](size_t i, float) { return i; });

   Stack(&l, r_0);
@@ -50,8 +50,8 @@ TEST(PartitionBuilder, BasicTest) {
         right[i] = left_total + value_right++;
       }

-      builder.SetNLeftElems(nid, begin, end, n_left);
-      builder.SetNRightElems(nid, begin, end, n_right);
+      builder.SetNLeftElems(nid, begin, n_left);
+      builder.SetNRightElems(nid, begin, n_right);
     }
   }
   builder.CalculateRowOffsets();

@@ -77,7 +77,7 @@ void TestDistributedQuantile(size_t rows, size_t cols) {
   std::vector<float> hessian(rows, 1.0);
   auto hess = Span<float const>{hessian};

-  ContainerType<use_column> sketch_distributed(n_bins, m->Info(), column_size, false, hess,
+  ContainerType<use_column> sketch_distributed(n_bins, m->Info(), column_size, false,
                                                OmpGetNumThreads(0));

   if (use_column) {

@@ -98,7 +98,7 @@ void TestDistributedQuantile(size_t rows, size_t cols) {
   CHECK_EQ(rabit::GetWorldSize(), 1);
   std::for_each(column_size.begin(), column_size.end(), [=](auto& size) { size *= world; });
   m->Info().num_row_ = world * rows;
-  ContainerType<use_column> sketch_on_single_node(n_bins, m->Info(), column_size, false, hess,
+  ContainerType<use_column> sketch_on_single_node(n_bins, m->Info(), column_size, false,
                                                   OmpGetNumThreads(0));
   m->Info().num_row_ = rows;

@@ -190,7 +190,7 @@ TEST(Quantile, SameOnAllWorkers) {

   constexpr size_t kRows = 1000, kCols = 100;
   RunWithSeedsAndBins(
-      kRows, [=](int32_t seed, size_t n_bins, MetaInfo const &info) {
+      kRows, [=](int32_t seed, size_t n_bins, MetaInfo const&) {
         auto rank = rabit::GetRank();
         HostDeviceVector<float> storage;
         std::vector<FeatureType> ft(kCols);
@@ -36,7 +36,7 @@ struct TestTestStatus {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     SPAN_ASSERT_TRUE(false, status_);
   }
 };

@@ -49,7 +49,7 @@ struct TestAssignment {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     Span<float> s1;

     float arr[] = {3, 4, 5};

@@ -71,7 +71,7 @@ struct TestBeginEnd {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     float arr[16];
     InitializeRange(arr, arr + 16);

@@ -93,7 +93,7 @@ struct TestRBeginREnd {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     float arr[16];
     InitializeRange(arr, arr + 16);

@@ -121,7 +121,7 @@ struct TestObservers {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     // empty
     {
       float *arr = nullptr;

@@ -148,7 +148,7 @@ struct TestCompare {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     float lhs_arr[16], rhs_arr[16];
     InitializeRange(lhs_arr, lhs_arr + 16);
     InitializeRange(rhs_arr, rhs_arr + 16);

@@ -178,7 +178,7 @@ struct TestIterConstruct {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index.
     Span<float>::iterator it1;
     Span<float>::iterator it2;
     SPAN_ASSERT_TRUE(it1 == it2, status_);

@@ -197,7 +197,7 @@ struct TestIterRef {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     float arr[16];
     InitializeRange(arr, arr + 16);

@@ -215,7 +215,7 @@ struct TestIterCalculate {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     float arr[16];
     InitializeRange(arr, arr + 16);

@@ -278,7 +278,7 @@ struct TestAsBytes {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     float arr[16];
     InitializeRange(arr, arr + 16);

@@ -313,7 +313,7 @@ struct TestAsWritableBytes {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     float arr[16];
     InitializeRange(arr, arr + 16);
@@ -34,9 +34,8 @@ TEST(ParallelFor2d, Test) {

   // working space is matrix of size (kDim1 x kDim2)
   std::vector<int> matrix(kDim1 * kDim2, 0);
-  BlockedSpace2d space(kDim1, [&](size_t i) {
-    return kDim2;
-  }, kGrainSize);
+  BlockedSpace2d space(
+      kDim1, [&](size_t) { return kDim2; }, kGrainSize);

   auto old = omp_get_max_threads();
   omp_set_num_threads(4);

@@ -167,7 +167,7 @@ double GetMultiMetricEval(xgboost::Metric* metric,
   info.weights_.HostVector() = weights;
   info.group_ptr_ = groups;

-  return metric->Eval(preds, info, false);
+  return metric->Eval(preds, info);
 }

 namespace xgboost {

@@ -653,8 +653,6 @@ class RMMAllocator {};

 void DeleteRMMResource(RMMAllocator* r) {}

-RMMAllocatorPtr SetUpRMMResourceForCppTests(int argc, char** argv) {
-  return {nullptr, DeleteRMMResource};
-}
+RMMAllocatorPtr SetUpRMMResourceForCppTests(int, char**) { return {nullptr, DeleteRMMResource}; }
 #endif  // !defined(XGBOOST_USE_RMM) || XGBOOST_USE_RMM != 1
 }  // namespace xgboost
@@ -29,9 +29,7 @@ int CudaArrayIterForTest::Next() {
 }

-std::shared_ptr<DMatrix> RandomDataGenerator::GenerateDeviceDMatrix(bool with_label,
-                                                                    bool float_label,
-                                                                    size_t classes) {
+std::shared_ptr<DMatrix> RandomDataGenerator::GenerateDeviceDMatrix() {
   CudaArrayIterForTest iter{this->sparsity_, this->rows_, this->cols_, 1};
   auto m = std::make_shared<data::IterativeDeviceDMatrix>(
       &iter, iter.Proxy(), Reset, Next, std::numeric_limits<float>::quiet_NaN(),

@@ -296,9 +296,7 @@ class RandomDataGenerator {
                                          bool float_label = true,
                                          size_t classes = 1) const;
 #if defined(XGBOOST_USE_CUDA)
-  std::shared_ptr<DMatrix> GenerateDeviceDMatrix(bool with_label = false,
-                                                 bool float_label = true,
-                                                 size_t classes = 1);
+  std::shared_ptr<DMatrix> GenerateDeviceDMatrix();
 #endif
 };
@@ -22,10 +22,10 @@ TEST(Metric, DeclareUnifiedTest(BinaryAUC)) {
   // Invalid dataset
   MetaInfo info;
   info.labels = linalg::Tensor<float, 2>{{0.0f, 0.0f}, {2}, -1};
-  float auc = metric->Eval({1, 1}, info, false);
+  float auc = metric->Eval({1, 1}, info);
   ASSERT_TRUE(std::isnan(auc));
   *info.labels.Data() = HostDeviceVector<float>{};
-  auc = metric->Eval(HostDeviceVector<float>{}, info, false);
+  auc = metric->Eval(HostDeviceVector<float>{}, info);
   ASSERT_TRUE(std::isnan(auc));

   EXPECT_NEAR(GetMetricEval(metric, {0, 1, 0, 1}, {0, 1, 0, 1}), 1.0f, 1e-10);

@@ -36,9 +36,9 @@ inline void CheckDeterministicMetricElementWise(StringView name, int32_t device)
     h_labels[i] = dist(&lcg);
   }

-  auto result = metric->Eval(predts, info, false);
+  auto result = metric->Eval(predts, info);
   for (size_t i = 0; i < 8; ++i) {
-    ASSERT_EQ(metric->Eval(predts, info, false), result);
+    ASSERT_EQ(metric->Eval(predts, info), result);
   }
 }
 }  // anonymous namespace

@@ -35,9 +35,9 @@ inline void CheckDeterministicMetricMultiClass(StringView name, int32_t device)
     }
   }

-  auto result = metric->Eval(predts, info, false);
+  auto result = metric->Eval(predts, info);
   for (size_t i = 0; i < 8; ++i) {
-    ASSERT_EQ(metric->Eval(predts, info, false), result);
+    ASSERT_EQ(metric->Eval(predts, info), result);
   }
 }
 }  // namespace xgboost

@@ -40,9 +40,9 @@ inline void CheckDeterministicMetricElementWise(StringView name, int32_t device)
     h_upper[i] = 10;
   }

-  auto result = metric->Eval(predts, info, false);
+  auto result = metric->Eval(predts, info);
   for (size_t i = 0; i < 8; ++i) {
-    ASSERT_EQ(metric->Eval(predts, info, false), result);
+    ASSERT_EQ(metric->Eval(predts, info), result);
   }
 }
 }  // anonymous namespace

@@ -72,7 +72,7 @@ TEST(Metric, DeclareUnifiedTest(AFTNegLogLik)) {
     std::unique_ptr<Metric> metric(Metric::Create("aft-nloglik", &lparam));
     metric->Configure({ {"aft_loss_distribution", test_case.dist_type},
                         {"aft_loss_distribution_scale", "1.0"} });
-    EXPECT_NEAR(metric->Eval(preds, info, false), test_case.reference_value, 1e-4);
+    EXPECT_NEAR(metric->Eval(preds, info), test_case.reference_value, 1e-4);
   }
 }
@@ -87,15 +87,15 @@ TEST(Metric, DeclareUnifiedTest(IntervalRegressionAccuracy)) {
   HostDeviceVector<bst_float> preds(4, std::log(60.0f));

   std::unique_ptr<Metric> metric(Metric::Create("interval-regression-accuracy", &lparam));
-  EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.75f);
+  EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.75f);
   info.labels_lower_bound_.HostVector()[2] = 70.0f;
-  EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.50f);
+  EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.50f);
   info.labels_upper_bound_.HostVector()[2] = std::numeric_limits<bst_float>::infinity();
-  EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.50f);
+  EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.50f);
   info.labels_upper_bound_.HostVector()[3] = std::numeric_limits<bst_float>::infinity();
-  EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.50f);
+  EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.50f);
   info.labels_lower_bound_.HostVector()[0] = 70.0f;
-  EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.25f);
+  EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.25f);

   CheckDeterministicMetricElementWise(StringView{"interval-regression-accuracy"}, GPUIDX);
 }

@@ -170,7 +170,7 @@ TEST(Objective, NDCGLambdaWeightComputerTest) {
   EXPECT_EQ(hgroup_dcgs.size(), segment_label_sorter->GetNumGroups());
   std::vector<float> hsorted_labels(segment_label_sorter->GetNumItems());
   dh::CopyDeviceSpanToVector(&hsorted_labels, segment_label_sorter->GetItemsSpan());
-  for (auto i = 0; i < hgroup_dcgs.size(); ++i) {
+  for (size_t i = 0; i < hgroup_dcgs.size(); ++i) {
     // Compute group DCG value on CPU and compare
     auto gbegin = hgroups[i];
     auto gend = hgroups[i + 1];

@@ -244,7 +244,7 @@ TEST(Objective, ComputeAndCompareMAPStatsTest) {
   std::vector<uint32_t> hgroups(segment_label_sorter->GetNumGroups() + 1);
   dh::CopyDeviceSpanToVector(&hgroups, segment_label_sorter->GetGroupsSpan());

-  for (auto i = 0; i < hgroups.size() - 1; ++i) {
+  for (size_t i = 0; i < hgroups.size() - 1; ++i) {
     auto gbegin = hgroups[i];
     auto gend = hgroups[i + 1];
     std::vector<xgboost::obj::ListEntry> lst_entry;
@@ -66,10 +66,7 @@ TEST(GPUPredictor, EllpackBasic) {
   size_t constexpr kCols {8};
   for (size_t bins = 2; bins < 258; bins += 16) {
     size_t rows = bins * 16;
-    auto p_m = RandomDataGenerator{rows, kCols, 0.0}
-                   .Bins(bins)
-                   .Device(0)
-                   .GenerateDeviceDMatrix(true);
+    auto p_m = RandomDataGenerator{rows, kCols, 0.0}.Bins(bins).Device(0).GenerateDeviceDMatrix();
     ASSERT_FALSE(p_m->PageExists<SparsePage>());
     TestPredictionFromGradientIndex<EllpackPage>("gpu_predictor", rows, kCols, p_m);
     TestPredictionFromGradientIndex<EllpackPage>("gpu_predictor", bins, kCols, p_m);

@@ -78,10 +75,8 @@ TEST(GPUPredictor, EllpackBasic) {

 TEST(GPUPredictor, EllpackTraining) {
   size_t constexpr kRows { 128 }, kCols { 16 }, kBins { 64 };
-  auto p_ellpack = RandomDataGenerator{kRows, kCols, 0.0}
-                       .Bins(kBins)
-                       .Device(0)
-                       .GenerateDeviceDMatrix(true);
+  auto p_ellpack =
+      RandomDataGenerator{kRows, kCols, 0.0}.Bins(kBins).Device(0).GenerateDeviceDMatrix();
   HostDeviceVector<float> storage(kRows * kCols);
   auto columnar = RandomDataGenerator{kRows, kCols, 0.0}
                       .Device(0)

@@ -94,8 +94,8 @@ TEST(GPUFeatureInteractionConstraint, Init) {
   tree::TrainParam param = GetParameter();
   param.interaction_constraints = R"([[0, 1, 3], [3, 5, 6]])";
   FConstraintWrapper constraints(param, kFeatures);
-  std::vector<int32_t> h_sets {0, 0, 0, 1, 1, 1};
-  std::vector<int32_t> h_sets_ptr {0, 1, 2, 2, 4, 4, 5, 6};
+  std::vector<bst_feature_t> h_sets {0, 0, 0, 1, 1, 1};
+  std::vector<size_t> h_sets_ptr {0, 1, 2, 2, 4, 4, 5, 6};
   auto d_sets = constraints.GetDSets();
   ASSERT_EQ(h_sets.size(), d_sets.size());
   auto d_sets_ptr = constraints.GetDSetsPtr();
@@ -346,7 +346,7 @@ TEST(GpuHist, UniformSampling) {
   // Make sure the predictions are the same.
   auto preds_h = preds.ConstHostVector();
   auto preds_sampling_h = preds_sampling.ConstHostVector();
-  for (int i = 0; i < kRows; i++) {
+  for (size_t i = 0; i < kRows; i++) {
     EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-8);
   }
 }

@@ -376,7 +376,7 @@ TEST(GpuHist, GradientBasedSampling) {
   // Make sure the predictions are the same.
   auto preds_h = preds.ConstHostVector();
   auto preds_sampling_h = preds_sampling.ConstHostVector();
-  for (int i = 0; i < kRows; i++) {
+  for (size_t i = 0; i < kRows; i++) {
     EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-3);
   }
 }

@@ -409,7 +409,7 @@ TEST(GpuHist, ExternalMemory) {
   // Make sure the predictions are the same.
   auto preds_h = preds.ConstHostVector();
   auto preds_ext_h = preds_ext.ConstHostVector();
-  for (int i = 0; i < kRows; i++) {
+  for (size_t i = 0; i < kRows; i++) {
     EXPECT_NEAR(preds_h[i], preds_ext_h[i], 1e-6);
   }
 }

@@ -451,7 +451,7 @@ TEST(GpuHist, ExternalMemoryWithSampling) {
   // Make sure the predictions are the same.
   auto preds_h = preds.ConstHostVector();
   auto preds_ext_h = preds_ext.ConstHostVector();
-  for (int i = 0; i < kRows; i++) {
+  for (size_t i = 0; i < kRows; i++) {
     ASSERT_NEAR(preds_h[i], preds_ext_h[i], 1e-3);
   }
 }