Fix compiler warnings. (#7974)
- Remove unused parameters. There are still many warnings that are not yet addressed; currently, the warnings in dmlc-core dominate the error log.
- Remove the `distributed` parameter from `Metric::Eval`.
- Fix some warnings about signed comparison.
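The first two items recur throughout the diff below. Here is a minimal, self-contained sketch of both patterns; `GradientBoosterLike`, `MeanMetric`, and the `comm` helpers are hypothetical stand-ins for xgboost's `GradientBooster`, `Metric`, and the rabit calls, not the real API:

```cpp
#include <cstddef>
#include <vector>

namespace comm {
// Stand-ins for rabit::IsDistributed()/rabit::Allreduce; the real calls talk
// to the tracker and reduce across workers.
inline bool IsDistributed() { return false; }
inline void AllreduceSum(double* dat, size_t n) {
  (void)dat;
  (void)n;
}
}  // namespace comm

class GradientBoosterLike {
 public:
  // Commenting out the parameter names keeps the documented signature while
  // silencing -Wunused-parameter in this default, unsupported implementation.
  virtual void Slice(int /*layer_begin*/, int /*layer_end*/, bool* out_of_bound) const {
    *out_of_bound = true;
  }
  virtual ~GradientBoosterLike() = default;
};

class MeanMetric {
 public:
  // After the change, Eval no longer takes a `distributed` flag; the metric
  // asks the communicator itself, so every call site drops one argument.
  double Eval(const std::vector<float>& preds) const {
    double dat[2] = {0.0, 0.0};
    for (float p : preds) {
      dat[0] += p;
      dat[1] += 1.0;
    }
    if (comm::IsDistributed()) {  // was: if (distributed) { ... }
      comm::AllreduceSum(dat, 2);
    }
    return dat[1] > 0.0 ? dat[0] / dat[1] : 0.0;
  }
};
```

Commenting out a parameter name keeps the declared signature and its documentation intact while telling the compiler the argument is intentionally ignored; querying the communicator inside `Eval` removes a flag that every call site previously had to thread through.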
parent d48123d23b
commit 1a33b50a0d
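Before the diff itself, a hedged sketch of the signed-comparison fixes that appear below (for example `for (size_t i = ...)` and `static_cast<size_t>(nidx)`); the helper and its names are hypothetical:

```cpp
#include <cstddef>
#include <vector>

// Hypothetical helper illustrating the -Wsign-compare fixes: loop indices use
// size_t instead of int, and a signed value compared with .size() is cast
// explicitly, mirroring `left_counts_.size() <= static_cast<size_t>(nidx)`.
inline int CountAbove(const std::vector<int>& values, int threshold, int nidx) {
  if (values.size() <= static_cast<size_t>(nidx)) {  // explicit cast; nidx >= 0 assumed
    return -1;  // sentinel: index out of range
  }
  int n = 0;
  for (size_t i = 0; i < values.size(); ++i) {  // size_t, not int
    if (values[i] > threshold) {
      ++n;
    }
  }
  return n;
}
```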
@@ -144,6 +144,15 @@ function(xgboost_set_cuda_flags target)
     set_property(TARGET ${target} PROPERTY CUDA_ARCHITECTURES ${CMAKE_CUDA_ARCHITECTURES})
   endif (CMAKE_VERSION VERSION_GREATER_EQUAL "3.18")
 
+  if (FORCE_COLORED_OUTPUT)
+    if (FORCE_COLORED_OUTPUT AND (CMAKE_GENERATOR STREQUAL "Ninja") AND
+        ((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") OR
+         (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")))
+      target_compile_options(${target} PRIVATE
+        $<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler=-fdiagnostics-color=always>)
+    endif()
+  endif (FORCE_COLORED_OUTPUT)
+
   if (USE_DEVICE_DEBUG)
     target_compile_options(${target} PRIVATE
       $<$<AND:$<CONFIG:DEBUG>,$<COMPILE_LANGUAGE:CUDA>>:-G;-src-in-ptx>)
@@ -68,8 +68,8 @@ class GradientBooster : public Model, public Configurable {
    * \param layer_end End of booster layer. 0 means do not limit trees.
    * \param out Output gradient booster
    */
-  virtual void Slice(int32_t layer_begin, int32_t layer_end, int32_t step,
-                     GradientBooster *out, bool* out_of_bound) const {
+  virtual void Slice(int32_t /*layer_begin*/, int32_t /*layer_end*/, int32_t /*step*/,
+                     GradientBooster* /*out*/, bool* /*out_of_bound*/) const {
     LOG(FATAL) << "Slice is not supported by current booster.";
   }
   /*!
@@ -89,7 +89,7 @@ class JsonReader {
     } else if (got == 0) {
       msg += "\\0\"";
     } else {
-      msg += (got <= 127 ? std::string{got} : std::to_string(got)) + " \"";  // NOLINT
+      msg += (got <= static_cast<char>(127) ? std::string{got} : std::to_string(got)) + " \"";
     }
     Error(msg);
   }
@@ -317,7 +317,8 @@ class TensorView {
   }
 
   template <size_t old_dim, size_t new_dim, int32_t D, typename Index>
-  LINALG_HD size_t MakeSliceDim(size_t new_shape[D], size_t new_stride[D], Index i) const {
+  LINALG_HD size_t MakeSliceDim(DMLC_ATTRIBUTE_UNUSED size_t new_shape[D],
+                                DMLC_ATTRIBUTE_UNUSED size_t new_stride[D], Index i) const {
     static_assert(old_dim < kDim, "");
     return stride_[old_dim] * i;
   }
@@ -57,12 +57,8 @@ class Metric : public Configurable {
    * \brief evaluate a specific metric
    * \param preds prediction
    * \param info information, including label etc.
-   * \param distributed whether a call to Allreduce is needed to gather
-   *        the average statistics across all the node,
-   *        this is only supported by some metrics
    */
-  virtual double Eval(const HostDeviceVector<bst_float> &preds,
-                      const MetaInfo &info, bool distributed) = 0;
+  virtual double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) = 0;
   /*! \return name of metric */
   virtual const char* Name() const = 0;
   /*! \brief virtual destructor */
@@ -103,8 +103,10 @@ class ObjFunction : public Configurable {
    * \param prediction Model prediction after transformation.
    * \param p_tree Tree that needs to be updated.
    */
-  virtual void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& position, MetaInfo const& info,
-                              HostDeviceVector<float> const& prediction, RegTree* p_tree) const {}
+  virtual void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& /*position*/,
+                              MetaInfo const& /*info*/,
+                              HostDeviceVector<float> const& /*prediction*/,
+                              RegTree* /*p_tree*/) const {}
 
   /*!
    * \brief Create an objective function according to name.
@@ -171,14 +171,14 @@ inline HistogramCuts SketchOnDMatrix(DMatrix* m, int32_t max_bins, int32_t n_thr
 
   if (!use_sorted) {
     HostSketchContainer container(max_bins, m->Info(), reduced, HostSketchContainer::UseGroup(info),
-                                  hessian, n_threads);
+                                  n_threads);
     for (auto const& page : m->GetBatches<SparsePage>()) {
       container.PushRowPage(page, info, hessian);
     }
     container.MakeCuts(&out);
   } else {
-    SortedSketchContainer container{
-        max_bins, m->Info(), reduced, HostSketchContainer::UseGroup(info), hessian, n_threads};
+    SortedSketchContainer container{max_bins, m->Info(), reduced,
+                                    HostSketchContainer::UseGroup(info), n_threads};
     for (auto const& page : m->GetBatches<SortedCSCPage>()) {
       container.PushColPage(page, info, hessian);
     }
@@ -168,8 +168,8 @@ class PartitionBuilder {
     const size_t n_left = child_nodes_sizes.first;
     const size_t n_right = child_nodes_sizes.second;
 
-    SetNLeftElems(node_in_set, range.begin(), range.end(), n_left);
-    SetNRightElems(node_in_set, range.begin(), range.end(), n_right);
+    SetNLeftElems(node_in_set, range.begin(), n_left);
+    SetNRightElems(node_in_set, range.begin(), n_right);
   }
 
   /**
@@ -188,8 +188,7 @@ class PartitionBuilder {
    */
   template <typename Pred>
   void PartitionRange(const size_t node_in_set, const size_t nid, common::Range1d range,
-                      bst_feature_t fidx, common::RowSetCollection* p_row_set_collection,
-                      Pred pred) {
+                      common::RowSetCollection* p_row_set_collection, Pred pred) {
     auto& row_set_collection = *p_row_set_collection;
     const size_t* p_ridx = row_set_collection[nid].begin;
     common::Span<const size_t> ridx(p_ridx + range.begin(), p_ridx + range.end());
@@ -200,8 +199,8 @@ class PartitionBuilder {
     const size_t n_left = child_nodes_sizes.first;
     const size_t n_right = child_nodes_sizes.second;
 
-    this->SetNLeftElems(node_in_set, range.begin(), range.end(), n_left);
-    this->SetNRightElems(node_in_set, range.begin(), range.end(), n_right);
+    this->SetNLeftElems(node_in_set, range.begin(), n_left);
+    this->SetNRightElems(node_in_set, range.begin(), n_right);
   }
 
   // allocate thread local memory, should be called for each specific task
@@ -223,12 +222,12 @@ class PartitionBuilder {
     return { mem_blocks_.at(task_idx)->Right(), end - begin };
   }
 
-  void SetNLeftElems(int nid, size_t begin, size_t end, size_t n_left) {
+  void SetNLeftElems(int nid, size_t begin, size_t n_left) {
    size_t task_idx = GetTaskIdx(nid, begin);
    mem_blocks_.at(task_idx)->n_left = n_left;
  }
 
-  void SetNRightElems(int nid, size_t begin, size_t end, size_t n_right) {
+  void SetNRightElems(int nid, size_t begin, size_t n_right) {
    size_t task_idx = GetTaskIdx(nid, begin);
    mem_blocks_.at(task_idx)->n_right = n_right;
  }
@@ -543,7 +543,7 @@ template class SketchContainerImpl<WXQuantileSketch<float, float>>;
 
 HostSketchContainer::HostSketchContainer(int32_t max_bins, MetaInfo const &info,
                                          std::vector<size_t> columns_size, bool use_group,
-                                         Span<float const> hessian, int32_t n_threads)
+                                         int32_t n_threads)
     : SketchContainerImpl{columns_size, max_bins, info.feature_types.ConstHostSpan(), use_group,
                           n_threads} {
   monitor_.Init(__func__);
@@ -774,7 +774,7 @@ class HostSketchContainer : public SketchContainerImpl<WQuantileSketch<float, fl
 
  public:
   HostSketchContainer(int32_t max_bins, MetaInfo const &info, std::vector<size_t> columns_size,
-                      bool use_group, Span<float const> hessian, int32_t n_threads);
+                      bool use_group, int32_t n_threads);
 };
 
 /**
@@ -868,7 +868,7 @@ class SortedSketchContainer : public SketchContainerImpl<WXQuantileSketch<float,
  public:
   explicit SortedSketchContainer(int32_t max_bins, MetaInfo const &info,
                                  std::vector<size_t> columns_size, bool use_group,
-                                 Span<float const> hessian, int32_t n_threads)
+                                 int32_t n_threads)
       : SketchContainerImpl{columns_size, max_bins, info.feature_types.ConstHostSpan(), use_group,
                             n_threads} {
     monitor_.Init(__func__);
@@ -163,8 +163,7 @@ void IterativeDeviceDMatrix::Initialize(DataIterHandle iter_handle, float missin
 
 BatchSet<EllpackPage> IterativeDeviceDMatrix::GetEllpackBatches(const BatchParam& param) {
   CHECK(page_);
-  auto begin_iter =
-      BatchIterator<EllpackPage>(new SimpleBatchIteratorImpl<EllpackPage>(page_));
+  auto begin_iter = BatchIterator<EllpackPage>(new SimpleBatchIteratorImpl<EllpackPage>(page_));
   return BatchSet<EllpackPage>(begin_iter);
 }
 }  // namespace data
@@ -45,8 +45,8 @@ class IterativeDeviceDMatrix : public DMatrix {
 
   bool EllpackExists() const override { return true; }
   bool SparsePageExists() const override { return false; }
-  DMatrix *Slice(common::Span<int32_t const> ridxs) override {
-    LOG(FATAL) << "Slicing DMatrix is not supported for Device DMatrix.";
+  DMatrix *Slice(common::Span<int32_t const>) override {
+    LOG(FATAL) << "Slicing DMatrix is not supported for Quantile DMatrix.";
     return nullptr;
   }
   BatchSet<SparsePage> GetRowBatches() override {
@@ -84,7 +84,7 @@ class DMatrixProxy : public DMatrix {
   bool SingleColBlock() const override { return true; }
   bool EllpackExists() const override { return true; }
   bool SparsePageExists() const override { return false; }
-  DMatrix *Slice(common::Span<int32_t const> ridxs) override {
+  DMatrix* Slice(common::Span<int32_t const> /*ridxs*/) override {
     LOG(FATAL) << "Slicing DMatrix is not supported for Proxy DMatrix.";
     return nullptr;
   }
@@ -100,7 +100,7 @@ class DMatrixProxy : public DMatrix {
     LOG(FATAL) << "Not implemented.";
     return BatchSet<SortedCSCPage>(BatchIterator<SortedCSCPage>(nullptr));
   }
-  BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) override {
+  BatchSet<EllpackPage> GetEllpackBatches(const BatchParam&) override {
     LOG(FATAL) << "Not implemented.";
     return BatchSet<EllpackPage>(BatchIterator<EllpackPage>(nullptr));
   }
@@ -218,7 +218,7 @@ void CopyGradient(HostDeviceVector<GradientPair> const* in_gpair, int32_t n_thre
 }
 
 void GBTree::UpdateTreeLeaf(DMatrix const* p_fmat, HostDeviceVector<float> const& predictions,
-                            ObjFunction const* obj, size_t gidx,
+                            ObjFunction const* obj,
                             std::vector<std::unique_ptr<RegTree>>* p_trees) {
   CHECK(!updaters_.empty());
   if (!updaters_.back()->HasNodePosition()) {
@@ -257,7 +257,7 @@ void GBTree::DoBoost(DMatrix* p_fmat, HostDeviceVector<GradientPair>* in_gpair,
   if (ngroup == 1) {
     std::vector<std::unique_ptr<RegTree>> ret;
     BoostNewTrees(in_gpair, p_fmat, 0, &ret);
-    UpdateTreeLeaf(p_fmat, predt->predictions, obj, 0, &ret);
+    UpdateTreeLeaf(p_fmat, predt->predictions, obj, &ret);
     const size_t num_new_trees = ret.size();
     new_trees.push_back(std::move(ret));
     auto v_predt = out.Slice(linalg::All(), 0);
@@ -274,7 +274,7 @@ void GBTree::DoBoost(DMatrix* p_fmat, HostDeviceVector<GradientPair>* in_gpair,
       CopyGradient(in_gpair, ctx_->Threads(), ngroup, gid, &tmp);
       std::vector<std::unique_ptr<RegTree>> ret;
       BoostNewTrees(&tmp, p_fmat, gid, &ret);
-      UpdateTreeLeaf(p_fmat, predt->predictions, obj, gid, &ret);
+      UpdateTreeLeaf(p_fmat, predt->predictions, obj, &ret);
       const size_t num_new_trees = ret.size();
       new_trees.push_back(std::move(ret));
       auto v_predt = out.Slice(linalg::All(), gid);
@@ -289,7 +289,7 @@ void GBTree::DoBoost(DMatrix* p_fmat, HostDeviceVector<GradientPair>* in_gpair,
   }
 
   monitor_.Stop("BoostNewTrees");
-  this->CommitModel(std::move(new_trees), p_fmat, predt);
+  this->CommitModel(std::move(new_trees));
 }
 
 void GBTree::InitUpdater(Args const& cfg) {
@@ -378,9 +378,7 @@ void GBTree::BoostNewTrees(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fma
   }
 }
 
-void GBTree::CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees,
-                         DMatrix* m,
-                         PredictionCacheEntry* predts) {
+void GBTree::CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees) {
   monitor_.Start("CommitModel");
   for (uint32_t gid = 0; gid < model_.learner_model_param->num_output_group; ++gid) {
     model_.CommitModel(std::move(new_trees[gid]), gid);
@@ -490,15 +488,14 @@ void GBTree::Slice(int32_t layer_begin, int32_t layer_end, int32_t step,
         "want to update a portion of trees.";
   }
 
-  *out_of_bound = detail::SliceTrees(
-      layer_begin, layer_end, step, this->model_, tparam_, layer_trees,
-      [&](auto const &in_it, auto const &out_it) {
-        auto new_tree =
-            std::make_unique<RegTree>(*this->model_.trees.at(in_it));
-        bst_group_t group = this->model_.tree_info[in_it];
-        out_trees.at(out_it) = std::move(new_tree);
-        out_trees_info.at(out_it) = group;
-      });
+  *out_of_bound = detail::SliceTrees(layer_begin, layer_end, step, this->model_, layer_trees,
+                                     [&](auto const& in_it, auto const& out_it) {
+                                       auto new_tree =
+                                           std::make_unique<RegTree>(*this->model_.trees.at(in_it));
+                                       bst_group_t group = this->model_.tree_info[in_it];
+                                       out_trees.at(out_it) = std::move(new_tree);
+                                       out_trees_info.at(out_it) = group;
+                                     });
 }
 
 void GBTree::PredictBatch(DMatrix* p_fmat,
@@ -674,11 +671,10 @@ class Dart : public GBTree {
     auto p_dart = dynamic_cast<Dart*>(out);
     CHECK(p_dart);
     CHECK(p_dart->weight_drop_.empty());
-    detail::SliceTrees(
-        layer_begin, layer_end, step, model_, tparam_, this->LayerTrees(),
-        [&](auto const& in_it, auto const&) {
-          p_dart->weight_drop_.push_back(this->weight_drop_.at(in_it));
-        });
+    detail::SliceTrees(layer_begin, layer_end, step, model_, this->LayerTrees(),
+                       [&](auto const& in_it, auto const&) {
+                         p_dart->weight_drop_.push_back(this->weight_drop_.at(in_it));
+                       });
   }
 
   void SaveModel(Json *p_out) const override {
@@ -901,9 +897,7 @@ class Dart : public GBTree {
 
  protected:
   // commit new trees all at once
-  void
-  CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees,
-              DMatrix*, PredictionCacheEntry*) override {
+  void CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees) override {
     int num_new_trees = 0;
     for (uint32_t gid = 0; gid < model_.learner_model_param->num_output_group; ++gid) {
       num_new_trees += new_trees[gid].size();
@@ -162,9 +162,8 @@ inline std::pair<uint32_t, uint32_t> LayerToTree(gbm::GBTreeModel const &model,
 
 // Call fn for each pair of input output tree. Return true if index is out of bound.
 template <typename Func>
-inline bool SliceTrees(int32_t layer_begin, int32_t layer_end, int32_t step,
-                       GBTreeModel const &model, GBTreeTrainParam const &tparam,
-                       uint32_t layer_trees, Func fn) {
+bool SliceTrees(int32_t layer_begin, int32_t layer_end, int32_t step, GBTreeModel const& model,
+                uint32_t layer_trees, Func fn) {
   uint32_t tree_begin, tree_end;
   std::tie(tree_begin, tree_end) = detail::LayerToTree(model, layer_begin, layer_end);
   if (tree_end > model.trees.size()) {
@@ -206,8 +205,7 @@ class GBTree : public GradientBooster {
   * \brief Optionally update the leaf value.
   */
  void UpdateTreeLeaf(DMatrix const* p_fmat, HostDeviceVector<float> const& predictions,
-                      ObjFunction const* obj, size_t gidx,
-                      std::vector<std::unique_ptr<RegTree>>* p_trees);
+                      ObjFunction const* obj, std::vector<std::unique_ptr<RegTree>>* p_trees);
 
   /*! \brief Carry out one iteration of boosting */
   void DoBoost(DMatrix* p_fmat, HostDeviceVector<GradientPair>* in_gpair,
@@ -325,7 +323,7 @@ class GBTree : public GradientBooster {
   };
 
   if (importance_type == "weight") {
-    add_score([&](auto const &p_tree, bst_node_t, bst_feature_t split) {
+    add_score([&](auto const&, bst_node_t, bst_feature_t split) {
       gain_map[split] = split_counts[split];
     });
   } else if (importance_type == "gain" || importance_type == "total_gain") {
@@ -423,9 +421,7 @@ class GBTree : public GradientBooster {
                  DMatrix* f_dmat = nullptr) const;
 
   // commit new trees all at once
-  virtual void CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees,
-                           DMatrix* m,
-                           PredictionCacheEntry* predts);
+  virtual void CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees);
 
   // --- data structure ---
   GBTreeModel model_;
@@ -1234,8 +1234,7 @@ class LearnerImpl : public LearnerIO {
 
       obj_->EvalTransform(&out);
       for (auto& ev : metrics_) {
-        os << '\t' << data_names[i] << '-' << ev->Name() << ':'
-           << ev->Eval(out, m->Info(), tparam_.dsplit == DataSplitMode::kRow);
+        os << '\t' << data_names[i] << '-' << ev->Name() << ':' << ev->Eval(out, m->Info());
       }
     }
 
@@ -254,8 +254,7 @@ std::pair<double, uint32_t> RankingAUC(std::vector<float> const &predts,
 
 template <typename Curve>
 class EvalAUC : public Metric {
-  double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info,
-              bool distributed) override {
+  double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info) override {
     double auc {0};
     if (tparam_->gpu_id != GenericParameter::kCpuId) {
       preds.SetDevice(tparam_->gpu_id);
@@ -312,10 +312,8 @@ void SegmentedReduceAUC(common::Span<size_t const> d_unique_idx,
  * up each class in all kernels.
  */
 template <bool scale, typename Fn>
-double GPUMultiClassAUCOVR(common::Span<float const> predts,
-                           MetaInfo const &info, int32_t device,
-                           common::Span<uint32_t> d_class_ptr, size_t n_classes,
-                           std::shared_ptr<DeviceAUCCache> cache, Fn area_fn) {
+double GPUMultiClassAUCOVR(MetaInfo const &info, int32_t device, common::Span<uint32_t> d_class_ptr,
+                           size_t n_classes, std::shared_ptr<DeviceAUCCache> cache, Fn area_fn) {
   dh::safe_cuda(cudaSetDevice(device));
   /**
    * Sorted idx
@@ -478,8 +476,7 @@ double GPUMultiClassROCAUC(common::Span<float const> predts,
                 double tp, size_t /*class_id*/) {
     return TrapezoidArea(fp_prev, fp, tp_prev, tp);
   };
-  return GPUMultiClassAUCOVR<true>(predts, info, device, dh::ToSpan(class_ptr),
-                                   n_classes, cache, fn);
+  return GPUMultiClassAUCOVR<true>(info, device, dh::ToSpan(class_ptr), n_classes, cache, fn);
 }
 
 namespace {
@@ -704,8 +701,7 @@ double GPUMultiClassPRAUC(common::Span<float const> predts,
     return detail::CalcDeltaPRAUC(fp_prev, fp, tp_prev, tp,
                                   d_totals[class_id].first);
   };
-  return GPUMultiClassAUCOVR<false>(predts, info, device, d_class_ptr,
-                                    n_classes, cache, fn);
+  return GPUMultiClassAUCOVR<false>(info, device, d_class_ptr, n_classes, cache, fn);
 }
 
 template <typename Fn>
@@ -178,8 +178,7 @@ class PseudoErrorLoss : public Metric {
     out["pseudo_huber_param"] = ToJson(param_);
   }
 
-  double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info,
-              bool distributed) override {
+  double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) override {
     CHECK_EQ(info.labels.Shape(0), info.num_row_);
     auto labels = info.labels.View(tparam_->gpu_id);
     preds.SetDevice(tparam_->gpu_id);
@@ -197,7 +196,7 @@ class PseudoErrorLoss : public Metric {
       return std::make_tuple(v, wt);
     });
     double dat[2]{result.Residue(), result.Weights()};
-    if (distributed) {
+    if (rabit::IsDistributed()) {
       rabit::Allreduce<rabit::op::Sum>(dat, 2);
     }
     return EvalRowMAPE::GetFinal(dat[0], dat[1]);
@@ -342,8 +341,7 @@ struct EvalEWiseBase : public Metric {
   EvalEWiseBase() = default;
   explicit EvalEWiseBase(char const* policy_param) : policy_{policy_param} {}
 
-  double Eval(HostDeviceVector<bst_float> const& preds, const MetaInfo& info,
-              bool distributed) override {
+  double Eval(HostDeviceVector<bst_float> const& preds, const MetaInfo& info) override {
     CHECK_EQ(preds.Size(), info.labels.Size())
         << "label and prediction size not match, "
         << "hint: use merror or mlogloss for multi-class classification";
@@ -367,10 +365,7 @@ struct EvalEWiseBase : public Metric {
     });
 
     double dat[2]{result.Residue(), result.Weights()};
-    if (distributed) {
-      rabit::Allreduce<rabit::op::Sum>(dat, 2);
-    }
+    rabit::Allreduce<rabit::op::Sum>(dat, 2);
     return Policy::GetFinal(dat[0], dat[1]);
   }
 
@@ -167,8 +167,7 @@ class MultiClassMetricsReduction {
  */
 template<typename Derived>
 struct EvalMClassBase : public Metric {
-  double Eval(const HostDeviceVector<float> &preds, const MetaInfo &info,
-              bool distributed) override {
+  double Eval(const HostDeviceVector<float> &preds, const MetaInfo &info) override {
     if (info.labels.Size() == 0) {
       CHECK_EQ(preds.Size(), 0);
     } else {
@@ -186,9 +185,7 @@ struct EvalMClassBase : public Metric {
       dat[0] = result.Residue();
       dat[1] = result.Weights();
     }
-    if (distributed) {
-      rabit::Allreduce<rabit::op::Sum>(dat, 2);
-    }
+    rabit::Allreduce<rabit::op::Sum>(dat, 2);
     return Derived::GetFinal(dat[0], dat[1]);
   }
   /*!
@@ -102,9 +102,8 @@ struct EvalAMS : public Metric {
     name_ = os.str();
   }
 
-  double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info,
-              bool distributed) override {
-    CHECK(!distributed) << "metric AMS do not support distributed evaluation";
+  double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) override {
+    CHECK(!rabit::IsDistributed()) << "metric AMS do not support distributed evaluation";
     using namespace std;  // NOLINT(*)
 
     const auto ndata = static_cast<bst_omp_uint>(info.labels.Size());
@@ -161,8 +160,7 @@ struct EvalRank : public Metric, public EvalRankConfig {
   std::unique_ptr<xgboost::Metric> rank_gpu_;
 
  public:
-  double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info,
-              bool distributed) override {
+  double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) override {
    CHECK_EQ(preds.Size(), info.labels.Size())
        << "label size predict size not match";
 
@@ -185,7 +183,7 @@ struct EvalRank : public Metric, public EvalRankConfig {
        rank_gpu_.reset(GPUMetric::CreateGPUMetric(this->Name(), tparam_));
      }
      if (rank_gpu_) {
-        sum_metric = rank_gpu_->Eval(preds, info, distributed);
+        sum_metric = rank_gpu_->Eval(preds, info);
      }
    }
 
@@ -218,7 +216,7 @@ struct EvalRank : public Metric, public EvalRankConfig {
      exc.Rethrow();
    }
 
-    if (distributed) {
+    if (rabit::IsDistributed()) {
      double dat[2]{sum_metric, static_cast<double>(ngroups)};
      // approximately estimate the metric using mean
      rabit::Allreduce<rabit::op::Sum>(dat, 2);
@@ -342,9 +340,8 @@ struct EvalMAP : public EvalRank {
 struct EvalCox : public Metric {
  public:
   EvalCox() = default;
-  double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info,
-              bool distributed) override {
-    CHECK(!distributed) << "Cox metric does not support distributed evaluation";
+  double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) override {
+    CHECK(!rabit::IsDistributed()) << "Cox metric does not support distributed evaluation";
    using namespace std;  // NOLINT(*)
 
    const auto ndata = static_cast<bst_omp_uint>(info.labels.Size());
@@ -29,8 +29,7 @@ DMLC_REGISTRY_FILE_TAG(rank_metric_gpu);
 template <typename EvalMetricT>
 struct EvalRankGpu : public GPUMetric, public EvalRankConfig {
  public:
-  double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info,
-              bool distributed) override {
+  double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info) override {
    // Sanity check is done by the caller
    std::vector<unsigned> tgptr(2, 0);
    tgptr[1] = static_cast<unsigned>(preds.Size());
@@ -206,20 +206,15 @@ template <typename Policy> struct EvalEWiseSurvivalBase : public Metric {
     CHECK(tparam_);
   }
 
-  double Eval(const HostDeviceVector<float> &preds, const MetaInfo &info,
-              bool distributed) override {
+  double Eval(const HostDeviceVector<float>& preds, const MetaInfo& info) override {
     CHECK_EQ(preds.Size(), info.labels_lower_bound_.Size());
     CHECK_EQ(preds.Size(), info.labels_upper_bound_.Size());
     CHECK(tparam_);
-    auto result =
-        reducer_.Reduce(*tparam_, info.weights_, info.labels_lower_bound_,
-                        info.labels_upper_bound_, preds);
+    auto result = reducer_.Reduce(*tparam_, info.weights_, info.labels_lower_bound_,
+                                  info.labels_upper_bound_, preds);
 
-    double dat[2] {result.Residue(), result.Weights()};
-    if (distributed) {
-      rabit::Allreduce<rabit::op::Sum>(dat, 2);
-    }
+    double dat[2]{result.Residue(), result.Weights()};
+    rabit::Allreduce<rabit::op::Sum>(dat, 2);
     return Policy::GetFinal(dat[0], dat[1]);
   }
 
@@ -240,10 +235,9 @@ struct AFTNLogLikDispatcher : public Metric {
     return "aft-nloglik";
   }
 
-  double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info,
-              bool distributed) override {
+  double Eval(const HostDeviceVector<bst_float>& preds, const MetaInfo& info) override {
     CHECK(metric_) << "AFT metric must be configured first, with distribution type and scale";
-    return metric_->Eval(preds, info, distributed);
+    return metric_->Eval(preds, info);
   }
 
   void Configure(const Args& args) override {
@@ -116,7 +116,7 @@ class RowPartitioner {
     Segment segment = ridx_segments_.at(nidx);  // rows belongs to node nidx
     auto d_ridx = ridx_.CurrentSpan();
     auto d_position = position_.CurrentSpan();
-    if (left_counts_.size() <= nidx) {
+    if (left_counts_.size() <= static_cast<size_t>(nidx)) {
       left_counts_.resize((nidx * 2) + 1);
       thrust::fill(left_counts_.begin(), left_counts_.end(), 0);
     }
@@ -203,8 +203,8 @@ class HistEvaluator {
   // Returns the sum of gradients corresponding to the data points that contains
   // a non-missing value for the particular feature fid.
   template <int d_step>
-  GradStats EnumerateSplit(common::HistogramCuts const &cut, common::Span<size_t const> sorted_idx,
-                           const common::GHistRow &hist, bst_feature_t fidx, bst_node_t nidx,
+  GradStats EnumerateSplit(common::HistogramCuts const &cut, const common::GHistRow &hist,
+                           bst_feature_t fidx, bst_node_t nidx,
                            TreeEvaluator::SplitEvaluator<TrainParam> const &evaluator,
                            SplitEntry *p_best) const {
     static_assert(d_step == +1 || d_step == -1, "Invalid step.");
@@ -333,9 +333,9 @@ class HistEvaluator {
         EnumeratePart<-1>(cut, sorted_idx, histogram, fidx, nidx, evaluator, best);
       }
     } else {
-      auto grad_stats = EnumerateSplit<+1>(cut, {}, histogram, fidx, nidx, evaluator, best);
+      auto grad_stats = EnumerateSplit<+1>(cut, histogram, fidx, nidx, evaluator, best);
       if (SplitContainsMissingValues(grad_stats, snode_[nidx])) {
-        EnumerateSplit<-1>(cut, {}, histogram, fidx, nidx, evaluator, best);
+        EnumerateSplit<-1>(cut, histogram, fidx, nidx, evaluator, best);
       }
     }
   }
@@ -440,7 +440,7 @@ template <typename Partitioner, typename ExpandEntry>
 void UpdatePredictionCacheImpl(GenericParameter const *ctx, RegTree const *p_last_tree,
                                std::vector<Partitioner> const &partitioner,
                                HistEvaluator<ExpandEntry> const &hist_evaluator,
-                               TrainParam const &param, linalg::VectorView<float> out_preds) {
+                               linalg::VectorView<float> out_preds) {
   CHECK_GT(out_preds.Size(), 0U);
 
   CHECK(p_last_tree);
@@ -116,7 +116,7 @@ class GloablApproxBuilder {
     // Caching prediction seems redundant for approx tree method, as sketching takes up
     // majority of training time.
     CHECK_EQ(out_preds.Size(), data->Info().num_row_);
-    UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, evaluator_, param_, out_preds);
+    UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, evaluator_, out_preds);
     monitor_->Stop(__func__);
   }
 
@@ -83,7 +83,7 @@ class ApproxRowPartitioner {
       const size_t task_id = partition_builder_.GetTaskIdx(node_in_set, r.begin());
       partition_builder_.AllocateForTask(task_id);
       partition_builder_.PartitionRange(
-          node_in_set, nid, r, fidx, &row_set_collection_, [&](size_t row_id) {
+          node_in_set, nid, r, &row_set_collection_, [&](size_t row_id) {
            auto cut_value = SearchCutValue(row_id, fidx, index, cut_ptrs, cut_values);
            if (std::isnan(cut_value)) {
              return candidate.split.DefaultLeft();
@@ -563,7 +563,7 @@ struct GPUHistMakerDevice {
    // when processing a large batch
    this->AllReduceHist(hist_nidx.at(0), reducer, hist_nidx.size());
 
-    for (int i = 0; i < subtraction_nidx.size(); i++) {
+    for (size_t i = 0; i < subtraction_nidx.size(); i++) {
      auto build_hist_nidx = hist_nidx.at(i);
      auto subtraction_trick_nidx = subtraction_nidx.at(i);
      auto parent_nidx = candidates.at(i).nid;
@@ -257,7 +257,7 @@ bool QuantileHistMaker::Builder::UpdatePredictionCache(DMatrix const *data,
  }
  monitor_->Start(__func__);
  CHECK_EQ(out_preds.Size(), data->Info().num_row_);
-  UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, *evaluator_, param_, out_preds);
+  UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, *evaluator_, out_preds);
  monitor_->Stop(__func__);
  return true;
 }
@@ -67,7 +67,7 @@ TEST(SegmentedUnique, Basic) {
   CHECK_EQ(n_uniques, 5);
 
   std::vector<float> values_sol{0.1f, 0.2f, 0.3f, 0.62448811531066895f, 0.4f};
-  for (auto i = 0 ; i < values_sol.size(); i ++) {
+  for (size_t i = 0 ; i < values_sol.size(); i ++) {
     ASSERT_EQ(d_vals_out[i], values_sol[i]);
   }
 
@@ -84,7 +84,7 @@ TEST(SegmentedUnique, Basic) {
       d_segs_out.data().get(), d_vals_out.data().get(),
       thrust::equal_to<float>{});
   ASSERT_EQ(n_uniques, values.size());
-  for (auto i = 0 ; i < values.size(); i ++) {
+  for (size_t i = 0 ; i < values.size(); i ++) {
     ASSERT_EQ(d_vals_out[i], values[i]);
   }
 }
@@ -315,10 +315,10 @@ TEST(Linalg, Popc) {
 TEST(Linalg, Stack) {
   Tensor<float, 3> l{{2, 3, 4}, kCpuId};
   ElementWiseTransformHost(l.View(kCpuId), omp_get_max_threads(),
-                           [=](size_t i, float v) { return i; });
+                           [=](size_t i, float) { return i; });
   Tensor<float, 3> r_0{{2, 3, 4}, kCpuId};
   ElementWiseTransformHost(r_0.View(kCpuId), omp_get_max_threads(),
-                           [=](size_t i, float v) { return i; });
+                           [=](size_t i, float) { return i; });
 
   Stack(&l, r_0);
 
@@ -50,8 +50,8 @@ TEST(PartitionBuilder, BasicTest) {
       right[i] = left_total + value_right++;
     }
 
-    builder.SetNLeftElems(nid, begin, end, n_left);
-    builder.SetNRightElems(nid, begin, end, n_right);
+    builder.SetNLeftElems(nid, begin, n_left);
+    builder.SetNRightElems(nid, begin, n_right);
   }
  }
  builder.CalculateRowOffsets();
@@ -77,7 +77,7 @@ void TestDistributedQuantile(size_t rows, size_t cols) {
  std::vector<float> hessian(rows, 1.0);
  auto hess = Span<float const>{hessian};
 
-  ContainerType<use_column> sketch_distributed(n_bins, m->Info(), column_size, false, hess,
+  ContainerType<use_column> sketch_distributed(n_bins, m->Info(), column_size, false,
                                                OmpGetNumThreads(0));
 
  if (use_column) {
@@ -98,7 +98,7 @@ void TestDistributedQuantile(size_t rows, size_t cols) {
  CHECK_EQ(rabit::GetWorldSize(), 1);
  std::for_each(column_size.begin(), column_size.end(), [=](auto& size) { size *= world; });
  m->Info().num_row_ = world * rows;
-  ContainerType<use_column> sketch_on_single_node(n_bins, m->Info(), column_size, false, hess,
+  ContainerType<use_column> sketch_on_single_node(n_bins, m->Info(), column_size, false,
                                                  OmpGetNumThreads(0));
  m->Info().num_row_ = rows;
 
@@ -190,7 +190,7 @@ TEST(Quantile, SameOnAllWorkers) {
 
  constexpr size_t kRows = 1000, kCols = 100;
  RunWithSeedsAndBins(
-      kRows, [=](int32_t seed, size_t n_bins, MetaInfo const &info) {
+      kRows, [=](int32_t seed, size_t n_bins, MetaInfo const&) {
        auto rank = rabit::GetRank();
        HostDeviceVector<float> storage;
        std::vector<FeatureType> ft(kCols);
@@ -36,7 +36,7 @@ struct TestTestStatus {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     SPAN_ASSERT_TRUE(false, status_);
   }
 };
@@ -49,7 +49,7 @@ struct TestAssignment {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     Span<float> s1;
 
     float arr[] = {3, 4, 5};
@@ -71,7 +71,7 @@ struct TestBeginEnd {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     float arr[16];
     InitializeRange(arr, arr + 16);
 
@@ -93,7 +93,7 @@ struct TestRBeginREnd {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     float arr[16];
     InitializeRange(arr, arr + 16);
 
@@ -121,7 +121,7 @@ struct TestObservers {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     // empty
     {
       float *arr = nullptr;
@@ -148,7 +148,7 @@ struct TestCompare {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     float lhs_arr[16], rhs_arr[16];
     InitializeRange(lhs_arr, lhs_arr + 16);
     InitializeRange(rhs_arr, rhs_arr + 16);
@@ -178,7 +178,7 @@ struct TestIterConstruct {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index.
     Span<float>::iterator it1;
     Span<float>::iterator it2;
     SPAN_ASSERT_TRUE(it1 == it2, status_);
@@ -197,7 +197,7 @@ struct TestIterRef {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     float arr[16];
     InitializeRange(arr, arr + 16);
 
@@ -215,7 +215,7 @@ struct TestIterCalculate {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     float arr[16];
     InitializeRange(arr, arr + 16);
 
@@ -278,7 +278,7 @@ struct TestAsBytes {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     float arr[16];
     InitializeRange(arr, arr + 16);
 
@@ -313,7 +313,7 @@ struct TestAsWritableBytes {
   XGBOOST_DEVICE void operator()() {
     this->operator()(0);
   }
-  XGBOOST_DEVICE void operator()(int _idx) {
+  XGBOOST_DEVICE void operator()(size_t) {  // size_t for CUDA index
     float arr[16];
     InitializeRange(arr, arr + 16);
 
|||||||
@ -34,9 +34,8 @@ TEST(ParallelFor2d, Test) {
|
|||||||
|
|
||||||
// working space is matrix of size (kDim1 x kDim2)
|
// working space is matrix of size (kDim1 x kDim2)
|
||||||
std::vector<int> matrix(kDim1 * kDim2, 0);
|
std::vector<int> matrix(kDim1 * kDim2, 0);
|
||||||
BlockedSpace2d space(kDim1, [&](size_t i) {
|
BlockedSpace2d space(
|
||||||
return kDim2;
|
kDim1, [&](size_t) { return kDim2; }, kGrainSize);
|
||||||
}, kGrainSize);
|
|
||||||
|
|
||||||
auto old = omp_get_max_threads();
|
auto old = omp_get_max_threads();
|
||||||
omp_set_num_threads(4);
|
omp_set_num_threads(4);
|
||||||
|
|||||||
@ -167,7 +167,7 @@ double GetMultiMetricEval(xgboost::Metric* metric,
|
|||||||
info.weights_.HostVector() = weights;
|
info.weights_.HostVector() = weights;
|
||||||
info.group_ptr_ = groups;
|
info.group_ptr_ = groups;
|
||||||
|
|
||||||
return metric->Eval(preds, info, false);
|
return metric->Eval(preds, info);
|
||||||
}
|
}
|
||||||
|
|
||||||
namespace xgboost {
|
namespace xgboost {
|
||||||
@ -653,8 +653,6 @@ class RMMAllocator {};
|
|||||||
|
|
||||||
void DeleteRMMResource(RMMAllocator* r) {}
|
void DeleteRMMResource(RMMAllocator* r) {}
|
||||||
|
|
||||||
RMMAllocatorPtr SetUpRMMResourceForCppTests(int argc, char** argv) {
|
RMMAllocatorPtr SetUpRMMResourceForCppTests(int, char**) { return {nullptr, DeleteRMMResource}; }
|
||||||
return {nullptr, DeleteRMMResource};
|
|
||||||
}
|
|
||||||
#endif // !defined(XGBOOST_USE_RMM) || XGBOOST_USE_RMM != 1
|
#endif // !defined(XGBOOST_USE_RMM) || XGBOOST_USE_RMM != 1
|
||||||
} // namespace xgboost
|
} // namespace xgboost
|
||||||
|
|||||||
@ -29,9 +29,7 @@ int CudaArrayIterForTest::Next() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
std::shared_ptr<DMatrix> RandomDataGenerator::GenerateDeviceDMatrix(bool with_label,
|
std::shared_ptr<DMatrix> RandomDataGenerator::GenerateDeviceDMatrix() {
|
||||||
bool float_label,
|
|
||||||
size_t classes) {
|
|
||||||
CudaArrayIterForTest iter{this->sparsity_, this->rows_, this->cols_, 1};
|
CudaArrayIterForTest iter{this->sparsity_, this->rows_, this->cols_, 1};
|
||||||
auto m = std::make_shared<data::IterativeDeviceDMatrix>(
|
auto m = std::make_shared<data::IterativeDeviceDMatrix>(
|
||||||
&iter, iter.Proxy(), Reset, Next, std::numeric_limits<float>::quiet_NaN(),
|
&iter, iter.Proxy(), Reset, Next, std::numeric_limits<float>::quiet_NaN(),
|
||||||
|
|||||||
@ -296,9 +296,7 @@ class RandomDataGenerator {
|
|||||||
bool float_label = true,
|
bool float_label = true,
|
||||||
size_t classes = 1) const;
|
size_t classes = 1) const;
|
||||||
#if defined(XGBOOST_USE_CUDA)
|
#if defined(XGBOOST_USE_CUDA)
|
||||||
std::shared_ptr<DMatrix> GenerateDeviceDMatrix(bool with_label = false,
|
std::shared_ptr<DMatrix> GenerateDeviceDMatrix();
|
||||||
bool float_label = true,
|
|
||||||
size_t classes = 1);
|
|
||||||
#endif
|
#endif
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@ -22,10 +22,10 @@ TEST(Metric, DeclareUnifiedTest(BinaryAUC)) {
|
|||||||
// Invalid dataset
|
// Invalid dataset
|
||||||
MetaInfo info;
|
MetaInfo info;
|
||||||
info.labels = linalg::Tensor<float, 2>{{0.0f, 0.0f}, {2}, -1};
|
info.labels = linalg::Tensor<float, 2>{{0.0f, 0.0f}, {2}, -1};
|
||||||
float auc = metric->Eval({1, 1}, info, false);
|
float auc = metric->Eval({1, 1}, info);
|
||||||
ASSERT_TRUE(std::isnan(auc));
|
ASSERT_TRUE(std::isnan(auc));
|
||||||
*info.labels.Data() = HostDeviceVector<float>{};
|
*info.labels.Data() = HostDeviceVector<float>{};
|
||||||
auc = metric->Eval(HostDeviceVector<float>{}, info, false);
|
auc = metric->Eval(HostDeviceVector<float>{}, info);
|
||||||
ASSERT_TRUE(std::isnan(auc));
|
ASSERT_TRUE(std::isnan(auc));
|
||||||
|
|
||||||
EXPECT_NEAR(GetMetricEval(metric, {0, 1, 0, 1}, {0, 1, 0, 1}), 1.0f, 1e-10);
|
EXPECT_NEAR(GetMetricEval(metric, {0, 1, 0, 1}, {0, 1, 0, 1}), 1.0f, 1e-10);
|
||||||
|
|||||||
@@ -36,9 +36,9 @@ inline void CheckDeterministicMetricElementWise(StringView name, int32_t device)
     h_labels[i] = dist(&lcg);
   }

-  auto result = metric->Eval(predts, info, false);
+  auto result = metric->Eval(predts, info);
   for (size_t i = 0; i < 8; ++i) {
-    ASSERT_EQ(metric->Eval(predts, info, false), result);
+    ASSERT_EQ(metric->Eval(predts, info), result);
   }
 }
 } // anonymous namespace
@@ -35,9 +35,9 @@ inline void CheckDeterministicMetricMultiClass(StringView name, int32_t device)
     }
   }

-  auto result = metric->Eval(predts, info, false);
+  auto result = metric->Eval(predts, info);
   for (size_t i = 0; i < 8; ++i) {
-    ASSERT_EQ(metric->Eval(predts, info, false), result);
+    ASSERT_EQ(metric->Eval(predts, info), result);
   }
 }
 } // namespace xgboost
@@ -40,9 +40,9 @@ inline void CheckDeterministicMetricElementWise(StringView name, int32_t device)
     h_upper[i] = 10;
   }

-  auto result = metric->Eval(predts, info, false);
+  auto result = metric->Eval(predts, info);
   for (size_t i = 0; i < 8; ++i) {
-    ASSERT_EQ(metric->Eval(predts, info, false), result);
+    ASSERT_EQ(metric->Eval(predts, info), result);
   }
 }
 } // anonymous namespace
@@ -72,7 +72,7 @@ TEST(Metric, DeclareUnifiedTest(AFTNegLogLik)) {
     std::unique_ptr<Metric> metric(Metric::Create("aft-nloglik", &lparam));
     metric->Configure({ {"aft_loss_distribution", test_case.dist_type},
                         {"aft_loss_distribution_scale", "1.0"} });
-    EXPECT_NEAR(metric->Eval(preds, info, false), test_case.reference_value, 1e-4);
+    EXPECT_NEAR(metric->Eval(preds, info), test_case.reference_value, 1e-4);
   }
 }

@@ -87,15 +87,15 @@ TEST(Metric, DeclareUnifiedTest(IntervalRegressionAccuracy)) {
   HostDeviceVector<bst_float> preds(4, std::log(60.0f));

   std::unique_ptr<Metric> metric(Metric::Create("interval-regression-accuracy", &lparam));
-  EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.75f);
+  EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.75f);
   info.labels_lower_bound_.HostVector()[2] = 70.0f;
-  EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.50f);
+  EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.50f);
   info.labels_upper_bound_.HostVector()[2] = std::numeric_limits<bst_float>::infinity();
-  EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.50f);
+  EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.50f);
   info.labels_upper_bound_.HostVector()[3] = std::numeric_limits<bst_float>::infinity();
-  EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.50f);
+  EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.50f);
   info.labels_lower_bound_.HostVector()[0] = 70.0f;
-  EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.25f);
+  EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.25f);

   CheckDeterministicMetricElementWise(StringView{"interval-regression-accuracy"}, GPUIDX);
 }
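One hedged reading of those expected values: the metric appears to count the fraction of rows whose log-scale prediction lies inside `[log(lower), log(upper)]`, so raising a lower bound above 60 knocks one row out each time (0.75, then 0.50, then 0.25), while relaxing an upper bound to infinity can never fail a row. A sketch under that assumption, with made-up bounds since the test's `MetaInfo` setup is outside this hunk:

#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// Not the upstream implementation, only an illustration of the arithmetic
// the assertions above exercise.
double IntervalAccuracy(const std::vector<double>& preds,
                        const std::vector<double>& lower,
                        const std::vector<double>& upper) {
  std::size_t hits = 0;
  for (std::size_t i = 0; i < preds.size(); ++i) {
    if (std::log(lower[i]) <= preds[i] && preds[i] <= std::log(upper[i])) {
      ++hits;
    }
  }
  return static_cast<double>(hits) / static_cast<double>(preds.size());
}

int main() {
  std::vector<double> preds(4, std::log(60.0));
  // Illustrative bounds only: three rows contain 60, one does not.
  std::vector<double> lower{10, 10, 10, 100}, upper{100, 100, 100, 200};
  std::printf("%.2f\n", IntervalAccuracy(preds, lower, upper));  // prints 0.75
  return 0;
}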
@@ -170,7 +170,7 @@ TEST(Objective, NDCGLambdaWeightComputerTest) {
   EXPECT_EQ(hgroup_dcgs.size(), segment_label_sorter->GetNumGroups());
   std::vector<float> hsorted_labels(segment_label_sorter->GetNumItems());
   dh::CopyDeviceSpanToVector(&hsorted_labels, segment_label_sorter->GetItemsSpan());
-  for (auto i = 0; i < hgroup_dcgs.size(); ++i) {
+  for (size_t i = 0; i < hgroup_dcgs.size(); ++i) {
     // Compute group DCG value on CPU and compare
     auto gbegin = hgroups[i];
     auto gend = hgroups[i + 1];
@@ -244,7 +244,7 @@ TEST(Objective, ComputeAndCompareMAPStatsTest) {
   std::vector<uint32_t> hgroups(segment_label_sorter->GetNumGroups() + 1);
   dh::CopyDeviceSpanToVector(&hgroups, segment_label_sorter->GetGroupsSpan());

-  for (auto i = 0; i < hgroups.size() - 1; ++i) {
+  for (size_t i = 0; i < hgroups.size() - 1; ++i) {
     auto gbegin = hgroups[i];
     auto gend = hgroups[i + 1];
     std::vector<xgboost::obj::ListEntry> lst_entry;
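The `auto i = 0` loops fixed in these two hunks are one of the signed-comparison warnings called out in the commit message: `0` is an `int` literal, so `auto` deduces `int`, and comparing that against the unsigned result of `size()` trips `-Wsign-compare`. A minimal reproduction:

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  std::vector<float> v(4, 1.0f);
  // for (auto i = 0; i < v.size(); ++i) {}  // warns: int vs. size_t
  for (std::size_t i = 0; i < v.size(); ++i) {  // counter matches size()'s type
    std::printf("%f\n", v[i]);
  }
  return 0;
}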
@@ -66,10 +66,7 @@ TEST(GPUPredictor, EllpackBasic) {
   size_t constexpr kCols {8};
   for (size_t bins = 2; bins < 258; bins += 16) {
     size_t rows = bins * 16;
-    auto p_m = RandomDataGenerator{rows, kCols, 0.0}
-                   .Bins(bins)
-                   .Device(0)
-                   .GenerateDeviceDMatrix(true);
+    auto p_m = RandomDataGenerator{rows, kCols, 0.0}.Bins(bins).Device(0).GenerateDeviceDMatrix();
     ASSERT_FALSE(p_m->PageExists<SparsePage>());
     TestPredictionFromGradientIndex<EllpackPage>("gpu_predictor", rows, kCols, p_m);
     TestPredictionFromGradientIndex<EllpackPage>("gpu_predictor", bins, kCols, p_m);
@@ -78,10 +75,8 @@ TEST(GPUPredictor, EllpackBasic) {

 TEST(GPUPredictor, EllpackTraining) {
   size_t constexpr kRows { 128 }, kCols { 16 }, kBins { 64 };
-  auto p_ellpack = RandomDataGenerator{kRows, kCols, 0.0}
-                       .Bins(kBins)
-                       .Device(0)
-                       .GenerateDeviceDMatrix(true);
+  auto p_ellpack =
+      RandomDataGenerator{kRows, kCols, 0.0}.Bins(kBins).Device(0).GenerateDeviceDMatrix();
   HostDeviceVector<float> storage(kRows * kCols);
   auto columnar = RandomDataGenerator{kRows, kCols, 0.0}
                       .Device(0)
@@ -94,8 +94,8 @@ TEST(GPUFeatureInteractionConstraint, Init) {
   tree::TrainParam param = GetParameter();
   param.interaction_constraints = R"([[0, 1, 3], [3, 5, 6]])";
   FConstraintWrapper constraints(param, kFeatures);
-  std::vector<int32_t> h_sets {0, 0, 0, 1, 1, 1};
-  std::vector<int32_t> h_sets_ptr {0, 1, 2, 2, 4, 4, 5, 6};
+  std::vector<bst_feature_t> h_sets {0, 0, 0, 1, 1, 1};
+  std::vector<size_t> h_sets_ptr {0, 1, 2, 2, 4, 4, 5, 6};
   auto d_sets = constraints.GetDSets();
   ASSERT_EQ(h_sets.size(), d_sets.size());
   auto d_sets_ptr = constraints.GetDSetsPtr();
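Here the host-side expectation vectors change element type so that element-wise comparison against the device-side data is homogeneous: `bst_feature_t` is xgboost's unsigned feature index, and the CSR-style pointer array uses `size_t` offsets. A sketch of the idea; the alias is reproduced on the assumption it is `uint32_t`, as in xgboost's `base.h`:

#include <cstddef>
#include <cstdint>
#include <vector>

using bst_feature_t = std::uint32_t;  // assumed alias, mirroring xgboost

int main() {
  // Matching the device element types avoids signed/unsigned (and width)
  // mismatches when host and device buffers are compared element-wise.
  std::vector<bst_feature_t> h_sets{0, 0, 0, 1, 1, 1};
  std::vector<std::size_t> h_sets_ptr{0, 1, 2, 2, 4, 4, 5, 6};
  return (h_sets.size() == 6 && h_sets_ptr.size() == 8) ? 0 : 1;
}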
@@ -242,7 +242,7 @@ void TestHistogramIndexImpl() {
   int constexpr kNRows = 1000, kNCols = 10;

   // Build 2 matrices and build a histogram maker with that

   GenericParameter generic_param(CreateEmptyGenericParam(0));
   tree::GPUHistMaker hist_maker{&generic_param,ObjInfo{ObjInfo::kRegression}},
                      hist_maker_ext{&generic_param,ObjInfo{ObjInfo::kRegression}};
@@ -346,7 +346,7 @@ TEST(GpuHist, UniformSampling) {
   // Make sure the predictions are the same.
   auto preds_h = preds.ConstHostVector();
   auto preds_sampling_h = preds_sampling.ConstHostVector();
-  for (int i = 0; i < kRows; i++) {
+  for (size_t i = 0; i < kRows; i++) {
     EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-8);
   }
 }
@@ -376,7 +376,7 @@ TEST(GpuHist, GradientBasedSampling) {
   // Make sure the predictions are the same.
   auto preds_h = preds.ConstHostVector();
   auto preds_sampling_h = preds_sampling.ConstHostVector();
-  for (int i = 0; i < kRows; i++) {
+  for (size_t i = 0; i < kRows; i++) {
     EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-3);
   }
 }
@@ -409,7 +409,7 @@ TEST(GpuHist, ExternalMemory) {
   // Make sure the predictions are the same.
   auto preds_h = preds.ConstHostVector();
   auto preds_ext_h = preds_ext.ConstHostVector();
-  for (int i = 0; i < kRows; i++) {
+  for (size_t i = 0; i < kRows; i++) {
     EXPECT_NEAR(preds_h[i], preds_ext_h[i], 1e-6);
   }
 }
@@ -451,7 +451,7 @@ TEST(GpuHist, ExternalMemoryWithSampling) {
   // Make sure the predictions are the same.
   auto preds_h = preds.ConstHostVector();
   auto preds_ext_h = preds_ext.ConstHostVector();
-  for (int i = 0; i < kRows; i++) {
+  for (size_t i = 0; i < kRows; i++) {
     ASSERT_NEAR(preds_h[i], preds_ext_h[i], 1e-3);
   }
 }
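The four GpuHist hunks above share one pattern: `kRows` is declared as a `size_t` constant, so an `int` loop counter makes `i < kRows` a signed/unsigned comparison. The fix is the same as in the objective tests: give the counter the bound's type.

#include <cstddef>

int main() {
  constexpr std::size_t kRows = 4096;  // illustrative; each test defines its own
  double acc = 0.0;
  // for (int i = 0; i < kRows; i++) {}  // -Wsign-compare
  for (std::size_t i = 0; i < kRows; i++) {  // counter type matches the bound
    acc += 1.0;
  }
  return acc == static_cast<double>(kRows) ? 0 : 1;
}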