Rename and extract Context. (#8528)

* Rename `GenericParameter` to `Context`.
* Rename header file to reflect the change.
* Rename all references.
This commit is contained in:
Jiaming Yuan
2022-12-07 04:58:54 +08:00
committed by GitHub
parent 05fc6f3ca9
commit 3e26107a9c
105 changed files with 548 additions and 574 deletions

View File

@@ -114,7 +114,7 @@ class BroadcastFunctor {
int root_;
};
void InMemoryHandler::Init(int world_size, int rank) {
void InMemoryHandler::Init(int world_size, int) {
CHECK(world_size_ < world_size) << "In memory handler already initialized.";
std::unique_lock<std::mutex> lock(mutex_);
@@ -124,7 +124,7 @@ void InMemoryHandler::Init(int world_size, int rank) {
cv_.notify_all();
}
void InMemoryHandler::Shutdown(uint64_t sequence_number, int rank) {
void InMemoryHandler::Shutdown(uint64_t sequence_number, int) {
CHECK(world_size_ > 0) << "In memory handler already shutdown.";
std::unique_lock<std::mutex> lock(mutex_);

View File

@@ -8,21 +8,21 @@
#define XGBOOST_COMMON_HIST_UTIL_H_
#include <xgboost/data.h>
#include <xgboost/generic_parameters.h>
#include <limits>
#include <vector>
#include <algorithm>
#include <limits>
#include <map>
#include <memory>
#include <utility>
#include <map>
#include <vector>
#include "algorithm.h" // SegmentId
#include "categorical.h"
#include "common.h"
#include "quantile.h"
#include "row_set.h"
#include "threading_utils.h"
#include "timer.h"
#include "algorithm.h" // SegmentId
namespace xgboost {
class GHistIndexMatrix;

View File

@@ -4,9 +4,9 @@
#ifndef XGBOOST_COMMON_LINALG_OP_CUH_
#define XGBOOST_COMMON_LINALG_OP_CUH_
#include "xgboost/generic_parameters.h"
#include "device_helpers.cuh"
#include "linalg_op.h"
#include "xgboost/context.h"
#include "xgboost/linalg.h"
namespace xgboost {

View File

@@ -9,7 +9,7 @@
#include "common.h"
#include "threading_utils.h"
#include "transform_iterator.h" // MakeIndexTransformIter
#include "xgboost/generic_parameters.h"
#include "xgboost/context.h" // Context
#include "xgboost/linalg.h"
namespace xgboost {
@@ -54,7 +54,7 @@ void ElementWiseTransformDevice(linalg::TensorView<T, D>, Fn&&, void* = nullptr)
}
template <typename T, int32_t D, typename Fn>
void ElementWiseKernel(GenericParameter const* ctx, linalg::TensorView<T, D> t, Fn&& fn) {
void ElementWiseKernel(Context const* ctx, linalg::TensorView<T, D> t, Fn&& fn) {
if (!ctx->IsCPU()) {
common::AssertGPUSupport();
}

View File

@@ -7,7 +7,7 @@
#include <type_traits> // std::is_same
#include "threading_utils.h" // MemStackAllocator, ParallelFor, DefaultMaxThreads
#include "xgboost/generic_parameters.h" // Context
#include "xgboost/context.h" // Context
#include "xgboost/host_device_vector.h" // HostDeviceVector
namespace xgboost {

View File

@@ -6,7 +6,7 @@
#include "device_helpers.cuh" // dh::Reduce, safe_cuda, dh::XGBCachingDeviceAllocator
#include "numeric.h"
#include "xgboost/generic_parameters.h" // Context
#include "xgboost/context.h" // Context
#include "xgboost/host_device_vector.h" // HostDeviceVector
namespace xgboost {

View File

@@ -12,7 +12,7 @@
#include "common.h" // AssertGPUSupport
#include "threading_utils.h" // MemStackAllocator, DefaultMaxThreads
#include "xgboost/generic_parameters.h" // Context
#include "xgboost/context.h" // Context
#include "xgboost/host_device_vector.h" // HostDeviceVector
namespace xgboost {

View File

@@ -10,15 +10,15 @@
#include <xgboost/data.h>
#include <algorithm>
#include <limits>
#include <memory>
#include <utility>
#include <limits>
#include <vector>
#include "../tree/hist/expand_entry.h"
#include "categorical.h"
#include "column_matrix.h"
#include "../tree/hist/expand_entry.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/context.h"
#include "xgboost/tree_model.h"
namespace xgboost {

View File

@@ -7,7 +7,7 @@
#include "common.h" // common::OptionalWeights
#include "device_helpers.cuh" // dh::MakeTransformIterator, tcbegin, tcend
#include "stats.cuh" // common::SegmentedQuantile, common::SegmentedWeightedQuantile
#include "xgboost/generic_parameters.h" // Context
#include "xgboost/context.h" // Context
#include "xgboost/host_device_vector.h" // HostDeviceVector
#include "xgboost/linalg.h" // linalg::TensorView, UnravelIndex, Apply

View File

@@ -11,7 +11,7 @@
#include "device_helpers.cuh"
#include "linalg_op.cuh"
#include "xgboost/generic_parameters.h"
#include "xgboost/context.h"
#include "xgboost/linalg.h"
#include "xgboost/tree_model.h"

View File

@@ -10,7 +10,7 @@
#include "common.h" // AssertGPUSupport
#include "transform_iterator.h" // MakeIndexTransformIter
#include "xgboost/generic_parameters.h"
#include "xgboost/context.h" // Context
#include "xgboost/linalg.h"
namespace xgboost {

62
src/context.cc Normal file
View File

@@ -0,0 +1,62 @@
/**
* Copyright 2014-2022 by XGBoost Contributors
*
* \brief Context object used for controlling runtime parameters.
*/
#include <xgboost/context.h>
#include "common/common.h"
#include "common/threading_utils.h"
namespace xgboost {
// Register Context with the DMLC parameter registry so its fields can be set
// through the generic Args / UpdateAllowUnknown machinery.
DMLC_REGISTER_PARAMETER(Context);
// Out-of-class definitions for the static constexpr members (needed for
// ODR-use before C++17's implicit-inline semantics for constexpr statics).
std::int32_t constexpr Context::kCpuId;
std::int64_t constexpr Context::kDefaultSeed;
// Cache the cgroup CFS CPU quota once at construction; Threads() uses it to
// cap the OpenMP thread count when running inside a CPU-limited container.
Context::Context() : cfs_cpu_count_{common::GetCfsCPUCount()} {}
/**
 * \brief Validate and normalize `gpu_id` against the GPUs actually visible at
 *        runtime, then make the resulting device active.
 *
 * \param require_gpu True when another component (e.g. tree method or
 *        predictor) needs a GPU, so an unspecified `gpu_id` defaults to 0.
 */
void Context::ConfigureGpuId(bool require_gpu) {
#if defined(XGBOOST_USE_CUDA)
if (gpu_id == kCpuId) { // 0. User didn't specify the `gpu_id'
if (require_gpu) { // 1. `tree_method' or `predictor' or both are using
// GPU.
// 2. Use device 0 as default.
this->UpdateAllowUnknown(Args{{"gpu_id", "0"}});
}
}
// 3. When booster is loaded from a memory image (Python pickle or R
// raw model), number of available GPUs could be different. Wrap around it.
int32_t n_gpus = common::AllVisibleGPUs();
if (n_gpus == 0) {
// No device present at all: fall back to CPU, warning if the user had
// explicitly requested a GPU ordinal.
if (gpu_id != kCpuId) {
LOG(WARNING) << "No visible GPU is found, setting `gpu_id` to -1";
}
this->UpdateAllowUnknown(Args{{"gpu_id", std::to_string(kCpuId)}});
} else if (fail_on_invalid_gpu_id) {
// Strict mode: reject an out-of-range ordinal instead of wrapping it.
CHECK(gpu_id == kCpuId || gpu_id < n_gpus)
<< "Only " << n_gpus << " GPUs are visible, gpu_id " << gpu_id << " is invalid.";
} else if (gpu_id != kCpuId && gpu_id >= n_gpus) {
// Lenient mode: wrap the ordinal around the visible device count.
LOG(WARNING) << "Only " << n_gpus << " GPUs are visible, setting `gpu_id` to "
<< gpu_id % n_gpus;
this->UpdateAllowUnknown(Args{{"gpu_id", std::to_string(gpu_id % n_gpus)}});
}
#else
// Just set it to CPU, don't think about it.
this->UpdateAllowUnknown(Args{{"gpu_id", std::to_string(kCpuId)}});
(void)(require_gpu);
#endif  // defined(XGBOOST_USE_CUDA)
// Bind the (possibly remapped) device for subsequent calls on this thread;
// presumably a no-op for kCpuId / CPU-only builds — confirm in common::SetDevice.
common::SetDevice(this->gpu_id);
}
std::int32_t Context::Threads() const {
auto n_threads = common::OmpGetNumThreads(nthread);
if (cfs_cpu_count_ > 0) {
n_threads = std::min(n_threads, cfs_cpu_count_);
}
return n_threads;
}
} // namespace xgboost

View File

@@ -86,7 +86,7 @@ class IterativeDMatrix : public DMatrix {
LOG(FATAL) << "Slicing DMatrix is not supported for Quantile DMatrix.";
return nullptr;
}
DMatrix *SliceCol(std::size_t start, std::size_t size) override {
DMatrix *SliceCol(std::size_t, std::size_t) override {
LOG(FATAL) << "Slicing DMatrix columns is not supported for Quantile DMatrix.";
return nullptr;
}

View File

@@ -10,10 +10,10 @@
#include <string>
#include <utility>
#include "xgboost/data.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/c_api.h"
#include "adapter.h"
#include "xgboost/c_api.h"
#include "xgboost/context.h"
#include "xgboost/data.h"
namespace xgboost {
namespace data {
@@ -87,7 +87,7 @@ class DMatrixProxy : public DMatrix {
LOG(FATAL) << "Slicing DMatrix is not supported for Proxy DMatrix.";
return nullptr;
}
DMatrix* SliceCol(std::size_t start, std::size_t size) override {
DMatrix* SliceCol(std::size_t, std::size_t) override {
LOG(FATAL) << "Slicing DMatrix columns is not supported for Proxy DMatrix.";
return nullptr;
}

View File

@@ -53,7 +53,7 @@ DMatrix* SimpleDMatrix::SliceCol(std::size_t start, std::size_t size) {
auto& h_data = out_page.data.HostVector();
auto& h_offset = out_page.offset.HostVector();
size_t rptr{0};
for (auto i = 0; i < this->Info().num_row_; i++) {
for (bst_row_t i = 0; i < this->Info().num_row_; i++) {
auto inst = batch[i];
auto prev_size = h_data.size();
std::copy_if(inst.begin(), inst.end(), std::back_inserter(h_data), [&](Entry e) {

View File

@@ -107,7 +107,7 @@ class SparsePageDMatrix : public DMatrix {
LOG(FATAL) << "Slicing DMatrix is not supported for external memory.";
return nullptr;
}
DMatrix *SliceCol(std::size_t start, std::size_t size) override {
DMatrix *SliceCol(std::size_t, std::size_t) override {
LOG(FATAL) << "Slicing DMatrix columns is not supported for external memory.";
return nullptr;
}

View File

@@ -71,7 +71,7 @@ void LinearCheckLayer(unsigned layer_begin) {
*/
class GBLinear : public GradientBooster {
public:
explicit GBLinear(LearnerModelParam const* learner_model_param, GenericParameter const* ctx)
explicit GBLinear(LearnerModelParam const* learner_model_param, Context const* ctx)
: GradientBooster{ctx},
learner_model_param_{learner_model_param},
model_{learner_model_param},
@@ -179,7 +179,7 @@ class GBLinear : public GradientBooster {
unsigned) override {
model_.LazyInitModel();
LinearCheckLayer(layer_begin);
auto base_margin = p_fmat->Info().base_margin_.View(GenericParameter::kCpuId);
auto base_margin = p_fmat->Info().base_margin_.View(Context::kCpuId);
const int ngroup = model_.learner_model_param->num_output_group;
const size_t ncolumns = model_.learner_model_param->num_feature + 1;
// allocate space for (#features + bias) times #groups times #rows
@@ -250,7 +250,7 @@ class GBLinear : public GradientBooster {
linalg::TensorView<float, 2> scores{
*out_scores,
{learner_model_param_->num_feature, n_groups},
GenericParameter::kCpuId};
Context::kCpuId};
for (size_t i = 0; i < learner_model_param_->num_feature; ++i) {
for (bst_group_t g = 0; g < n_groups; ++g) {
scores(i, g) = model_[i][g];
@@ -355,7 +355,7 @@ DMLC_REGISTER_PARAMETER(GBLinearTrainParam);
XGBOOST_REGISTER_GBM(GBLinear, "gblinear")
.describe("Linear booster, implement generalized linear model.")
.set_body([](LearnerModelParam const* booster_config, GenericParameter const* ctx) {
.set_body([](LearnerModelParam const* booster_config, Context const* ctx) {
return new GBLinear(booster_config, ctx);
});
} // namespace gbm

View File

@@ -3,21 +3,23 @@
* \file gbm.cc
* \brief Registry of gradient boosters.
*/
#include "xgboost/gbm.h"
#include <dmlc/registry.h>
#include <memory>
#include <string>
#include <vector>
#include <memory>
#include "xgboost/gbm.h"
#include "xgboost/context.h"
#include "xgboost/learner.h"
#include "xgboost/generic_parameters.h"
namespace dmlc {
DMLC_REGISTRY_ENABLE(::xgboost::GradientBoosterReg);
} // namespace dmlc
namespace xgboost {
GradientBooster* GradientBooster::Create(const std::string& name, GenericParameter const* ctx,
GradientBooster* GradientBooster::Create(const std::string& name, Context const* ctx,
LearnerModelParam const* learner_model_param) {
auto *e = ::dmlc::Registry< ::xgboost::GradientBoosterReg>::Get()->Find(name);
if (e == nullptr) {

View File

@@ -67,7 +67,7 @@ void GBTree::Configure(const Args& cfg) {
#if defined(XGBOOST_USE_ONEAPI)
if (!oneapi_predictor_) {
oneapi_predictor_ = std::unique_ptr<Predictor>(
Predictor::Create("oneapi_predictor", this->generic_param_));
Predictor::Create("oneapi_predictor", this->ctx_));
}
oneapi_predictor_->Configure(cfg);
#endif // defined(XGBOOST_USE_ONEAPI)
@@ -204,7 +204,7 @@ void GPUCopyGradient(HostDeviceVector<GradientPair> const*, bst_group_t, bst_gro
void CopyGradient(HostDeviceVector<GradientPair> const* in_gpair, int32_t n_threads,
bst_group_t n_groups, bst_group_t group_id,
HostDeviceVector<GradientPair>* out_gpair) {
if (in_gpair->DeviceIdx() != GenericParameter::kCpuId) {
if (in_gpair->DeviceIdx() != Context::kCpuId) {
GPUCopyGradient(in_gpair, n_groups, group_id, out_gpair);
} else {
std::vector<GradientPair> &tmp_h = out_gpair->HostVector();
@@ -651,7 +651,7 @@ void GPUDartInplacePredictInc(common::Span<float> /*out_predts*/, common::Span<f
class Dart : public GBTree {
public:
explicit Dart(LearnerModelParam const* booster_config, GenericParameter const* ctx)
explicit Dart(LearnerModelParam const* booster_config, Context const* ctx)
: GBTree(booster_config, ctx) {}
void Configure(const Args& cfg) override {
@@ -741,7 +741,7 @@ class Dart : public GBTree {
auto n_groups = model_.learner_model_param->num_output_group;
PredictionCacheEntry predts; // temporary storage for prediction
if (ctx_->gpu_id != GenericParameter::kCpuId) {
if (ctx_->gpu_id != Context::kCpuId) {
predts.predictions.SetDevice(ctx_->gpu_id);
}
predts.predictions.Resize(p_fmat->Info().num_row_ * n_groups, 0);
@@ -763,7 +763,7 @@ class Dart : public GBTree {
CHECK_EQ(p_out_preds->predictions.Size(), predts.predictions.Size());
size_t n_rows = p_fmat->Info().num_row_;
if (predts.predictions.DeviceIdx() != GenericParameter::kCpuId) {
if (predts.predictions.DeviceIdx() != Context::kCpuId) {
p_out_preds->predictions.SetDevice(predts.predictions.DeviceIdx());
GPUDartPredictInc(p_out_preds->predictions.DeviceSpan(),
predts.predictions.DeviceSpan(), w, n_rows, n_groups,
@@ -1019,13 +1019,13 @@ DMLC_REGISTER_PARAMETER(DartTrainParam);
XGBOOST_REGISTER_GBM(GBTree, "gbtree")
.describe("Tree booster, gradient boosted trees.")
.set_body([](LearnerModelParam const* booster_config, GenericParameter const* ctx) {
.set_body([](LearnerModelParam const* booster_config, Context const* ctx) {
auto* p = new GBTree(booster_config, ctx);
return p;
});
XGBOOST_REGISTER_GBM(Dart, "dart")
.describe("Tree booster, dart.")
.set_body([](LearnerModelParam const* booster_config, GenericParameter const* ctx) {
.set_body([](LearnerModelParam const* booster_config, Context const* ctx) {
GBTree* p = new Dart(booster_config, ctx);
return p;
});

View File

@@ -1,10 +1,10 @@
/*!
* Copyright 2021 by Contributors
*/
#include "xgboost/span.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/linalg.h"
#include "../common/device_helpers.cuh"
#include "xgboost/context.h"
#include "xgboost/linalg.h"
#include "xgboost/span.h"
namespace xgboost {
namespace gbm {

View File

@@ -190,7 +190,7 @@ bool SliceTrees(int32_t layer_begin, int32_t layer_end, int32_t step, GBTreeMode
// gradient boosted trees
class GBTree : public GradientBooster {
public:
explicit GBTree(LearnerModelParam const* booster_config, GenericParameter const* ctx)
explicit GBTree(LearnerModelParam const* booster_config, Context const* ctx)
: GradientBooster{ctx}, model_(booster_config, ctx_) {}
void Configure(const Args& cfg) override;

View File

@@ -5,16 +5,17 @@
#ifndef XGBOOST_GBM_GBTREE_MODEL_H_
#define XGBOOST_GBM_GBTREE_MODEL_H_
#include <dmlc/parameter.h>
#include <dmlc/io.h>
#include <xgboost/model.h>
#include <xgboost/tree_model.h>
#include <xgboost/parameter.h>
#include <dmlc/parameter.h>
#include <xgboost/context.h>
#include <xgboost/learner.h>
#include <xgboost/model.h>
#include <xgboost/parameter.h>
#include <xgboost/tree_model.h>
#include <memory>
#include <utility>
#include <string>
#include <utility>
#include <vector>
#include "../common/threading_utils.h"
@@ -89,7 +90,7 @@ struct GBTreeModelParam : public dmlc::Parameter<GBTreeModelParam> {
struct GBTreeModel : public Model {
public:
explicit GBTreeModel(LearnerModelParam const* learner_model, GenericParameter const* ctx)
explicit GBTreeModel(LearnerModelParam const* learner_model, Context const* ctx)
: learner_model_param{learner_model}, ctx_{ctx} {}
void Configure(const Args& cfg) {
// initialize model parameters if not yet been initialized.
@@ -143,7 +144,7 @@ struct GBTreeModel : public Model {
std::vector<int> tree_info;
private:
GenericParameter const* ctx_;
Context const* ctx_;
};
} // namespace gbm
} // namespace xgboost

View File

@@ -35,10 +35,10 @@
#include "common/version.h"
#include "xgboost/base.h"
#include "xgboost/c_api.h"
#include "xgboost/context.h"
#include "xgboost/data.h"
#include "xgboost/feature_map.h"
#include "xgboost/gbm.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/json.h"
#include "xgboost/logging.h"
@@ -306,56 +306,6 @@ struct LearnerTrainParam : public XGBoostParameter<LearnerTrainParam> {
DMLC_REGISTER_PARAMETER(LearnerModelParamLegacy);
DMLC_REGISTER_PARAMETER(LearnerTrainParam);
DMLC_REGISTER_PARAMETER(GenericParameter);
int constexpr GenericParameter::kCpuId;
int64_t constexpr GenericParameter::kDefaultSeed;
GenericParameter::GenericParameter() : cfs_cpu_count_{common::GetCfsCPUCount()} {}
void GenericParameter::ConfigureGpuId(bool require_gpu) {
#if defined(XGBOOST_USE_CUDA)
if (gpu_id == kCpuId) { // 0. User didn't specify the `gpu_id'
if (require_gpu) { // 1. `tree_method' or `predictor' or both are using
// GPU.
// 2. Use device 0 as default.
this->UpdateAllowUnknown(Args{{"gpu_id", "0"}});
}
}
// 3. When booster is loaded from a memory image (Python pickle or R
// raw model), number of available GPUs could be different. Wrap around it.
int32_t n_gpus = common::AllVisibleGPUs();
if (n_gpus == 0) {
if (gpu_id != kCpuId) {
LOG(WARNING) << "No visible GPU is found, setting `gpu_id` to -1";
}
this->UpdateAllowUnknown(Args{{"gpu_id", std::to_string(kCpuId)}});
} else if (fail_on_invalid_gpu_id) {
CHECK(gpu_id == kCpuId || gpu_id < n_gpus)
<< "Only " << n_gpus << " GPUs are visible, gpu_id "
<< gpu_id << " is invalid.";
} else if (gpu_id != kCpuId && gpu_id >= n_gpus) {
LOG(WARNING) << "Only " << n_gpus
<< " GPUs are visible, setting `gpu_id` to " << gpu_id % n_gpus;
this->UpdateAllowUnknown(Args{{"gpu_id", std::to_string(gpu_id % n_gpus)}});
}
#else
// Just set it to CPU, don't think about it.
this->UpdateAllowUnknown(Args{{"gpu_id", std::to_string(kCpuId)}});
(void)(require_gpu);
#endif // defined(XGBOOST_USE_CUDA)
common::SetDevice(this->gpu_id);
}
int32_t GenericParameter::Threads() const {
auto n_threads = common::OmpGetNumThreads(nthread);
if (cfs_cpu_count_ > 0) {
n_threads = std::min(n_threads, cfs_cpu_count_);
}
return n_threads;
}
using LearnerAPIThreadLocalStore =
dmlc::ThreadLocalStore<std::map<Learner const *, XGBAPIThreadLocalEntry>>;
@@ -461,7 +411,7 @@ class LearnerConfiguration : public Learner {
monitor_.Init("Learner");
auto& local_cache = (*ThreadLocalPredictionCache::Get())[this];
for (std::shared_ptr<DMatrix> const& d : cache) {
local_cache.Cache(d, GenericParameter::kCpuId);
local_cache.Cache(d, Context::kCpuId);
}
}
~LearnerConfiguration() override {
@@ -541,6 +491,9 @@ class LearnerConfiguration : public Learner {
// If configuration is loaded, ensure that the model came from the same version
CHECK(IsA<Object>(in));
auto origin_version = Version::Load(in);
if (std::get<0>(Version::kInvalid) == std::get<0>(origin_version)) {
LOG(WARNING) << "Invalid version string in config";
}
if (!Version::Same(origin_version)) {
LOG(WARNING) << ModelMsg();

View File

@@ -108,10 +108,10 @@ inline std::pair<double, double> GetGradient(int group_idx, int num_group, int f
*
* \return The gradient and diagonal Hessian entry for a given feature.
*/
inline std::pair<double, double>
GetGradientParallel(GenericParameter const *ctx, int group_idx, int num_group,
int fidx, const std::vector<GradientPair> &gpair,
DMatrix *p_fmat) {
inline std::pair<double, double> GetGradientParallel(Context const *ctx, int group_idx,
int num_group, int fidx,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat) {
std::vector<double> sum_grad_tloc(ctx->Threads(), 0.0);
std::vector<double> sum_hess_tloc(ctx->Threads(), 0.0);

View File

@@ -11,13 +11,13 @@ DMLC_REGISTRY_ENABLE(::xgboost::LinearUpdaterReg);
namespace xgboost {
LinearUpdater* LinearUpdater::Create(const std::string& name, GenericParameter const* lparam) {
LinearUpdater* LinearUpdater::Create(const std::string& name, Context const* ctx) {
auto *e = ::dmlc::Registry< ::xgboost::LinearUpdaterReg>::Get()->Find(name);
if (e == nullptr) {
LOG(FATAL) << "Unknown linear updater " << name;
}
auto p_linear = (e->body)();
p_linear->ctx_ = lparam;
p_linear->ctx_ = ctx;
return p_linear;
}

View File

@@ -79,14 +79,14 @@ double MultiClassOVR(common::Span<float const> predts, MetaInfo const &info,
size_t n_classes, int32_t n_threads,
BinaryAUC &&binary_auc) {
CHECK_NE(n_classes, 0);
auto const labels = info.labels.View(GenericParameter::kCpuId);
auto const labels = info.labels.View(Context::kCpuId);
if (labels.Shape(0) != 0) {
CHECK_EQ(labels.Shape(1), 1) << "AUC doesn't support multi-target model.";
}
std::vector<double> results_storage(n_classes * 3, 0);
linalg::TensorView<double, 2> results(results_storage, {n_classes, static_cast<size_t>(3)},
GenericParameter::kCpuId);
Context::kCpuId);
auto local_area = results.Slice(linalg::All(), 0);
auto tp = results.Slice(linalg::All(), 1);
auto auc = results.Slice(linalg::All(), 2);
@@ -94,7 +94,7 @@ double MultiClassOVR(common::Span<float const> predts, MetaInfo const &info,
auto weights = common::OptionalWeights{info.weights_.ConstHostSpan()};
auto predts_t = linalg::TensorView<float const, 2>(
predts, {static_cast<size_t>(info.num_row_), n_classes},
GenericParameter::kCpuId);
Context::kCpuId);
if (info.labels.Size() != 0) {
common::ParallelFor(n_classes, n_threads, [&](auto c) {
@@ -215,7 +215,7 @@ std::pair<double, uint32_t> RankingAUC(std::vector<float> const &predts,
CHECK_GE(info.group_ptr_.size(), 2);
uint32_t n_groups = info.group_ptr_.size() - 1;
auto s_predts = common::Span<float const>{predts};
auto labels = info.labels.View(GenericParameter::kCpuId);
auto labels = info.labels.View(Context::kCpuId);
auto s_weights = info.weights_.ConstHostSpan();
std::atomic<uint32_t> invalid_groups{0};
@@ -255,7 +255,7 @@ template <typename Curve>
class EvalAUC : public Metric {
double Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info) override {
double auc {0};
if (tparam_->gpu_id != GenericParameter::kCpuId) {
if (tparam_->gpu_id != Context::kCpuId) {
preds.SetDevice(tparam_->gpu_id);
info.labels.SetDevice(tparam_->gpu_id);
info.weights_.SetDevice(tparam_->gpu_id);
@@ -340,7 +340,7 @@ class EvalROCAUC : public EvalAUC<EvalROCAUC> {
double auc{0};
uint32_t valid_groups = 0;
auto n_threads = tparam_->Threads();
if (tparam_->gpu_id == GenericParameter::kCpuId) {
if (tparam_->gpu_id == Context::kCpuId) {
std::tie(auc, valid_groups) =
RankingAUC<true>(predts.ConstHostVector(), info, n_threads);
} else {
@@ -355,7 +355,7 @@ class EvalROCAUC : public EvalAUC<EvalROCAUC> {
double auc{0};
auto n_threads = tparam_->Threads();
CHECK_NE(n_classes, 0);
if (tparam_->gpu_id == GenericParameter::kCpuId) {
if (tparam_->gpu_id == Context::kCpuId) {
auc = MultiClassOVR(predts.ConstHostVector(), info, n_classes, n_threads,
BinaryROCAUC);
} else {
@@ -368,7 +368,7 @@ class EvalROCAUC : public EvalAUC<EvalROCAUC> {
std::tuple<double, double, double>
EvalBinary(HostDeviceVector<float> const &predts, MetaInfo const &info) {
double fp, tp, auc;
if (tparam_->gpu_id == GenericParameter::kCpuId) {
if (tparam_->gpu_id == Context::kCpuId) {
std::tie(fp, tp, auc) =
BinaryROCAUC(predts.ConstHostVector(), info.labels.HostView().Slice(linalg::All(), 0),
common::OptionalWeights{info.weights_.ConstHostSpan()});
@@ -418,7 +418,7 @@ class EvalPRAUC : public EvalAUC<EvalPRAUC> {
std::tuple<double, double, double>
EvalBinary(HostDeviceVector<float> const &predts, MetaInfo const &info) {
double pr, re, auc;
if (tparam_->gpu_id == GenericParameter::kCpuId) {
if (tparam_->gpu_id == Context::kCpuId) {
std::tie(pr, re, auc) =
BinaryPRAUC(predts.ConstHostSpan(), info.labels.HostView().Slice(linalg::All(), 0),
common::OptionalWeights{info.weights_.ConstHostSpan()});
@@ -431,7 +431,7 @@ class EvalPRAUC : public EvalAUC<EvalPRAUC> {
double EvalMultiClass(HostDeviceVector<float> const &predts, MetaInfo const &info,
size_t n_classes) {
if (tparam_->gpu_id == GenericParameter::kCpuId) {
if (tparam_->gpu_id == Context::kCpuId) {
auto n_threads = this->tparam_->Threads();
return MultiClassOVR(predts.ConstHostSpan(), info, n_classes, n_threads,
BinaryPRAUC);
@@ -446,7 +446,7 @@ class EvalPRAUC : public EvalAUC<EvalPRAUC> {
double auc{0};
uint32_t valid_groups = 0;
auto n_threads = tparam_->Threads();
if (tparam_->gpu_id == GenericParameter::kCpuId) {
if (tparam_->gpu_id == Context::kCpuId) {
auto labels = info.labels.Data()->ConstHostSpan();
if (std::any_of(labels.cbegin(), labels.cend(), PRAUCLabelInvalid{})) {
InvalidLabels();

View File

@@ -40,7 +40,7 @@ namespace {
* applying the weights. A tuple of {error_i, weight_i} is expected as return.
*/
template <typename Fn>
PackedReduceResult Reduce(GenericParameter const* ctx, MetaInfo const& info, Fn&& loss) {
PackedReduceResult Reduce(Context const* ctx, MetaInfo const& info, Fn&& loss) {
PackedReduceResult result;
auto labels = info.labels.View(ctx->gpu_id);
if (ctx->IsCPU()) {

View File

@@ -4,8 +4,8 @@
* \brief Registry of objective functions.
*/
#include <dmlc/registry.h>
#include <xgboost/context.h>
#include <xgboost/metric.h>
#include <xgboost/generic_parameters.h>
#include "metric_common.h"
@@ -43,7 +43,7 @@ Metric* CreateMetricImpl(const std::string& name) {
}
Metric *
Metric::Create(const std::string& name, GenericParameter const* tparam) {
Metric::Create(const std::string& name, Context const* tparam) {
auto metric = CreateMetricImpl<MetricReg>(name);
if (metric == nullptr) {
LOG(FATAL) << "Unknown metric function " << name;
@@ -54,7 +54,7 @@ Metric::Create(const std::string& name, GenericParameter const* tparam) {
}
Metric *
GPUMetric::CreateGPUMetric(const std::string& name, GenericParameter const* tparam) {
GPUMetric::CreateGPUMetric(const std::string& name, Context const* tparam) {
auto metric = CreateMetricImpl<MetricGPUReg>(name);
if (metric == nullptr) {
LOG(WARNING) << "Cannot find a GPU metric builder for metric " << name

View File

@@ -12,12 +12,13 @@
#include "xgboost/metric.h"
namespace xgboost {
struct Context;
// This creates a GPU metric instance dynamically and adds it to the GPU metric registry, if not
// present already. This is created when there is a device ordinal present and if xgboost
// is compiled with CUDA support
struct GPUMetric : Metric {
static Metric *CreateGPUMetric(const std::string& name, GenericParameter const* tparam);
static Metric *CreateGPUMetric(const std::string &name, Context const *tparam);
};
/*!

View File

@@ -126,13 +126,10 @@ class MultiClassMetricsReduction {
#endif // XGBOOST_USE_CUDA
PackedReduceResult Reduce(
const GenericParameter &tparam,
int device,
size_t n_class,
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
PackedReduceResult Reduce(const Context& tparam, int device, size_t n_class,
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
PackedReduceResult result;
if (device < 0) {

View File

@@ -118,7 +118,7 @@ struct EvalAMS : public Metric {
const double br = 10.0;
unsigned thresindex = 0;
double s_tp = 0.0, b_fp = 0.0, tams = 0.0;
const auto& labels = info.labels.View(GenericParameter::kCpuId);
const auto& labels = info.labels.View(Context::kCpuId);
for (unsigned i = 0; i < static_cast<unsigned>(ndata-1) && i < ntop; ++i) {
const unsigned ridx = rec[i].second;
const bst_float wt = info.GetWeight(ridx);
@@ -191,7 +191,7 @@ struct EvalRank : public Metric, public EvalRankConfig {
std::vector<double> sum_tloc(tparam_->Threads(), 0.0);
if (!rank_gpu_ || tparam_->gpu_id < 0) {
const auto& labels = info.labels.View(GenericParameter::kCpuId);
const auto& labels = info.labels.View(Context::kCpuId);
const auto &h_preds = preds.ConstHostVector();
dmlc::OMPException exc;

View File

@@ -123,7 +123,7 @@ class ElementWiseSurvivalMetricsReduction {
#endif // XGBOOST_USE_CUDA
PackedReduceResult Reduce(
const GenericParameter &ctx,
const Context &ctx,
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels_lower_bound,
const HostDeviceVector<bst_float>& labels_upper_bound,
@@ -195,7 +195,7 @@ struct EvalAFTNLogLik {
};
template <typename Policy> struct EvalEWiseSurvivalBase : public Metric {
explicit EvalEWiseSurvivalBase(GenericParameter const *ctx) {
explicit EvalEWiseSurvivalBase(Context const *ctx) {
tparam_ = ctx;
}
EvalEWiseSurvivalBase() = default;

View File

@@ -9,7 +9,7 @@
#include "../collective/communicator-inl.h"
#include "../common/common.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/context.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/tree_model.h"

View File

@@ -4,6 +4,7 @@
* \brief Registry of all objective functions.
*/
#include <dmlc/registry.h>
#include <xgboost/context.h>
#include <xgboost/objective.h>
#include <sstream>
@@ -16,7 +17,7 @@ DMLC_REGISTRY_ENABLE(::xgboost::ObjFunctionReg);
namespace xgboost {
// implement factory functions
ObjFunction* ObjFunction::Create(const std::string& name, GenericParameter const* tparam) {
ObjFunction* ObjFunction::Create(const std::string& name, Context const* ctx) {
auto *e = ::dmlc::Registry< ::xgboost::ObjFunctionReg>::Get()->Find(name);
if (e == nullptr) {
std::stringstream ss;
@@ -27,7 +28,7 @@ ObjFunction* ObjFunction::Create(const std::string& name, GenericParameter const
<< ss.str();
}
auto pobj = (e->body)();
pobj->ctx_ = tparam;
pobj->ctx_ = ctx;
return pobj;
}

View File

@@ -23,8 +23,8 @@
#include "./regression_loss.h"
#include "adaptive.h"
#include "xgboost/base.h"
#include "xgboost/context.h"
#include "xgboost/data.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/json.h"
#include "xgboost/linalg.h"

View File

@@ -1,5 +1,5 @@
/*!
* Copyright by Contributors 2017-2021
* Copyright by XGBoost Contributors 2017-2022
*/
#include <dmlc/any.h>
#include <dmlc/omp.h>
@@ -351,8 +351,7 @@ class CPUPredictor : public Predictor {
}
public:
explicit CPUPredictor(GenericParameter const* generic_param) :
Predictor::Predictor{generic_param} {}
explicit CPUPredictor(Context const *ctx) : Predictor::Predictor{ctx} {}
void PredictBatch(DMatrix *dmat, PredictionCacheEntry *predts,
const gbm::GBTreeModel &model, uint32_t tree_begin,
@@ -614,9 +613,7 @@ class CPUPredictor : public Predictor {
};
XGBOOST_REGISTER_PREDICTOR(CPUPredictor, "cpu_predictor")
.describe("Make predictions using CPU.")
.set_body([](GenericParameter const* generic_param) {
return new CPUPredictor(generic_param);
});
.describe("Make predictions using CPU.")
.set_body([](Context const *ctx) { return new CPUPredictor(ctx); });
} // namespace predictor
} // namespace xgboost

View File

@@ -723,8 +723,7 @@ class GPUPredictor : public xgboost::Predictor {
}
public:
explicit GPUPredictor(GenericParameter const* generic_param) :
Predictor::Predictor{generic_param} {}
explicit GPUPredictor(Context const* ctx) : Predictor::Predictor{ctx} {}
~GPUPredictor() override {
if (ctx_->gpu_id >= 0 && ctx_->gpu_id < common::AllVisibleGPUs()) {
@@ -1026,10 +1025,8 @@ class GPUPredictor : public xgboost::Predictor {
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([](GenericParameter const* generic_param) {
return new GPUPredictor(generic_param);
});
.describe("Make predictions using GPU.")
.set_body([](Context const* ctx) { return new GPUPredictor(ctx); });
} // namespace predictor
} // namespace xgboost

View File

@@ -1,14 +1,15 @@
/*!
* Copyright 2017-2021 by Contributors
*/
#include "xgboost/predictor.h"
#include <dmlc/registry.h>
#include <mutex>
#include "xgboost/predictor.h"
#include "xgboost/data.h"
#include "xgboost/generic_parameters.h"
#include "../gbm/gbtree.h"
#include "xgboost/context.h"
#include "xgboost/data.h"
namespace dmlc {
DMLC_REGISTRY_ENABLE(::xgboost::PredictorReg);
@@ -30,7 +31,7 @@ void PredictionContainer::ClearExpiredEntries() {
PredictionCacheEntry &PredictionContainer::Cache(std::shared_ptr<DMatrix> m, int32_t device) {
this->ClearExpiredEntries();
container_[m.get()].ref = m;
if (device != GenericParameter::kCpuId) {
if (device != Context::kCpuId) {
container_[m.get()].predictions.SetDevice(device);
}
return container_[m.get()];
@@ -51,13 +52,12 @@ decltype(PredictionContainer::container_) const& PredictionContainer::Container(
void Predictor::Configure(
const std::vector<std::pair<std::string, std::string>>&) {
}
Predictor* Predictor::Create(
std::string const& name, GenericParameter const* generic_param) {
Predictor* Predictor::Create(std::string const& name, Context const* ctx) {
auto* e = ::dmlc::Registry<PredictorReg>::Get()->Find(name);
if (e == nullptr) {
LOG(FATAL) << "Unknown predictor type " << name;
}
auto p_predictor = (e->body)(generic_param);
auto p_predictor = (e->body)(ctx);
return p_predictor;
}

View File

@@ -11,8 +11,8 @@
#include "../common/numeric.h" // Iota
#include "../common/partition_builder.h"
#include "hist/expand_entry.h" // CPUExpandEntry
#include "xgboost/generic_parameters.h" // Context
#include "hist/expand_entry.h" // CPUExpandEntry
#include "xgboost/context.h" // Context
namespace xgboost {
namespace tree {

View File

@@ -9,7 +9,7 @@
#include "../../common/device_helpers.cuh"
#include "xgboost/base.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/context.h"
#include "xgboost/task.h"
#include "xgboost/tree_model.h"

View File

@@ -5,19 +5,20 @@
#define XGBOOST_TREE_HIST_EVALUATE_SPLITS_H_
#include <algorithm>
#include <limits>
#include <memory>
#include <numeric>
#include <limits>
#include <utility>
#include <vector>
#include "../param.h"
#include "../constraints.h"
#include "../split_evaluator.h"
#include "../../common/categorical.h"
#include "../../common/random.h"
#include "../../common/hist_util.h"
#include "../../common/random.h"
#include "../../data/gradient_index.h"
#include "../constraints.h"
#include "../param.h"
#include "../split_evaluator.h"
#include "xgboost/context.h"
namespace xgboost {
namespace tree {
@@ -427,7 +428,7 @@ class HistEvaluator {
std::shared_ptr<common::ColumnSampler> sampler)
: param_{param},
column_sampler_{std::move(sampler)},
tree_evaluator_{param, static_cast<bst_feature_t>(info.num_col_), GenericParameter::kCpuId},
tree_evaluator_{param, static_cast<bst_feature_t>(info.num_col_), Context::kCpuId},
n_threads_{n_threads} {
interaction_constraints_.Configure(param, info.num_col_);
column_sampler_->Init(info.num_col_, info.feature_weights.HostVector(), param_.colsample_bynode,
@@ -442,14 +443,14 @@ class HistEvaluator {
* \param p_last_tree The last tree being updated by tree updater
*/
template <typename Partitioner>
void UpdatePredictionCacheImpl(GenericParameter const *ctx, RegTree const *p_last_tree,
void UpdatePredictionCacheImpl(Context const *ctx, RegTree const *p_last_tree,
std::vector<Partitioner> const &partitioner,
linalg::VectorView<float> out_preds) {
CHECK_GT(out_preds.Size(), 0U);
CHECK(p_last_tree);
auto const &tree = *p_last_tree;
CHECK_EQ(out_preds.DeviceIdx(), GenericParameter::kCpuId);
CHECK_EQ(out_preds.DeviceIdx(), Context::kCpuId);
size_t n_nodes = p_last_tree->GetNodes().size();
for (auto &part : partitioner) {
CHECK_EQ(part.Size(), n_nodes);

View File

@@ -10,17 +10,18 @@
#include <dmlc/registry.h>
#include <xgboost/base.h>
#include <algorithm>
#include <limits>
#include <utility>
#include <vector>
#include <limits>
#include <algorithm>
#include "xgboost/tree_model.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/generic_parameters.h"
#include "../common/transform.h"
#include "../common/math.h"
#include "../common/transform.h"
#include "param.h"
#include "xgboost/context.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/tree_model.h"
namespace xgboost {
namespace tree {
@@ -38,7 +39,7 @@ class TreeEvaluator {
public:
TreeEvaluator(TrainParam const& p, bst_feature_t n_features, int32_t device) {
device_ = device;
if (device != GenericParameter::kCpuId) {
if (device != Context::kCpuId) {
lower_bounds_.SetDevice(device);
upper_bounds_.SetDevice(device);
monotone_.SetDevice(device);
@@ -56,7 +57,7 @@ class TreeEvaluator {
has_constraint_ = true;
}
if (device_ != GenericParameter::kCpuId) {
if (device_ != Context::kCpuId) {
// Pull to device early.
lower_bounds_.ConstDeviceSpan();
upper_bounds_.ConstDeviceSpan();
@@ -151,7 +152,7 @@ class TreeEvaluator {
public:
/* Get a view to the evaluator that can be passed down to device. */
template <typename ParamT = TrainParam> auto GetEvaluator() const {
if (device_ != GenericParameter::kCpuId) {
if (device_ != Context::kCpuId) {
auto constraints = monotone_.ConstDevicePointer();
return SplitEvaluator<ParamT>{constraints, lower_bounds_.ConstDevicePointer(),
upper_bounds_.ConstDevicePointer(), has_constraint_};

View File

@@ -14,13 +14,12 @@ DMLC_REGISTRY_ENABLE(::xgboost::TreeUpdaterReg);
namespace xgboost {
TreeUpdater* TreeUpdater::Create(const std::string& name, GenericParameter const* tparam,
ObjInfo task) {
TreeUpdater* TreeUpdater::Create(const std::string& name, Context const* ctx, ObjInfo task) {
auto* e = ::dmlc::Registry< ::xgboost::TreeUpdaterReg>::Get()->Find(name);
if (e == nullptr) {
LOG(FATAL) << "Unknown tree updater " << name;
}
auto p_updater = (e->body)(tparam, task);
auto p_updater = (e->body)(ctx, task);
return p_updater;
}

View File

@@ -256,7 +256,7 @@ class GlobalApproxUpdater : public TreeUpdater {
ObjInfo task_;
public:
explicit GlobalApproxUpdater(GenericParameter const *ctx, ObjInfo task)
explicit GlobalApproxUpdater(Context const *ctx, ObjInfo task)
: TreeUpdater(ctx), task_{task} {
monitor_.Init(__func__);
}
@@ -337,8 +337,6 @@ XGBOOST_REGISTER_TREE_UPDATER(GlobalHistMaker, "grow_histmaker")
.describe(
"Tree constructor that uses approximate histogram construction "
"for each node.")
.set_body([](GenericParameter const *ctx, ObjInfo task) {
return new GlobalApproxUpdater(ctx, task);
});
.set_body([](Context const *ctx, ObjInfo task) { return new GlobalApproxUpdater(ctx, task); });
} // namespace tree
} // namespace xgboost

View File

@@ -55,7 +55,7 @@ DMLC_REGISTER_PARAMETER(ColMakerTrainParam);
/*! \brief column-wise update to construct a tree */
class ColMaker: public TreeUpdater {
public:
explicit ColMaker(GenericParameter const *ctx) : TreeUpdater(ctx) {}
explicit ColMaker(Context const *ctx) : TreeUpdater(ctx) {}
void Configure(const Args &args) override {
param_.UpdateAllowUnknown(args);
colmaker_param_.UpdateAllowUnknown(args);
@@ -159,11 +159,11 @@ class ColMaker: public TreeUpdater {
// constructor
explicit Builder(const TrainParam &param, const ColMakerTrainParam &colmaker_train_param,
FeatureInteractionConstraintHost _interaction_constraints,
GenericParameter const *ctx, const std::vector<float> &column_densities)
Context const *ctx, const std::vector<float> &column_densities)
: param_(param),
colmaker_train_param_{colmaker_train_param},
ctx_{ctx},
tree_evaluator_(param_, column_densities.size(), GenericParameter::kCpuId),
tree_evaluator_(param_, column_densities.size(), Context::kCpuId),
interaction_constraints_{std::move(_interaction_constraints)},
column_densities_(column_densities) {}
// update one tree, growing
@@ -594,7 +594,7 @@ class ColMaker: public TreeUpdater {
const TrainParam& param_;
const ColMakerTrainParam& colmaker_train_param_;
// number of omp thread used during training
GenericParameter const* ctx_;
Context const* ctx_;
common::ColumnSampler column_sampler_;
// Instance Data: current node position in the tree of each instance
std::vector<int> position_;
@@ -612,9 +612,7 @@ class ColMaker: public TreeUpdater {
};
XGBOOST_REGISTER_TREE_UPDATER(ColMaker, "grow_colmaker")
.describe("Grow tree with parallelization over columns.")
.set_body([](GenericParameter const* ctx, ObjInfo) {
return new ColMaker(ctx);
});
.describe("Grow tree with parallelization over columns.")
.set_body([](Context const *ctx, ObjInfo) { return new ColMaker(ctx); });
} // namespace tree
} // namespace xgboost

View File

@@ -4,41 +4,40 @@
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "xgboost/base.h"
#include "xgboost/data.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../collective/device_communicator.cuh"
#include "../common/io.h"
#include "../common/bitfield.h"
#include "../common/categorical.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/bitfield.h"
#include "../common/io.h"
#include "../common/timer.h"
#include "../common/categorical.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "driver.h"
#include "updater_gpu_common.cuh"
#include "split_evaluator.h"
#include "constraints.cuh"
#include "gpu_hist/feature_groups.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
#include "driver.h"
#include "gpu_hist/evaluate_splits.cuh"
#include "gpu_hist/expand_entry.cuh"
#include "gpu_hist/feature_groups.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "param.h"
#include "split_evaluator.h"
#include "updater_gpu_common.cuh"
#include "xgboost/base.h"
#include "xgboost/context.h"
#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/json.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/task.h"
#include "xgboost/tree_model.h"
@@ -730,7 +729,7 @@ class GPUHistMaker : public TreeUpdater {
using GradientSumT = GradientPairPrecise;
public:
explicit GPUHistMaker(GenericParameter const* ctx, ObjInfo task)
explicit GPUHistMaker(Context const* ctx, ObjInfo task)
: TreeUpdater(ctx), task_{task} {};
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
@@ -879,9 +878,7 @@ class GPUHistMaker : public TreeUpdater {
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([](GenericParameter const* tparam, ObjInfo task) {
return new GPUHistMaker(tparam, task);
});
.set_body([](Context const* ctx, ObjInfo task) { return new GPUHistMaker(ctx, task); });
#endif // !defined(GTEST_TEST)
} // namespace tree

View File

@@ -20,7 +20,7 @@ DMLC_REGISTRY_FILE_TAG(updater_prune);
/*! \brief pruner that prunes a tree after growing finishes */
class TreePruner : public TreeUpdater {
public:
explicit TreePruner(GenericParameter const* ctx, ObjInfo task) : TreeUpdater(ctx) {
explicit TreePruner(Context const* ctx, ObjInfo task) : TreeUpdater(ctx) {
syncher_.reset(TreeUpdater::Create("sync", ctx_, task));
pruner_monitor_.Init("TreePruner");
}
@@ -110,6 +110,6 @@ class TreePruner : public TreeUpdater {
XGBOOST_REGISTER_TREE_UPDATER(TreePruner, "prune")
.describe("Pruner that prune the tree according to statistics.")
.set_body([](GenericParameter const* ctx, ObjInfo task) { return new TreePruner(ctx, task); });
.set_body([](Context const* ctx, ObjInfo task) { return new TreePruner(ctx, task); });
} // namespace tree
} // namespace xgboost

View File

@@ -335,8 +335,6 @@ void QuantileHistMaker::Builder::InitData(DMatrix *fmat, const RegTree &tree,
XGBOOST_REGISTER_TREE_UPDATER(QuantileHistMaker, "grow_quantile_histmaker")
.describe("Grow tree using quantized histogram.")
.set_body([](GenericParameter const *ctx, ObjInfo task) {
return new QuantileHistMaker(ctx, task);
});
.set_body([](Context const *ctx, ObjInfo task) { return new QuantileHistMaker(ctx, task); });
} // namespace tree
} // namespace xgboost

View File

@@ -85,8 +85,7 @@ inline BatchParam HistBatch(TrainParam const& param) {
/*! \brief construct a tree using quantized feature values */
class QuantileHistMaker: public TreeUpdater {
public:
explicit QuantileHistMaker(GenericParameter const* ctx, ObjInfo task)
: TreeUpdater(ctx), task_{task} {}
explicit QuantileHistMaker(Context const* ctx, ObjInfo task) : TreeUpdater(ctx), task_{task} {}
void Configure(const Args& args) override;
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
@@ -120,7 +119,7 @@ class QuantileHistMaker: public TreeUpdater {
public:
// constructor
explicit Builder(const size_t n_trees, const TrainParam& param, DMatrix const* fmat,
ObjInfo task, GenericParameter const* ctx)
ObjInfo task, Context const* ctx)
: n_trees_(n_trees),
param_(param),
p_last_fmat_(fmat),

View File

@@ -24,7 +24,7 @@ DMLC_REGISTRY_FILE_TAG(updater_refresh);
/*! \brief pruner that prunes a tree after growing finishs */
class TreeRefresher : public TreeUpdater {
public:
explicit TreeRefresher(GenericParameter const *ctx) : TreeUpdater(ctx) {}
explicit TreeRefresher(Context const *ctx) : TreeUpdater(ctx) {}
void Configure(const Args &args) override { param_.UpdateAllowUnknown(args); }
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
@@ -160,6 +160,6 @@ class TreeRefresher : public TreeUpdater {
XGBOOST_REGISTER_TREE_UPDATER(TreeRefresher, "refresh")
.describe("Refresher that refreshes the weight and statistics according to data.")
.set_body([](GenericParameter const *ctx, ObjInfo) { return new TreeRefresher(ctx); });
.set_body([](Context const *ctx, ObjInfo) { return new TreeRefresher(ctx); });
} // namespace tree
} // namespace xgboost

View File

@@ -24,7 +24,7 @@ DMLC_REGISTRY_FILE_TAG(updater_sync);
*/
class TreeSyncher : public TreeUpdater {
public:
explicit TreeSyncher(GenericParameter const* tparam) : TreeUpdater(tparam) {}
explicit TreeSyncher(Context const* tparam) : TreeUpdater(tparam) {}
void Configure(const Args&) override {}
void LoadConfig(Json const&) override {}
@@ -56,6 +56,6 @@ class TreeSyncher : public TreeUpdater {
XGBOOST_REGISTER_TREE_UPDATER(TreeSyncher, "sync")
.describe("Syncher that synchronize the tree in all distributed nodes.")
.set_body([](GenericParameter const* tparam, ObjInfo) { return new TreeSyncher(tparam); });
.set_body([](Context const* ctx, ObjInfo) { return new TreeSyncher(ctx); });
} // namespace tree
} // namespace xgboost