diff --git a/R-package/src/Makevars.in b/R-package/src/Makevars.in
index 03b197104..5c6c624e0 100644
--- a/R-package/src/Makevars.in
+++ b/R-package/src/Makevars.in
@@ -68,6 +68,7 @@ OBJECTS= \
   $(PKGROOT)/src/linear/updater_coordinate.o \
   $(PKGROOT)/src/linear/updater_shotgun.o \
   $(PKGROOT)/src/learner.o \
+  $(PKGROOT)/src/context.o \
   $(PKGROOT)/src/logging.o \
   $(PKGROOT)/src/global_config.o \
   $(PKGROOT)/src/collective/communicator.o \
diff --git a/R-package/src/Makevars.win b/R-package/src/Makevars.win
index e49ce4c73..b12fca7c8 100644
--- a/R-package/src/Makevars.win
+++ b/R-package/src/Makevars.win
@@ -68,6 +68,7 @@ OBJECTS= \
   $(PKGROOT)/src/linear/updater_coordinate.o \
   $(PKGROOT)/src/linear/updater_shotgun.o \
   $(PKGROOT)/src/learner.o \
+  $(PKGROOT)/src/context.o \
   $(PKGROOT)/src/logging.o \
   $(PKGROOT)/src/global_config.o \
   $(PKGROOT)/src/collective/communicator.o \
diff --git a/R-package/src/xgboost_R.cc b/R-package/src/xgboost_R.cc
index 777a275d7..8f775f087 100644
--- a/R-package/src/xgboost_R.cc
+++ b/R-package/src/xgboost_R.cc
@@ -4,8 +4,8 @@
 #include
 #include
 #include
+#include
 #include
-#include
 #include
 #include
@@ -18,7 +18,8 @@
 #include "../../src/c_api/c_api_error.h"
 #include "../../src/common/threading_utils.h"
-#include "./xgboost_R.h"
+
+#include "./xgboost_R.h" // Must follow other include.
 /*!
  * \brief macro to annotate begin of api
@@ -46,14 +47,14 @@
 using dmlc::BeginPtr;
-xgboost::GenericParameter const *BoosterCtx(BoosterHandle handle) {
+xgboost::Context const *BoosterCtx(BoosterHandle handle) {
   CHECK_HANDLE();
   auto *learner = static_cast(handle);
   CHECK(learner);
   return learner->Ctx();
 }
-xgboost::GenericParameter const *DMatrixCtx(DMatrixHandle handle) {
+xgboost::Context const *DMatrixCtx(DMatrixHandle handle) {
   CHECK_HANDLE();
   auto p_m = static_cast *>(handle);
   CHECK(p_m);
diff --git a/include/xgboost/generic_parameters.h b/include/xgboost/context.h
similarity index 57%
rename from include/xgboost/generic_parameters.h
rename to include/xgboost/context.h
index 0375ecfaf..66ad1d4bb 100644
--- a/include/xgboost/generic_parameters.h
+++ b/include/xgboost/context.h
@@ -1,9 +1,9 @@
 /*!
- * Copyright 2014-2019 by Contributors
- * \file generic_parameters.h
+ * Copyright 2014-2022 by Contributors
+ * \file context.h
  */
-#ifndef XGBOOST_GENERIC_PARAMETERS_H_
-#define XGBOOST_GENERIC_PARAMETERS_H_
+#ifndef XGBOOST_CONTEXT_H_
+#define XGBOOST_CONTEXT_H_
 #include
 #include
@@ -12,31 +12,31 @@
 namespace xgboost {
-struct GenericParameter : public XGBoostParameter {
+struct Context : public XGBoostParameter {
  private:
   // cached value for CFS CPU limit. (used in containerized env)
-  int32_t cfs_cpu_count_; // NOLINT
+  std::int32_t cfs_cpu_count_; // NOLINT
  public:
   // Constant representing the device ID of CPU.
-  static int32_t constexpr kCpuId = -1;
-  static int64_t constexpr kDefaultSeed = 0;
+  static std::int32_t constexpr kCpuId = -1;
+  static std::int64_t constexpr kDefaultSeed = 0;
  public:
-  GenericParameter();
+  Context();
   // stored random seed
-  int64_t seed { kDefaultSeed };
+  std::int64_t seed{kDefaultSeed};
   // whether seed the PRNG each iteration
   bool seed_per_iteration{false};
   // number of threads to use if OpenMP is enabled
   // if equals 0, use system default
-  int nthread{0};
+  std::int32_t nthread{0};
   // primary device, -1 means no gpu.
-  int gpu_id{kCpuId};
+  std::int32_t gpu_id{kCpuId};
   // fail when gpu_id is invalid
-  bool fail_on_invalid_gpu_id {false};
-  bool validate_parameters {false};
+  bool fail_on_invalid_gpu_id{false};
+  bool validate_parameters{false};
   /*!
    * \brief Configure the parameter `gpu_id'.
@@ -47,26 +47,25 @@ struct GenericParameter : public XGBoostParameter {
   /*!
    * Return automatically chosen threads.
    */
-  int32_t Threads() const;
+  std::int32_t Threads() const;
   bool IsCPU() const { return gpu_id == kCpuId; }
+  bool IsCUDA() const { return !IsCPU(); }
   // declare parameters
-  DMLC_DECLARE_PARAMETER(GenericParameter) {
-    DMLC_DECLARE_FIELD(seed).set_default(kDefaultSeed).describe(
-        "Random number seed during training.");
+  DMLC_DECLARE_PARAMETER(Context) {
+    DMLC_DECLARE_FIELD(seed)
+        .set_default(kDefaultSeed)
+        .describe("Random number seed during training.");
     DMLC_DECLARE_ALIAS(seed, random_state);
     DMLC_DECLARE_FIELD(seed_per_iteration)
         .set_default(false)
        .describe("Seed PRNG determnisticly via iterator number.");
-    DMLC_DECLARE_FIELD(nthread).set_default(0).describe(
-        "Number of threads to use.");
+    DMLC_DECLARE_FIELD(nthread).set_default(0).describe("Number of threads to use.");
     DMLC_DECLARE_ALIAS(nthread, n_jobs);
-    DMLC_DECLARE_FIELD(gpu_id)
-        .set_default(-1)
-        .set_lower_bound(-1)
-        .describe("The primary GPU device ordinal.");
+    DMLC_DECLARE_FIELD(gpu_id).set_default(-1).set_lower_bound(-1).describe(
+        "The primary GPU device ordinal.");
     DMLC_DECLARE_FIELD(fail_on_invalid_gpu_id)
         .set_default(false)
         .describe("Fail with error when gpu_id is invalid.");
@@ -75,8 +74,6 @@ struct GenericParameter : public XGBoostParameter {
         .describe("Enable checking whether parameters are used or not.");
   }
 };
-
-using Context = GenericParameter;
 }  // namespace xgboost
-#endif // XGBOOST_GENERIC_PARAMETERS_H_
+#endif // XGBOOST_CONTEXT_H_
diff --git a/include/xgboost/data.h b/include/xgboost/data.h
index d5c89a8cc..990c6896e 100644
--- a/include/xgboost/data.h
+++ b/include/xgboost/data.h
@@ -11,7 +11,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -28,6 +27,7 @@
 namespace xgboost {
 // forward declare dmatrix.
 class DMatrix;
+struct Context;
 /*! \brief data type accepted by xgboost interface */
 enum class DataType : uint8_t {
diff --git a/include/xgboost/gbm.h b/include/xgboost/gbm.h
index 07dd82371..3be60a061 100644
--- a/include/xgboost/gbm.h
+++ b/include/xgboost/gbm.h
@@ -28,7 +28,7 @@
 class Json;
 class FeatureMap;
 class ObjFunction;
-struct GenericParameter;
+struct Context;
 struct LearnerModelParam;
 struct PredictionCacheEntry;
 class PredictionContainer;
@@ -38,8 +38,8 @@ class PredictionContainer;
  */
 class GradientBooster : public Model, public Configurable {
  protected:
-  GenericParameter const* ctx_;
-  explicit GradientBooster(GenericParameter const* ctx) : ctx_{ctx} {}
+  Context const* ctx_;
+  explicit GradientBooster(Context const* ctx) : ctx_{ctx} {}
  public:
   /*! \brief virtual destructor */
@@ -193,10 +193,8 @@ class GradientBooster : public Model, public Configurable {
   * \param learner_model_param pointer to global model parameters
   * \return The created booster.
   */
-  static GradientBooster* Create(
-      const std::string& name,
-      GenericParameter const* generic_param,
-      LearnerModelParam const* learner_model_param);
+  static GradientBooster* Create(const std::string& name, Context const* ctx,
+                                 LearnerModelParam const* learner_model_param);
 };
 /*!
@@ -206,7 +204,7 @@ struct GradientBoosterReg : public dmlc::FunctionRegEntryBase< GradientBoosterReg, std::function > {}; + Context const* ctx)> > {}; /*! * \brief Macro to register gradient booster. diff --git a/include/xgboost/learner.h b/include/xgboost/learner.h index 6969c7d7d..35d3cf586 100644 --- a/include/xgboost/learner.h +++ b/include/xgboost/learner.h @@ -9,8 +9,8 @@ #define XGBOOST_LEARNER_H_ #include +#include // Context #include -#include // Context #include #include #include diff --git a/include/xgboost/linalg.h b/include/xgboost/linalg.h index 3897e89ea..d5b255b82 100644 --- a/include/xgboost/linalg.h +++ b/include/xgboost/linalg.h @@ -8,7 +8,7 @@ #include #include -#include +#include // fixme(jiamingy): Remove the dependency on this header. #include #include #include diff --git a/include/xgboost/linear_updater.h b/include/xgboost/linear_updater.h index 1506093ee..6faf11230 100644 --- a/include/xgboost/linear_updater.h +++ b/include/xgboost/linear_updater.h @@ -6,7 +6,6 @@ #include #include #include -#include #include #include @@ -19,6 +18,7 @@ namespace xgboost { class Json; +struct Context; namespace gbm { class GBLinearModel; @@ -29,7 +29,7 @@ class GBLinearModel; */ class LinearUpdater : public Configurable { protected: - GenericParameter const* ctx_; + Context const* ctx_; public: /*! \brief virtual destructor */ @@ -57,7 +57,7 @@ class LinearUpdater : public Configurable { * \brief Create a linear updater given name * \param name Name of the linear updater. */ - static LinearUpdater* Create(const std::string& name, GenericParameter const*); + static LinearUpdater* Create(const std::string& name, Context const*); }; /*! diff --git a/include/xgboost/metric.h b/include/xgboost/metric.h index 2cda38047..c35fd532c 100644 --- a/include/xgboost/metric.h +++ b/include/xgboost/metric.h @@ -9,7 +9,6 @@ #include #include -#include #include #include #include @@ -20,13 +19,15 @@ #include namespace xgboost { +struct Context; + /*! * \brief interface of evaluation metric used to evaluate model performance. * This has nothing to do with training, but merely act as evaluation purpose. */ class Metric : public Configurable { protected: - GenericParameter const* tparam_; + Context const* tparam_; public: /*! @@ -68,10 +69,10 @@ class Metric : public Configurable { * \param name name of the metric. * name can be in form metric[@]param and the name will be matched in the * registry. - * \param tparam A global generic parameter + * \param ctx A global context * \return the created metric. */ - static Metric* Create(const std::string& name, GenericParameter const* tparam); + static Metric* Create(const std::string& name, Context const* ctx); }; /*! diff --git a/include/xgboost/objective.h b/include/xgboost/objective.h index 0c0d502bd..9186ef710 100644 --- a/include/xgboost/objective.h +++ b/include/xgboost/objective.h @@ -10,19 +10,19 @@ #include #include #include -#include -#include #include +#include #include -#include -#include -#include #include +#include +#include +#include namespace xgboost { class RegTree; +struct Context; /*! \brief interface of objective function */ class ObjFunction : public Configurable { @@ -120,10 +120,10 @@ class ObjFunction : public Configurable { /*! * \brief Create an objective function according to name. - * \param tparam Generic parameters. + * \param ctx Pointer to runtime parameters. * \param name Name of the objective. 
*/ - static ObjFunction* Create(const std::string& name, GenericParameter const* tparam); + static ObjFunction* Create(const std::string& name, Context const* ctx); }; /*! diff --git a/include/xgboost/predictor.h b/include/xgboost/predictor.h index 877ff462b..6625612b4 100644 --- a/include/xgboost/predictor.h +++ b/include/xgboost/predictor.h @@ -1,22 +1,22 @@ /*! - * Copyright 2017-2021 by Contributors + * Copyright 2017-2022 by Contributors * \file predictor.h * \brief Interface of predictor, * performs predictions for a gradient booster. */ #pragma once #include +#include #include -#include #include #include #include +#include #include #include #include #include -#include // Forward declarations namespace xgboost { @@ -73,7 +73,7 @@ class PredictionContainer { * * \param m shared pointer to the DMatrix that needs to be cached. * \param device Which device should the cache be allocated on. Pass - * GenericParameter::kCpuId for CPU or positive integer for GPU id. + * Context::kCpuId for CPU or positive integer for GPU id. * * \return the cache entry for passed in DMatrix, either an existing cache or newly * created. @@ -218,19 +218,17 @@ class Predictor { /** * \brief Creates a new Predictor*. * - * \param name Name of the predictor. - * \param generic_param Pointer to runtime parameters. + * \param name Name of the predictor. + * \param ctx Pointer to runtime parameters. */ - static Predictor* Create( - std::string const& name, GenericParameter const* generic_param); + static Predictor* Create(std::string const& name, Context const* ctx); }; /*! * \brief Registry entry for predictor. */ struct PredictorReg - : public dmlc::FunctionRegEntryBase< - PredictorReg, std::function> {}; + : public dmlc::FunctionRegEntryBase> {}; #define XGBOOST_REGISTER_PREDICTOR(UniqueId, Name) \ static DMLC_ATTRIBUTE_UNUSED ::xgboost::PredictorReg& \ diff --git a/include/xgboost/tree_updater.h b/include/xgboost/tree_updater.h index 62cd64b18..5cf8fb05c 100644 --- a/include/xgboost/tree_updater.h +++ b/include/xgboost/tree_updater.h @@ -10,8 +10,8 @@ #include #include +#include #include -#include #include #include #include @@ -26,16 +26,17 @@ namespace xgboost { class Json; +struct Context; /*! * \brief interface of tree update module, that performs update of a tree. */ class TreeUpdater : public Configurable { protected: - GenericParameter const* ctx_ = nullptr; + Context const* ctx_ = nullptr; public: - explicit TreeUpdater(const GenericParameter* ctx) : ctx_(ctx) {} + explicit TreeUpdater(const Context* ctx) : ctx_(ctx) {} /*! \brief virtual destructor */ ~TreeUpdater() override = default; /*! @@ -90,9 +91,9 @@ class TreeUpdater : public Configurable { /*! * \brief Create a tree updater given name * \param name Name of the tree updater. - * \param tparam A global runtime parameter + * \param ctx A global runtime parameter */ - static TreeUpdater* Create(const std::string& name, GenericParameter const* tparam, ObjInfo task); + static TreeUpdater* Create(const std::string& name, Context const* ctx, ObjInfo task); }; /*! @@ -100,8 +101,7 @@ class TreeUpdater : public Configurable { */ struct TreeUpdaterReg : public dmlc::FunctionRegEntryBase< - TreeUpdaterReg, - std::function > {}; + TreeUpdaterReg, std::function> {}; /*! * \brief Macro to register tree updater. 
diff --git a/plugin/updater_oneapi/predictor_oneapi.cc b/plugin/updater_oneapi/predictor_oneapi.cc index 791d56c37..eafe83e19 100755 --- a/plugin/updater_oneapi/predictor_oneapi.cc +++ b/plugin/updater_oneapi/predictor_oneapi.cc @@ -330,7 +330,7 @@ class PredictorOneAPI : public Predictor { } public: - explicit PredictorOneAPI(GenericParameter const* generic_param) : + explicit PredictorOneAPI(Context const* generic_param) : Predictor::Predictor{generic_param}, cpu_predictor(Predictor::Create("cpu_predictor", generic_param)) { cl::sycl::default_selector selector; qu_ = cl::sycl::queue(selector); @@ -441,7 +441,7 @@ class PredictorOneAPI : public Predictor { XGBOOST_REGISTER_PREDICTOR(PredictorOneAPI, "oneapi_predictor") .describe("Make predictions using DPC++.") -.set_body([](GenericParameter const* generic_param) { +.set_body([](Context const* generic_param) { return new PredictorOneAPI(generic_param); }); } // namespace predictor diff --git a/src/collective/in_memory_handler.cc b/src/collective/in_memory_handler.cc index da425b708..790024402 100644 --- a/src/collective/in_memory_handler.cc +++ b/src/collective/in_memory_handler.cc @@ -114,7 +114,7 @@ class BroadcastFunctor { int root_; }; -void InMemoryHandler::Init(int world_size, int rank) { +void InMemoryHandler::Init(int world_size, int) { CHECK(world_size_ < world_size) << "In memory handler already initialized."; std::unique_lock lock(mutex_); @@ -124,7 +124,7 @@ void InMemoryHandler::Init(int world_size, int rank) { cv_.notify_all(); } -void InMemoryHandler::Shutdown(uint64_t sequence_number, int rank) { +void InMemoryHandler::Shutdown(uint64_t sequence_number, int) { CHECK(world_size_ > 0) << "In memory handler already shutdown."; std::unique_lock lock(mutex_); diff --git a/src/common/hist_util.h b/src/common/hist_util.h index 0861a2d3a..62d29f531 100644 --- a/src/common/hist_util.h +++ b/src/common/hist_util.h @@ -8,21 +8,21 @@ #define XGBOOST_COMMON_HIST_UTIL_H_ #include -#include -#include -#include + #include +#include +#include #include #include -#include +#include +#include "algorithm.h" // SegmentId #include "categorical.h" #include "common.h" #include "quantile.h" #include "row_set.h" #include "threading_utils.h" #include "timer.h" -#include "algorithm.h" // SegmentId namespace xgboost { class GHistIndexMatrix; diff --git a/src/common/linalg_op.cuh b/src/common/linalg_op.cuh index 558a09ca6..037ad1ff3 100644 --- a/src/common/linalg_op.cuh +++ b/src/common/linalg_op.cuh @@ -4,9 +4,9 @@ #ifndef XGBOOST_COMMON_LINALG_OP_CUH_ #define XGBOOST_COMMON_LINALG_OP_CUH_ -#include "xgboost/generic_parameters.h" #include "device_helpers.cuh" #include "linalg_op.h" +#include "xgboost/context.h" #include "xgboost/linalg.h" namespace xgboost { diff --git a/src/common/linalg_op.h b/src/common/linalg_op.h index 0df780475..9ae353311 100644 --- a/src/common/linalg_op.h +++ b/src/common/linalg_op.h @@ -9,7 +9,7 @@ #include "common.h" #include "threading_utils.h" #include "transform_iterator.h" // MakeIndexTransformIter -#include "xgboost/generic_parameters.h" +#include "xgboost/context.h" // Context #include "xgboost/linalg.h" namespace xgboost { @@ -54,7 +54,7 @@ void ElementWiseTransformDevice(linalg::TensorView, Fn&&, void* = nullptr) } template -void ElementWiseKernel(GenericParameter const* ctx, linalg::TensorView t, Fn&& fn) { +void ElementWiseKernel(Context const* ctx, linalg::TensorView t, Fn&& fn) { if (!ctx->IsCPU()) { common::AssertGPUSupport(); } diff --git a/src/common/numeric.cc b/src/common/numeric.cc index 9740d6af1..959f6305d 
100644
--- a/src/common/numeric.cc
+++ b/src/common/numeric.cc
@@ -7,7 +7,7 @@
 #include // std::is_same
 #include "threading_utils.h" // MemStackAllocator, ParallelFor, DefaultMaxThreads
-#include "xgboost/generic_parameters.h" // Context
+#include "xgboost/context.h" // Context
 #include "xgboost/host_device_vector.h" // HostDeviceVector
 namespace xgboost {
diff --git a/src/common/numeric.cu b/src/common/numeric.cu
index faac6ddb5..ad6c6b53c 100644
--- a/src/common/numeric.cu
+++ b/src/common/numeric.cu
@@ -6,7 +6,7 @@
 #include "device_helpers.cuh" // dh::Reduce, safe_cuda, dh::XGBCachingDeviceAllocator
 #include "numeric.h"
-#include "xgboost/generic_parameters.h" // Context
+#include "xgboost/context.h" // Context
 #include "xgboost/host_device_vector.h" // HostDeviceVector
 namespace xgboost {
diff --git a/src/common/numeric.h b/src/common/numeric.h
index e839c7119..f3fad57b4 100644
--- a/src/common/numeric.h
+++ b/src/common/numeric.h
@@ -12,7 +12,7 @@
 #include "common.h" // AssertGPUSupport
 #include "threading_utils.h" // MemStackAllocator, DefaultMaxThreads
-#include "xgboost/generic_parameters.h" // Context
+#include "xgboost/context.h" // Context
 #include "xgboost/host_device_vector.h" // HostDeviceVector
 namespace xgboost {
diff --git a/src/common/partition_builder.h b/src/common/partition_builder.h
index 34864ee90..4a72df7b6 100644
--- a/src/common/partition_builder.h
+++ b/src/common/partition_builder.h
@@ -10,15 +10,15 @@
 #include
 #include
+#include
 #include
 #include
-#include
 #include
+#include "../tree/hist/expand_entry.h"
 #include "categorical.h"
 #include "column_matrix.h"
-#include "../tree/hist/expand_entry.h"
-#include "xgboost/generic_parameters.h"
+#include "xgboost/context.h"
 #include "xgboost/tree_model.h"
 namespace xgboost {
diff --git a/src/common/stats.cu b/src/common/stats.cu
index dcb04ac4b..42a9bc684 100644
--- a/src/common/stats.cu
+++ b/src/common/stats.cu
@@ -7,7 +7,7 @@
 #include "common.h" // common::OptionalWeights
 #include "device_helpers.cuh" // dh::MakeTransformIterator, tcbegin, tcend
 #include "stats.cuh" // common::SegmentedQuantile, common::SegmentedWeightedQuantile
-#include "xgboost/generic_parameters.h" // Context
+#include "xgboost/context.h" // Context
 #include "xgboost/host_device_vector.h" // HostDeviceVector
 #include "xgboost/linalg.h" // linalg::TensorView, UnravelIndex, Apply
diff --git a/src/common/stats.cuh b/src/common/stats.cuh
index 9d9e526a8..df544e180 100644
--- a/src/common/stats.cuh
+++ b/src/common/stats.cuh
@@ -11,7 +11,7 @@
 #include "device_helpers.cuh"
 #include "linalg_op.cuh"
-#include "xgboost/generic_parameters.h"
+#include "xgboost/context.h"
 #include "xgboost/linalg.h"
 #include "xgboost/tree_model.h"
diff --git a/src/common/stats.h b/src/common/stats.h
index 566b4be93..bc82d4da8 100644
--- a/src/common/stats.h
+++ b/src/common/stats.h
@@ -10,7 +10,7 @@
 #include "common.h" // AssertGPUSupport
 #include "transform_iterator.h" // MakeIndexTransformIter
-#include "xgboost/generic_parameters.h"
+#include "xgboost/context.h" // Context
 #include "xgboost/linalg.h"
 namespace xgboost {
diff --git a/src/context.cc b/src/context.cc
new file mode 100644
index 000000000..571aa943e
--- /dev/null
+++ b/src/context.cc
@@ -0,0 +1,62 @@
+/**
+ * Copyright 2014-2022 by XGBoost Contributors
+ *
+ * \brief Context object used for controlling runtime parameters.
+ */
+#include
+
+#include "common/common.h"
+#include "common/threading_utils.h"
+
+namespace xgboost {
+
+DMLC_REGISTER_PARAMETER(Context);
+
+std::int32_t constexpr Context::kCpuId;
+std::int64_t constexpr Context::kDefaultSeed;
+
+Context::Context() : cfs_cpu_count_{common::GetCfsCPUCount()} {}
+
+void Context::ConfigureGpuId(bool require_gpu) {
+#if defined(XGBOOST_USE_CUDA)
+  if (gpu_id == kCpuId) {  // 0. User didn't specify the `gpu_id'
+    if (require_gpu) {  // 1. `tree_method' or `predictor' or both are using
+                        // GPU.
+      // 2. Use device 0 as default.
+      this->UpdateAllowUnknown(Args{{"gpu_id", "0"}});
+    }
+  }
+
+  // 3. When booster is loaded from a memory image (Python pickle or R
+  // raw model), number of available GPUs could be different. Wrap around it.
+  int32_t n_gpus = common::AllVisibleGPUs();
+  if (n_gpus == 0) {
+    if (gpu_id != kCpuId) {
+      LOG(WARNING) << "No visible GPU is found, setting `gpu_id` to -1";
+    }
+    this->UpdateAllowUnknown(Args{{"gpu_id", std::to_string(kCpuId)}});
+  } else if (fail_on_invalid_gpu_id) {
+    CHECK(gpu_id == kCpuId || gpu_id < n_gpus)
+        << "Only " << n_gpus << " GPUs are visible, gpu_id " << gpu_id << " is invalid.";
+  } else if (gpu_id != kCpuId && gpu_id >= n_gpus) {
+    LOG(WARNING) << "Only " << n_gpus << " GPUs are visible, setting `gpu_id` to "
+                 << gpu_id % n_gpus;
+    this->UpdateAllowUnknown(Args{{"gpu_id", std::to_string(gpu_id % n_gpus)}});
+  }
+#else
+  // Just set it to CPU, don't think about it.
+  this->UpdateAllowUnknown(Args{{"gpu_id", std::to_string(kCpuId)}});
+  (void)(require_gpu);
+#endif  // defined(XGBOOST_USE_CUDA)
+
+  common::SetDevice(this->gpu_id);
+}
+
+std::int32_t Context::Threads() const {
+  auto n_threads = common::OmpGetNumThreads(nthread);
+  if (cfs_cpu_count_ > 0) {
+    n_threads = std::min(n_threads, cfs_cpu_count_);
+  }
+  return n_threads;
+}
+}  // namespace xgboost
diff --git a/src/data/iterative_dmatrix.h b/src/data/iterative_dmatrix.h
index bb188e5b9..c79e84370 100644
--- a/src/data/iterative_dmatrix.h
+++ b/src/data/iterative_dmatrix.h
@@ -86,7 +86,7 @@ class IterativeDMatrix : public DMatrix {
     LOG(FATAL) << "Slicing DMatrix is not supported for Quantile DMatrix.";
     return nullptr;
   }
-  DMatrix *SliceCol(std::size_t start, std::size_t size) override {
+  DMatrix *SliceCol(std::size_t, std::size_t) override {
     LOG(FATAL) << "Slicing DMatrix columns is not supported for Quantile DMatrix.";
     return nullptr;
   }
diff --git a/src/data/proxy_dmatrix.h b/src/data/proxy_dmatrix.h
index 2e7fd6f00..af579ea72 100644
--- a/src/data/proxy_dmatrix.h
+++ b/src/data/proxy_dmatrix.h
@@ -10,10 +10,10 @@
 #include
 #include
-#include "xgboost/data.h"
-#include "xgboost/generic_parameters.h"
-#include "xgboost/c_api.h"
 #include "adapter.h"
+#include "xgboost/c_api.h"
+#include "xgboost/context.h"
+#include "xgboost/data.h"
 namespace xgboost {
 namespace data {
@@ -87,7 +87,7 @@ class DMatrixProxy : public DMatrix {
     LOG(FATAL) << "Slicing DMatrix is not supported for Proxy DMatrix.";
     return nullptr;
   }
-  DMatrix* SliceCol(std::size_t start, std::size_t size) override {
+  DMatrix* SliceCol(std::size_t, std::size_t) override {
     LOG(FATAL) << "Slicing DMatrix columns is not supported for Proxy DMatrix.";
     return nullptr;
   }
diff --git a/src/data/simple_dmatrix.cc b/src/data/simple_dmatrix.cc
index 56185b03e..eee98a494 100644
--- a/src/data/simple_dmatrix.cc
+++ b/src/data/simple_dmatrix.cc
@@ -53,7 +53,7 @@ DMatrix* SimpleDMatrix::SliceCol(std::size_t start, std::size_t size) {
     auto& h_data = out_page.data.HostVector();
     auto& h_offset =
out_page.offset.HostVector(); size_t rptr{0}; - for (auto i = 0; i < this->Info().num_row_; i++) { + for (bst_row_t i = 0; i < this->Info().num_row_; i++) { auto inst = batch[i]; auto prev_size = h_data.size(); std::copy_if(inst.begin(), inst.end(), std::back_inserter(h_data), [&](Entry e) { diff --git a/src/data/sparse_page_dmatrix.h b/src/data/sparse_page_dmatrix.h index 3bbe8fbae..4f09684a7 100644 --- a/src/data/sparse_page_dmatrix.h +++ b/src/data/sparse_page_dmatrix.h @@ -107,7 +107,7 @@ class SparsePageDMatrix : public DMatrix { LOG(FATAL) << "Slicing DMatrix is not supported for external memory."; return nullptr; } - DMatrix *SliceCol(std::size_t start, std::size_t size) override { + DMatrix *SliceCol(std::size_t, std::size_t) override { LOG(FATAL) << "Slicing DMatrix columns is not supported for external memory."; return nullptr; } diff --git a/src/gbm/gblinear.cc b/src/gbm/gblinear.cc index 2498865e9..84e766121 100644 --- a/src/gbm/gblinear.cc +++ b/src/gbm/gblinear.cc @@ -71,7 +71,7 @@ void LinearCheckLayer(unsigned layer_begin) { */ class GBLinear : public GradientBooster { public: - explicit GBLinear(LearnerModelParam const* learner_model_param, GenericParameter const* ctx) + explicit GBLinear(LearnerModelParam const* learner_model_param, Context const* ctx) : GradientBooster{ctx}, learner_model_param_{learner_model_param}, model_{learner_model_param}, @@ -179,7 +179,7 @@ class GBLinear : public GradientBooster { unsigned) override { model_.LazyInitModel(); LinearCheckLayer(layer_begin); - auto base_margin = p_fmat->Info().base_margin_.View(GenericParameter::kCpuId); + auto base_margin = p_fmat->Info().base_margin_.View(Context::kCpuId); const int ngroup = model_.learner_model_param->num_output_group; const size_t ncolumns = model_.learner_model_param->num_feature + 1; // allocate space for (#features + bias) times #groups times #rows @@ -250,7 +250,7 @@ class GBLinear : public GradientBooster { linalg::TensorView scores{ *out_scores, {learner_model_param_->num_feature, n_groups}, - GenericParameter::kCpuId}; + Context::kCpuId}; for (size_t i = 0; i < learner_model_param_->num_feature; ++i) { for (bst_group_t g = 0; g < n_groups; ++g) { scores(i, g) = model_[i][g]; @@ -355,7 +355,7 @@ DMLC_REGISTER_PARAMETER(GBLinearTrainParam); XGBOOST_REGISTER_GBM(GBLinear, "gblinear") .describe("Linear booster, implement generalized linear model.") - .set_body([](LearnerModelParam const* booster_config, GenericParameter const* ctx) { + .set_body([](LearnerModelParam const* booster_config, Context const* ctx) { return new GBLinear(booster_config, ctx); }); } // namespace gbm diff --git a/src/gbm/gbm.cc b/src/gbm/gbm.cc index e8af1a553..9f88e30f5 100644 --- a/src/gbm/gbm.cc +++ b/src/gbm/gbm.cc @@ -3,21 +3,23 @@ * \file gbm.cc * \brief Registry of gradient boosters. 
*/ +#include "xgboost/gbm.h" + #include + +#include #include #include -#include -#include "xgboost/gbm.h" +#include "xgboost/context.h" #include "xgboost/learner.h" -#include "xgboost/generic_parameters.h" namespace dmlc { DMLC_REGISTRY_ENABLE(::xgboost::GradientBoosterReg); } // namespace dmlc namespace xgboost { -GradientBooster* GradientBooster::Create(const std::string& name, GenericParameter const* ctx, +GradientBooster* GradientBooster::Create(const std::string& name, Context const* ctx, LearnerModelParam const* learner_model_param) { auto *e = ::dmlc::Registry< ::xgboost::GradientBoosterReg>::Get()->Find(name); if (e == nullptr) { diff --git a/src/gbm/gbtree.cc b/src/gbm/gbtree.cc index c7b916e38..210b7c0dd 100644 --- a/src/gbm/gbtree.cc +++ b/src/gbm/gbtree.cc @@ -67,7 +67,7 @@ void GBTree::Configure(const Args& cfg) { #if defined(XGBOOST_USE_ONEAPI) if (!oneapi_predictor_) { oneapi_predictor_ = std::unique_ptr( - Predictor::Create("oneapi_predictor", this->generic_param_)); + Predictor::Create("oneapi_predictor", this->ctx_)); } oneapi_predictor_->Configure(cfg); #endif // defined(XGBOOST_USE_ONEAPI) @@ -204,7 +204,7 @@ void GPUCopyGradient(HostDeviceVector const*, bst_group_t, bst_gro void CopyGradient(HostDeviceVector const* in_gpair, int32_t n_threads, bst_group_t n_groups, bst_group_t group_id, HostDeviceVector* out_gpair) { - if (in_gpair->DeviceIdx() != GenericParameter::kCpuId) { + if (in_gpair->DeviceIdx() != Context::kCpuId) { GPUCopyGradient(in_gpair, n_groups, group_id, out_gpair); } else { std::vector &tmp_h = out_gpair->HostVector(); @@ -651,7 +651,7 @@ void GPUDartInplacePredictInc(common::Span /*out_predts*/, common::Spannum_output_group; PredictionCacheEntry predts; // temporary storage for prediction - if (ctx_->gpu_id != GenericParameter::kCpuId) { + if (ctx_->gpu_id != Context::kCpuId) { predts.predictions.SetDevice(ctx_->gpu_id); } predts.predictions.Resize(p_fmat->Info().num_row_ * n_groups, 0); @@ -763,7 +763,7 @@ class Dart : public GBTree { CHECK_EQ(p_out_preds->predictions.Size(), predts.predictions.Size()); size_t n_rows = p_fmat->Info().num_row_; - if (predts.predictions.DeviceIdx() != GenericParameter::kCpuId) { + if (predts.predictions.DeviceIdx() != Context::kCpuId) { p_out_preds->predictions.SetDevice(predts.predictions.DeviceIdx()); GPUDartPredictInc(p_out_preds->predictions.DeviceSpan(), predts.predictions.DeviceSpan(), w, n_rows, n_groups, @@ -1019,13 +1019,13 @@ DMLC_REGISTER_PARAMETER(DartTrainParam); XGBOOST_REGISTER_GBM(GBTree, "gbtree") .describe("Tree booster, gradient boosted trees.") - .set_body([](LearnerModelParam const* booster_config, GenericParameter const* ctx) { + .set_body([](LearnerModelParam const* booster_config, Context const* ctx) { auto* p = new GBTree(booster_config, ctx); return p; }); XGBOOST_REGISTER_GBM(Dart, "dart") .describe("Tree booster, dart.") - .set_body([](LearnerModelParam const* booster_config, GenericParameter const* ctx) { + .set_body([](LearnerModelParam const* booster_config, Context const* ctx) { GBTree* p = new Dart(booster_config, ctx); return p; }); diff --git a/src/gbm/gbtree.cu b/src/gbm/gbtree.cu index 12109782d..acff9de52 100644 --- a/src/gbm/gbtree.cu +++ b/src/gbm/gbtree.cu @@ -1,10 +1,10 @@ /*! 
* Copyright 2021 by Contributors */ -#include "xgboost/span.h" -#include "xgboost/generic_parameters.h" -#include "xgboost/linalg.h" #include "../common/device_helpers.cuh" +#include "xgboost/context.h" +#include "xgboost/linalg.h" +#include "xgboost/span.h" namespace xgboost { namespace gbm { diff --git a/src/gbm/gbtree.h b/src/gbm/gbtree.h index 78224aba4..38dcb25ea 100644 --- a/src/gbm/gbtree.h +++ b/src/gbm/gbtree.h @@ -190,7 +190,7 @@ bool SliceTrees(int32_t layer_begin, int32_t layer_end, int32_t step, GBTreeMode // gradient boosted trees class GBTree : public GradientBooster { public: - explicit GBTree(LearnerModelParam const* booster_config, GenericParameter const* ctx) + explicit GBTree(LearnerModelParam const* booster_config, Context const* ctx) : GradientBooster{ctx}, model_(booster_config, ctx_) {} void Configure(const Args& cfg) override; diff --git a/src/gbm/gbtree_model.h b/src/gbm/gbtree_model.h index 1e4ac73de..1f2bdfa63 100644 --- a/src/gbm/gbtree_model.h +++ b/src/gbm/gbtree_model.h @@ -5,16 +5,17 @@ #ifndef XGBOOST_GBM_GBTREE_MODEL_H_ #define XGBOOST_GBM_GBTREE_MODEL_H_ -#include #include -#include -#include -#include +#include +#include #include +#include +#include +#include #include -#include #include +#include #include #include "../common/threading_utils.h" @@ -89,7 +90,7 @@ struct GBTreeModelParam : public dmlc::Parameter { struct GBTreeModel : public Model { public: - explicit GBTreeModel(LearnerModelParam const* learner_model, GenericParameter const* ctx) + explicit GBTreeModel(LearnerModelParam const* learner_model, Context const* ctx) : learner_model_param{learner_model}, ctx_{ctx} {} void Configure(const Args& cfg) { // initialize model parameters if not yet been initialized. @@ -143,7 +144,7 @@ struct GBTreeModel : public Model { std::vector tree_info; private: - GenericParameter const* ctx_; + Context const* ctx_; }; } // namespace gbm } // namespace xgboost diff --git a/src/learner.cc b/src/learner.cc index 3d3d18535..226399049 100644 --- a/src/learner.cc +++ b/src/learner.cc @@ -35,10 +35,10 @@ #include "common/version.h" #include "xgboost/base.h" #include "xgboost/c_api.h" +#include "xgboost/context.h" #include "xgboost/data.h" #include "xgboost/feature_map.h" #include "xgboost/gbm.h" -#include "xgboost/generic_parameters.h" #include "xgboost/host_device_vector.h" #include "xgboost/json.h" #include "xgboost/logging.h" @@ -306,56 +306,6 @@ struct LearnerTrainParam : public XGBoostParameter { DMLC_REGISTER_PARAMETER(LearnerModelParamLegacy); DMLC_REGISTER_PARAMETER(LearnerTrainParam); -DMLC_REGISTER_PARAMETER(GenericParameter); - -int constexpr GenericParameter::kCpuId; -int64_t constexpr GenericParameter::kDefaultSeed; - -GenericParameter::GenericParameter() : cfs_cpu_count_{common::GetCfsCPUCount()} {} - -void GenericParameter::ConfigureGpuId(bool require_gpu) { -#if defined(XGBOOST_USE_CUDA) - if (gpu_id == kCpuId) { // 0. User didn't specify the `gpu_id' - if (require_gpu) { // 1. `tree_method' or `predictor' or both are using - // GPU. - // 2. Use device 0 as default. - this->UpdateAllowUnknown(Args{{"gpu_id", "0"}}); - } - } - - // 3. When booster is loaded from a memory image (Python pickle or R - // raw model), number of available GPUs could be different. Wrap around it. 
- int32_t n_gpus = common::AllVisibleGPUs(); - if (n_gpus == 0) { - if (gpu_id != kCpuId) { - LOG(WARNING) << "No visible GPU is found, setting `gpu_id` to -1"; - } - this->UpdateAllowUnknown(Args{{"gpu_id", std::to_string(kCpuId)}}); - } else if (fail_on_invalid_gpu_id) { - CHECK(gpu_id == kCpuId || gpu_id < n_gpus) - << "Only " << n_gpus << " GPUs are visible, gpu_id " - << gpu_id << " is invalid."; - } else if (gpu_id != kCpuId && gpu_id >= n_gpus) { - LOG(WARNING) << "Only " << n_gpus - << " GPUs are visible, setting `gpu_id` to " << gpu_id % n_gpus; - this->UpdateAllowUnknown(Args{{"gpu_id", std::to_string(gpu_id % n_gpus)}}); - } -#else - // Just set it to CPU, don't think about it. - this->UpdateAllowUnknown(Args{{"gpu_id", std::to_string(kCpuId)}}); - (void)(require_gpu); -#endif // defined(XGBOOST_USE_CUDA) - - common::SetDevice(this->gpu_id); -} - -int32_t GenericParameter::Threads() const { - auto n_threads = common::OmpGetNumThreads(nthread); - if (cfs_cpu_count_ > 0) { - n_threads = std::min(n_threads, cfs_cpu_count_); - } - return n_threads; -} using LearnerAPIThreadLocalStore = dmlc::ThreadLocalStore>; @@ -461,7 +411,7 @@ class LearnerConfiguration : public Learner { monitor_.Init("Learner"); auto& local_cache = (*ThreadLocalPredictionCache::Get())[this]; for (std::shared_ptr const& d : cache) { - local_cache.Cache(d, GenericParameter::kCpuId); + local_cache.Cache(d, Context::kCpuId); } } ~LearnerConfiguration() override { @@ -541,6 +491,9 @@ class LearnerConfiguration : public Learner { // If configuration is loaded, ensure that the model came from the same version CHECK(IsA(in)); auto origin_version = Version::Load(in); + if (std::get<0>(Version::kInvalid) == std::get<0>(origin_version)) { + LOG(WARNING) << "Invalid version string in config"; + } if (!Version::Same(origin_version)) { LOG(WARNING) << ModelMsg(); diff --git a/src/linear/coordinate_common.h b/src/linear/coordinate_common.h index 1f7c81d11..f61c423f0 100644 --- a/src/linear/coordinate_common.h +++ b/src/linear/coordinate_common.h @@ -108,10 +108,10 @@ inline std::pair GetGradient(int group_idx, int num_group, int f * * \return The gradient and diagonal Hessian entry for a given feature. 
*/ -inline std::pair -GetGradientParallel(GenericParameter const *ctx, int group_idx, int num_group, - int fidx, const std::vector &gpair, - DMatrix *p_fmat) { +inline std::pair GetGradientParallel(Context const *ctx, int group_idx, + int num_group, int fidx, + const std::vector &gpair, + DMatrix *p_fmat) { std::vector sum_grad_tloc(ctx->Threads(), 0.0); std::vector sum_hess_tloc(ctx->Threads(), 0.0); diff --git a/src/linear/linear_updater.cc b/src/linear/linear_updater.cc index 4593d54f0..e66206196 100644 --- a/src/linear/linear_updater.cc +++ b/src/linear/linear_updater.cc @@ -11,13 +11,13 @@ DMLC_REGISTRY_ENABLE(::xgboost::LinearUpdaterReg); namespace xgboost { -LinearUpdater* LinearUpdater::Create(const std::string& name, GenericParameter const* lparam) { +LinearUpdater* LinearUpdater::Create(const std::string& name, Context const* ctx) { auto *e = ::dmlc::Registry< ::xgboost::LinearUpdaterReg>::Get()->Find(name); if (e == nullptr) { LOG(FATAL) << "Unknown linear updater " << name; } auto p_linear = (e->body)(); - p_linear->ctx_ = lparam; + p_linear->ctx_ = ctx; return p_linear; } diff --git a/src/metric/auc.cc b/src/metric/auc.cc index 318a3d4aa..eb2ca3630 100644 --- a/src/metric/auc.cc +++ b/src/metric/auc.cc @@ -79,14 +79,14 @@ double MultiClassOVR(common::Span predts, MetaInfo const &info, size_t n_classes, int32_t n_threads, BinaryAUC &&binary_auc) { CHECK_NE(n_classes, 0); - auto const labels = info.labels.View(GenericParameter::kCpuId); + auto const labels = info.labels.View(Context::kCpuId); if (labels.Shape(0) != 0) { CHECK_EQ(labels.Shape(1), 1) << "AUC doesn't support multi-target model."; } std::vector results_storage(n_classes * 3, 0); linalg::TensorView results(results_storage, {n_classes, static_cast(3)}, - GenericParameter::kCpuId); + Context::kCpuId); auto local_area = results.Slice(linalg::All(), 0); auto tp = results.Slice(linalg::All(), 1); auto auc = results.Slice(linalg::All(), 2); @@ -94,7 +94,7 @@ double MultiClassOVR(common::Span predts, MetaInfo const &info, auto weights = common::OptionalWeights{info.weights_.ConstHostSpan()}; auto predts_t = linalg::TensorView( predts, {static_cast(info.num_row_), n_classes}, - GenericParameter::kCpuId); + Context::kCpuId); if (info.labels.Size() != 0) { common::ParallelFor(n_classes, n_threads, [&](auto c) { @@ -215,7 +215,7 @@ std::pair RankingAUC(std::vector const &predts, CHECK_GE(info.group_ptr_.size(), 2); uint32_t n_groups = info.group_ptr_.size() - 1; auto s_predts = common::Span{predts}; - auto labels = info.labels.View(GenericParameter::kCpuId); + auto labels = info.labels.View(Context::kCpuId); auto s_weights = info.weights_.ConstHostSpan(); std::atomic invalid_groups{0}; @@ -255,7 +255,7 @@ template class EvalAUC : public Metric { double Eval(const HostDeviceVector &preds, const MetaInfo &info) override { double auc {0}; - if (tparam_->gpu_id != GenericParameter::kCpuId) { + if (tparam_->gpu_id != Context::kCpuId) { preds.SetDevice(tparam_->gpu_id); info.labels.SetDevice(tparam_->gpu_id); info.weights_.SetDevice(tparam_->gpu_id); @@ -340,7 +340,7 @@ class EvalROCAUC : public EvalAUC { double auc{0}; uint32_t valid_groups = 0; auto n_threads = tparam_->Threads(); - if (tparam_->gpu_id == GenericParameter::kCpuId) { + if (tparam_->gpu_id == Context::kCpuId) { std::tie(auc, valid_groups) = RankingAUC(predts.ConstHostVector(), info, n_threads); } else { @@ -355,7 +355,7 @@ class EvalROCAUC : public EvalAUC { double auc{0}; auto n_threads = tparam_->Threads(); CHECK_NE(n_classes, 0); - if (tparam_->gpu_id == 
GenericParameter::kCpuId) { + if (tparam_->gpu_id == Context::kCpuId) { auc = MultiClassOVR(predts.ConstHostVector(), info, n_classes, n_threads, BinaryROCAUC); } else { @@ -368,7 +368,7 @@ class EvalROCAUC : public EvalAUC { std::tuple EvalBinary(HostDeviceVector const &predts, MetaInfo const &info) { double fp, tp, auc; - if (tparam_->gpu_id == GenericParameter::kCpuId) { + if (tparam_->gpu_id == Context::kCpuId) { std::tie(fp, tp, auc) = BinaryROCAUC(predts.ConstHostVector(), info.labels.HostView().Slice(linalg::All(), 0), common::OptionalWeights{info.weights_.ConstHostSpan()}); @@ -418,7 +418,7 @@ class EvalPRAUC : public EvalAUC { std::tuple EvalBinary(HostDeviceVector const &predts, MetaInfo const &info) { double pr, re, auc; - if (tparam_->gpu_id == GenericParameter::kCpuId) { + if (tparam_->gpu_id == Context::kCpuId) { std::tie(pr, re, auc) = BinaryPRAUC(predts.ConstHostSpan(), info.labels.HostView().Slice(linalg::All(), 0), common::OptionalWeights{info.weights_.ConstHostSpan()}); @@ -431,7 +431,7 @@ class EvalPRAUC : public EvalAUC { double EvalMultiClass(HostDeviceVector const &predts, MetaInfo const &info, size_t n_classes) { - if (tparam_->gpu_id == GenericParameter::kCpuId) { + if (tparam_->gpu_id == Context::kCpuId) { auto n_threads = this->tparam_->Threads(); return MultiClassOVR(predts.ConstHostSpan(), info, n_classes, n_threads, BinaryPRAUC); @@ -446,7 +446,7 @@ class EvalPRAUC : public EvalAUC { double auc{0}; uint32_t valid_groups = 0; auto n_threads = tparam_->Threads(); - if (tparam_->gpu_id == GenericParameter::kCpuId) { + if (tparam_->gpu_id == Context::kCpuId) { auto labels = info.labels.Data()->ConstHostSpan(); if (std::any_of(labels.cbegin(), labels.cend(), PRAUCLabelInvalid{})) { InvalidLabels(); diff --git a/src/metric/elementwise_metric.cu b/src/metric/elementwise_metric.cu index 17151e4b1..b2b4f60ed 100644 --- a/src/metric/elementwise_metric.cu +++ b/src/metric/elementwise_metric.cu @@ -40,7 +40,7 @@ namespace { * applying the weights. A tuple of {error_i, weight_i} is expected as return. */ template -PackedReduceResult Reduce(GenericParameter const* ctx, MetaInfo const& info, Fn&& loss) { +PackedReduceResult Reduce(Context const* ctx, MetaInfo const& info, Fn&& loss) { PackedReduceResult result; auto labels = info.labels.View(ctx->gpu_id); if (ctx->IsCPU()) { diff --git a/src/metric/metric.cc b/src/metric/metric.cc index f6c1d53bb..9fe12f0c9 100644 --- a/src/metric/metric.cc +++ b/src/metric/metric.cc @@ -4,8 +4,8 @@ * \brief Registry of objective functions. 
*/ #include +#include #include -#include #include "metric_common.h" @@ -43,7 +43,7 @@ Metric* CreateMetricImpl(const std::string& name) { } Metric * -Metric::Create(const std::string& name, GenericParameter const* tparam) { +Metric::Create(const std::string& name, Context const* tparam) { auto metric = CreateMetricImpl(name); if (metric == nullptr) { LOG(FATAL) << "Unknown metric function " << name; @@ -54,7 +54,7 @@ Metric::Create(const std::string& name, GenericParameter const* tparam) { } Metric * -GPUMetric::CreateGPUMetric(const std::string& name, GenericParameter const* tparam) { +GPUMetric::CreateGPUMetric(const std::string& name, Context const* tparam) { auto metric = CreateMetricImpl(name); if (metric == nullptr) { LOG(WARNING) << "Cannot find a GPU metric builder for metric " << name diff --git a/src/metric/metric_common.h b/src/metric/metric_common.h index b1da2c59d..747b70a63 100644 --- a/src/metric/metric_common.h +++ b/src/metric/metric_common.h @@ -12,12 +12,13 @@ #include "xgboost/metric.h" namespace xgboost { +struct Context; // This creates a GPU metric instance dynamically and adds it to the GPU metric registry, if not // present already. This is created when there is a device ordinal present and if xgboost // is compiled with CUDA support struct GPUMetric : Metric { - static Metric *CreateGPUMetric(const std::string& name, GenericParameter const* tparam); + static Metric *CreateGPUMetric(const std::string &name, Context const *tparam); }; /*! diff --git a/src/metric/multiclass_metric.cu b/src/metric/multiclass_metric.cu index c453f6686..559875505 100644 --- a/src/metric/multiclass_metric.cu +++ b/src/metric/multiclass_metric.cu @@ -126,13 +126,10 @@ class MultiClassMetricsReduction { #endif // XGBOOST_USE_CUDA - PackedReduceResult Reduce( - const GenericParameter &tparam, - int device, - size_t n_class, - const HostDeviceVector& weights, - const HostDeviceVector& labels, - const HostDeviceVector& preds) { + PackedReduceResult Reduce(const Context& tparam, int device, size_t n_class, + const HostDeviceVector& weights, + const HostDeviceVector& labels, + const HostDeviceVector& preds) { PackedReduceResult result; if (device < 0) { diff --git a/src/metric/rank_metric.cc b/src/metric/rank_metric.cc index 2956a3fa7..e4c07a922 100644 --- a/src/metric/rank_metric.cc +++ b/src/metric/rank_metric.cc @@ -118,7 +118,7 @@ struct EvalAMS : public Metric { const double br = 10.0; unsigned thresindex = 0; double s_tp = 0.0, b_fp = 0.0, tams = 0.0; - const auto& labels = info.labels.View(GenericParameter::kCpuId); + const auto& labels = info.labels.View(Context::kCpuId); for (unsigned i = 0; i < static_cast(ndata-1) && i < ntop; ++i) { const unsigned ridx = rec[i].second; const bst_float wt = info.GetWeight(ridx); @@ -191,7 +191,7 @@ struct EvalRank : public Metric, public EvalRankConfig { std::vector sum_tloc(tparam_->Threads(), 0.0); if (!rank_gpu_ || tparam_->gpu_id < 0) { - const auto& labels = info.labels.View(GenericParameter::kCpuId); + const auto& labels = info.labels.View(Context::kCpuId); const auto &h_preds = preds.ConstHostVector(); dmlc::OMPException exc; diff --git a/src/metric/survival_metric.cu b/src/metric/survival_metric.cu index 86ce9672a..77f48ff67 100644 --- a/src/metric/survival_metric.cu +++ b/src/metric/survival_metric.cu @@ -123,7 +123,7 @@ class ElementWiseSurvivalMetricsReduction { #endif // XGBOOST_USE_CUDA PackedReduceResult Reduce( - const GenericParameter &ctx, + const Context &ctx, const HostDeviceVector& weights, const HostDeviceVector& 
labels_lower_bound, const HostDeviceVector& labels_upper_bound, @@ -195,7 +195,7 @@ struct EvalAFTNLogLik { }; template struct EvalEWiseSurvivalBase : public Metric { - explicit EvalEWiseSurvivalBase(GenericParameter const *ctx) { + explicit EvalEWiseSurvivalBase(Context const *ctx) { tparam_ = ctx; } EvalEWiseSurvivalBase() = default; diff --git a/src/objective/adaptive.h b/src/objective/adaptive.h index ba37f83e4..11a8f8975 100644 --- a/src/objective/adaptive.h +++ b/src/objective/adaptive.h @@ -9,7 +9,7 @@ #include "../collective/communicator-inl.h" #include "../common/common.h" -#include "xgboost/generic_parameters.h" +#include "xgboost/context.h" #include "xgboost/host_device_vector.h" #include "xgboost/tree_model.h" diff --git a/src/objective/objective.cc b/src/objective/objective.cc index 5ba5f87fb..9512233dc 100644 --- a/src/objective/objective.cc +++ b/src/objective/objective.cc @@ -4,6 +4,7 @@ * \brief Registry of all objective functions. */ #include +#include #include #include @@ -16,7 +17,7 @@ DMLC_REGISTRY_ENABLE(::xgboost::ObjFunctionReg); namespace xgboost { // implement factory functions -ObjFunction* ObjFunction::Create(const std::string& name, GenericParameter const* tparam) { +ObjFunction* ObjFunction::Create(const std::string& name, Context const* ctx) { auto *e = ::dmlc::Registry< ::xgboost::ObjFunctionReg>::Get()->Find(name); if (e == nullptr) { std::stringstream ss; @@ -27,7 +28,7 @@ ObjFunction* ObjFunction::Create(const std::string& name, GenericParameter const << ss.str(); } auto pobj = (e->body)(); - pobj->ctx_ = tparam; + pobj->ctx_ = ctx; return pobj; } diff --git a/src/objective/regression_obj.cu b/src/objective/regression_obj.cu index 9aff1c787..ef2bcc331 100644 --- a/src/objective/regression_obj.cu +++ b/src/objective/regression_obj.cu @@ -23,8 +23,8 @@ #include "./regression_loss.h" #include "adaptive.h" #include "xgboost/base.h" +#include "xgboost/context.h" #include "xgboost/data.h" -#include "xgboost/generic_parameters.h" #include "xgboost/host_device_vector.h" #include "xgboost/json.h" #include "xgboost/linalg.h" diff --git a/src/predictor/cpu_predictor.cc b/src/predictor/cpu_predictor.cc index 444d1b089..8deeaf703 100644 --- a/src/predictor/cpu_predictor.cc +++ b/src/predictor/cpu_predictor.cc @@ -1,5 +1,5 @@ /*! 
- * Copyright by Contributors 2017-2021 + * Copyright by XGBoost Contributors 2017-2022 */ #include #include @@ -351,8 +351,7 @@ class CPUPredictor : public Predictor { } public: - explicit CPUPredictor(GenericParameter const* generic_param) : - Predictor::Predictor{generic_param} {} + explicit CPUPredictor(Context const *ctx) : Predictor::Predictor{ctx} {} void PredictBatch(DMatrix *dmat, PredictionCacheEntry *predts, const gbm::GBTreeModel &model, uint32_t tree_begin, @@ -614,9 +613,7 @@ class CPUPredictor : public Predictor { }; XGBOOST_REGISTER_PREDICTOR(CPUPredictor, "cpu_predictor") -.describe("Make predictions using CPU.") -.set_body([](GenericParameter const* generic_param) { - return new CPUPredictor(generic_param); - }); + .describe("Make predictions using CPU.") + .set_body([](Context const *ctx) { return new CPUPredictor(ctx); }); } // namespace predictor } // namespace xgboost diff --git a/src/predictor/gpu_predictor.cu b/src/predictor/gpu_predictor.cu index 271688330..35daf701c 100644 --- a/src/predictor/gpu_predictor.cu +++ b/src/predictor/gpu_predictor.cu @@ -723,8 +723,7 @@ class GPUPredictor : public xgboost::Predictor { } public: - explicit GPUPredictor(GenericParameter const* generic_param) : - Predictor::Predictor{generic_param} {} + explicit GPUPredictor(Context const* ctx) : Predictor::Predictor{ctx} {} ~GPUPredictor() override { if (ctx_->gpu_id >= 0 && ctx_->gpu_id < common::AllVisibleGPUs()) { @@ -1026,10 +1025,8 @@ class GPUPredictor : public xgboost::Predictor { }; XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor") -.describe("Make predictions using GPU.") -.set_body([](GenericParameter const* generic_param) { - return new GPUPredictor(generic_param); - }); + .describe("Make predictions using GPU.") + .set_body([](Context const* ctx) { return new GPUPredictor(ctx); }); } // namespace predictor } // namespace xgboost diff --git a/src/predictor/predictor.cc b/src/predictor/predictor.cc index 5701ed892..38ac9492f 100644 --- a/src/predictor/predictor.cc +++ b/src/predictor/predictor.cc @@ -1,14 +1,15 @@ /*! 
* Copyright 2017-2021 by Contributors */ +#include "xgboost/predictor.h" + #include + #include -#include "xgboost/predictor.h" -#include "xgboost/data.h" -#include "xgboost/generic_parameters.h" - #include "../gbm/gbtree.h" +#include "xgboost/context.h" +#include "xgboost/data.h" namespace dmlc { DMLC_REGISTRY_ENABLE(::xgboost::PredictorReg); @@ -30,7 +31,7 @@ void PredictionContainer::ClearExpiredEntries() { PredictionCacheEntry &PredictionContainer::Cache(std::shared_ptr m, int32_t device) { this->ClearExpiredEntries(); container_[m.get()].ref = m; - if (device != GenericParameter::kCpuId) { + if (device != Context::kCpuId) { container_[m.get()].predictions.SetDevice(device); } return container_[m.get()]; @@ -51,13 +52,12 @@ decltype(PredictionContainer::container_) const& PredictionContainer::Container( void Predictor::Configure( const std::vector>&) { } -Predictor* Predictor::Create( - std::string const& name, GenericParameter const* generic_param) { +Predictor* Predictor::Create(std::string const& name, Context const* ctx) { auto* e = ::dmlc::Registry::Get()->Find(name); if (e == nullptr) { LOG(FATAL) << "Unknown predictor type " << name; } - auto p_predictor = (e->body)(generic_param); + auto p_predictor = (e->body)(ctx); return p_predictor; } diff --git a/src/tree/common_row_partitioner.h b/src/tree/common_row_partitioner.h index 949948856..a5f4aac2d 100644 --- a/src/tree/common_row_partitioner.h +++ b/src/tree/common_row_partitioner.h @@ -11,8 +11,8 @@ #include "../common/numeric.h" // Iota #include "../common/partition_builder.h" -#include "hist/expand_entry.h" // CPUExpandEntry -#include "xgboost/generic_parameters.h" // Context +#include "hist/expand_entry.h" // CPUExpandEntry +#include "xgboost/context.h" // Context namespace xgboost { namespace tree { diff --git a/src/tree/gpu_hist/row_partitioner.cuh b/src/tree/gpu_hist/row_partitioner.cuh index 4f0a4142e..a2519ae6f 100644 --- a/src/tree/gpu_hist/row_partitioner.cuh +++ b/src/tree/gpu_hist/row_partitioner.cuh @@ -9,7 +9,7 @@ #include "../../common/device_helpers.cuh" #include "xgboost/base.h" -#include "xgboost/generic_parameters.h" +#include "xgboost/context.h" #include "xgboost/task.h" #include "xgboost/tree_model.h" diff --git a/src/tree/hist/evaluate_splits.h b/src/tree/hist/evaluate_splits.h index 0a09718ef..f76565e9a 100644 --- a/src/tree/hist/evaluate_splits.h +++ b/src/tree/hist/evaluate_splits.h @@ -5,19 +5,20 @@ #define XGBOOST_TREE_HIST_EVALUATE_SPLITS_H_ #include +#include #include #include -#include #include #include -#include "../param.h" -#include "../constraints.h" -#include "../split_evaluator.h" #include "../../common/categorical.h" -#include "../../common/random.h" #include "../../common/hist_util.h" +#include "../../common/random.h" #include "../../data/gradient_index.h" +#include "../constraints.h" +#include "../param.h" +#include "../split_evaluator.h" +#include "xgboost/context.h" namespace xgboost { namespace tree { @@ -427,7 +428,7 @@ class HistEvaluator { std::shared_ptr sampler) : param_{param}, column_sampler_{std::move(sampler)}, - tree_evaluator_{param, static_cast(info.num_col_), GenericParameter::kCpuId}, + tree_evaluator_{param, static_cast(info.num_col_), Context::kCpuId}, n_threads_{n_threads} { interaction_constraints_.Configure(param, info.num_col_); column_sampler_->Init(info.num_col_, info.feature_weights.HostVector(), param_.colsample_bynode, @@ -442,14 +443,14 @@ class HistEvaluator { * \param p_last_tree The last tree being updated by tree updater */ template -void 
UpdatePredictionCacheImpl(GenericParameter const *ctx, RegTree const *p_last_tree, +void UpdatePredictionCacheImpl(Context const *ctx, RegTree const *p_last_tree, std::vector const &partitioner, linalg::VectorView out_preds) { CHECK_GT(out_preds.Size(), 0U); CHECK(p_last_tree); auto const &tree = *p_last_tree; - CHECK_EQ(out_preds.DeviceIdx(), GenericParameter::kCpuId); + CHECK_EQ(out_preds.DeviceIdx(), Context::kCpuId); size_t n_nodes = p_last_tree->GetNodes().size(); for (auto &part : partitioner) { CHECK_EQ(part.Size(), n_nodes); diff --git a/src/tree/split_evaluator.h b/src/tree/split_evaluator.h index d19755d37..c036cc3ed 100644 --- a/src/tree/split_evaluator.h +++ b/src/tree/split_evaluator.h @@ -10,17 +10,18 @@ #include #include + +#include +#include #include #include -#include -#include -#include "xgboost/tree_model.h" -#include "xgboost/host_device_vector.h" -#include "xgboost/generic_parameters.h" -#include "../common/transform.h" #include "../common/math.h" +#include "../common/transform.h" #include "param.h" +#include "xgboost/context.h" +#include "xgboost/host_device_vector.h" +#include "xgboost/tree_model.h" namespace xgboost { namespace tree { @@ -38,7 +39,7 @@ class TreeEvaluator { public: TreeEvaluator(TrainParam const& p, bst_feature_t n_features, int32_t device) { device_ = device; - if (device != GenericParameter::kCpuId) { + if (device != Context::kCpuId) { lower_bounds_.SetDevice(device); upper_bounds_.SetDevice(device); monotone_.SetDevice(device); @@ -56,7 +57,7 @@ class TreeEvaluator { has_constraint_ = true; } - if (device_ != GenericParameter::kCpuId) { + if (device_ != Context::kCpuId) { // Pull to device early. lower_bounds_.ConstDeviceSpan(); upper_bounds_.ConstDeviceSpan(); @@ -151,7 +152,7 @@ class TreeEvaluator { public: /* Get a view to the evaluator that can be passed down to device. 
*/ template auto GetEvaluator() const { - if (device_ != GenericParameter::kCpuId) { + if (device_ != Context::kCpuId) { auto constraints = monotone_.ConstDevicePointer(); return SplitEvaluator{constraints, lower_bounds_.ConstDevicePointer(), upper_bounds_.ConstDevicePointer(), has_constraint_}; diff --git a/src/tree/tree_updater.cc b/src/tree/tree_updater.cc index 190a1e020..286daa4d8 100644 --- a/src/tree/tree_updater.cc +++ b/src/tree/tree_updater.cc @@ -14,13 +14,12 @@ DMLC_REGISTRY_ENABLE(::xgboost::TreeUpdaterReg); namespace xgboost { -TreeUpdater* TreeUpdater::Create(const std::string& name, GenericParameter const* tparam, - ObjInfo task) { +TreeUpdater* TreeUpdater::Create(const std::string& name, Context const* ctx, ObjInfo task) { auto* e = ::dmlc::Registry< ::xgboost::TreeUpdaterReg>::Get()->Find(name); if (e == nullptr) { LOG(FATAL) << "Unknown tree updater " << name; } - auto p_updater = (e->body)(tparam, task); + auto p_updater = (e->body)(ctx, task); return p_updater; } diff --git a/src/tree/updater_approx.cc b/src/tree/updater_approx.cc index 734138da5..bc090ed3f 100644 --- a/src/tree/updater_approx.cc +++ b/src/tree/updater_approx.cc @@ -256,7 +256,7 @@ class GlobalApproxUpdater : public TreeUpdater { ObjInfo task_; public: - explicit GlobalApproxUpdater(GenericParameter const *ctx, ObjInfo task) + explicit GlobalApproxUpdater(Context const *ctx, ObjInfo task) : TreeUpdater(ctx), task_{task} { monitor_.Init(__func__); } @@ -337,8 +337,6 @@ XGBOOST_REGISTER_TREE_UPDATER(GlobalHistMaker, "grow_histmaker") .describe( "Tree constructor that uses approximate histogram construction " "for each node.") - .set_body([](GenericParameter const *ctx, ObjInfo task) { - return new GlobalApproxUpdater(ctx, task); - }); + .set_body([](Context const *ctx, ObjInfo task) { return new GlobalApproxUpdater(ctx, task); }); } // namespace tree } // namespace xgboost diff --git a/src/tree/updater_colmaker.cc b/src/tree/updater_colmaker.cc index 89e928e4d..07483038c 100644 --- a/src/tree/updater_colmaker.cc +++ b/src/tree/updater_colmaker.cc @@ -55,7 +55,7 @@ DMLC_REGISTER_PARAMETER(ColMakerTrainParam); /*! 
\brief column-wise update to construct a tree */ class ColMaker: public TreeUpdater { public: - explicit ColMaker(GenericParameter const *ctx) : TreeUpdater(ctx) {} + explicit ColMaker(Context const *ctx) : TreeUpdater(ctx) {} void Configure(const Args &args) override { param_.UpdateAllowUnknown(args); colmaker_param_.UpdateAllowUnknown(args); @@ -159,11 +159,11 @@ class ColMaker: public TreeUpdater { // constructor explicit Builder(const TrainParam ¶m, const ColMakerTrainParam &colmaker_train_param, FeatureInteractionConstraintHost _interaction_constraints, - GenericParameter const *ctx, const std::vector &column_densities) + Context const *ctx, const std::vector &column_densities) : param_(param), colmaker_train_param_{colmaker_train_param}, ctx_{ctx}, - tree_evaluator_(param_, column_densities.size(), GenericParameter::kCpuId), + tree_evaluator_(param_, column_densities.size(), Context::kCpuId), interaction_constraints_{std::move(_interaction_constraints)}, column_densities_(column_densities) {} // update one tree, growing @@ -594,7 +594,7 @@ class ColMaker: public TreeUpdater { const TrainParam& param_; const ColMakerTrainParam& colmaker_train_param_; // number of omp thread used during training - GenericParameter const* ctx_; + Context const* ctx_; common::ColumnSampler column_sampler_; // Instance Data: current node position in the tree of each instance std::vector position_; @@ -612,9 +612,7 @@ class ColMaker: public TreeUpdater { }; XGBOOST_REGISTER_TREE_UPDATER(ColMaker, "grow_colmaker") -.describe("Grow tree with parallelization over columns.") -.set_body([](GenericParameter const* ctx, ObjInfo) { - return new ColMaker(ctx); - }); + .describe("Grow tree with parallelization over columns.") + .set_body([](Context const *ctx, ObjInfo) { return new ColMaker(ctx); }); } // namespace tree } // namespace xgboost diff --git a/src/tree/updater_gpu_hist.cu b/src/tree/updater_gpu_hist.cu index b3b3004a2..1d40c72f0 100644 --- a/src/tree/updater_gpu_hist.cu +++ b/src/tree/updater_gpu_hist.cu @@ -4,41 +4,40 @@ #include #include #include + #include #include -#include #include +#include #include #include -#include "xgboost/base.h" -#include "xgboost/data.h" -#include "xgboost/generic_parameters.h" -#include "xgboost/host_device_vector.h" -#include "xgboost/parameter.h" -#include "xgboost/span.h" -#include "xgboost/json.h" - #include "../collective/device_communicator.cuh" -#include "../common/io.h" +#include "../common/bitfield.h" +#include "../common/categorical.h" #include "../common/device_helpers.cuh" #include "../common/hist_util.h" -#include "../common/bitfield.h" +#include "../common/io.h" #include "../common/timer.h" -#include "../common/categorical.h" #include "../data/ellpack_page.cuh" - -#include "param.h" -#include "driver.h" -#include "updater_gpu_common.cuh" -#include "split_evaluator.h" #include "constraints.cuh" -#include "gpu_hist/feature_groups.cuh" -#include "gpu_hist/gradient_based_sampler.cuh" -#include "gpu_hist/row_partitioner.cuh" -#include "gpu_hist/histogram.cuh" +#include "driver.h" #include "gpu_hist/evaluate_splits.cuh" #include "gpu_hist/expand_entry.cuh" +#include "gpu_hist/feature_groups.cuh" +#include "gpu_hist/gradient_based_sampler.cuh" +#include "gpu_hist/histogram.cuh" +#include "gpu_hist/row_partitioner.cuh" +#include "param.h" +#include "split_evaluator.h" +#include "updater_gpu_common.cuh" +#include "xgboost/base.h" +#include "xgboost/context.h" +#include "xgboost/data.h" +#include "xgboost/host_device_vector.h" +#include "xgboost/json.h" +#include 
"xgboost/parameter.h" +#include "xgboost/span.h" #include "xgboost/task.h" #include "xgboost/tree_model.h" @@ -730,7 +729,7 @@ class GPUHistMaker : public TreeUpdater { using GradientSumT = GradientPairPrecise; public: - explicit GPUHistMaker(GenericParameter const* ctx, ObjInfo task) + explicit GPUHistMaker(Context const* ctx, ObjInfo task) : TreeUpdater(ctx), task_{task} {}; void Configure(const Args& args) override { // Used in test to count how many configurations are performed @@ -879,9 +878,7 @@ class GPUHistMaker : public TreeUpdater { #if !defined(GTEST_TEST) XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist") .describe("Grow tree with GPU.") - .set_body([](GenericParameter const* tparam, ObjInfo task) { - return new GPUHistMaker(tparam, task); - }); + .set_body([](Context const* ctx, ObjInfo task) { return new GPUHistMaker(ctx, task); }); #endif // !defined(GTEST_TEST) } // namespace tree diff --git a/src/tree/updater_prune.cc b/src/tree/updater_prune.cc index a2f1a31b1..bec49bf47 100644 --- a/src/tree/updater_prune.cc +++ b/src/tree/updater_prune.cc @@ -20,7 +20,7 @@ DMLC_REGISTRY_FILE_TAG(updater_prune); /*! \brief pruner that prunes a tree after growing finishes */ class TreePruner : public TreeUpdater { public: - explicit TreePruner(GenericParameter const* ctx, ObjInfo task) : TreeUpdater(ctx) { + explicit TreePruner(Context const* ctx, ObjInfo task) : TreeUpdater(ctx) { syncher_.reset(TreeUpdater::Create("sync", ctx_, task)); pruner_monitor_.Init("TreePruner"); } @@ -110,6 +110,6 @@ class TreePruner : public TreeUpdater { XGBOOST_REGISTER_TREE_UPDATER(TreePruner, "prune") .describe("Pruner that prune the tree according to statistics.") - .set_body([](GenericParameter const* ctx, ObjInfo task) { return new TreePruner(ctx, task); }); + .set_body([](Context const* ctx, ObjInfo task) { return new TreePruner(ctx, task); }); } // namespace tree } // namespace xgboost diff --git a/src/tree/updater_quantile_hist.cc b/src/tree/updater_quantile_hist.cc index 1e9d76d4f..525376730 100644 --- a/src/tree/updater_quantile_hist.cc +++ b/src/tree/updater_quantile_hist.cc @@ -335,8 +335,6 @@ void QuantileHistMaker::Builder::InitData(DMatrix *fmat, const RegTree &tree, XGBOOST_REGISTER_TREE_UPDATER(QuantileHistMaker, "grow_quantile_histmaker") .describe("Grow tree using quantized histogram.") - .set_body([](GenericParameter const *ctx, ObjInfo task) { - return new QuantileHistMaker(ctx, task); - }); + .set_body([](Context const *ctx, ObjInfo task) { return new QuantileHistMaker(ctx, task); }); } // namespace tree } // namespace xgboost diff --git a/src/tree/updater_quantile_hist.h b/src/tree/updater_quantile_hist.h index 29bda34d4..dfb9c45b0 100644 --- a/src/tree/updater_quantile_hist.h +++ b/src/tree/updater_quantile_hist.h @@ -85,8 +85,7 @@ inline BatchParam HistBatch(TrainParam const& param) { /*! 
\brief construct a tree using quantized feature values */ class QuantileHistMaker: public TreeUpdater { public: - explicit QuantileHistMaker(GenericParameter const* ctx, ObjInfo task) - : TreeUpdater(ctx), task_{task} {} + explicit QuantileHistMaker(Context const* ctx, ObjInfo task) : TreeUpdater(ctx), task_{task} {} void Configure(const Args& args) override; void Update(HostDeviceVector* gpair, DMatrix* dmat, @@ -120,7 +119,7 @@ class QuantileHistMaker: public TreeUpdater { public: // constructor explicit Builder(const size_t n_trees, const TrainParam& param, DMatrix const* fmat, - ObjInfo task, GenericParameter const* ctx) + ObjInfo task, Context const* ctx) : n_trees_(n_trees), param_(param), p_last_fmat_(fmat), diff --git a/src/tree/updater_refresh.cc b/src/tree/updater_refresh.cc index a70074740..864c704fa 100644 --- a/src/tree/updater_refresh.cc +++ b/src/tree/updater_refresh.cc @@ -24,7 +24,7 @@ DMLC_REGISTRY_FILE_TAG(updater_refresh); /*! \brief pruner that prunes a tree after growing finishs */ class TreeRefresher : public TreeUpdater { public: - explicit TreeRefresher(GenericParameter const *ctx) : TreeUpdater(ctx) {} + explicit TreeRefresher(Context const *ctx) : TreeUpdater(ctx) {} void Configure(const Args &args) override { param_.UpdateAllowUnknown(args); } void LoadConfig(Json const& in) override { auto const& config = get(in); @@ -160,6 +160,6 @@ class TreeRefresher : public TreeUpdater { XGBOOST_REGISTER_TREE_UPDATER(TreeRefresher, "refresh") .describe("Refresher that refreshes the weight and statistics according to data.") - .set_body([](GenericParameter const *ctx, ObjInfo) { return new TreeRefresher(ctx); }); + .set_body([](Context const *ctx, ObjInfo) { return new TreeRefresher(ctx); }); } // namespace tree } // namespace xgboost diff --git a/src/tree/updater_sync.cc b/src/tree/updater_sync.cc index 331a982b1..a3f99362e 100644 --- a/src/tree/updater_sync.cc +++ b/src/tree/updater_sync.cc @@ -24,7 +24,7 @@ DMLC_REGISTRY_FILE_TAG(updater_sync); */ class TreeSyncher : public TreeUpdater { public: - explicit TreeSyncher(GenericParameter const* tparam) : TreeUpdater(tparam) {} + explicit TreeSyncher(Context const* tparam) : TreeUpdater(tparam) {} void Configure(const Args&) override {} void LoadConfig(Json const&) override {} @@ -56,6 +56,6 @@ class TreeSyncher : public TreeUpdater { XGBOOST_REGISTER_TREE_UPDATER(TreeSyncher, "sync") .describe("Syncher that synchronize the tree in all distributed nodes.") - .set_body([](GenericParameter const* tparam, ObjInfo) { return new TreeSyncher(tparam); }); + .set_body([](Context const* ctx, ObjInfo) { return new TreeSyncher(ctx); }); } // namespace tree } // namespace xgboost diff --git a/tests/cpp/common/test_json.cc b/tests/cpp/common/test_json.cc index 919a76dd1..829bbbed0 100644 --- a/tests/cpp/common/test_json.cc +++ b/tests/cpp/common/test_json.cc @@ -590,7 +590,7 @@ TEST(Json, DISABLED_RoundTripExhaustive) { } }; int64_t int32_max = static_cast(std::numeric_limits::max()); - GenericParameter ctx; + Context ctx; common::ParallelFor(int32_max, ctx.Threads(), [&](auto i) { test(static_cast(i)); }); } diff --git a/tests/cpp/common/test_linalg.cc b/tests/cpp/common/test_linalg.cc index 8f4ecb7c8..3da4c482c 100644 --- a/tests/cpp/common/test_linalg.cc +++ b/tests/cpp/common/test_linalg.cc @@ -2,7 +2,7 @@ * Copyright 2021 by XGBoost Contributors */ #include -#include +#include #include #include @@ -13,7 +13,7 @@ namespace xgboost { namespace linalg { namespace { -auto kCpuId = GenericParameter::kCpuId; +auto kCpuId = 
Context::kCpuId; } auto MakeMatrixFromTest(HostDeviceVector *storage, size_t n_rows, size_t n_cols) { diff --git a/tests/cpp/common/test_linalg.cu b/tests/cpp/common/test_linalg.cu index ae0eb28a7..14f89774b 100644 --- a/tests/cpp/common/test_linalg.cu +++ b/tests/cpp/common/test_linalg.cu @@ -4,7 +4,7 @@ #include #include "../../../src/common/linalg_op.cuh" -#include "xgboost/generic_parameters.h" +#include "xgboost/context.h" #include "xgboost/linalg.h" namespace xgboost { @@ -21,7 +21,7 @@ void TestElementWiseKernel() { ASSERT_FALSE(t.CContiguous()); ElementWiseTransformDevice(t, [] __device__(size_t i, float) { return i; }); // CPU view - t = l.View(GenericParameter::kCpuId).Slice(linalg::All(), 1, linalg::All()); + t = l.View(Context::kCpuId).Slice(linalg::All(), 1, linalg::All()); size_t k = 0; for (size_t i = 0; i < l.Shape(0); ++i) { for (size_t j = 0; j < l.Shape(2); ++j) { @@ -41,7 +41,7 @@ void TestElementWiseKernel() { ElementWiseTransformDevice(t, [] XGBOOST_DEVICE(size_t i, float) { return i; }); ASSERT_TRUE(t.CContiguous()); // CPU view - t = l.View(GenericParameter::kCpuId); + t = l.View(Context::kCpuId); size_t ind = 0; for (size_t i = 0; i < l.Shape(0); ++i) { diff --git a/tests/cpp/common/test_stats.cc b/tests/cpp/common/test_stats.cc index 79f38ae6a..99f4c7ee6 100644 --- a/tests/cpp/common/test_stats.cc +++ b/tests/cpp/common/test_stats.cc @@ -2,7 +2,7 @@ * Copyright 2022 by XGBoost Contributors */ #include -#include +#include #include "../../../src/common/stats.h" diff --git a/tests/cpp/common/test_stats.cu b/tests/cpp/common/test_stats.cu index eee92921d..1a56c68b7 100644 --- a/tests/cpp/common/test_stats.cu +++ b/tests/cpp/common/test_stats.cu @@ -2,12 +2,13 @@ * Copyright 2022 by XGBoost Contributors */ #include + #include #include #include "../../../src/common/stats.cuh" #include "xgboost/base.h" -#include "xgboost/generic_parameters.h" +#include "xgboost/context.h" #include "xgboost/host_device_vector.h" #include "xgboost/linalg.h" diff --git a/tests/cpp/data/test_array_interface.cc b/tests/cpp/data/test_array_interface.cc index 5bd771ff0..c36b46b63 100644 --- a/tests/cpp/data/test_array_interface.cc +++ b/tests/cpp/data/test_array_interface.cc @@ -20,7 +20,7 @@ TEST(ArrayInterface, Initialize) { HostDeviceVector u64_storage(storage.Size()); std::string u64_arr_str{ArrayInterfaceStr(linalg::TensorView{ - u64_storage.ConstHostSpan(), {kRows, kCols}, GenericParameter::kCpuId})}; + u64_storage.ConstHostSpan(), {kRows, kCols}, Context::kCpuId})}; std::copy(storage.ConstHostVector().cbegin(), storage.ConstHostVector().cend(), u64_storage.HostSpan().begin()); auto u64_arr = ArrayInterface<2>{u64_arr_str}; diff --git a/tests/cpp/data/test_metainfo.cc b/tests/cpp/data/test_metainfo.cc index c09b95c7e..e71e31c4d 100644 --- a/tests/cpp/data/test_metainfo.cc +++ b/tests/cpp/data/test_metainfo.cc @@ -129,8 +129,8 @@ TEST(MetaInfo, SaveLoadBinary) { EXPECT_EQ(inforead.group_ptr_, info.group_ptr_); EXPECT_EQ(inforead.weights_.HostVector(), info.weights_.HostVector()); - auto orig_margin = info.base_margin_.View(xgboost::GenericParameter::kCpuId); - auto read_margin = inforead.base_margin_.View(xgboost::GenericParameter::kCpuId); + auto orig_margin = info.base_margin_.View(xgboost::Context::kCpuId); + auto read_margin = inforead.base_margin_.View(xgboost::Context::kCpuId); EXPECT_TRUE(std::equal(orig_margin.Values().cbegin(), orig_margin.Values().cend(), read_margin.Values().cbegin())); diff --git a/tests/cpp/data/test_metainfo.cu b/tests/cpp/data/test_metainfo.cu index 
434b63f64..95c8f5f39 100644 --- a/tests/cpp/data/test_metainfo.cu +++ b/tests/cpp/data/test_metainfo.cu @@ -1,13 +1,13 @@ /*! Copyright 2019-2021 by XGBoost Contributors */ #include +#include +#include #include #include -#include -#include -#include "test_array_interface.h" -#include "../../../src/common/device_helpers.cuh" +#include "../../../src/common/device_helpers.cuh" +#include "test_array_interface.h" #include "test_metainfo.h" namespace xgboost { @@ -65,7 +65,7 @@ TEST(MetaInfo, FromInterface) { } info.SetInfo(ctx, "base_margin", str.c_str()); - auto const h_base_margin = info.base_margin_.View(GenericParameter::kCpuId); + auto const h_base_margin = info.base_margin_.View(Context::kCpuId); ASSERT_EQ(h_base_margin.Size(), d_data.size()); for (size_t i = 0; i < d_data.size(); ++i) { ASSERT_EQ(h_base_margin(i), d_data[i]); diff --git a/tests/cpp/data/test_simple_dmatrix.cc b/tests/cpp/data/test_simple_dmatrix.cc index c67c39c0f..22accd93c 100644 --- a/tests/cpp/data/test_simple_dmatrix.cc +++ b/tests/cpp/data/test_simple_dmatrix.cc @@ -256,7 +256,7 @@ TEST(SimpleDMatrix, Slice) { std::iota(upper.begin(), upper.end(), 1.0f); auto& margin = p_m->Info().base_margin_; - margin = decltype(p_m->Info().base_margin_){{kRows, kClasses}, GenericParameter::kCpuId}; + margin = decltype(p_m->Info().base_margin_){{kRows, kClasses}, Context::kCpuId}; std::array ridxs {1, 3, 5}; std::unique_ptr out { p_m->Slice(ridxs) }; @@ -286,8 +286,8 @@ TEST(SimpleDMatrix, Slice) { ASSERT_EQ(p_m->Info().weights_.HostVector().at(ridx), out->Info().weights_.HostVector().at(i)); - auto out_margin = out->Info().base_margin_.View(GenericParameter::kCpuId); - auto in_margin = margin.View(GenericParameter::kCpuId); + auto out_margin = out->Info().base_margin_.View(Context::kCpuId); + auto in_margin = margin.View(Context::kCpuId); for (size_t j = 0; j < kClasses; ++j) { ASSERT_EQ(out_margin(i, j), in_margin(ridx, j)); } @@ -318,7 +318,7 @@ TEST(SimpleDMatrix, SliceCol) { std::iota(upper.begin(), upper.end(), 1.0f); auto& margin = p_m->Info().base_margin_; - margin = decltype(p_m->Info().base_margin_){{kRows, kClasses}, GenericParameter::kCpuId}; + margin = decltype(p_m->Info().base_margin_){{kRows, kClasses}, Context::kCpuId}; size_t constexpr kSlicCols {4}; for (auto slice = 0; slice < 2; slice++) { @@ -348,8 +348,8 @@ TEST(SimpleDMatrix, SliceCol) { out->Info().labels_upper_bound_.HostVector().at(i)); ASSERT_EQ(p_m->Info().weights_.HostVector().at(i), out->Info().weights_.HostVector().at(i)); - auto out_margin = out->Info().base_margin_.View(GenericParameter::kCpuId); - auto in_margin = margin.View(GenericParameter::kCpuId); + auto out_margin = out->Info().base_margin_.View(Context::kCpuId); + auto in_margin = margin.View(Context::kCpuId); for (size_t j = 0; j < kClasses; ++j) { ASSERT_EQ(out_margin(i, j), in_margin(i, j)); } diff --git a/tests/cpp/gbm/test_gblinear.cc b/tests/cpp/gbm/test_gblinear.cc index c53bb08f6..6294e381f 100644 --- a/tests/cpp/gbm/test_gblinear.cc +++ b/tests/cpp/gbm/test_gblinear.cc @@ -7,11 +7,11 @@ #include #include "../helpers.h" -#include "xgboost/json.h" -#include "xgboost/logging.h" +#include "xgboost/context.h" #include "xgboost/gbm.h" -#include "xgboost/generic_parameters.h" +#include "xgboost/json.h" #include "xgboost/learner.h" +#include "xgboost/logging.h" namespace xgboost { namespace gbm { diff --git a/tests/cpp/gbm/test_gbtree.cc b/tests/cpp/gbm/test_gbtree.cc index 13ec23c14..c96b98497 100644 --- a/tests/cpp/gbm/test_gbtree.cc +++ b/tests/cpp/gbm/test_gbtree.cc @@ -2,7 +2,7 @@ * 
Copyright 2019-2022 XGBoost contributors */ #include -#include +#include #include "../../../src/data/adapter.h" #include "../../../src/data/proxy_dmatrix.h" diff --git a/tests/cpp/helpers.cc b/tests/cpp/helpers.cc index bc7fe6bf5..f0f95dc24 100644 --- a/tests/cpp/helpers.cc +++ b/tests/cpp/helpers.cc @@ -532,7 +532,7 @@ std::unique_ptr CreateSparsePageDMatrixWithRC( return dmat; } -gbm::GBTreeModel CreateTestModel(LearnerModelParam const* param, GenericParameter const* ctx, +gbm::GBTreeModel CreateTestModel(LearnerModelParam const* param, Context const* ctx, size_t n_classes) { gbm::GBTreeModel model(param, ctx); @@ -549,13 +549,12 @@ gbm::GBTreeModel CreateTestModel(LearnerModelParam const* param, GenericParamete return model; } -std::unique_ptr CreateTrainedGBM( - std::string name, Args kwargs, size_t kRows, size_t kCols, - LearnerModelParam const* learner_model_param, - GenericParameter const* generic_param) { - auto caches = std::make_shared< PredictionContainer >();; - std::unique_ptr gbm { - GradientBooster::Create(name, generic_param, learner_model_param)}; +std::unique_ptr CreateTrainedGBM(std::string name, Args kwargs, size_t kRows, + size_t kCols, + LearnerModelParam const* learner_model_param, + Context const* ctx) { + auto caches = std::make_shared(); + std::unique_ptr gbm{GradientBooster::Create(name, ctx, learner_model_param)}; gbm->Configure(kwargs); auto p_dmat = RandomDataGenerator(kRows, kCols, 0).GenerateDMatrix(); diff --git a/tests/cpp/helpers.h b/tests/cpp/helpers.h index c7f73495c..6c7ae68d8 100644 --- a/tests/cpp/helpers.h +++ b/tests/cpp/helpers.h @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include @@ -355,18 +355,17 @@ std::unique_ptr CreateSparsePageDMatrixWithRC( size_t n_rows, size_t n_cols, size_t page_size, bool deterministic, const dmlc::TemporaryDirectory& tempdir = dmlc::TemporaryDirectory()); -gbm::GBTreeModel CreateTestModel(LearnerModelParam const* param, GenericParameter const* ctx, +gbm::GBTreeModel CreateTestModel(LearnerModelParam const* param, Context const* ctx, size_t n_classes = 1); -std::unique_ptr CreateTrainedGBM( - std::string name, Args kwargs, size_t kRows, size_t kCols, - LearnerModelParam const* learner_model_param, - GenericParameter const* generic_param); +std::unique_ptr CreateTrainedGBM(std::string name, Args kwargs, size_t kRows, + size_t kCols, + LearnerModelParam const* learner_model_param, + Context const* generic_param); -inline GenericParameter CreateEmptyGenericParam(int gpu_id) { - xgboost::GenericParameter tparam; - std::vector> args { - {"gpu_id", std::to_string(gpu_id)}}; +inline Context CreateEmptyGenericParam(int gpu_id) { + xgboost::Context tparam; + std::vector> args{{"gpu_id", std::to_string(gpu_id)}}; tparam.Init(args); return tparam; } diff --git a/tests/cpp/linear/test_linear.cc b/tests/cpp/linear/test_linear.cc index 779c20940..f76826e4c 100644 --- a/tests/cpp/linear/test_linear.cc +++ b/tests/cpp/linear/test_linear.cc @@ -17,12 +17,12 @@ TEST(Linear, Shotgun) { auto p_fmat = xgboost::RandomDataGenerator(kRows, kCols, 0).GenerateDMatrix(); - auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); LearnerModelParam mparam{MakeMP(kCols, .5, 1)}; { - auto updater = std::unique_ptr( - xgboost::LinearUpdater::Create("shotgun", &lparam)); + auto updater = + std::unique_ptr(xgboost::LinearUpdater::Create("shotgun", &ctx)); updater->Configure({{"eta", "1."}}); xgboost::HostDeviceVector gpair( p_fmat->Info().num_row_, 
xgboost::GradientPair(-5, 1.0)); @@ -31,11 +31,10 @@ TEST(Linear, Shotgun) { updater->Update(&gpair, p_fmat.get(), &model, gpair.Size()); ASSERT_EQ(model.Bias()[0], 5.0f); - } { auto updater = std::unique_ptr( - xgboost::LinearUpdater::Create("shotgun", &lparam)); + xgboost::LinearUpdater::Create("shotgun", &ctx)); EXPECT_ANY_THROW(updater->Configure({{"feature_selector", "random"}})); } } @@ -50,11 +49,11 @@ TEST(Linear, coordinate) { auto p_fmat = xgboost::RandomDataGenerator(kRows, kCols, 0).GenerateDMatrix(); - auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); LearnerModelParam mparam{MakeMP(kCols, .5, 1)}; auto updater = std::unique_ptr( - xgboost::LinearUpdater::Create("coord_descent", &lparam)); + xgboost::LinearUpdater::Create("coord_descent", &ctx)); updater->Configure({{"eta", "1."}}); xgboost::HostDeviceVector gpair( p_fmat->Info().num_row_, xgboost::GradientPair(-5, 1.0)); diff --git a/tests/cpp/metric/test_auc.cc b/tests/cpp/metric/test_auc.cc index ec5ed4c56..321f46cdc 100644 --- a/tests/cpp/metric/test_auc.cc +++ b/tests/cpp/metric/test_auc.cc @@ -5,8 +5,8 @@ namespace xgboost { namespace metric { TEST(Metric, DeclareUnifiedTest(BinaryAUC)) { - auto tparam = xgboost::CreateEmptyGenericParam(GPUIDX); - std::unique_ptr uni_ptr {Metric::Create("auc", &tparam)}; + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); + std::unique_ptr uni_ptr {Metric::Create("auc", &ctx)}; Metric * metric = uni_ptr.get(); ASSERT_STREQ(metric->Name(), "auc"); @@ -49,9 +49,9 @@ TEST(Metric, DeclareUnifiedTest(BinaryAUC)) { } TEST(Metric, DeclareUnifiedTest(MultiClassAUC)) { - auto tparam = CreateEmptyGenericParam(GPUIDX); + auto ctx = CreateEmptyGenericParam(GPUIDX); std::unique_ptr uni_ptr{ - Metric::Create("auc", &tparam)}; + Metric::Create("auc", &ctx)}; auto metric = uni_ptr.get(); // MultiClass @@ -115,8 +115,8 @@ TEST(Metric, DeclareUnifiedTest(MultiClassAUC)) { } TEST(Metric, DeclareUnifiedTest(RankingAUC)) { - auto tparam = CreateEmptyGenericParam(GPUIDX); - std::unique_ptr metric{Metric::Create("auc", &tparam)}; + auto ctx = CreateEmptyGenericParam(GPUIDX); + std::unique_ptr metric{Metric::Create("auc", &ctx)}; // single group EXPECT_NEAR(GetMetricEval(metric.get(), {0.7f, 0.2f, 0.3f, 0.6f}, @@ -153,9 +153,9 @@ TEST(Metric, DeclareUnifiedTest(RankingAUC)) { } TEST(Metric, DeclareUnifiedTest(PRAUC)) { - auto tparam = xgboost::CreateEmptyGenericParam(GPUIDX); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); - xgboost::Metric *metric = xgboost::Metric::Create("aucpr", &tparam); + xgboost::Metric *metric = xgboost::Metric::Create("aucpr", &ctx); ASSERT_STREQ(metric->Name(), "aucpr"); EXPECT_NEAR(GetMetricEval(metric, {0, 0, 1, 1}, {0, 0, 1, 1}), 1, 1e-10); EXPECT_NEAR(GetMetricEval(metric, {0.1f, 0.9f, 0.1f, 0.9f}, {0, 0, 1, 1}), @@ -194,9 +194,9 @@ TEST(Metric, DeclareUnifiedTest(PRAUC)) { } TEST(Metric, DeclareUnifiedTest(MultiClassPRAUC)) { - auto tparam = xgboost::CreateEmptyGenericParam(GPUIDX); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); - std::unique_ptr metric{Metric::Create("aucpr", &tparam)}; + std::unique_ptr metric{Metric::Create("aucpr", &ctx)}; float auc = 0; std::vector labels {1.0f, 0.0f, 2.0f}; @@ -223,9 +223,9 @@ TEST(Metric, DeclareUnifiedTest(MultiClassPRAUC)) { } TEST(Metric, DeclareUnifiedTest(RankingPRAUC)) { - auto tparam = xgboost::CreateEmptyGenericParam(GPUIDX); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); - std::unique_ptr metric{Metric::Create("aucpr", &tparam)}; + 
std::unique_ptr metric{Metric::Create("aucpr", &ctx)}; std::vector labels {1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f}; std::vector groups {0, 2, 6}; diff --git a/tests/cpp/metric/test_elementwise_metric.cc b/tests/cpp/metric/test_elementwise_metric.cc index 6fa7519e0..fde9e42f2 100644 --- a/tests/cpp/metric/test_elementwise_metric.cc +++ b/tests/cpp/metric/test_elementwise_metric.cc @@ -13,8 +13,8 @@ namespace xgboost { namespace { inline void CheckDeterministicMetricElementWise(StringView name, int32_t device) { - auto lparam = CreateEmptyGenericParam(device); - std::unique_ptr metric{Metric::Create(name.c_str(), &lparam)}; + auto ctx = CreateEmptyGenericParam(device); + std::unique_ptr metric{Metric::Create(name.c_str(), &ctx)}; HostDeviceVector predts; size_t n_samples = 2048; @@ -48,8 +48,8 @@ namespace xgboost { namespace metric { TEST(Metric, DeclareUnifiedTest(RMSE)) { - auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX); - xgboost::Metric * metric = xgboost::Metric::Create("rmse", &lparam); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); + xgboost::Metric * metric = xgboost::Metric::Create("rmse", &ctx); metric->Configure({}); ASSERT_STREQ(metric->Name(), "rmse"); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10); @@ -73,8 +73,8 @@ TEST(Metric, DeclareUnifiedTest(RMSE)) { } TEST(Metric, DeclareUnifiedTest(RMSLE)) { - auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX); - xgboost::Metric * metric = xgboost::Metric::Create("rmsle", &lparam); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); + xgboost::Metric * metric = xgboost::Metric::Create("rmsle", &ctx); metric->Configure({}); ASSERT_STREQ(metric->Name(), "rmsle"); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10); @@ -98,8 +98,8 @@ TEST(Metric, DeclareUnifiedTest(RMSLE)) { } TEST(Metric, DeclareUnifiedTest(MAE)) { - auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX); - xgboost::Metric * metric = xgboost::Metric::Create("mae", &lparam); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); + xgboost::Metric * metric = xgboost::Metric::Create("mae", &ctx); metric->Configure({}); ASSERT_STREQ(metric->Name(), "mae"); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10); @@ -123,8 +123,8 @@ TEST(Metric, DeclareUnifiedTest(MAE)) { } TEST(Metric, DeclareUnifiedTest(MAPE)) { - auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX); - xgboost::Metric * metric = xgboost::Metric::Create("mape", &lparam); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); + xgboost::Metric * metric = xgboost::Metric::Create("mape", &ctx); metric->Configure({}); ASSERT_STREQ(metric->Name(), "mape"); EXPECT_NEAR(GetMetricEval(metric, {150, 300}, {100, 200}), 0.5f, 1e-10); @@ -148,8 +148,8 @@ TEST(Metric, DeclareUnifiedTest(MAPE)) { } TEST(Metric, DeclareUnifiedTest(MPHE)) { - auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX); - std::unique_ptr metric{xgboost::Metric::Create("mphe", &lparam)}; + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); + std::unique_ptr metric{xgboost::Metric::Create("mphe", &ctx)}; metric->Configure({}); ASSERT_STREQ(metric->Name(), "mphe"); EXPECT_NEAR(GetMetricEval(metric.get(), {0, 1}, {0, 1}), 0, 1e-10); @@ -179,8 +179,8 @@ TEST(Metric, DeclareUnifiedTest(MPHE)) { } TEST(Metric, DeclareUnifiedTest(LogLoss)) { - auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX); - xgboost::Metric * metric = xgboost::Metric::Create("logloss", &lparam); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); + xgboost::Metric * metric = xgboost::Metric::Create("logloss", &ctx); 
metric->Configure({}); ASSERT_STREQ(metric->Name(), "logloss"); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10); @@ -208,8 +208,8 @@ TEST(Metric, DeclareUnifiedTest(LogLoss)) { } TEST(Metric, DeclareUnifiedTest(Error)) { - auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX); - xgboost::Metric * metric = xgboost::Metric::Create("error", &lparam); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); + xgboost::Metric * metric = xgboost::Metric::Create("error", &ctx); metric->Configure({}); ASSERT_STREQ(metric->Name(), "error"); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-10); @@ -228,16 +228,16 @@ TEST(Metric, DeclareUnifiedTest(Error)) { { 1, 2, 9, 8}), 0.55f, 0.001f); - EXPECT_ANY_THROW(xgboost::Metric::Create("error@abc", &lparam)); + EXPECT_ANY_THROW(xgboost::Metric::Create("error@abc", &ctx)); delete metric; - metric = xgboost::Metric::Create("error@0.5f", &lparam); + metric = xgboost::Metric::Create("error@0.5f", &ctx); metric->Configure({}); EXPECT_STREQ(metric->Name(), "error"); delete metric; - metric = xgboost::Metric::Create("error@0.1", &lparam); + metric = xgboost::Metric::Create("error@0.1", &ctx); metric->Configure({}); ASSERT_STREQ(metric->Name(), "error@0.1"); EXPECT_STREQ(metric->Name(), "error@0.1"); @@ -262,8 +262,8 @@ TEST(Metric, DeclareUnifiedTest(Error)) { } TEST(Metric, DeclareUnifiedTest(PoissionNegLogLik)) { - auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX); - xgboost::Metric * metric = xgboost::Metric::Create("poisson-nloglik", &lparam); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); + xgboost::Metric * metric = xgboost::Metric::Create("poisson-nloglik", &ctx); metric->Configure({}); ASSERT_STREQ(metric->Name(), "poisson-nloglik"); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.5f, 1e-10); diff --git a/tests/cpp/metric/test_metric.cc b/tests/cpp/metric/test_metric.cc index fdb620928..fc5de5747 100644 --- a/tests/cpp/metric/test_metric.cc +++ b/tests/cpp/metric/test_metric.cc @@ -4,16 +4,16 @@ #include "../helpers.h" TEST(Metric, UnknownMetric) { - auto tparam = xgboost::CreateEmptyGenericParam(GPUIDX); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); xgboost::Metric * metric = nullptr; - EXPECT_ANY_THROW(metric = xgboost::Metric::Create("unknown_name", &tparam)); - EXPECT_NO_THROW(metric = xgboost::Metric::Create("rmse", &tparam)); + EXPECT_ANY_THROW(metric = xgboost::Metric::Create("unknown_name", &ctx)); + EXPECT_NO_THROW(metric = xgboost::Metric::Create("rmse", &ctx)); if (metric) { delete metric; } metric = nullptr; - EXPECT_ANY_THROW(metric = xgboost::Metric::Create("unknown_name@1", &tparam)); - EXPECT_NO_THROW(metric = xgboost::Metric::Create("error@0.5f", &tparam)); + EXPECT_ANY_THROW(metric = xgboost::Metric::Create("unknown_name@1", &ctx)); + EXPECT_NO_THROW(metric = xgboost::Metric::Create("error@0.5f", &ctx)); if (metric) { delete metric; } diff --git a/tests/cpp/metric/test_multiclass_metric.cc b/tests/cpp/metric/test_multiclass_metric.cc index a2c4be8fc..7aa8f8e8e 100644 --- a/tests/cpp/metric/test_multiclass_metric.cc +++ b/tests/cpp/metric/test_multiclass_metric.cc @@ -6,8 +6,8 @@ namespace xgboost { inline void CheckDeterministicMetricMultiClass(StringView name, int32_t device) { - auto lparam = CreateEmptyGenericParam(device); - std::unique_ptr metric{Metric::Create(name.c_str(), &lparam)}; + auto ctx = CreateEmptyGenericParam(device); + std::unique_ptr metric{Metric::Create(name.c_str(), &ctx)}; HostDeviceVector predts; MetaInfo info; @@ -43,9 +43,9 @@ inline void 
CheckDeterministicMetricMultiClass(StringView name, int32_t device) } // namespace xgboost inline void TestMultiClassError(int device) { - auto lparam = xgboost::CreateEmptyGenericParam(device); - lparam.gpu_id = device; - xgboost::Metric * metric = xgboost::Metric::Create("merror", &lparam); + auto ctx = xgboost::CreateEmptyGenericParam(device); + ctx.gpu_id = device; + xgboost::Metric * metric = xgboost::Metric::Create("merror", &ctx); metric->Configure({}); ASSERT_STREQ(metric->Name(), "merror"); EXPECT_ANY_THROW(GetMetricEval(metric, {0}, {0, 0})); @@ -64,9 +64,9 @@ TEST(Metric, DeclareUnifiedTest(MultiClassError)) { } inline void TestMultiClassLogLoss(int device) { - auto lparam = xgboost::CreateEmptyGenericParam(device); - lparam.gpu_id = device; - xgboost::Metric * metric = xgboost::Metric::Create("mlogloss", &lparam); + auto ctx = xgboost::CreateEmptyGenericParam(device); + ctx.gpu_id = device; + xgboost::Metric * metric = xgboost::Metric::Create("mlogloss", &ctx); metric->Configure({}); ASSERT_STREQ(metric->Name(), "mlogloss"); EXPECT_ANY_THROW(GetMetricEval(metric, {0}, {0, 0})); diff --git a/tests/cpp/metric/test_rank_metric.cc b/tests/cpp/metric/test_rank_metric.cc index e7eef166d..1edbd9fc8 100644 --- a/tests/cpp/metric/test_rank_metric.cc +++ b/tests/cpp/metric/test_rank_metric.cc @@ -5,9 +5,9 @@ #if !defined(__CUDACC__) TEST(Metric, AMS) { - auto tparam = xgboost::CreateEmptyGenericParam(GPUIDX); - EXPECT_ANY_THROW(xgboost::Metric::Create("ams", &tparam)); - xgboost::Metric * metric = xgboost::Metric::Create("ams@0.5f", &tparam); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); + EXPECT_ANY_THROW(xgboost::Metric::Create("ams", &ctx)); + xgboost::Metric* metric = xgboost::Metric::Create("ams@0.5f", &ctx); ASSERT_STREQ(metric->Name(), "ams@0.5"); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.311f, 0.001f); EXPECT_NEAR(GetMetricEval(metric, @@ -16,7 +16,7 @@ TEST(Metric, AMS) { 0.29710f, 0.001f); delete metric; - metric = xgboost::Metric::Create("ams@0", &tparam); + metric = xgboost::Metric::Create("ams@0", &ctx); ASSERT_STREQ(metric->Name(), "ams@0"); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.311f, 0.001f); @@ -28,8 +28,8 @@ TEST(Metric, DeclareUnifiedTest(Precision)) { // When the limit for precision is not given, it takes the limit at // std::numeric_limits::max(); hence all values are very small // NOTE(AbdealiJK): Maybe this should be fixed to be num_row by default. 
- auto tparam = xgboost::CreateEmptyGenericParam(GPUIDX); - xgboost::Metric * metric = xgboost::Metric::Create("pre", &tparam); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); + xgboost::Metric * metric = xgboost::Metric::Create("pre", &ctx); ASSERT_STREQ(metric->Name(), "pre"); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0, 1e-7); EXPECT_NEAR(GetMetricEval(metric, @@ -38,7 +38,7 @@ TEST(Metric, DeclareUnifiedTest(Precision)) { 0, 1e-7); delete metric; - metric = xgboost::Metric::Create("pre@2", &tparam); + metric = xgboost::Metric::Create("pre@2", &ctx); ASSERT_STREQ(metric->Name(), "pre@2"); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 0.5f, 1e-7); EXPECT_NEAR(GetMetricEval(metric, @@ -52,8 +52,8 @@ TEST(Metric, DeclareUnifiedTest(Precision)) { } TEST(Metric, DeclareUnifiedTest(NDCG)) { - auto tparam = xgboost::CreateEmptyGenericParam(GPUIDX); - xgboost::Metric * metric = xgboost::Metric::Create("ndcg", &tparam); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); + xgboost::Metric * metric = xgboost::Metric::Create("ndcg", &ctx); ASSERT_STREQ(metric->Name(), "ndcg"); EXPECT_ANY_THROW(GetMetricEval(metric, {0, 1}, {})); EXPECT_NEAR(GetMetricEval(metric, @@ -66,7 +66,7 @@ TEST(Metric, DeclareUnifiedTest(NDCG)) { 0.6509f, 0.001f); delete metric; - metric = xgboost::Metric::Create("ndcg@2", &tparam); + metric = xgboost::Metric::Create("ndcg@2", &ctx); ASSERT_STREQ(metric->Name(), "ndcg@2"); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10); EXPECT_NEAR(GetMetricEval(metric, @@ -75,7 +75,7 @@ TEST(Metric, DeclareUnifiedTest(NDCG)) { 0.3868f, 0.001f); delete metric; - metric = xgboost::Metric::Create("ndcg@-", &tparam); + metric = xgboost::Metric::Create("ndcg@-", &ctx); ASSERT_STREQ(metric->Name(), "ndcg-"); EXPECT_NEAR(GetMetricEval(metric, xgboost::HostDeviceVector{}, @@ -86,7 +86,7 @@ TEST(Metric, DeclareUnifiedTest(NDCG)) { { 0, 0, 1, 1}), 0.6509f, 0.001f); delete metric; - metric = xgboost::Metric::Create("ndcg-", &tparam); + metric = xgboost::Metric::Create("ndcg-", &ctx); ASSERT_STREQ(metric->Name(), "ndcg-"); EXPECT_NEAR(GetMetricEval(metric, xgboost::HostDeviceVector{}, @@ -98,7 +98,7 @@ TEST(Metric, DeclareUnifiedTest(NDCG)) { 0.6509f, 0.001f); delete metric; - metric = xgboost::Metric::Create("ndcg@2-", &tparam); + metric = xgboost::Metric::Create("ndcg@2-", &ctx); ASSERT_STREQ(metric->Name(), "ndcg@2-"); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10); EXPECT_NEAR(GetMetricEval(metric, @@ -110,8 +110,8 @@ TEST(Metric, DeclareUnifiedTest(NDCG)) { } TEST(Metric, DeclareUnifiedTest(MAP)) { - auto tparam = xgboost::CreateEmptyGenericParam(GPUIDX); - xgboost::Metric * metric = xgboost::Metric::Create("map", &tparam); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); + xgboost::Metric * metric = xgboost::Metric::Create("map", &ctx); ASSERT_STREQ(metric->Name(), "map"); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10); EXPECT_NEAR(GetMetricEval(metric, @@ -131,21 +131,21 @@ TEST(Metric, DeclareUnifiedTest(MAP)) { 0.8611f, 0.001f); delete metric; - metric = xgboost::Metric::Create("map@-", &tparam); + metric = xgboost::Metric::Create("map@-", &ctx); ASSERT_STREQ(metric->Name(), "map-"); EXPECT_NEAR(GetMetricEval(metric, xgboost::HostDeviceVector{}, {}), 0, 1e-10); delete metric; - metric = xgboost::Metric::Create("map-", &tparam); + metric = xgboost::Metric::Create("map-", &ctx); ASSERT_STREQ(metric->Name(), "map-"); EXPECT_NEAR(GetMetricEval(metric, xgboost::HostDeviceVector{}, {}), 0, 1e-10); delete metric; - metric = 
xgboost::Metric::Create("map@2", &tparam); + metric = xgboost::Metric::Create("map@2", &ctx); ASSERT_STREQ(metric->Name(), "map@2"); EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}), 1, 1e-10); EXPECT_NEAR(GetMetricEval(metric, diff --git a/tests/cpp/metric/test_survival_metric.cu b/tests/cpp/metric/test_survival_metric.cu index 43b4c71a1..0e472008b 100644 --- a/tests/cpp/metric/test_survival_metric.cu +++ b/tests/cpp/metric/test_survival_metric.cu @@ -13,8 +13,8 @@ namespace xgboost { namespace common { namespace { inline void CheckDeterministicMetricElementWise(StringView name, int32_t device) { - auto lparam = CreateEmptyGenericParam(device); - std::unique_ptr metric{Metric::Create(name.c_str(), &lparam)}; + auto ctx = CreateEmptyGenericParam(device); + std::unique_ptr metric{Metric::Create(name.c_str(), &ctx)}; metric->Configure(Args{}); HostDeviceVector predts; @@ -48,7 +48,7 @@ inline void CheckDeterministicMetricElementWise(StringView name, int32_t device) } // anonymous namespace TEST(Metric, DeclareUnifiedTest(AFTNegLogLik)) { - auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); /** * Test aggregate output from the AFT metric over a small test data set. @@ -69,7 +69,7 @@ TEST(Metric, DeclareUnifiedTest(AFTNegLogLik)) { }; for (const auto& test_case : std::vector{ {"normal", 2.1508f}, {"logistic", 2.1804f}, {"extreme", 2.0706f} }) { - std::unique_ptr metric(Metric::Create("aft-nloglik", &lparam)); + std::unique_ptr metric(Metric::Create("aft-nloglik", &ctx)); metric->Configure({ {"aft_loss_distribution", test_case.dist_type}, {"aft_loss_distribution_scale", "1.0"} }); EXPECT_NEAR(metric->Eval(preds, info), test_case.reference_value, 1e-4); @@ -77,7 +77,7 @@ TEST(Metric, DeclareUnifiedTest(AFTNegLogLik)) { } TEST(Metric, DeclareUnifiedTest(IntervalRegressionAccuracy)) { - auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); MetaInfo info; info.num_row_ = 4; @@ -86,7 +86,7 @@ TEST(Metric, DeclareUnifiedTest(IntervalRegressionAccuracy)) { info.weights_.HostVector() = std::vector(); HostDeviceVector preds(4, std::log(60.0f)); - std::unique_ptr metric(Metric::Create("interval-regression-accuracy", &lparam)); + std::unique_ptr metric(Metric::Create("interval-regression-accuracy", &ctx)); EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.75f); info.labels_lower_bound_.HostVector()[2] = 70.0f; EXPECT_FLOAT_EQ(metric->Eval(preds, info), 0.50f); @@ -102,8 +102,8 @@ TEST(Metric, DeclareUnifiedTest(IntervalRegressionAccuracy)) { // Test configuration of AFT metric TEST(AFTNegLogLikMetric, DeclareUnifiedTest(Configuration)) { - auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX); - std::unique_ptr metric(Metric::Create("aft-nloglik", &lparam)); + auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX); + std::unique_ptr metric(Metric::Create("aft-nloglik", &ctx)); metric->Configure({{"aft_loss_distribution", "normal"}, {"aft_loss_distribution_scale", "10"}}); // Configuration round-trip test diff --git a/tests/cpp/objective/test_aft_obj.cc b/tests/cpp/objective/test_aft_obj.cc index 3dc26e89d..232994384 100644 --- a/tests/cpp/objective/test_aft_obj.cc +++ b/tests/cpp/objective/test_aft_obj.cc @@ -16,8 +16,8 @@ namespace xgboost { namespace common { TEST(Objective, DeclareUnifiedTest(AFTObjConfiguration)) { - auto lparam = CreateEmptyGenericParam(GPUIDX); - std::unique_ptr objective(ObjFunction::Create("survival:aft", &lparam)); + auto ctx = CreateEmptyGenericParam(GPUIDX); + 
std::unique_ptr objective(ObjFunction::Create("survival:aft", &ctx)); objective->Configure({ {"aft_loss_distribution", "logistic"}, {"aft_loss_distribution_scale", "5"} }); @@ -77,8 +77,8 @@ static inline void CheckGPairOverGridPoints( } TEST(Objective, DeclareUnifiedTest(AFTObjGPairUncensoredLabels)) { - auto lparam = CreateEmptyGenericParam(GPUIDX); - std::unique_ptr obj(ObjFunction::Create("survival:aft", &lparam)); + auto ctx = CreateEmptyGenericParam(GPUIDX); + std::unique_ptr obj(ObjFunction::Create("survival:aft", &ctx)); CheckGPairOverGridPoints(obj.get(), 100.0f, 100.0f, "normal", { -3.9120f, -3.4013f, -2.8905f, -2.3798f, -1.8691f, -1.3583f, -0.8476f, -0.3368f, 0.1739f, @@ -101,8 +101,8 @@ TEST(Objective, DeclareUnifiedTest(AFTObjGPairUncensoredLabels)) { } TEST(Objective, DeclareUnifiedTest(AFTObjGPairLeftCensoredLabels)) { - auto lparam = CreateEmptyGenericParam(GPUIDX); - std::unique_ptr obj(ObjFunction::Create("survival:aft", &lparam)); + auto ctx = CreateEmptyGenericParam(GPUIDX); + std::unique_ptr obj(ObjFunction::Create("survival:aft", &ctx)); CheckGPairOverGridPoints(obj.get(), 0.0f, 20.0f, "normal", { 0.0285f, 0.0832f, 0.1951f, 0.3804f, 0.6403f, 0.9643f, 1.3379f, 1.7475f, 2.1828f, 2.6361f, @@ -122,8 +122,8 @@ TEST(Objective, DeclareUnifiedTest(AFTObjGPairLeftCensoredLabels)) { } TEST(Objective, DeclareUnifiedTest(AFTObjGPairRightCensoredLabels)) { - auto lparam = CreateEmptyGenericParam(GPUIDX); - std::unique_ptr obj(ObjFunction::Create("survival:aft", &lparam)); + auto ctx = CreateEmptyGenericParam(GPUIDX); + std::unique_ptr obj(ObjFunction::Create("survival:aft", &ctx)); CheckGPairOverGridPoints(obj.get(), 60.0f, std::numeric_limits::infinity(), "normal", { -3.6583f, -3.1815f, -2.7135f, -2.2577f, -1.8190f, -1.4044f, -1.0239f, -0.6905f, -0.4190f, @@ -146,8 +146,8 @@ TEST(Objective, DeclareUnifiedTest(AFTObjGPairRightCensoredLabels)) { } TEST(Objective, DeclareUnifiedTest(AFTObjGPairIntervalCensoredLabels)) { - auto lparam = CreateEmptyGenericParam(GPUIDX); - std::unique_ptr obj(ObjFunction::Create("survival:aft", &lparam)); + auto ctx = CreateEmptyGenericParam(GPUIDX); + std::unique_ptr obj(ObjFunction::Create("survival:aft", &ctx)); CheckGPairOverGridPoints(obj.get(), 16.0f, 200.0f, "normal", { -2.4435f, -1.9965f, -1.5691f, -1.1679f, -0.7990f, -0.4649f, -0.1596f, 0.1336f, 0.4370f, diff --git a/tests/cpp/objective/test_hinge.cc b/tests/cpp/objective/test_hinge.cc index ec54d69aa..8dee0b18e 100644 --- a/tests/cpp/objective/test_hinge.cc +++ b/tests/cpp/objective/test_hinge.cc @@ -1,14 +1,14 @@ // Copyright by Contributors #include -#include +#include #include #include "../helpers.h" TEST(Objective, DeclareUnifiedTest(HingeObj)) { - xgboost::GenericParameter tparam = xgboost::CreateEmptyGenericParam(GPUIDX); + xgboost::Context ctx = xgboost::CreateEmptyGenericParam(GPUIDX); std::unique_ptr obj { - xgboost::ObjFunction::Create("binary:hinge", &tparam) + xgboost::ObjFunction::Create("binary:hinge", &ctx) }; xgboost::bst_float eps = std::numeric_limits::min(); diff --git a/tests/cpp/objective/test_multiclass_obj.cc b/tests/cpp/objective/test_multiclass_obj.cc index 30e06e977..5df9f174c 100644 --- a/tests/cpp/objective/test_multiclass_obj.cc +++ b/tests/cpp/objective/test_multiclass_obj.cc @@ -2,17 +2,17 @@ * Copyright 2018-2019 XGBoost contributors */ #include -#include +#include #include "../../src/common/common.h" #include "../helpers.h" namespace xgboost { TEST(Objective, DeclareUnifiedTest(SoftmaxMultiClassObjGPair)) { - GenericParameter lparam = 
CreateEmptyGenericParam(GPUIDX); + Context ctx = CreateEmptyGenericParam(GPUIDX); std::vector> args {{"num_class", "3"}}; std::unique_ptr obj { - ObjFunction::Create("multi:softmax", &lparam) + ObjFunction::Create("multi:softmax", &ctx) }; obj->Configure(args); @@ -36,11 +36,11 @@ TEST(Objective, DeclareUnifiedTest(SoftmaxMultiClassObjGPair)) { } TEST(Objective, DeclareUnifiedTest(SoftmaxMultiClassBasic)) { - auto lparam = CreateEmptyGenericParam(GPUIDX); + auto ctx = CreateEmptyGenericParam(GPUIDX); std::vector> args{ - std::pair("num_class", "3")}; + std::pair("num_class", "3")}; - std::unique_ptr obj { ObjFunction::Create("multi:softmax", &lparam) }; + std::unique_ptr obj{ObjFunction::Create("multi:softmax", &ctx)}; obj->Configure(args); CheckConfigReload(obj, "multi:softmax"); @@ -57,12 +57,12 @@ TEST(Objective, DeclareUnifiedTest(SoftmaxMultiClassBasic)) { } TEST(Objective, DeclareUnifiedTest(SoftprobMultiClassBasic)) { - GenericParameter lparam = CreateEmptyGenericParam(GPUIDX); + Context ctx = CreateEmptyGenericParam(GPUIDX); std::vector> args { std::pair("num_class", "3")}; std::unique_ptr obj { - ObjFunction::Create("multi:softprob", &lparam) + ObjFunction::Create("multi:softprob", &ctx) }; obj->Configure(args); CheckConfigReload(obj, "multi:softprob"); diff --git a/tests/cpp/objective/test_objective.cc b/tests/cpp/objective/test_objective.cc index fd110deb1..2f13b8bb3 100644 --- a/tests/cpp/objective/test_objective.cc +++ b/tests/cpp/objective/test_objective.cc @@ -1,13 +1,13 @@ // Copyright by Contributors #include +#include #include -#include #include "../helpers.h" TEST(Objective, UnknownFunction) { xgboost::ObjFunction* obj = nullptr; - xgboost::GenericParameter tparam; + xgboost::Context tparam; std::vector> args; tparam.UpdateAllowUnknown(args); @@ -21,7 +21,7 @@ TEST(Objective, UnknownFunction) { namespace xgboost { TEST(Objective, PredTransform) { // Test that show PredTransform uses the same device with predictor. 
- xgboost::GenericParameter tparam; + xgboost::Context tparam; tparam.UpdateAllowUnknown(Args{{"gpu_id", "0"}}); size_t n = 100; diff --git a/tests/cpp/objective/test_ranking_obj.cc b/tests/cpp/objective/test_ranking_obj.cc index 0bd8872e8..a007750e3 100644 --- a/tests/cpp/objective/test_ranking_obj.cc +++ b/tests/cpp/objective/test_ranking_obj.cc @@ -1,18 +1,17 @@ // Copyright by Contributors -#include -#include -#include "../helpers.h" +#include #include +#include + +#include "../helpers.h" namespace xgboost { TEST(Objective, DeclareUnifiedTest(PairwiseRankingGPair)) { std::vector> args; - xgboost::GenericParameter lparam = xgboost::CreateEmptyGenericParam(GPUIDX); + xgboost::Context ctx = xgboost::CreateEmptyGenericParam(GPUIDX); - std::unique_ptr obj { - xgboost::ObjFunction::Create("rank:pairwise", &lparam) - }; + std::unique_ptr obj{xgboost::ObjFunction::Create("rank:pairwise", &ctx)}; obj->Configure(args); CheckConfigReload(obj, "rank:pairwise"); @@ -37,12 +36,10 @@ TEST(Objective, DeclareUnifiedTest(PairwiseRankingGPair)) { } TEST(Objective, DeclareUnifiedTest(NDCG_JsonIO)) { - xgboost::GenericParameter tparam; - tparam.UpdateAllowUnknown(Args{}); + xgboost::Context ctx; + ctx.UpdateAllowUnknown(Args{}); - std::unique_ptr obj { - xgboost::ObjFunction::Create("rank:ndcg", &tparam) - }; + std::unique_ptr obj{xgboost::ObjFunction::Create("rank:ndcg", &ctx)}; obj->Configure(Args{}); Json j_obj {Object()}; @@ -58,11 +55,9 @@ TEST(Objective, DeclareUnifiedTest(NDCG_JsonIO)) { TEST(Objective, DeclareUnifiedTest(PairwiseRankingGPairSameLabels)) { std::vector> args; - xgboost::GenericParameter lparam = xgboost::CreateEmptyGenericParam(GPUIDX); + xgboost::Context ctx = xgboost::CreateEmptyGenericParam(GPUIDX); - std::unique_ptr obj { - ObjFunction::Create("rank:pairwise", &lparam) - }; + std::unique_ptr obj{ObjFunction::Create("rank:pairwise", &ctx)}; obj->Configure(args); // No computation of gradient/hessian, as there is no diversity in labels CheckRankingObjFunction(obj, @@ -78,11 +73,9 @@ TEST(Objective, DeclareUnifiedTest(PairwiseRankingGPairSameLabels)) { TEST(Objective, DeclareUnifiedTest(NDCGRankingGPair)) { std::vector> args; - xgboost::GenericParameter lparam = xgboost::CreateEmptyGenericParam(GPUIDX); + xgboost::Context ctx = xgboost::CreateEmptyGenericParam(GPUIDX); - std::unique_ptr obj { - xgboost::ObjFunction::Create("rank:ndcg", &lparam) - }; + std::unique_ptr obj{xgboost::ObjFunction::Create("rank:ndcg", &ctx)}; obj->Configure(args); CheckConfigReload(obj, "rank:ndcg"); @@ -107,11 +100,9 @@ TEST(Objective, DeclareUnifiedTest(NDCGRankingGPair)) { TEST(Objective, DeclareUnifiedTest(MAPRankingGPair)) { std::vector> args; - xgboost::GenericParameter lparam = xgboost::CreateEmptyGenericParam(GPUIDX); + xgboost::Context ctx = xgboost::CreateEmptyGenericParam(GPUIDX); - std::unique_ptr obj { - xgboost::ObjFunction::Create("rank:map", &lparam) - }; + std::unique_ptr obj{xgboost::ObjFunction::Create("rank:map", &ctx)}; obj->Configure(args); CheckConfigReload(obj, "rank:map"); diff --git a/tests/cpp/objective/test_regression_obj.cc b/tests/cpp/objective/test_regression_obj.cc index a26f69476..3fd6e7867 100644 --- a/tests/cpp/objective/test_regression_obj.cc +++ b/tests/cpp/objective/test_regression_obj.cc @@ -2,7 +2,7 @@ * Copyright 2017-2022 XGBoost contributors */ #include -#include +#include #include #include @@ -12,12 +12,10 @@ namespace xgboost { TEST(Objective, DeclareUnifiedTest(LinearRegressionGPair)) { - GenericParameter tparam = CreateEmptyGenericParam(GPUIDX); + Context ctx 
= CreateEmptyGenericParam(GPUIDX); std::vector> args; - std::unique_ptr obj { - ObjFunction::Create("reg:squarederror", &tparam) - }; + std::unique_ptr obj{ObjFunction::Create("reg:squarederror", &ctx)}; obj->Configure(args); CheckObjFunction(obj, @@ -36,10 +34,10 @@ TEST(Objective, DeclareUnifiedTest(LinearRegressionGPair)) { } TEST(Objective, DeclareUnifiedTest(SquaredLog)) { - GenericParameter tparam = CreateEmptyGenericParam(GPUIDX); + Context ctx = CreateEmptyGenericParam(GPUIDX); std::vector> args; - std::unique_ptr obj { ObjFunction::Create("reg:squaredlogerror", &tparam) }; + std::unique_ptr obj{ObjFunction::Create("reg:squaredlogerror", &ctx)}; obj->Configure(args); CheckConfigReload(obj, "reg:squaredlogerror"); @@ -59,10 +57,10 @@ TEST(Objective, DeclareUnifiedTest(SquaredLog)) { } TEST(Objective, DeclareUnifiedTest(PseudoHuber)) { - GenericParameter tparam = CreateEmptyGenericParam(GPUIDX); + Context ctx = CreateEmptyGenericParam(GPUIDX); Args args; - std::unique_ptr obj{ObjFunction::Create("reg:pseudohubererror", &tparam)}; + std::unique_ptr obj{ObjFunction::Create("reg:pseudohubererror", &ctx)}; obj->Configure(args); CheckConfigReload(obj, "reg:pseudohubererror"); @@ -88,9 +86,9 @@ TEST(Objective, DeclareUnifiedTest(PseudoHuber)) { } TEST(Objective, DeclareUnifiedTest(LogisticRegressionGPair)) { - GenericParameter tparam = CreateEmptyGenericParam(GPUIDX); + Context ctx = CreateEmptyGenericParam(GPUIDX); std::vector> args; - std::unique_ptr obj { ObjFunction::Create("reg:logistic", &tparam) }; + std::unique_ptr obj{ObjFunction::Create("reg:logistic", &ctx)}; obj->Configure(args); CheckConfigReload(obj, "reg:logistic"); @@ -104,11 +102,9 @@ TEST(Objective, DeclareUnifiedTest(LogisticRegressionGPair)) { } TEST(Objective, DeclareUnifiedTest(LogisticRegressionBasic)) { - GenericParameter lparam = CreateEmptyGenericParam(GPUIDX); + Context ctx = CreateEmptyGenericParam(GPUIDX); std::vector> args; - std::unique_ptr obj { - ObjFunction::Create("reg:logistic", &lparam) - }; + std::unique_ptr obj{ObjFunction::Create("reg:logistic", &ctx)}; obj->Configure(args); CheckConfigReload(obj, "reg:logistic"); @@ -135,10 +131,10 @@ TEST(Objective, DeclareUnifiedTest(LogisticRegressionBasic)) { } TEST(Objective, DeclareUnifiedTest(LogisticRawGPair)) { - GenericParameter lparam = CreateEmptyGenericParam(GPUIDX); + Context ctx = CreateEmptyGenericParam(GPUIDX); std::vector> args; std::unique_ptr obj { - ObjFunction::Create("binary:logitraw", &lparam) + ObjFunction::Create("binary:logitraw", &ctx) }; obj->Configure(args); @@ -151,10 +147,10 @@ TEST(Objective, DeclareUnifiedTest(LogisticRawGPair)) { } TEST(Objective, DeclareUnifiedTest(PoissonRegressionGPair)) { - GenericParameter lparam = CreateEmptyGenericParam(GPUIDX); + Context ctx = CreateEmptyGenericParam(GPUIDX); std::vector> args; std::unique_ptr obj { - ObjFunction::Create("count:poisson", &lparam) + ObjFunction::Create("count:poisson", &ctx) }; args.emplace_back(std::make_pair("max_delta_step", "0.1f")); @@ -175,10 +171,10 @@ TEST(Objective, DeclareUnifiedTest(PoissonRegressionGPair)) { } TEST(Objective, DeclareUnifiedTest(PoissonRegressionBasic)) { - GenericParameter lparam = CreateEmptyGenericParam(GPUIDX); + Context ctx = CreateEmptyGenericParam(GPUIDX); std::vector> args; std::unique_ptr obj { - ObjFunction::Create("count:poisson", &lparam) + ObjFunction::Create("count:poisson", &ctx) }; obj->Configure(args); @@ -204,10 +200,10 @@ TEST(Objective, DeclareUnifiedTest(PoissonRegressionBasic)) { } TEST(Objective, 
DeclareUnifiedTest(GammaRegressionGPair)) { - GenericParameter lparam = CreateEmptyGenericParam(GPUIDX); + Context ctx = CreateEmptyGenericParam(GPUIDX); std::vector> args; std::unique_ptr obj { - ObjFunction::Create("reg:gamma", &lparam) + ObjFunction::Create("reg:gamma", &ctx) }; obj->Configure(args); @@ -226,11 +222,9 @@ TEST(Objective, DeclareUnifiedTest(GammaRegressionGPair)) { } TEST(Objective, DeclareUnifiedTest(GammaRegressionBasic)) { - GenericParameter lparam = CreateEmptyGenericParam(GPUIDX); + Context ctx = CreateEmptyGenericParam(GPUIDX); std::vector> args; - std::unique_ptr obj { - ObjFunction::Create("reg:gamma", &lparam) - }; + std::unique_ptr obj{ObjFunction::Create("reg:gamma", &ctx)}; obj->Configure(args); CheckConfigReload(obj, "reg:gamma"); @@ -257,11 +251,9 @@ TEST(Objective, DeclareUnifiedTest(GammaRegressionBasic)) { } TEST(Objective, DeclareUnifiedTest(TweedieRegressionGPair)) { - GenericParameter lparam = CreateEmptyGenericParam(GPUIDX); + Context ctx = CreateEmptyGenericParam(GPUIDX); std::vector> args; - std::unique_ptr obj { - ObjFunction::Create("reg:tweedie", &lparam) - }; + std::unique_ptr obj{ObjFunction::Create("reg:tweedie", &ctx)}; args.emplace_back(std::make_pair("tweedie_variance_power", "1.1f")); obj->Configure(args); @@ -283,10 +275,9 @@ TEST(Objective, DeclareUnifiedTest(TweedieRegressionGPair)) { #if defined(__CUDACC__) TEST(Objective, CPU_vs_CUDA) { - GenericParameter lparam = CreateEmptyGenericParam(GPUIDX); + Context ctx = CreateEmptyGenericParam(GPUIDX); - ObjFunction * obj = - ObjFunction::Create("reg:squarederror", &lparam); + ObjFunction* obj = ObjFunction::Create("reg:squarederror", &ctx); HostDeviceVector cpu_out_preds; HostDeviceVector cuda_out_preds; @@ -309,12 +300,12 @@ TEST(Objective, CPU_vs_CUDA) { { // CPU - lparam.gpu_id = -1; + ctx.gpu_id = -1; obj->GetGradient(preds, info, 0, &cpu_out_preds); } { // CUDA - lparam.gpu_id = 0; + ctx.gpu_id = 0; obj->GetGradient(preds, info, 0, &cuda_out_preds); } @@ -335,11 +326,9 @@ TEST(Objective, CPU_vs_CUDA) { #endif TEST(Objective, DeclareUnifiedTest(TweedieRegressionBasic)) { - GenericParameter lparam = CreateEmptyGenericParam(GPUIDX); + Context ctx = CreateEmptyGenericParam(GPUIDX); std::vector> args; - std::unique_ptr obj { - ObjFunction::Create("reg:tweedie", &lparam) - }; + std::unique_ptr obj{ObjFunction::Create("reg:tweedie", &ctx)}; obj->Configure(args); CheckConfigReload(obj, "reg:tweedie"); @@ -366,11 +355,9 @@ TEST(Objective, DeclareUnifiedTest(TweedieRegressionBasic)) { // CoxRegression not implemented in GPU code, no need for testing. 
 #if !defined(__CUDACC__)
 TEST(Objective, CoxRegressionGPair) {
-  GenericParameter lparam = CreateEmptyGenericParam(GPUIDX);
+  Context ctx = CreateEmptyGenericParam(GPUIDX);
   std::vector> args;
-  std::unique_ptr obj {
-    ObjFunction::Create("survival:cox", &lparam)
-  };
+  std::unique_ptr obj{ObjFunction::Create("survival:cox", &ctx)};
   obj->Configure(args);
   CheckObjFunction(obj,
diff --git a/tests/cpp/plugin/test_example_objective.cc b/tests/cpp/plugin/test_example_objective.cc
index aa4ac7be2..69d52075f 100644
--- a/tests/cpp/plugin/test_example_objective.cc
+++ b/tests/cpp/plugin/test_example_objective.cc
@@ -6,8 +6,8 @@
 namespace xgboost {
 TEST(Plugin, ExampleObjective) {
-  xgboost::GenericParameter tparam = CreateEmptyGenericParam(GPUIDX);
-  auto * obj = xgboost::ObjFunction::Create("mylogistic", &tparam);
+  xgboost::Context ctx = CreateEmptyGenericParam(GPUIDX);
+  auto* obj = xgboost::ObjFunction::Create("mylogistic", &ctx);
   ASSERT_EQ(obj->DefaultEvalMetric(), std::string{"logloss"});
   delete obj;
 }
diff --git a/tests/cpp/plugin/test_regression_obj_oneapi.cc b/tests/cpp/plugin/test_regression_obj_oneapi.cc
index d5ee44bed..63944beca 100755
--- a/tests/cpp/plugin/test_regression_obj_oneapi.cc
+++ b/tests/cpp/plugin/test_regression_obj_oneapi.cc
@@ -3,13 +3,13 @@
  */
 #include
 #include
-#include
+#include
 #include
 #include "../helpers.h"
 namespace xgboost {
 TEST(Plugin, LinearRegressionGPairOneAPI) {
-  GenericParameter tparam = CreateEmptyGenericParam(0);
+  Context tparam = CreateEmptyGenericParam(0);
   std::vector> args;
   std::unique_ptr obj {
@@ -33,7 +33,7 @@ TEST(Plugin, LinearRegressionGPairOneAPI) {
 }
 TEST(Plugin, SquaredLogOneAPI) {
-  GenericParameter tparam = CreateEmptyGenericParam(0);
+  Context tparam = CreateEmptyGenericParam(0);
   std::vector> args;
   std::unique_ptr obj { ObjFunction::Create("reg:squaredlogerror_oneapi", &tparam) };
@@ -56,7 +56,7 @@ TEST(Plugin, SquaredLogOneAPI) {
 }
 TEST(Plugin, LogisticRegressionGPairOneAPI) {
-  GenericParameter tparam = CreateEmptyGenericParam(0);
+  Context tparam = CreateEmptyGenericParam(0);
   std::vector> args;
   std::unique_ptr obj { ObjFunction::Create("reg:logistic_oneapi", &tparam) };
@@ -72,7 +72,7 @@ TEST(Plugin, LogisticRegressionGPairOneAPI) {
 }
 TEST(Plugin, LogisticRegressionBasicOneAPI) {
-  GenericParameter lparam = CreateEmptyGenericParam(0);
+  Context lparam = CreateEmptyGenericParam(0);
   std::vector> args;
   std::unique_ptr obj {
     ObjFunction::Create("reg:logistic_oneapi", &lparam)
@@ -103,7 +103,7 @@ TEST(Plugin, LogisticRegressionBasicOneAPI) {
 }
 TEST(Plugin, LogisticRawGPairOneAPI) {
-  GenericParameter lparam = CreateEmptyGenericParam(0);
+  Context lparam = CreateEmptyGenericParam(0);
   std::vector> args;
   std::unique_ptr obj {
     ObjFunction::Create("binary:logitraw_oneapi", &lparam)
@@ -120,12 +120,12 @@ TEST(Plugin, LogisticRawGPairOneAPI) {
 }
 TEST(Plugin, CPUvsOneAPI) {
-  GenericParameter lparam = CreateEmptyGenericParam(0);
+  Context ctx = CreateEmptyGenericParam(0);
   ObjFunction * obj_cpu =
-      ObjFunction::Create("reg:squarederror", &lparam);
+      ObjFunction::Create("reg:squarederror", &ctx);
   ObjFunction * obj_oneapi =
-      ObjFunction::Create("reg:squarederror_oneapi", &lparam);
+      ObjFunction::Create("reg:squarederror_oneapi", &ctx);
   HostDeviceVector cpu_out_preds;
   HostDeviceVector oneapi_out_preds;
@@ -148,12 +148,12 @@ TEST(Plugin, CPUvsOneAPI) {
   {
     // CPU
-    lparam.gpu_id = -1;
+    ctx.gpu_id = -1;
     obj_cpu->GetGradient(preds, info, 0, &cpu_out_preds);
   }
   {
     // oneapi
-    lparam.gpu_id = 0;
+    ctx.gpu_id = 0;
     obj_oneapi->GetGradient(preds, info, 0,
                             &oneapi_out_preds);
   }
diff --git a/tests/cpp/predictor/test_cpu_predictor.cc b/tests/cpp/predictor/test_cpu_predictor.cc
index 137cb36fe..1b2eb374e 100644
--- a/tests/cpp/predictor/test_cpu_predictor.cc
+++ b/tests/cpp/predictor/test_cpu_predictor.cc
@@ -23,7 +23,7 @@ TEST(CpuPredictor, Basic) {
   LearnerModelParam mparam{MakeMP(kCols, .0, 1)};
-  GenericParameter ctx;
+  Context ctx;
   ctx.UpdateAllowUnknown(Args{});
   gbm::GBTreeModel model = CreateTestModel(&mparam, &ctx);
@@ -103,7 +103,7 @@ TEST(CpuPredictor, ExternalMemory) {
   LearnerModelParam mparam{MakeMP(dmat->Info().num_col_, .0, 1)};
-  GenericParameter ctx;
+  Context ctx;
   ctx.UpdateAllowUnknown(Args{});
   gbm::GBTreeModel model = CreateTestModel(&mparam, &ctx);
diff --git a/tests/cpp/predictor/test_predictor.cc b/tests/cpp/predictor/test_predictor.cc
index 64d2b9a81..3e8a94c75 100644
--- a/tests/cpp/predictor/test_predictor.cc
+++ b/tests/cpp/predictor/test_predictor.cc
@@ -5,8 +5,8 @@
 #include "test_predictor.h"
 #include
+#include
 #include
-#include
 #include
 #include
@@ -26,7 +26,7 @@ TEST(Predictor, PredictionCache) {
   // Add a cache that is immediately expired.
   auto add_cache = [&]() {
     auto p_dmat = RandomDataGenerator(kRows, kCols, 0).GenerateDMatrix();
-    container.Cache(p_dmat, GenericParameter::kCpuId);
+    container.Cache(p_dmat, Context::kCpuId);
     m = p_dmat.get();
   };
@@ -216,7 +216,7 @@ void TestCategoricalPrediction(std::string name) {
   float left_weight = 1.3f;
   float right_weight = 1.7f;
-  GenericParameter ctx;
+  Context ctx;
   ctx.UpdateAllowUnknown(Args{});
   gbm::GBTreeModel model(&mparam, &ctx);
   GBTreeModelForTest(&model, split_ind, split_cat, left_weight, right_weight);
@@ -257,7 +257,7 @@ void TestCategoricalPredictLeaf(StringView name) {
   float left_weight = 1.3f;
   float right_weight = 1.7f;
-  GenericParameter ctx;
+  Context ctx;
   ctx.UpdateAllowUnknown(Args{});
   gbm::GBTreeModel model(&mparam, &ctx);
diff --git a/tests/cpp/predictor/test_predictor.h b/tests/cpp/predictor/test_predictor.h
index 81ee249e2..61b05b31b 100644
--- a/tests/cpp/predictor/test_predictor.h
+++ b/tests/cpp/predictor/test_predictor.h
@@ -19,7 +19,7 @@ void TestPredictionFromGradientIndex(std::string name, size_t rows, size_t cols,
       std::unique_ptr(Predictor::Create(name, &lparam));
   predictor->Configure({});
-  GenericParameter ctx;
+  Context ctx;
   ctx.UpdateAllowUnknown(Args{});
   gbm::GBTreeModel model = CreateTestModel(&mparam, &ctx, kClasses);
diff --git a/tests/cpp/test_learner.cc b/tests/cpp/test_learner.cc
index 5090fb57c..fc3fa17ac 100644
--- a/tests/cpp/test_learner.cc
+++ b/tests/cpp/test_learner.cc
@@ -363,7 +363,7 @@ TEST(Learner, ConstantSeed) {
   CHECK_NE(v_0, v_1);
   {
-    rng.seed(GenericParameter::kDefaultSeed);
+    rng.seed(Context::kDefaultSeed);
     std::uniform_real_distribution dist;
     float v_2 = dist(rng);
     CHECK_EQ(v_0, v_2);
diff --git a/tests/cpp/tree/gpu_hist/test_row_partitioner.cu b/tests/cpp/tree/gpu_hist/test_row_partitioner.cu
index fb9e03c35..f82123452 100644
--- a/tests/cpp/tree/gpu_hist/test_row_partitioner.cu
+++ b/tests/cpp/tree/gpu_hist/test_row_partitioner.cu
@@ -2,17 +2,17 @@
  * Copyright 2019-2022 by XGBoost Contributors
  */
 #include
-#include
-#include
-
 #include
 #include
 #include
+#include
+#include
+
 #include "../../../../src/tree/gpu_hist/row_partitioner.cuh"
 #include "../../helpers.h"
 #include "xgboost/base.h"
-#include "xgboost/generic_parameters.h"
+#include "xgboost/context.h"
 #include "xgboost/task.h"
 #include "xgboost/tree_model.h"
diff --git a/tests/cpp/tree/test_approx.cc b/tests/cpp/tree/test_approx.cc
index ba8d6f129..0b2d95100 100644
--- a/tests/cpp/tree/test_approx.cc
+++ b/tests/cpp/tree/test_approx.cc
@@ -12,7 +12,7 @@ namespace xgboost {
 namespace tree {
 TEST(Approx, Partitioner) {
   size_t n_samples = 1024, n_features = 1, base_rowid = 0;
-  GenericParameter ctx;
+  Context ctx;
   CommonRowPartitioner partitioner{&ctx, n_samples, base_rowid};
   ASSERT_EQ(partitioner.base_rowid, base_rowid);
   ASSERT_EQ(partitioner.Size(), 1);
@@ -69,7 +69,7 @@ TEST(Approx, Partitioner) {
 namespace {
 void TestLeafPartition(size_t n_samples) {
   size_t const n_features = 2, base_rowid = 0;
-  GenericParameter ctx;
+  Context ctx;
   common::RowSetCollection row_set;
   CommonRowPartitioner partitioner{&ctx, n_samples, base_rowid};
diff --git a/tests/cpp/tree/test_gpu_hist.cu b/tests/cpp/tree/test_gpu_hist.cu
index 989966b96..1758c872f 100644
--- a/tests/cpp/tree/test_gpu_hist.cu
+++ b/tests/cpp/tree/test_gpu_hist.cu
@@ -18,7 +18,7 @@
 #include "../filesystem.h"  // dmlc::TemporaryDirectory
 #include "../helpers.h"
 #include "../histogram_helpers.h"
-#include "xgboost/generic_parameters.h"
+#include "xgboost/context.h"
 #include "xgboost/json.h"
 namespace xgboost {
@@ -170,9 +170,9 @@ void TestHistogramIndexImpl() {
   // Build 2 matrices and build a histogram maker with that
-  GenericParameter generic_param(CreateEmptyGenericParam(0));
-  tree::GPUHistMaker hist_maker{&generic_param,ObjInfo{ObjInfo::kRegression}},
-      hist_maker_ext{&generic_param,ObjInfo{ObjInfo::kRegression}};
+  Context ctx(CreateEmptyGenericParam(0));
+  tree::GPUHistMaker hist_maker{&ctx, ObjInfo{ObjInfo::kRegression}},
+      hist_maker_ext{&ctx, ObjInfo{ObjInfo::kRegression}};
   std::unique_ptr hist_maker_dmat(
       CreateSparsePageDMatrixWithRC(kNRows, kNCols, 0, true));
@@ -239,8 +239,8 @@ void UpdateTree(HostDeviceVector* gpair, DMatrix* dmat,
     {"sampling_method", sampling_method},
   };
-  GenericParameter generic_param(CreateEmptyGenericParam(0));
-  tree::GPUHistMaker hist_maker{&generic_param,ObjInfo{ObjInfo::kRegression}};
+  Context ctx(CreateEmptyGenericParam(0));
+  tree::GPUHistMaker hist_maker{&ctx,ObjInfo{ObjInfo::kRegression}};
   hist_maker.Configure(args);
   std::vector> position(1);
@@ -384,9 +384,9 @@ TEST(GpuHist, ExternalMemoryWithSampling) {
 }
 TEST(GpuHist, ConfigIO) {
-  GenericParameter generic_param(CreateEmptyGenericParam(0));
+  Context ctx(CreateEmptyGenericParam(0));
   std::unique_ptr updater{
-      TreeUpdater::Create("grow_gpu_hist", &generic_param, ObjInfo{ObjInfo::kRegression})};
+      TreeUpdater::Create("grow_gpu_hist", &ctx, ObjInfo{ObjInfo::kRegression})};
   updater->Configure(Args{});
   Json j_updater { Object() };
@@ -404,7 +404,7 @@ TEST(GpuHist, ConfigIO) {
 }
 TEST(GpuHist, MaxDepth) {
-  GenericParameter generic_param(CreateEmptyGenericParam(0));
+  Context ctx(CreateEmptyGenericParam(0));
   size_t constexpr kRows = 16;
   size_t constexpr kCols = 4;
   auto p_mat = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix();
diff --git a/tests/cpp/tree/test_prediction_cache.cc b/tests/cpp/tree/test_prediction_cache.cc
index 3e30e0699..a6677ad02 100644
--- a/tests/cpp/tree/test_prediction_cache.cc
+++ b/tests/cpp/tree/test_prediction_cache.cc
@@ -63,12 +63,12 @@ class TestPredictionCache : public ::testing::Test {
   void RunTest(std::string updater_name) {
     {
       omp_set_num_threads(1);
-      GenericParameter ctx;
+      Context ctx;
       ctx.InitAllowUnknown(Args{{"nthread", "8"}});
       if (updater_name == "grow_gpu_hist") {
         ctx.gpu_id = 0;
       } else {
-        ctx.gpu_id = GenericParameter::kCpuId;
+        ctx.gpu_id = Context::kCpuId;
       }
       std::unique_ptr updater{
@@ -82,7 +82,7 @@ class TestPredictionCache : public ::testing::Test {
       HostDeviceVector out_prediction_cached;
       out_prediction_cached.SetDevice(ctx.gpu_id);
       out_prediction_cached.Resize(n_samples_);
-      auto cache = linalg::VectorView{ctx.gpu_id == GenericParameter::kCpuId
+      auto cache = linalg::VectorView{ctx.gpu_id == Context::kCpuId
                                           ? out_prediction_cached.HostSpan()
                                           : out_prediction_cached.DeviceSpan(),
                                       {out_prediction_cached.Size()},
diff --git a/tests/cpp/tree/test_prune.cc b/tests/cpp/tree/test_prune.cc
index 77f78b139..52fa58a2d 100644
--- a/tests/cpp/tree/test_prune.cc
+++ b/tests/cpp/tree/test_prune.cc
@@ -31,7 +31,7 @@ TEST(Updater, Prune) {
   std::shared_ptr p_dmat {
     RandomDataGenerator{32, 10, 0}.GenerateDMatrix() };
-  auto lparam = CreateEmptyGenericParam(GPUIDX);
+  auto ctx = CreateEmptyGenericParam(GPUIDX);
   // prepare tree
   RegTree tree = RegTree();
@@ -39,7 +39,7 @@ TEST(Updater, Prune) {
   std::vector trees {&tree};
   // prepare pruner
   std::unique_ptr pruner(
-      TreeUpdater::Create("prune", &lparam, ObjInfo{ObjInfo::kRegression}));
+      TreeUpdater::Create("prune", &ctx, ObjInfo{ObjInfo::kRegression}));
   pruner->Configure(cfg);
   // loss_chg < min_split_loss;
diff --git a/tests/cpp/tree/test_quantile_hist.cc b/tests/cpp/tree/test_quantile_hist.cc
index 222339aae..23cb868ee 100644
--- a/tests/cpp/tree/test_quantile_hist.cc
+++ b/tests/cpp/tree/test_quantile_hist.cc
@@ -20,7 +20,7 @@ namespace xgboost {
 namespace tree {
 TEST(QuantileHist, Partitioner) {
   size_t n_samples = 1024, n_features = 1, base_rowid = 0;
-  GenericParameter ctx;
+  Context ctx;
   ctx.InitAllowUnknown(Args{});
   CommonRowPartitioner partitioner{&ctx, n_samples, base_rowid};
diff --git a/tests/cpp/tree/test_refresh.cc b/tests/cpp/tree/test_refresh.cc
index f0abd0a87..953d2eea4 100644
--- a/tests/cpp/tree/test_refresh.cc
+++ b/tests/cpp/tree/test_refresh.cc
@@ -29,11 +29,11 @@ TEST(Updater, Refresh) {
     {"reg_lambda", "1"}};
   RegTree tree = RegTree();
-  auto lparam = CreateEmptyGenericParam(GPUIDX);
+  auto ctx = CreateEmptyGenericParam(GPUIDX);
   tree.param.UpdateAllowUnknown(cfg);
-  std::vector trees {&tree};
+  std::vector trees{&tree};
   std::unique_ptr refresher(
-      TreeUpdater::Create("refresh", &lparam, ObjInfo{ObjInfo::kRegression}));
+      TreeUpdater::Create("refresh", &ctx, ObjInfo{ObjInfo::kRegression}));
   tree.ExpandNode(0, 2, 0.2f, false, 0.0, 0.2f, 0.8f, 0.0f, 0.0f,
                   /*left_sum=*/0.0f, /*right_sum=*/0.0f);
diff --git a/tests/cpp/tree/test_tree_stat.cc b/tests/cpp/tree/test_tree_stat.cc
index 1a4ee5acb..5b52534c1 100644
--- a/tests/cpp/tree/test_tree_stat.cc
+++ b/tests/cpp/tree/test_tree_stat.cc
@@ -67,8 +67,8 @@ class UpdaterEtaTest : public ::testing::Test {
   }
   void RunTest(std::string updater) {
-    GenericParameter ctx(updater == "grow_gpu_hist" ? CreateEmptyGenericParam(0)
-                                                    : CreateEmptyGenericParam(Context::kCpuId));
+    Context ctx(updater == "grow_gpu_hist" ? CreateEmptyGenericParam(0)
+                                           : CreateEmptyGenericParam(Context::kCpuId));
     float eta = 0.4;
     auto up_0 = std::unique_ptr{
         TreeUpdater::Create(updater, &ctx, ObjInfo{ObjInfo::kClassification})};
@@ -140,9 +140,8 @@ class TestMinSplitLoss : public ::testing::Test {
         // test gamma
         {"gamma", std::to_string(gamma)}};
-    std::cout << "updater:" << updater << std::endl;
-    GenericParameter ctx(updater == "grow_gpu_hist" ? CreateEmptyGenericParam(0)
-                                                    : CreateEmptyGenericParam(Context::kCpuId));
+    Context ctx(updater == "grow_gpu_hist" ? CreateEmptyGenericParam(0)
+                                           : CreateEmptyGenericParam(Context::kCpuId));
     std::cout << ctx.gpu_id << std::endl;
     auto up = std::unique_ptr{
         TreeUpdater::Create(updater, &ctx, ObjInfo{ObjInfo::kRegression})};
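Taken together, the tree-updater hunks funnel through one construction sequence; a minimal sketch of it follows (not part of the patch; the header paths, the "prune" updater choice, and the thread count are assumptions drawn from the hunks above):

#include <xgboost/context.h>       // renamed header introduced by this patch
#include <xgboost/task.h>          // ObjInfo, as included by the updated tests
#include <xgboost/tree_updater.h>  // assumed public header declaring TreeUpdater

#include <memory>

namespace xgboost {
// Sketch only: configure a Context once, pick the device via gpu_id, then pass the
// Context plus the task description to the updater factory, as the tests above do.
inline void SketchUpdaterWithContext(bool use_gpu) {
  Context ctx;
  ctx.InitAllowUnknown(Args{{"nthread", "8"}});
  ctx.gpu_id = use_gpu ? 0 : Context::kCpuId;  // kCpuId == -1
  std::unique_ptr<TreeUpdater> updater{
      TreeUpdater::Create("prune", &ctx, ObjInfo{ObjInfo::kRegression})};
  updater->Configure(Args{});
}
}  // namespace xgboost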