Offload some configurations into GBM. (#4553)

This is part 1 of refactoring configuration.

* Move tree heuristic configurations.
* Split up declarations and definitions for GBTree.
* Implement UseGPU in gbm.
This commit is contained in:
Jiaming Yuan
2019-06-14 09:18:51 +08:00
committed by GitHub
parent a2042b685a
commit c5719cc457
12 changed files with 630 additions and 519 deletions

View File

@@ -146,6 +146,10 @@ class GradientBooster {
virtual std::vector<std::string> DumpModel(const FeatureMap& fmap,
bool with_stats,
std::string format) const = 0;
/*!
* \brief Whether the current booster uses GPU.
*/
virtual bool UseGPU() const = 0;
/*!
* \brief create a gradient booster from given name
* \param name name of gradient booster

View File

@@ -8,18 +8,15 @@
#include <dmlc/parameter.h>
#include <xgboost/enum_class_param.h>
#include <string>
namespace xgboost {
// Tree construction algorithm; selected via the `tree_method` training
// parameter (see the DMLC_DECLARE_FIELD(tree_method) registration below,
// which maps the strings "auto"/"approx"/"exact"/"hist"/"gpu_exact"/
// "gpu_hist" onto these enumerators).
enum class TreeMethod : int {
kAuto = 0, kApprox = 1, kExact = 2, kHist = 3,
kGPUExact = 4, kGPUHist = 5
};
// How training data is partitioned across workers in distributed
// training; selected via the `dsplit` parameter ("auto"/"col"/"row").
enum class DataSplitMode : int {
kAuto = 0, kCol = 1, kRow = 2
};
} // namespace xgboost
// Register the enum classes with DMLC's parameter system so that
// dmlc::Parameter fields of these types (tree_method, dsplit) can be
// declared and parsed from their string values. Must appear at global
// scope, outside namespace xgboost.
DECLARE_FIELD_ENUM_CLASS(xgboost::TreeMethod);
DECLARE_FIELD_ENUM_CLASS(xgboost::DataSplitMode);
namespace xgboost {
@@ -30,8 +27,6 @@ struct LearnerTrainParam : public dmlc::Parameter<LearnerTrainParam> {
bool seed_per_iteration;
// data split mode, can be auto, col, or row.
DataSplitMode dsplit;
// tree construction method
TreeMethod tree_method;
// number of threads to use if OpenMP is enabled
// if equals 0, use system default
int nthread;
@@ -42,6 +37,8 @@ struct LearnerTrainParam : public dmlc::Parameter<LearnerTrainParam> {
// number of devices to use, -1 implies using all available devices.
int n_gpus;
std::string booster;
// declare parameters
DMLC_DECLARE_PARAMETER(LearnerTrainParam) {
DMLC_DECLARE_FIELD(seed).set_default(0).describe(
@@ -58,15 +55,6 @@ struct LearnerTrainParam : public dmlc::Parameter<LearnerTrainParam> {
.add_enum("col", DataSplitMode::kCol)
.add_enum("row", DataSplitMode::kRow)
.describe("Data split mode for distributed training.");
DMLC_DECLARE_FIELD(tree_method)
.set_default(TreeMethod::kAuto)
.add_enum("auto", TreeMethod::kAuto)
.add_enum("approx", TreeMethod::kApprox)
.add_enum("exact", TreeMethod::kExact)
.add_enum("hist", TreeMethod::kHist)
.add_enum("gpu_exact", TreeMethod::kGPUExact)
.add_enum("gpu_hist", TreeMethod::kGPUHist)
.describe("Choice of tree construction method.");
DMLC_DECLARE_FIELD(nthread).set_default(0).describe(
"Number of threads to use.");
DMLC_DECLARE_FIELD(disable_default_eval_metric)
@@ -79,6 +67,9 @@ struct LearnerTrainParam : public dmlc::Parameter<LearnerTrainParam> {
.set_default(0)
.set_lower_bound(-1)
.describe("Number of GPUs to use for multi-gpu algorithms.");
DMLC_DECLARE_FIELD(booster)
.set_default("gbtree")
.describe("Gradient booster used for training.");
}
};
} // namespace xgboost