Move num_parallel_tree to model parameter. (#7751)

The size of the forest should be a property of the model itself rather than a
training hyper-parameter.
Jiaming Yuan 2022-03-29 02:32:42 +08:00 committed by GitHub
parent 8b3ecfca25
commit 3c9b04460a
11 changed files with 158 additions and 101 deletions
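
Before the per-file changes, a quick illustration of what the move buys downstream consumers (a hedged sketch, not code from this commit): after the change the forest size is readable from the saved configuration under `gbtree_model_param`, with a fallback to the old `gbtree_train_param` location for configs written by earlier versions. `dtrain` is assumed to be an existing DMatrix.

    import json
    import xgboost as xgb

    booster = xgb.train({"num_parallel_tree": 4, "subsample": 0.5}, dtrain, num_boost_round=4)
    config = json.loads(booster.save_config())
    gb = config["learner"]["gradient_booster"]
    try:
        # New location after this commit: a model property.
        forest = int(gb["gbtree_model_param"]["num_parallel_tree"])
    except KeyError:
        # Old location: a training hyper-parameter; kept as a fallback for
        # configs saved by older versions (mirrors the Python shim below).
        forest = int(gb["gbtree_train_param"]["num_parallel_tree"])
    assert forest == 4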

View File

@@ -168,6 +168,9 @@
        "num_trees": {
          "type": "string"
        },
+       "num_parallel_tree": {
+         "type": "string"
+       },
        "size_leaf_vector": {
          "type": "string"
        }

View File

@@ -78,11 +78,14 @@ def from_cstr_to_pystr(data: CStrPptr, length: c_bst_ulong) -> List[str]:
     return res


+IterRange = TypeVar("IterRange", Optional[Tuple[int, int]], Tuple[int, int])
+
+
 def _convert_ntree_limit(
     booster: "Booster",
     ntree_limit: Optional[int],
-    iteration_range: Optional[Tuple[int, int]]
-) -> Optional[Tuple[int, int]]:
+    iteration_range: IterRange
+) -> IterRange:
     if ntree_limit is not None and ntree_limit != 0:
         warnings.warn(
             "ntree_limit is deprecated, use `iteration_range` or model "
@@ -1292,11 +1295,18 @@ def _get_booster_layer_trees(model: "Booster") -> Tuple[int, int]:
         num_parallel_tree = 0
     elif booster == "dart":
         num_parallel_tree = int(
-            config["learner"]["gradient_booster"]["gbtree"]["gbtree_train_param"][
+            config["learner"]["gradient_booster"]["gbtree"]["gbtree_model_param"][
                 "num_parallel_tree"
             ]
         )
     elif booster == "gbtree":
+        try:
+            num_parallel_tree = int(
+                config["learner"]["gradient_booster"]["gbtree_model_param"][
+                    "num_parallel_tree"
+                ]
+            )
+        except KeyError:
             num_parallel_tree = int(
                 config["learner"]["gradient_booster"]["gbtree_train_param"][
                     "num_parallel_tree"

View File

@@ -129,18 +129,16 @@ inline uint32_t GetIterationFromTreeLimit(uint32_t ntree_limit, Learner *learner
   Json config{Object()};
   learner->SaveConfig(&config);
-  auto const &booster =
-      get<String const>(config["learner"]["gradient_booster"]["name"]);
+  auto const &booster = get<String const>(config["learner"]["gradient_booster"]["name"]);
   if (booster == "gblinear") {
     num_parallel_tree = 0;
   } else if (booster == "dart") {
-    num_parallel_tree = std::stoi(
-        get<String const>(config["learner"]["gradient_booster"]["gbtree"]
-                              ["gbtree_train_param"]["num_parallel_tree"]));
+    num_parallel_tree =
+        std::stoi(get<String const>(config["learner"]["gradient_booster"]["gbtree"]
+                                        ["gbtree_model_param"]["num_parallel_tree"]));
   } else if (booster == "gbtree") {
-    num_parallel_tree = std::stoi(get<String const>(
-        (config["learner"]["gradient_booster"]["gbtree_train_param"]
-             ["num_parallel_tree"])));
+    num_parallel_tree = std::stoi(get<String const>(
+        (config["learner"]["gradient_booster"]["gbtree_model_param"]["num_parallel_tree"])));
   } else {
     LOG(FATAL) << "Unknown booster:" << booster;
   }
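
For intuition only (a hedged Python sketch of the idea, not the exact C++ logic above, which also reads the config and handles output groups): converting a deprecated tree-count limit into an iteration count amounts to dividing by the number of trees each boosting round adds.

    # Hypothetical helper mirroring the idea behind GetIterationFromTreeLimit:
    # one boosting round adds num_parallel_tree trees per output group, so a
    # tree-count limit maps to roughly this many complete rounds.
    def iterations_from_tree_limit(ntree_limit: int, num_parallel_tree: int) -> int:
        assert num_parallel_tree >= 1
        return ntree_limit // num_parallel_tree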

View File

@@ -429,9 +429,11 @@ void SketchContainerImpl<WQSketch>::AllReduce(
   this->GatherSketchInfo(reduced, &worker_segments, &sketches_scan, &global_sketches);

   std::vector<typename WQSketch::SummaryContainer> final_sketches(n_columns);
-  ParallelFor(n_columns, n_threads_, [&](auto fidx) {
+  // gcc raises subobject-linkage warning if we put allreduce_result as lambda capture
   QuantileAllreduce<typename WQSketch::Entry> allreduce_result{global_sketches, worker_segments,
                                                                sketches_scan, n_columns};
+  ParallelFor(n_columns, n_threads_, [&](auto fidx) {
     int32_t intermediate_num_cuts = num_cuts[fidx];
     auto nbytes = WQSketch::SummaryContainer::CalcMemCost(intermediate_num_cuts);
     if (IsCat(feature_types_, fidx)) {

View File

@@ -323,7 +323,7 @@ void GBTree::BoostNewTrees(HostDeviceVector<GradientPair>* gpair,
   std::vector<RegTree*> new_trees;
   ret->clear();
   // create the trees
-  for (int i = 0; i < tparam_.num_parallel_tree; ++i) {
+  for (int i = 0; i < model_.param.num_parallel_tree; ++i) {
     if (tparam_.process_type == TreeProcessType::kDefault) {
       CHECK(!updaters_.front()->CanModifyTree())
           << "Updater: `" << updaters_.front()->Name() << "` "
@@ -347,7 +347,7 @@ void GBTree::BoostNewTrees(HostDeviceVector<GradientPair>* gpair,
           << "boosting rounds can not exceed previous training rounds";
       // move an existing tree from trees_to_update
       auto t = std::move(model_.trees_to_update[model_.trees.size() +
-                                                bst_group * tparam_.num_parallel_tree + i]);
+                                                bst_group * model_.param.num_parallel_tree + i]);
       new_trees.push_back(t.get());
       ret->push_back(std::move(t));
     }
@@ -414,6 +414,10 @@ void GBTree::SaveConfig(Json* p_out) const {
   // e.g. updating a model, then saving and loading it would result in an empty
   // model
   out["gbtree_train_param"]["process_type"] = String("default");
+  // Duplicated from SaveModel so that user can get `num_parallel_tree` without parsing
+  // the model. We might remove this once we can deprecate `best_ntree_limit` so that the
+  // language binding doesn't need to know about the forest size.
+  out["gbtree_model_param"] = ToJson(model_.param);

   out["updater"] = Object();
@@ -460,6 +464,7 @@ void GBTree::Slice(int32_t layer_begin, int32_t layer_end, int32_t step,
   std::vector<int32_t> &out_trees_info = out_model.tree_info;
   out_trees_info.resize(layer_trees * n_layers);
   out_model.param.num_trees = out_model.trees.size();
+  out_model.param.num_parallel_tree = model_.param.num_parallel_tree;
   if (!this->model_.trees_to_update.empty()) {
     CHECK_EQ(this->model_.trees_to_update.size(), this->model_.trees.size())
         << "Not all trees are updated, "
@@ -512,8 +517,7 @@ void GBTree::PredictBatch(DMatrix* p_fmat,
   }
   uint32_t tree_begin, tree_end;
-  std::tie(tree_begin, tree_end) =
-      detail::LayerToTree(model_, tparam_, layer_begin, layer_end);
+  std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
   CHECK_LE(tree_end, model_.trees.size()) << "Invalid number of trees.";
   if (tree_end > tree_begin) {
     predictor->PredictBatch(p_fmat, out_preds, model_, tree_begin, tree_end);
@@ -723,8 +727,7 @@ class Dart : public GBTree {
                          model_);
     p_out_preds->version = 0;
     uint32_t tree_begin, tree_end;
-    std::tie(tree_begin, tree_end) =
-        detail::LayerToTree(model_, tparam_, layer_begin, layer_end);
+    std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
     auto n_groups = model_.learner_model_param->num_output_group;

     PredictionCacheEntry predts;  // temporary storage for prediction
@@ -779,7 +782,7 @@ class Dart : public GBTree {
                    float missing, PredictionCacheEntry *out_preds,
                    uint32_t layer_begin, unsigned layer_end) const override {
     uint32_t tree_begin, tree_end;
-    std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, tparam_, layer_begin, layer_end);
+    std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
     std::vector<Predictor const *> predictors{
       cpu_predictor_.get(),
 #if defined(XGBOOST_USE_CUDA)
@@ -867,7 +870,7 @@ class Dart : public GBTree {
     DropTrees(false);
     auto &predictor = this->GetPredictor();
     uint32_t _, tree_end;
-    std::tie(_, tree_end) = detail::LayerToTree(model_, tparam_, layer_begin, layer_end);
+    std::tie(_, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
     predictor->PredictInstance(inst, out_preds, model_, tree_end);
   }
@@ -877,7 +880,7 @@ class Dart : public GBTree {
                            unsigned) override {
     CHECK(configured_);
     uint32_t tree_begin, tree_end;
-    std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, tparam_, layer_begin, layer_end);
+    std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
     cpu_predictor_->PredictContribution(p_fmat, out_contribs, model_,
                                         tree_end, &weight_drop_, approximate);
   }
@@ -887,9 +890,9 @@ class Dart : public GBTree {
                            unsigned layer_begin, unsigned layer_end, bool approximate) override {
     CHECK(configured_);
     uint32_t tree_begin, tree_end;
-    std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, tparam_, layer_begin, layer_end);
-    cpu_predictor_->PredictInteractionContributions(p_fmat, out_contribs, model_,
-                                                    tree_end, &weight_drop_, approximate);
+    std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
+    cpu_predictor_->PredictInteractionContributions(p_fmat, out_contribs, model_, tree_end,
+                                                    &weight_drop_, approximate);
   }

 protected:

View File

@@ -60,11 +60,6 @@ namespace gbm {

 /*! \brief training parameters */
 struct GBTreeTrainParam : public XGBoostParameter<GBTreeTrainParam> {
-  /*!
-   * \brief number of parallel trees constructed each iteration
-   *  use this option to support boosted random forest
-   */
-  int num_parallel_tree;
   /*! \brief tree updater sequence */
   std::string updater_seq;
   /*! \brief type of boosting process to run */
@@ -75,11 +70,6 @@ struct GBTreeTrainParam : public XGBoostParameter<GBTreeTrainParam> {
   TreeMethod tree_method;
   // declare parameters
   DMLC_DECLARE_PARAMETER(GBTreeTrainParam) {
-    DMLC_DECLARE_FIELD(num_parallel_tree)
-        .set_default(1)
-        .set_lower_bound(1)
-        .describe("Number of parallel trees constructed during each iteration."\
-                  " This option is used to support boosted random forest.");
     DMLC_DECLARE_FIELD(updater_seq)
         .set_default("grow_colmaker,prune")
         .describe("Tree updater sequence.");
@@ -156,12 +146,11 @@ struct DartTrainParam : public XGBoostParameter<DartTrainParam> {
 namespace detail {
 // From here on, layer becomes concrete trees.
 inline std::pair<uint32_t, uint32_t> LayerToTree(gbm::GBTreeModel const &model,
-                                                 GBTreeTrainParam const &tparam,
                                                  size_t layer_begin,
                                                  size_t layer_end) {
   bst_group_t groups = model.learner_model_param->num_output_group;
-  uint32_t tree_begin = layer_begin * groups * tparam.num_parallel_tree;
-  uint32_t tree_end = layer_end * groups * tparam.num_parallel_tree;
+  uint32_t tree_begin = layer_begin * groups * model.param.num_parallel_tree;
+  uint32_t tree_end = layer_end * groups * model.param.num_parallel_tree;
   if (tree_end == 0) {
     tree_end = static_cast<uint32_t>(model.trees.size());
   }
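
A worked check of the arithmetic above (a Python sketch, not part of the commit; the numbers come from the slicing test later in this diff): with 3 output groups and num_parallel_tree = 4, one layer (boosting round) holds 12 trees, so layers [3, 7) map to trees [36, 84).

    groups = 3               # num_output_group
    num_parallel_tree = 4
    layer_begin, layer_end = 3, 7
    tree_begin = layer_begin * groups * num_parallel_tree  # 36
    tree_end = layer_end * groups * num_parallel_tree      # 84
    assert tree_end - tree_begin == 48  # (7 - 3) layers of 12 trees each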
@@ -177,7 +166,7 @@ inline bool SliceTrees(int32_t layer_begin, int32_t layer_end, int32_t step,
                        GBTreeModel const &model, GBTreeTrainParam const &tparam,
                        uint32_t layer_trees, Func fn) {
   uint32_t tree_begin, tree_end;
-  std::tie(tree_begin, tree_end) = detail::LayerToTree(model, tparam, layer_begin, layer_end);
+  std::tie(tree_begin, tree_end) = detail::LayerToTree(model, layer_begin, layer_end);
   if (tree_end > model.trees.size()) {
     return true;
   }
@@ -249,7 +238,7 @@ class GBTree : public GradientBooster {
   // Number of trees per layer.
   auto LayerTrees() const {
-    auto n_trees = model_.learner_model_param->num_output_group * tparam_.num_parallel_tree;
+    auto n_trees = model_.learner_model_param->num_output_group * model_.param.num_parallel_tree;
     return n_trees;
   }
@@ -258,7 +247,7 @@ class GBTree : public GradientBooster {
                  GradientBooster *out, bool* out_of_bound) const override;

   int32_t BoostedRounds() const override {
-    CHECK_NE(tparam_.num_parallel_tree, 0);
+    CHECK_NE(model_.param.num_parallel_tree, 0);
     CHECK_NE(model_.learner_model_param->num_output_group, 0);
     return model_.trees.size() / this->LayerTrees();
   }
@@ -271,8 +260,7 @@ class GBTree : public GradientBooster {
                     uint32_t layer_begin, unsigned layer_end) const override {
     CHECK(configured_);
     uint32_t tree_begin, tree_end;
-    std::tie(tree_begin, tree_end) =
-        detail::LayerToTree(model_, tparam_, layer_begin, layer_end);
+    std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
     CHECK_LE(tree_end, model_.trees.size()) << "Invalid number of trees.";
     std::vector<Predictor const *> predictors{
       cpu_predictor_.get(),
@@ -371,16 +359,15 @@ class GBTree : public GradientBooster {
                        uint32_t layer_begin, uint32_t layer_end) override {
     CHECK(configured_);
     uint32_t tree_begin, tree_end;
-    std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, tparam_, layer_begin, layer_end);
-    cpu_predictor_->PredictInstance(inst, out_preds, model_,
-                                    tree_end);
+    std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
+    cpu_predictor_->PredictInstance(inst, out_preds, model_, tree_end);
   }

   void PredictLeaf(DMatrix* p_fmat,
                    HostDeviceVector<bst_float>* out_preds,
                    uint32_t layer_begin, uint32_t layer_end) override {
     uint32_t tree_begin, tree_end;
-    std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, tparam_, layer_begin, layer_end);
+    std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
     CHECK_EQ(tree_begin, 0) << "Predict leaf supports only iteration end: (0, "
                                "n_iteration), use model slicing instead.";
     this->GetPredictor()->PredictLeaf(p_fmat, out_preds, model_, tree_end);
@@ -392,7 +379,7 @@ class GBTree : public GradientBooster {
                            int, unsigned) override {
     CHECK(configured_);
     uint32_t tree_begin, tree_end;
-    std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, tparam_, layer_begin, layer_end);
+    std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
     CHECK_EQ(tree_begin, 0)
         << "Predict contribution supports only iteration end: (0, "
            "n_iteration), using model slicing instead.";
@@ -405,7 +392,7 @@ class GBTree : public GradientBooster {
                            uint32_t layer_begin, uint32_t layer_end, bool approximate) override {
     CHECK(configured_);
     uint32_t tree_begin, tree_end;
-    std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, tparam_, layer_begin, layer_end);
+    std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
     CHECK_EQ(tree_begin, 0)
         << "Predict interaction contribution supports only iteration end: (0, "
            "n_iteration), using model slicing instead.";

View File

@@ -31,7 +31,7 @@ struct GBTreeModelParam : public dmlc::Parameter<GBTreeModelParam> {
   /*! \brief number of trees */
   int32_t num_trees;
   /*! \brief (Deprecated) number of roots */
-  int32_t deprecated_num_roots;
+  int32_t num_parallel_tree;
   /*! \brief number of features to be used by trees */
   int32_t deprecated_num_feature;
   /*! \brief pad this space, for backward compatibility reason.*/
@@ -50,7 +50,7 @@ struct GBTreeModelParam : public dmlc::Parameter<GBTreeModelParam> {
     std::memset(this, 0, sizeof(GBTreeModelParam));  // FIXME(trivialfis): Why?
     static_assert(sizeof(GBTreeModelParam) == (4 + 2 + 2 + 32) * sizeof(int32_t),
                   "64/32 bit compatibility issue");
-    deprecated_num_roots = 1;
+    num_parallel_tree = 1;
   }

   // declare parameters, only declare those that need to be set.
@@ -59,6 +59,12 @@ struct GBTreeModelParam : public dmlc::Parameter<GBTreeModelParam> {
         .set_lower_bound(0)
         .set_default(0)
         .describe("Number of features used for training and prediction.");
+    DMLC_DECLARE_FIELD(num_parallel_tree)
+        .set_default(1)
+        .set_lower_bound(1)
+        .describe(
+            "Number of parallel trees constructed during each iteration."
+            " This option is used to support boosted random forest.");
     DMLC_DECLARE_FIELD(size_leaf_vector)
         .set_lower_bound(0)
         .set_default(0)
@@ -70,7 +76,7 @@ struct GBTreeModelParam : public dmlc::Parameter<GBTreeModelParam> {
   inline GBTreeModelParam ByteSwap() const {
     GBTreeModelParam x = *this;
     dmlc::ByteSwap(&x.num_trees, sizeof(x.num_trees), 1);
-    dmlc::ByteSwap(&x.deprecated_num_roots, sizeof(x.deprecated_num_roots), 1);
+    dmlc::ByteSwap(&x.num_parallel_tree, sizeof(x.num_parallel_tree), 1);
     dmlc::ByteSwap(&x.deprecated_num_feature, sizeof(x.deprecated_num_feature), 1);
     dmlc::ByteSwap(&x.pad_32bit, sizeof(x.pad_32bit), 1);
     dmlc::ByteSwap(&x.deprecated_num_pbuffer, sizeof(x.deprecated_num_pbuffer), 1);

View File

@@ -207,7 +207,7 @@ TEST(GBTree, JsonIO) {
   ASSERT_EQ(get<Integer>(get<Object>(get<Array>(gbtree_model["trees"]).front()).at("id")), 0);
   ASSERT_EQ(get<Array>(gbtree_model["tree_info"]).size(), 1ul);

-  auto j_train_param = model["config"]["gbtree_train_param"];
+  auto j_train_param = model["config"]["gbtree_model_param"];
   ASSERT_EQ(get<String>(j_train_param["num_parallel_tree"]), "1");
 }
@@ -337,6 +337,13 @@ std::pair<Json, Json> TestModelSlice(std::string booster) {
   Json sliced_config {Object()};
   sliced->SaveConfig(&sliced_config);
+  // Only num trees is changed
+  if (booster == "gbtree") {
+    sliced_config["learner"]["gradient_booster"]["gbtree_model_param"]["num_trees"] = String("60");
+  } else {
+    sliced_config["learner"]["gradient_booster"]["gbtree"]["gbtree_model_param"]["num_trees"] =
+        String("60");
+  }
   CHECK_EQ(sliced_config, config);

   auto get_trees = [&](Json const& model) {

View File

@@ -8,8 +8,6 @@ import locale
 import tempfile

 dpath = os.path.join(tm.PROJECT_ROOT, 'demo/data/')
-dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
-dtest = xgb.DMatrix(dpath + 'agaricus.txt.test')

 rng = np.random.RandomState(1994)
@@ -38,6 +36,8 @@ class TestModels:
         param = {'verbosity': 0, 'objective': 'binary:logistic',
                  'booster': 'gblinear', 'alpha': 0.0001, 'lambda': 1,
                  'nthread': 1}
+        dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
+        dtest = xgb.DMatrix(dpath + 'agaricus.txt.test')
         watchlist = [(dtest, 'eval'), (dtrain, 'train')]
         num_round = 4
         bst = xgb.train(param, dtrain, num_round, watchlist)
@@ -124,7 +124,7 @@ class TestModels:
         predt_1 = bst.predict(margined)
         assert np.any(np.abs(predt_1 - predt_0) > 1e-6)

+        dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
         bst = xgb.train({'tree_method': 'hist'}, dtrain, 2)
         predt_2 = bst.predict(dtrain)
         assert np.all(np.abs(predt_2 - predt_1) < 1e-6)
@@ -150,6 +150,8 @@ class TestModels:
             'objective': 'reg:logistic',
             "tree_method": tree_method
         }
+        dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
+        dtest = xgb.DMatrix(dpath + 'agaricus.txt.test')
         watchlist = [(dtest, 'eval'), (dtrain, 'train')]
         num_round = 10
@@ -195,6 +197,8 @@ class TestModels:
         self.run_custom_objective()

     def test_multi_eval_metric(self):
+        dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
+        dtest = xgb.DMatrix(dpath + 'agaricus.txt.test')
         watchlist = [(dtest, 'eval'), (dtrain, 'train')]
         param = {'max_depth': 2, 'eta': 0.2, 'verbosity': 1,
                  'objective': 'binary:logistic'}
@@ -216,6 +220,7 @@ class TestModels:
             param['scale_pos_weight'] = ratio
             return (dtrain, dtest, param)

+        dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
         xgb.cv(param, dtrain, num_round, nfold=5,
                metrics={'auc'}, seed=0, fpreproc=fpreproc)
@@ -223,6 +228,7 @@ class TestModels:
         param = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
                  'objective': 'binary:logistic'}
         num_round = 2
+        dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
         xgb.cv(param, dtrain, num_round, nfold=5,
                metrics={'error'}, seed=0, show_stdv=False)
@@ -331,6 +337,7 @@ class TestModels:
         os.remove(model_path)

         try:
+            dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
             xgb.train({'objective': 'foo'}, dtrain, num_boost_round=1)
         except ValueError as e:
             e_str = str(e)
@@ -422,24 +429,14 @@ class TestModels:
         assert cls.get_booster().best_ntree_limit == 2
         assert cls.best_ntree_limit == cls.get_booster().best_ntree_limit

-    @pytest.mark.skipif(**tm.no_sklearn())
-    @pytest.mark.parametrize('booster', ['gbtree', 'dart'])
-    def test_slice(self, booster):
-        from sklearn.datasets import make_classification
-        num_classes = 3
-        X, y = make_classification(n_samples=1000, n_informative=5,
-                                   n_classes=num_classes)
-        dtrain = xgb.DMatrix(data=X, label=y)
-        num_parallel_tree = 4
-        num_boost_round = 16
-        total_trees = num_parallel_tree * num_classes * num_boost_round
-        booster = xgb.train({
-            'num_parallel_tree': 4, 'subsample': 0.5, 'num_class': 3, 'booster': booster,
-            'objective': 'multi:softprob'},
-            num_boost_round=num_boost_round, dtrain=dtrain)
-        booster.feature_types = ["q"] * X.shape[1]
-        assert len(booster.get_dump()) == total_trees
+    def run_slice(
+        self,
+        booster: xgb.Booster,
+        dtrain: xgb.DMatrix,
+        num_parallel_tree: int,
+        num_classes: int,
+        num_boost_round: int
+    ):
         beg = 3
         end = 7
         sliced: xgb.Booster = booster[beg:end]
@@ -449,41 +446,41 @@ class TestModels:
         assert sliced_trees == len(sliced.get_dump())

         sliced_trees = sliced_trees // 2
-        sliced: xgb.Booster = booster[beg: end: 2]
+        sliced = booster[beg:end:2]
         assert sliced_trees == len(sliced.get_dump())

-        sliced: xgb.Booster = booster[beg: ...]
+        sliced = booster[beg: ...]
         sliced_trees = (num_boost_round - beg) * num_parallel_tree * num_classes
         assert sliced_trees == len(sliced.get_dump())

-        sliced: xgb.Booster = booster[beg:]
+        sliced = booster[beg:]
         sliced_trees = (num_boost_round - beg) * num_parallel_tree * num_classes
         assert sliced_trees == len(sliced.get_dump())

-        sliced: xgb.Booster = booster[:end]
+        sliced = booster[:end]
         sliced_trees = end * num_parallel_tree * num_classes
         assert sliced_trees == len(sliced.get_dump())

-        sliced: xgb.Booster = booster[...:end]
+        sliced = booster[...: end]
         sliced_trees = end * num_parallel_tree * num_classes
         assert sliced_trees == len(sliced.get_dump())

-        with pytest.raises(ValueError, match=r'>= 0'):
+        with pytest.raises(ValueError, match=r">= 0"):
             booster[-1:0]

         # we do not accept empty slice.
         with pytest.raises(ValueError):
             booster[1:1]

         # stop can not be smaller than begin
-        with pytest.raises(ValueError, match=r'Invalid.*'):
+        with pytest.raises(ValueError, match=r"Invalid.*"):
             booster[3:0]
-        with pytest.raises(ValueError, match=r'Invalid.*'):
+        with pytest.raises(ValueError, match=r"Invalid.*"):
             booster[3:-1]

         # negative step is not supported.
-        with pytest.raises(ValueError, match=r'.*>= 1.*'):
+        with pytest.raises(ValueError, match=r".*>= 1.*"):
             booster[0:2:-1]

         # step can not be 0.
-        with pytest.raises(ValueError, match=r'.*>= 1.*'):
+        with pytest.raises(ValueError, match=r".*>= 1.*"):
             booster[0:2:0]

         trees = [_ for _ in booster]
@@ -525,6 +522,44 @@ class TestModels:
         single = booster[1:7].predict(dtrain, output_margin=True)
         np.testing.assert_allclose(merged, single, atol=1e-6)

+    @pytest.mark.skipif(**tm.no_sklearn())
+    @pytest.mark.parametrize("booster", ["gbtree", "dart"])
+    def test_slice(self, booster):
+        from sklearn.datasets import make_classification
+        num_classes = 3
+        X, y = make_classification(
+            n_samples=1000, n_informative=5, n_classes=num_classes
+        )
+        dtrain = xgb.DMatrix(data=X, label=y)
+        num_parallel_tree = 4
+        num_boost_round = 16
+        total_trees = num_parallel_tree * num_classes * num_boost_round
+        booster = xgb.train(
+            {
+                "num_parallel_tree": num_parallel_tree,
+                "subsample": 0.5,
+                "num_class": num_classes,
+                "booster": booster,
+                "objective": "multi:softprob",
+            },
+            num_boost_round=num_boost_round,
+            dtrain=dtrain,
+        )
+        booster.feature_types = ["q"] * X.shape[1]
+        assert len(booster.get_dump()) == total_trees
+        self.run_slice(booster, dtrain, num_parallel_tree, num_classes, num_boost_round)
+
+        bytesarray = booster.save_raw(raw_format="ubj")
+        booster = xgb.Booster(model_file=bytesarray)
+        self.run_slice(booster, dtrain, num_parallel_tree, num_classes, num_boost_round)
+
+        bytesarray = booster.save_raw(raw_format="deprecated")
+        booster = xgb.Booster(model_file=bytesarray)
+        self.run_slice(booster, dtrain, num_parallel_tree, num_classes, num_boost_round)
+
     @pytest.mark.skipif(**tm.no_pandas())
     def test_feature_info(self):
         import pandas as pd

View File

@@ -530,7 +530,7 @@ def test_dask_regressor(model: str, client: "Client") -> None:
     forest = int(
         json.loads(regressor.get_booster().save_config())["learner"][
             "gradient_booster"
-        ]["gbtree_train_param"]["num_parallel_tree"]
+        ]["gbtree_model_param"]["num_parallel_tree"]
     )

     if model == "boosting":
@@ -584,7 +584,7 @@ def run_dask_classifier(
     assert n_threads != 0 and n_threads != os.cpu_count()

     forest = int(
-        config["learner"]["gradient_booster"]["gbtree_train_param"]["num_parallel_tree"]
+        config["learner"]["gradient_booster"]["gbtree_model_param"]["num_parallel_tree"]
     )
     if model == "boosting":
         assert len(history["validation_0"][metric]) == 2

View File

@@ -329,21 +329,27 @@ def test_select_feature():

 def test_num_parallel_tree():
     from sklearn.datasets import fetch_california_housing
-    reg = xgb.XGBRegressor(n_estimators=4, num_parallel_tree=4,
-                           tree_method='hist')
+
+    reg = xgb.XGBRegressor(n_estimators=4, num_parallel_tree=4, tree_method="hist")
     X, y = fetch_california_housing(return_X_y=True)
     bst = reg.fit(X=X, y=y)
-    dump = bst.get_booster().get_dump(dump_format='json')
+    dump = bst.get_booster().get_dump(dump_format="json")
     assert len(dump) == 16

     reg = xgb.XGBRFRegressor(n_estimators=4)
     bst = reg.fit(X=X, y=y)
-    dump = bst.get_booster().get_dump(dump_format='json')
+    dump = bst.get_booster().get_dump(dump_format="json")
     assert len(dump) == 4

     config = json.loads(bst.get_booster().save_config())
-    assert int(config['learner']['gradient_booster']['gbtree_train_param'][
-        'num_parallel_tree']) == 4
+    assert (
+        int(
+            config["learner"]["gradient_booster"]["gbtree_model_param"][
+                "num_parallel_tree"
+            ]
+        )
+        == 4
+    )


 def test_calif_housing_regression():