Support slicing tree model (#6302)

This PR is meant to end the confusion around best_ntree_limit and unify model slicing. With multi-class models and random forests, asking users to understand how to set ntree_limit is difficult and error prone.

* Implement the save_best option in early stopping.

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
This commit is contained in:
Jiaming Yuan
2020-11-03 02:27:39 -05:00
committed by GitHub
parent 29745c6df2
commit 2cc9662005
19 changed files with 550 additions and 37 deletions

View File

@@ -730,6 +730,22 @@ XGB_DLL int XGBoosterSaveRabitCheckpoint(BoosterHandle handle) {
API_END();
}
// C API entry point: slice a booster into the trees belonging to
// [begin_layer, end_layer) with the given step, writing a handle to the
// newly created booster into *out.  The caller owns the returned handle.
XGB_DLL int XGBoosterSlice(BoosterHandle handle, int begin_layer,
int end_layer, int step,
BoosterHandle *out) {
API_BEGIN();
CHECK_HANDLE();
// A BoosterHandle is an opaque pointer to a Learner.
auto* learner = static_cast<Learner*>(handle);
bool out_of_bound = false;
auto p_out = learner->Slice(begin_layer, end_layer, step, &out_of_bound);
if (out_of_bound) {
// Dedicated error code so bindings can distinguish "layer range out of
// bound" from a generic failure.  NOTE(review): this early return exits
// the try-block opened by API_BEGIN() — legal C++, but presumably -2
// differs from the macro's normal error code; confirm bindings handle it.
return -2;
}
CHECK(p_out);
*out = p_out;
API_END();
}
inline void XGBoostDumpModelImpl(BoosterHandle handle, const FeatureMap &fmap,
int with_stats, const char *format,
xgboost::bst_ulong *len,

View File

@@ -398,6 +398,38 @@ void GBTree::SaveModel(Json* p_out) const {
model_.SaveModel(&model);
}
// Copy the trees of layers [layer_begin, layer_end) (stride `step`) from this
// booster into `out`, which must already be allocated as a GBTree.
// *out_of_bound is set when the requested range reaches past the last tree;
// in that case `out` should not be used.
void GBTree::Slice(int32_t layer_begin, int32_t layer_end, int32_t step,
GradientBooster *out, bool* out_of_bound) const {
CHECK(configured_);
CHECK(out);
// `out` is created by the caller with the same booster type, so the
// downcast is expected to succeed.
auto p_gbtree = dynamic_cast<GBTree *>(out);
CHECK(p_gbtree);
GBTreeModel &out_model = p_gbtree->model_;
// Number of trees per boosting layer (groups * parallel trees).
auto layer_trees = this->LayerTrees();
// layer_end == 0 means "up to the last layer".
layer_end = layer_end == 0 ? model_.trees.size() / layer_trees : layer_end;
CHECK_GE(layer_end, layer_begin);
CHECK_GE(step, 1);
int32_t n_layers = (layer_end - layer_begin) / step;
// Pre-size the output so SliceTrees can fill by index.
std::vector<std::unique_ptr<RegTree>> &out_trees = out_model.trees;
out_trees.resize(layer_trees * n_layers);
std::vector<int32_t> &out_trees_info = out_model.tree_info;
out_trees_info.resize(layer_trees * n_layers);
// num_trees must agree with trees.size() (checked on save).
out_model.param.num_trees = out_model.trees.size();
// Slicing a model mid-training-update is not supported.
CHECK(this->model_.trees_to_update.empty());
*out_of_bound = detail::SliceTrees(
layer_begin, layer_end, step, this->model_, tparam_, layer_trees,
[&](auto const &in_it, auto const &out_it) {
// Deep-copy each selected tree along with its group (tree_info).
auto new_tree =
std::make_unique<RegTree>(*this->model_.trees.at(in_it));
bst_group_t group = this->model_.tree_info[in_it];
out_trees.at(out_it) = std::move(new_tree);
out_trees_info.at(out_it) = group;
});
}
void GBTree::PredictBatch(DMatrix* p_fmat,
PredictionCacheEntry* out_preds,
bool,
@@ -494,6 +526,22 @@ class Dart : public GBTree {
dparam_.UpdateAllowUnknown(cfg);
}
// Slice the underlying trees, then carry over the dropout weight of every
// tree that survived the slice.
void Slice(int32_t layer_begin, int32_t layer_end, int32_t step,
           GradientBooster *out, bool* out_of_bound) const final {
  GBTree::Slice(layer_begin, layer_end, step, out, out_of_bound);
  if (*out_of_bound) {
    // Base slice failed; leave `out` untouched.
    return;
  }
  auto p_dart = dynamic_cast<Dart*>(out);
  CHECK(p_dart);
  // The output booster must be freshly created, with no weights yet.
  CHECK(p_dart->weight_drop_.empty());
  detail::SliceTrees(layer_begin, layer_end, step, model_, tparam_,
                     this->LayerTrees(),
                     [&](auto const& src_idx, auto const&) {
                       // Weights are appended in slice order, matching trees.
                       p_dart->weight_drop_.push_back(
                           this->weight_drop_.at(src_idx));
                     });
}
void SaveModel(Json *p_out) const override {
auto &out = *p_out;
out["name"] = String("dart");

View File

@@ -152,6 +152,50 @@ struct DartTrainParam : public XGBoostParameter<DartTrainParam> {
}
};
namespace detail {
// From here on, layer becomes concrete trees.

/*!
 * \brief Map a layer range [layer_begin, layer_end) to the corresponding
 *        concrete tree range [tree_begin, tree_end).
 *
 * One layer contains num_output_group * num_parallel_tree trees.  A
 * layer_end of 0 is interpreted as "up to the last tree in the model".
 */
inline std::pair<uint32_t, uint32_t> LayerToTree(gbm::GBTreeModel const &model,
                                                 GBTreeTrainParam const &tparam,
                                                 size_t layer_begin,
                                                 size_t layer_end) {
  bst_group_t groups = model.learner_model_param->num_output_group;
  uint32_t tree_begin = layer_begin * groups * tparam.num_parallel_tree;
  uint32_t tree_end = layer_end * groups * tparam.num_parallel_tree;
  if (tree_end == 0) {
    // 0 means "slice to the end of the model".
    tree_end = static_cast<uint32_t>(model.trees.size());
  }
  CHECK_LT(tree_begin, tree_end);
  return {tree_begin, tree_end};
}

// Call fn for each pair of input output tree. Return true if index is out of bound.
template <typename Func>
inline bool SliceTrees(int32_t layer_begin, int32_t layer_end, int32_t step,
                       GBTreeModel const &model, GBTreeTrainParam const &tparam,
                       uint32_t layer_trees, Func fn) {
  uint32_t tree_begin, tree_end;
  std::tie(tree_begin, tree_end) = detail::LayerToTree(model, tparam, layer_begin, layer_end);
  if (tree_end > model.trees.size()) {
    // Requested slice reaches past the last tree in the model.
    return true;
  }

  // layer_end == 0 means "up to the last layer".
  layer_end = layer_end == 0 ? model.trees.size() / layer_trees : layer_end;
  uint32_t n_layers = (layer_end - layer_begin) / step;

  // Tree indices are never negative; use uint32_t so CHECK_LT below does not
  // mix signed and unsigned operands (in_it vs. tree_end).
  uint32_t in_it = tree_begin;
  uint32_t out_it = 0;
  for (uint32_t l = 0; l < n_layers; ++l) {
    for (uint32_t i = 0; i < layer_trees; ++i) {
      CHECK_LT(in_it, tree_end);
      fn(in_it, out_it);
      out_it++;
      in_it++;
    }
    // Skip the layers excluded by `step`.
    in_it += (step - 1) * layer_trees;
  }
  return false;
}
}  // namespace detail
// gradient boosted trees
class GBTree : public GradientBooster {
public:
@@ -200,6 +244,15 @@ class GBTree : public GradientBooster {
return model_.learner_model_param->num_output_group == 1;
}
// Number of trees per layer.
auto LayerTrees() const {
  // One boosting layer holds one forest per output group.
  return model_.learner_model_param->num_output_group * tparam_.num_parallel_tree;
}
// slice the trees, out must be already allocated
void Slice(int32_t layer_begin, int32_t layer_end, int32_t step,
GradientBooster *out, bool* out_of_bound) const override;
void PredictBatch(DMatrix* p_fmat,
PredictionCacheEntry* out_preds,
bool training,
@@ -210,13 +263,8 @@ class GBTree : public GradientBooster {
uint32_t layer_begin,
unsigned layer_end) const override {
CHECK(configured_);
// From here on, layer becomes concrete trees.
bst_group_t groups = model_.learner_model_param->num_output_group;
uint32_t tree_begin = layer_begin * groups * tparam_.num_parallel_tree;
uint32_t tree_end = layer_end * groups * tparam_.num_parallel_tree;
if (tree_end == 0 || tree_end > model_.trees.size()) {
tree_end = static_cast<uint32_t>(model_.trees.size());
}
uint32_t tree_begin, tree_end;
std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, tparam_, layer_begin, layer_end);
this->GetPredictor()->InplacePredict(x, model_, missing, out_preds,
tree_begin, tree_end);
}

View File

@@ -6,10 +6,10 @@
#include "xgboost/json.h"
#include "xgboost/logging.h"
#include "gbtree_model.h"
#include "gbtree.h"
namespace xgboost {
namespace gbm {
void GBTreeModel::Save(dmlc::Stream* fo) const {
CHECK_EQ(param.num_trees, static_cast<int32_t>(trees.size()));

View File

@@ -1,5 +1,5 @@
/*!
* Copyright 2017-2019 by Contributors
* Copyright 2017-2020 by Contributors
* \file gbtree_model.h
*/
#ifndef XGBOOST_GBM_GBTREE_MODEL_H_
@@ -22,6 +22,7 @@ namespace xgboost {
class Json;
namespace gbm {
/*! \brief model parameters */
struct GBTreeModelParam : public dmlc::Parameter<GBTreeModelParam> {
public:

View File

@@ -971,6 +971,26 @@ class LearnerImpl : public LearnerIO {
return gbm_->DumpModel(fmap, with_stats, format);
}
// Create a new Learner containing only the boosting layers in
// [begin_layer, end_layer) with stride `step`.  Ownership of the returned
// Learner is transferred to the caller (the C API wraps it in a handle).
// *out_of_bound is set when the range exceeds the number of trained layers.
Learner *Slice(int32_t begin_layer, int32_t end_layer, int32_t step,
bool *out_of_bound) override {
this->Configure();
CHECK_GE(begin_layer, 0);
// Fresh learner; its state is populated below from `this`.
auto *out_impl = new LearnerImpl({});
// Create an empty booster of the same type, then let the source booster
// fill it with the sliced trees.
auto gbm = std::unique_ptr<GradientBooster>(GradientBooster::Create(
this->tparam_.booster, &this->generic_parameters_,
&this->learner_model_param_));
this->gbm_->Slice(begin_layer, end_layer, step, gbm.get(), out_of_bound);
out_impl->gbm_ = std::move(gbm);
// Round-trip this learner's configuration so the slice inherits all
// hyper-parameters; LoadConfig must run before Configure.
Json config { Object() };
this->SaveConfig(&config);
out_impl->mparam_ = this->mparam_;
out_impl->attributes_ = this->attributes_;
out_impl->learner_model_param_ = this->learner_model_param_;
out_impl->LoadConfig(config);
out_impl->Configure();
return out_impl;
}
void UpdateOneIter(int iter, std::shared_ptr<DMatrix> train) override {
monitor_.Start("UpdateOneIter");
TrainingObserver::Instance().Update(iter);