[MT-TREE] Support prediction cache and model slicing. (#8968)

- Fix prediction range.
- Support prediction cache in mt-hist.
- Support model slicing.
- Make the booster a Python iterable by defining `__iter__`.
- Clean up removed/deprecated parameters.
- Add a new field, `iteration_indptr`, to the output model; it points to the range of trees belonging to each iteration.
This commit is contained in:
Jiaming Yuan
2023-03-27 23:10:54 +08:00
committed by GitHub
parent c2b3a13e70
commit acc110c251
30 changed files with 502 additions and 343 deletions

View File

@@ -148,7 +148,7 @@ class GBLinear : public GradientBooster {
}
void PredictBatch(DMatrix* p_fmat, PredictionCacheEntry* predts, bool /*training*/,
uint32_t layer_begin, uint32_t) override {
bst_layer_t layer_begin, bst_layer_t) override {
monitor_.Start("PredictBatch");
LinearCheckLayer(layer_begin);
auto* out_preds = &predts->predictions;

View File

@@ -225,10 +225,9 @@ void CopyGradient(HostDeviceVector<GradientPair> const* in_gpair, int32_t n_thre
}
void GBTree::UpdateTreeLeaf(DMatrix const* p_fmat, HostDeviceVector<float> const& predictions,
ObjFunction const* obj,
std::int32_t group_idx,
ObjFunction const* obj, std::int32_t group_idx,
std::vector<HostDeviceVector<bst_node_t>> const& node_position,
std::vector<std::unique_ptr<RegTree>>* p_trees) {
TreesOneGroup* p_trees) {
CHECK(!updaters_.empty());
if (!updaters_.back()->HasNodePosition()) {
return;
@@ -252,8 +251,8 @@ void GBTree::UpdateTreeLeaf(DMatrix const* p_fmat, HostDeviceVector<float> const
void GBTree::DoBoost(DMatrix* p_fmat, HostDeviceVector<GradientPair>* in_gpair,
PredictionCacheEntry* predt, ObjFunction const* obj) {
std::vector<std::vector<std::unique_ptr<RegTree>>> new_trees;
const int ngroup = model_.learner_model_param->OutputLength();
TreesOneIter new_trees;
bst_target_t const n_groups = model_.learner_model_param->OutputLength();
ConfigureWithKnownData(this->cfg_, p_fmat);
monitor_.Start("BoostNewTrees");
@@ -265,7 +264,7 @@ void GBTree::DoBoost(DMatrix* p_fmat, HostDeviceVector<GradientPair>* in_gpair,
device,
device == Context::kCpuId ? predt->predictions.HostSpan() : predt->predictions.DeviceSpan(),
p_fmat->Info().num_row_, model_.learner_model_param->OutputLength());
CHECK_NE(ngroup, 0);
CHECK_NE(n_groups, 0);
if (!p_fmat->SingleColBlock() && obj->Task().UpdateTreeLeaf()) {
LOG(FATAL) << "Current objective doesn't support external memory.";
@@ -276,36 +275,39 @@ void GBTree::DoBoost(DMatrix* p_fmat, HostDeviceVector<GradientPair>* in_gpair,
std::vector<HostDeviceVector<bst_node_t>> node_position;
if (model_.learner_model_param->IsVectorLeaf()) {
std::vector<std::unique_ptr<RegTree>> ret;
TreesOneGroup ret;
BoostNewTrees(in_gpair, p_fmat, 0, &node_position, &ret);
UpdateTreeLeaf(p_fmat, predt->predictions, obj, 0, node_position, &ret);
// No update prediction cache yet.
std::size_t num_new_trees = ret.size();
new_trees.push_back(std::move(ret));
} else if (model_.learner_model_param->OutputLength() == 1) {
std::vector<std::unique_ptr<RegTree>> ret;
if (updaters_.size() > 0 && num_new_trees == 1 && predt->predictions.Size() > 0 &&
updaters_.back()->UpdatePredictionCache(p_fmat, out)) {
predt->Update(1);
}
} else if (model_.learner_model_param->OutputLength() == 1u) {
TreesOneGroup ret;
BoostNewTrees(in_gpair, p_fmat, 0, &node_position, &ret);
UpdateTreeLeaf(p_fmat, predt->predictions, obj, 0, node_position, &ret);
const size_t num_new_trees = ret.size();
new_trees.push_back(std::move(ret));
auto v_predt = out.Slice(linalg::All(), 0);
if (updaters_.size() > 0 && num_new_trees == 1 && predt->predictions.Size() > 0 &&
updaters_.back()->UpdatePredictionCache(p_fmat, v_predt)) {
updaters_.back()->UpdatePredictionCache(p_fmat, out)) {
predt->Update(1);
}
} else {
CHECK_EQ(in_gpair->Size() % ngroup, 0U) << "must have exactly ngroup * nrow gpairs";
HostDeviceVector<GradientPair> tmp(in_gpair->Size() / ngroup, GradientPair(),
CHECK_EQ(in_gpair->Size() % n_groups, 0U) << "must have exactly ngroup * nrow gpairs";
HostDeviceVector<GradientPair> tmp(in_gpair->Size() / n_groups, GradientPair(),
in_gpair->DeviceIdx());
bool update_predict = true;
for (int gid = 0; gid < ngroup; ++gid) {
for (bst_target_t gid = 0; gid < n_groups; ++gid) {
node_position.clear();
CopyGradient(in_gpair, ctx_->Threads(), ngroup, gid, &tmp);
std::vector<std::unique_ptr<RegTree>> ret;
CopyGradient(in_gpair, ctx_->Threads(), n_groups, gid, &tmp);
TreesOneGroup ret;
BoostNewTrees(&tmp, p_fmat, gid, &node_position, &ret);
UpdateTreeLeaf(p_fmat, predt->predictions, obj, gid, node_position, &ret);
const size_t num_new_trees = ret.size();
new_trees.push_back(std::move(ret));
auto v_predt = out.Slice(linalg::All(), gid);
auto v_predt = out.Slice(linalg::All(), linalg::Range(gid, gid + 1));
if (!(updaters_.size() > 0 && predt->predictions.Size() > 0 && num_new_trees == 1 &&
updaters_.back()->UpdatePredictionCache(p_fmat, v_predt))) {
update_predict = false;
@@ -363,7 +365,7 @@ void GBTree::InitUpdater(Args const& cfg) {
void GBTree::BoostNewTrees(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat, int bst_group,
std::vector<HostDeviceVector<bst_node_t>>* out_position,
std::vector<std::unique_ptr<RegTree>>* ret) {
TreesOneGroup* ret) {
std::vector<RegTree*> new_trees;
ret->clear();
// create the trees
@@ -419,15 +421,9 @@ void GBTree::BoostNewTrees(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fma
tree_param_.learning_rate = lr;
}
void GBTree::CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees) {
void GBTree::CommitModel(TreesOneIter&& new_trees) {
monitor_.Start("CommitModel");
if (this->model_.learner_model_param->IsVectorLeaf()) {
model_.CommitModel(std::move(new_trees[0]), 0);
} else {
for (std::uint32_t gid = 0; gid < model_.learner_model_param->OutputLength(); ++gid) {
model_.CommitModel(std::move(new_trees[gid]), gid);
}
}
model_.CommitModel(std::forward<TreesOneIter>(new_trees));
monitor_.Stop("CommitModel");
}
@@ -519,28 +515,32 @@ void GBTree::SaveModel(Json* p_out) const {
model_.SaveModel(&model);
}
void GBTree::Slice(int32_t layer_begin, int32_t layer_end, int32_t step,
GradientBooster *out, bool* out_of_bound) const {
void GBTree::Slice(bst_layer_t begin, bst_layer_t end, bst_layer_t step, GradientBooster* out,
bool* out_of_bound) const {
CHECK(configured_);
CHECK(out);
auto p_gbtree = dynamic_cast<GBTree *>(out);
auto p_gbtree = dynamic_cast<GBTree*>(out);
CHECK(p_gbtree);
GBTreeModel &out_model = p_gbtree->model_;
auto layer_trees = this->LayerTrees();
CHECK_NE(this->model_.learner_model_param->num_feature, 0);
CHECK_NE(layer_trees, 0);
GBTreeModel& out_model = p_gbtree->model_;
CHECK(this->model_.learner_model_param->Initialized());
layer_end = layer_end == 0 ? model_.trees.size() / layer_trees : layer_end;
CHECK_GT(layer_end, layer_begin);
end = end == 0 ? model_.BoostedRounds() : end;
CHECK_GE(step, 1);
int32_t n_layers = (layer_end - layer_begin) / step;
std::vector<std::unique_ptr<RegTree>> &out_trees = out_model.trees;
out_trees.resize(layer_trees * n_layers);
std::vector<int32_t> &out_trees_info = out_model.tree_info;
out_trees_info.resize(layer_trees * n_layers);
out_model.param.num_trees = out_model.trees.size();
out_model.param.num_parallel_tree = model_.param.num_parallel_tree;
CHECK_NE(end, begin) << "Empty slice is not allowed.";
if (step > (end - begin)) {
*out_of_bound = true;
return;
}
auto& out_indptr = out_model.iteration_indptr;
TreesOneGroup& out_trees = out_model.trees;
std::vector<int32_t>& out_trees_info = out_model.tree_info;
bst_layer_t n_layers = (end - begin) / step;
out_indptr.resize(n_layers + 1, 0);
if (!this->model_.trees_to_update.empty()) {
CHECK_EQ(this->model_.trees_to_update.size(), this->model_.trees.size())
<< "Not all trees are updated, "
@@ -549,26 +549,31 @@ void GBTree::Slice(int32_t layer_begin, int32_t layer_end, int32_t step,
"want to update a portion of trees.";
}
*out_of_bound = detail::SliceTrees(layer_begin, layer_end, step, this->model_, layer_trees,
[&](auto const& in_it, auto const& out_it) {
auto new_tree =
std::make_unique<RegTree>(*this->model_.trees.at(in_it));
bst_group_t group = this->model_.tree_info[in_it];
out_trees.at(out_it) = std::move(new_tree);
out_trees_info.at(out_it) = group;
});
*out_of_bound =
detail::SliceTrees(begin, end, step, this->model_, [&](auto in_tree_idx, auto out_l) {
auto new_tree = std::make_unique<RegTree>(*this->model_.trees.at(in_tree_idx));
out_trees.emplace_back(std::move(new_tree));
bst_group_t group = this->model_.tree_info[in_tree_idx];
out_trees_info.push_back(group);
out_model.iteration_indptr[out_l + 1]++;
});
std::partial_sum(out_indptr.cbegin(), out_indptr.cend(), out_indptr.begin());
CHECK_EQ(out_model.iteration_indptr.front(), 0);
out_model.param.num_trees = out_model.trees.size();
out_model.param.num_parallel_tree = model_.param.num_parallel_tree;
}
void GBTree::PredictBatch(DMatrix* p_fmat,
PredictionCacheEntry* out_preds,
bool,
unsigned layer_begin,
unsigned layer_end) {
void GBTree::PredictBatch(DMatrix* p_fmat, PredictionCacheEntry* out_preds, bool,
bst_layer_t layer_begin, bst_layer_t layer_end) {
CHECK(configured_);
if (layer_end == 0) {
layer_end = this->BoostedRounds();
}
if (layer_begin != 0 || layer_end < out_preds->version) {
if (layer_begin != 0 || layer_end < static_cast<bst_layer_t>(out_preds->version)) {
// cache is dropped.
out_preds->version = 0;
}
@@ -590,8 +595,7 @@ void GBTree::PredictBatch(DMatrix* p_fmat,
predictor->InitOutPredictions(p_fmat->Info(), &out_preds->predictions, model_);
}
std::uint32_t tree_begin, tree_end;
std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
auto [tree_begin, tree_end] = detail::LayerToTree(model_, layer_begin, layer_end);
CHECK_LE(tree_end, model_.trees.size()) << "Invalid number of trees.";
if (tree_end > tree_begin) {
predictor->PredictBatch(p_fmat, out_preds, model_, tree_begin, tree_end);
@@ -729,10 +733,9 @@ class Dart : public GBTree {
auto p_dart = dynamic_cast<Dart*>(out);
CHECK(p_dart);
CHECK(p_dart->weight_drop_.empty());
detail::SliceTrees(layer_begin, layer_end, step, model_, this->LayerTrees(),
[&](auto const& in_it, auto const&) {
p_dart->weight_drop_.push_back(this->weight_drop_.at(in_it));
});
detail::SliceTrees(layer_begin, layer_end, step, model_, [&](auto const& in_it, auto const&) {
p_dart->weight_drop_.push_back(this->weight_drop_.at(in_it));
});
}
void SaveModel(Json *p_out) const override {
@@ -798,8 +801,7 @@ class Dart : public GBTree {
predictor->InitOutPredictions(p_fmat->Info(), &p_out_preds->predictions,
model_);
p_out_preds->version = 0;
uint32_t tree_begin, tree_end;
std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
auto [tree_begin, tree_end] = detail::LayerToTree(model_, layer_begin, layer_end);
auto n_groups = model_.learner_model_param->num_output_group;
PredictionCacheEntry predts; // temporary storage for prediction
@@ -807,14 +809,18 @@ class Dart : public GBTree {
predts.predictions.SetDevice(ctx_->gpu_id);
}
predts.predictions.Resize(p_fmat->Info().num_row_ * n_groups, 0);
// multi-target is not yet supported.
auto layer_trees = [&]() {
return model_.param.num_parallel_tree * model_.learner_model_param->OutputLength();
};
for (size_t i = tree_begin; i < tree_end; i += 1) {
for (bst_tree_t i = tree_begin; i < tree_end; i += 1) {
if (training && std::binary_search(idx_drop_.cbegin(), idx_drop_.cend(), i)) {
continue;
}
CHECK_GE(i, p_out_preds->version);
auto version = i / this->LayerTrees();
auto version = i / layer_trees();
p_out_preds->version = version;
predts.predictions.Fill(0);
predictor->PredictBatch(p_fmat, &predts, model_, i, i + 1);
@@ -841,21 +847,17 @@ class Dart : public GBTree {
}
}
void PredictBatch(DMatrix* p_fmat,
PredictionCacheEntry* p_out_preds,
bool training,
unsigned layer_begin,
unsigned layer_end) override {
void PredictBatch(DMatrix* p_fmat, PredictionCacheEntry* p_out_preds, bool training,
bst_layer_t layer_begin, bst_layer_t layer_end) override {
DropTrees(training);
this->PredictBatchImpl(p_fmat, p_out_preds, training, layer_begin, layer_end);
}
void InplacePredict(std::shared_ptr<DMatrix> p_fmat, float missing,
PredictionCacheEntry* p_out_preds, uint32_t layer_begin,
unsigned layer_end) const override {
PredictionCacheEntry* p_out_preds, bst_layer_t layer_begin,
bst_layer_t layer_end) const override {
CHECK(!this->model_.learner_model_param->IsVectorLeaf()) << "dart" << MTNotImplemented();
uint32_t tree_begin, tree_end;
std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
auto [tree_begin, tree_end] = detail::LayerToTree(model_, layer_begin, layer_end);
auto n_groups = model_.learner_model_param->num_output_group;
std::vector<Predictor const*> predictors {
@@ -897,7 +899,7 @@ class Dart : public GBTree {
};
// Inplace predict is not used for training, so no need to drop tree.
for (size_t i = tree_begin; i < tree_end; ++i) {
for (bst_tree_t i = tree_begin; i < tree_end; ++i) {
predict_impl(i);
if (i == tree_begin) {
predictor->InitOutPredictions(p_fmat->Info(), &p_out_preds->predictions, model_);
@@ -941,31 +943,25 @@ class Dart : public GBTree {
unsigned layer_begin, unsigned layer_end, bool approximate, int,
unsigned) override {
CHECK(configured_);
uint32_t tree_begin, tree_end;
std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
cpu_predictor_->PredictContribution(p_fmat, out_contribs, model_,
tree_end, &weight_drop_, approximate);
auto [tree_begin, tree_end] = detail::LayerToTree(model_, layer_begin, layer_end);
cpu_predictor_->PredictContribution(p_fmat, out_contribs, model_, tree_end, &weight_drop_,
approximate);
}
void PredictInteractionContributions(
DMatrix *p_fmat, HostDeviceVector<bst_float> *out_contribs,
unsigned layer_begin, unsigned layer_end, bool approximate) override {
CHECK(configured_);
uint32_t tree_begin, tree_end;
std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
auto [tree_begin, tree_end] = detail::LayerToTree(model_, layer_begin, layer_end);
cpu_predictor_->PredictInteractionContributions(p_fmat, out_contribs, model_, tree_end,
&weight_drop_, approximate);
}
protected:
// commit new trees all at once
void CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees) override {
int num_new_trees = 0;
for (uint32_t gid = 0; gid < model_.learner_model_param->num_output_group; ++gid) {
num_new_trees += new_trees[gid].size();
model_.CommitModel(std::move(new_trees[gid]), gid);
}
size_t num_drop = NormalizeTrees(num_new_trees);
void CommitModel(TreesOneIter&& new_trees) override {
auto n_new_trees = model_.CommitModel(std::forward<TreesOneIter>(new_trees));
size_t num_drop = NormalizeTrees(n_new_trees);
LOG(INFO) << "drop " << num_drop << " trees, "
<< "weight = " << weight_drop_.back();
}

View File

@@ -139,23 +139,13 @@ struct DartTrainParam : public XGBoostParameter<DartTrainParam> {
namespace detail {
// From here on, layer becomes concrete trees.
inline std::pair<uint32_t, uint32_t> LayerToTree(gbm::GBTreeModel const& model,
std::uint32_t layer_begin,
std::uint32_t layer_end) {
std::uint32_t tree_begin;
std::uint32_t tree_end;
if (model.learner_model_param->IsVectorLeaf()) {
tree_begin = layer_begin * model.param.num_parallel_tree;
tree_end = layer_end * model.param.num_parallel_tree;
} else {
bst_group_t groups = model.learner_model_param->OutputLength();
tree_begin = layer_begin * groups * model.param.num_parallel_tree;
tree_end = layer_end * groups * model.param.num_parallel_tree;
}
if (tree_end == 0) {
tree_end = model.trees.size();
}
inline std::pair<bst_tree_t, bst_tree_t> LayerToTree(gbm::GBTreeModel const& model,
bst_layer_t begin, bst_layer_t end) {
CHECK(!model.iteration_indptr.empty());
end = end == 0 ? model.BoostedRounds() : end;
CHECK_LE(end, model.BoostedRounds()) << "Out of range for tree layers.";
bst_tree_t tree_begin = model.iteration_indptr[begin];
bst_tree_t tree_end = model.iteration_indptr[end];
if (model.trees.size() != 0) {
CHECK_LE(tree_begin, tree_end);
}
@@ -164,27 +154,33 @@ inline std::pair<uint32_t, uint32_t> LayerToTree(gbm::GBTreeModel const& model,
// Call fn for each pair of input output tree. Return true if index is out of bound.
template <typename Func>
bool SliceTrees(int32_t layer_begin, int32_t layer_end, int32_t step, GBTreeModel const& model,
uint32_t layer_trees, Func fn) {
uint32_t tree_begin, tree_end;
std::tie(tree_begin, tree_end) = detail::LayerToTree(model, layer_begin, layer_end);
if (tree_end > model.trees.size()) {
bool SliceTrees(bst_layer_t begin, bst_layer_t end, bst_layer_t step, GBTreeModel const& model,
Func&& fn) {
end = end == 0 ? model.iteration_indptr.size() : end;
CHECK_GE(step, 1);
if (step > end - begin) {
return true;
}
if (end > model.BoostedRounds()) {
return true;
}
layer_end = layer_end == 0 ? model.trees.size() / layer_trees : layer_end;
uint32_t n_layers = (layer_end - layer_begin) / step;
int32_t in_it = tree_begin;
int32_t out_it = 0;
for (uint32_t l = 0; l < n_layers; ++l) {
for (uint32_t i = 0; i < layer_trees; ++i) {
CHECK_LT(in_it, tree_end);
fn(in_it, out_it);
out_it++;
in_it++;
bst_layer_t n_layers = (end - begin) / step;
bst_layer_t out_l = 0;
for (bst_layer_t l = begin; l < end; l += step) {
auto [tree_begin, tree_end] = detail::LayerToTree(model, l, l + 1);
if (tree_end > static_cast<bst_tree_t>(model.trees.size())) {
return true;
}
in_it += (step - 1) * layer_trees;
for (bst_tree_t tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
fn(tree_idx, out_l);
}
++out_l;
}
CHECK_EQ(out_l, n_layers);
return false;
}
} // namespace detail
@@ -241,37 +237,22 @@ class GBTree : public GradientBooster {
void SaveModel(Json* p_out) const override;
void LoadModel(Json const& in) override;
// Number of trees per layer.
[[nodiscard]] std::uint32_t LayerTrees() const {
if (model_.learner_model_param->IsVectorLeaf()) {
return model_.param.num_parallel_tree;
}
return model_.param.num_parallel_tree * model_.learner_model_param->OutputLength();
}
// slice the trees, out must be already allocated
void Slice(int32_t layer_begin, int32_t layer_end, int32_t step,
GradientBooster *out, bool* out_of_bound) const override;
[[nodiscard]] std::int32_t BoostedRounds() const override {
CHECK_NE(model_.param.num_parallel_tree, 0);
CHECK_NE(model_.learner_model_param->num_output_group, 0);
return model_.trees.size() / this->LayerTrees();
}
void Slice(bst_layer_t begin, bst_layer_t end, bst_layer_t step, GradientBooster* out,
bool* out_of_bound) const override;
[[nodiscard]] std::int32_t BoostedRounds() const override { return this->model_.BoostedRounds(); }
[[nodiscard]] bool ModelFitted() const override {
return !model_.trees.empty() || !model_.trees_to_update.empty();
}
void PredictBatch(DMatrix *p_fmat, PredictionCacheEntry *out_preds,
bool training, unsigned layer_begin, unsigned layer_end) override;
void PredictBatch(DMatrix* p_fmat, PredictionCacheEntry* out_preds, bool training,
bst_layer_t layer_begin, bst_layer_t layer_end) override;
void InplacePredict(std::shared_ptr<DMatrix> p_m, float missing, PredictionCacheEntry* out_preds,
uint32_t layer_begin, unsigned layer_end) const override {
bst_layer_t layer_begin, bst_layer_t layer_end) const override {
CHECK(configured_);
uint32_t tree_begin, tree_end;
std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
auto [tree_begin, tree_end] = detail::LayerToTree(model_, layer_begin, layer_end);
CHECK_LE(tree_end, model_.trees.size()) << "Invalid number of trees.";
std::vector<Predictor const *> predictors{
cpu_predictor_.get(),
@@ -364,20 +345,18 @@ class GBTree : public GradientBooster {
}
}
void PredictInstance(const SparsePage::Inst& inst,
std::vector<bst_float>* out_preds,
void PredictInstance(const SparsePage::Inst& inst, std::vector<bst_float>* out_preds,
uint32_t layer_begin, uint32_t layer_end) override {
CHECK(configured_);
uint32_t tree_begin, tree_end;
std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
std::uint32_t _, tree_end;
std::tie(_, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
cpu_predictor_->PredictInstance(inst, out_preds, model_, tree_end);
}
void PredictLeaf(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_preds,
uint32_t layer_begin, uint32_t layer_end) override {
uint32_t tree_begin, tree_end;
std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
auto [tree_begin, tree_end] = detail::LayerToTree(model_, layer_begin, layer_end);
CHECK_EQ(tree_begin, 0) << "Predict leaf supports only iteration end: (0, "
"n_iteration), use model slicing instead.";
this->GetPredictor()->PredictLeaf(p_fmat, out_preds, model_, tree_end);
@@ -388,8 +367,7 @@ class GBTree : public GradientBooster {
uint32_t layer_begin, uint32_t layer_end, bool approximate,
int, unsigned) override {
CHECK(configured_);
uint32_t tree_begin, tree_end;
std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
auto [tree_begin, tree_end] = detail::LayerToTree(model_, layer_begin, layer_end);
CHECK_EQ(tree_begin, 0)
<< "Predict contribution supports only iteration end: (0, "
"n_iteration), using model slicing instead.";
@@ -401,8 +379,7 @@ class GBTree : public GradientBooster {
DMatrix *p_fmat, HostDeviceVector<bst_float> *out_contribs,
uint32_t layer_begin, uint32_t layer_end, bool approximate) override {
CHECK(configured_);
uint32_t tree_begin, tree_end;
std::tie(tree_begin, tree_end) = detail::LayerToTree(model_, layer_begin, layer_end);
auto [tree_begin, tree_end] = detail::LayerToTree(model_, layer_begin, layer_end);
CHECK_EQ(tree_begin, 0)
<< "Predict interaction contribution supports only iteration end: (0, "
"n_iteration), using model slicing instead.";
@@ -427,7 +404,7 @@ class GBTree : public GradientBooster {
DMatrix* f_dmat = nullptr) const;
// commit new trees all at once
virtual void CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees);
virtual void CommitModel(TreesOneIter&& new_trees);
// --- data structure ---
GBTreeModel model_;

View File

@@ -1,15 +1,55 @@
/*!
* Copyright 2019-2022 by Contributors
/**
* Copyright 2019-2023, XGBoost Contributors
*/
#include <utility>
#include "xgboost/json.h"
#include "xgboost/logging.h"
#include "gbtree_model.h"
#include "gbtree.h"
namespace xgboost {
namespace gbm {
#include <algorithm> // for transform, max_element
#include <cstddef> // for size_t
#include <numeric> // for partial_sum
#include <ostream> // for operator<<, basic_ostream
#include <utility> // for move, pair
#include "../common/threading_utils.h" // for ParallelFor
#include "dmlc/base.h" // for BeginPtr
#include "dmlc/io.h" // for Stream
#include "xgboost/context.h" // for Context
#include "xgboost/json.h" // for Json, get, Integer, Array, FromJson, ToJson, Json...
#include "xgboost/learner.h" // for LearnerModelParam
#include "xgboost/logging.h" // for LogCheck_EQ, CHECK_EQ, CHECK
#include "xgboost/tree_model.h" // for RegTree
namespace xgboost::gbm {
namespace {
// For creating the tree indptr from old models.
void MakeIndptr(GBTreeModel* out_model) {
  auto const& tree_info = out_model->tree_info;
  if (tree_info.empty()) {
    return;
  }

  // Old (pre-indptr) models always commit the same number of trees per
  // boosting round: one forest per output group.
  auto n_groups = *std::max_element(tree_info.cbegin(), tree_info.cend()) + 1;
  auto layer_trees = out_model->param.num_parallel_tree * n_groups;
  CHECK_NE(layer_trees, 0);

  auto& indptr = out_model->iteration_indptr;
  indptr.clear();
  // One entry per boosting round, plus the leading 0.
  indptr.resize(out_model->param.num_trees / layer_trees + 1, 0);
  // Every round contributes `layer_trees` trees; turn the per-round counts
  // into cumulative offsets.
  std::fill(indptr.begin() + 1, indptr.end(), layer_trees);
  std::partial_sum(indptr.cbegin(), indptr.cend(), indptr.begin());
}
// Validate the consistency of the model.
void Validate(GBTreeModel const& model) {
  CHECK_EQ(model.trees.size(), model.param.num_trees);
  CHECK_EQ(model.tree_info.size(), model.param.num_trees);
  // Guard before calling back(): calling back() on an empty vector is UB.
  // The indptr must always carry at least the leading 0, even when the model
  // itself is empty.
  CHECK(!model.iteration_indptr.empty());
  // True even if the model is empty since we should always have 0 as the first element.
  CHECK_EQ(model.iteration_indptr.back(), model.param.num_trees);
}
} // namespace
void GBTreeModel::Save(dmlc::Stream* fo) const {
CHECK_EQ(param.num_trees, static_cast<int32_t>(trees.size()));
@@ -61,6 +101,9 @@ void GBTreeModel::Load(dmlc::Stream* fi) {
}
}
}
MakeIndptr(this);
Validate(*this);
}
void GBTreeModel::SaveModel(Json* p_out) const {
@@ -72,10 +115,10 @@ void GBTreeModel::SaveModel(Json* p_out) const {
CHECK(ctx_);
common::ParallelFor(trees.size(), ctx_->Threads(), [&](auto t) {
auto const& tree = trees[t];
Json tree_json{Object()};
tree->SaveModel(&tree_json);
tree_json["id"] = Integer{static_cast<Integer::Int>(t)};
trees_json[t] = std::move(tree_json);
Json jtree{Object{}};
tree->SaveModel(&jtree);
jtree["id"] = Integer{static_cast<Integer::Int>(t)};
trees_json[t] = std::move(jtree);
});
std::vector<Json> tree_info_json(tree_info.size());
@@ -85,6 +128,11 @@ void GBTreeModel::SaveModel(Json* p_out) const {
out["trees"] = Array(std::move(trees_json));
out["tree_info"] = Array(std::move(tree_info_json));
std::vector<Json> jiteration_indptr(iteration_indptr.size());
std::transform(iteration_indptr.cbegin(), iteration_indptr.cend(), jiteration_indptr.begin(),
[](bst_tree_t i) { return Integer{i}; });
out["iteration_indptr"] = Array{std::move(jiteration_indptr)};
}
void GBTreeModel::LoadModel(Json const& in) {
@@ -93,22 +141,59 @@ void GBTreeModel::LoadModel(Json const& in) {
trees.clear();
trees_to_update.clear();
auto const& jmodel = get<Object const>(in);
auto const& trees_json = get<Array const>(in["trees"]);
trees.resize(trees_json.size());
CHECK_EQ(trees_json.size(), param.num_trees);
trees.resize(param.num_trees);
auto const& tree_info_json = get<Array const>(in["tree_info"]);
CHECK_EQ(tree_info_json.size(), param.num_trees);
tree_info.resize(param.num_trees);
CHECK(ctx_);
common::ParallelFor(trees_json.size(), ctx_->Threads(), [&](auto t) {
auto tree_id = get<Integer>(trees_json[t]["id"]);
trees.at(tree_id).reset(new RegTree());
trees.at(tree_id)->LoadModel(trees_json[t]);
common::ParallelFor(param.num_trees, ctx_->Threads(), [&](auto t) {
auto tree_id = get<Integer const>(trees_json[t]["id"]);
trees.at(tree_id).reset(new RegTree{});
trees[tree_id]->LoadModel(trees_json[t]);
});
tree_info.resize(param.num_trees);
auto const& tree_info_json = get<Array const>(in["tree_info"]);
for (int32_t i = 0; i < param.num_trees; ++i) {
for (bst_tree_t i = 0; i < param.num_trees; ++i) {
tree_info[i] = get<Integer const>(tree_info_json[i]);
}
auto indptr_it = jmodel.find("iteration_indptr");
iteration_indptr.clear();
if (indptr_it != jmodel.cend()) {
auto const& vec = get<Array const>(indptr_it->second);
iteration_indptr.resize(vec.size());
std::transform(vec.cbegin(), vec.cend(), iteration_indptr.begin(),
[](Json const& v) { return get<Integer const>(v); });
} else {
MakeIndptr(this);
}
Validate(*this);
}
} // namespace gbm
} // namespace xgboost
// Commit the trees produced by one boosting iteration and extend the
// per-iteration index pointer. Returns the number of trees added.
bst_tree_t GBTreeModel::CommitModel(TreesOneIter&& new_trees) {
  CHECK(!iteration_indptr.empty());
  CHECK_EQ(iteration_indptr.back(), param.num_trees);

  bst_tree_t added{0};
  if (learner_model_param->IsVectorLeaf()) {
    // A single multi-target group holds all trees for this iteration.
    added += new_trees.front().size();
    this->CommitModelGroup(std::move(new_trees.front()), 0);
  } else {
    // One group of trees per output target; commit them in group order.
    bst_target_t const n_groups = learner_model_param->OutputLength();
    for (bst_target_t g{0}; g < n_groups; ++g) {
      added += new_trees[g].size();
      this->CommitModelGroup(std::move(new_trees[g]), g);
    }
  }

  iteration_indptr.push_back(iteration_indptr.back() + added);
  Validate(*this);
  return added;
}
} // namespace xgboost::gbm

View File

@@ -1,5 +1,5 @@
/*!
* Copyright 2017-2020 by Contributors
/**
* Copyright 2017-2023, XGBoost Contributors
* \file gbtree_model.h
*/
#ifndef XGBOOST_GBM_GBTREE_MODEL_H_
@@ -25,26 +25,28 @@ namespace xgboost {
class Json;
namespace gbm {
/**
* \brief Container for all trees built (not update) for one group.
*/
using TreesOneGroup = std::vector<std::unique_ptr<RegTree>>;
/**
* \brief Container for all trees built (not update) for one iteration.
*/
using TreesOneIter = std::vector<TreesOneGroup>;
/*! \brief model parameters */
struct GBTreeModelParam : public dmlc::Parameter<GBTreeModelParam> {
public:
/*! \brief number of trees */
int32_t num_trees;
/*! \brief (Deprecated) number of roots */
int32_t num_parallel_tree;
/*! \brief number of features to be used by trees */
int32_t deprecated_num_feature;
/*! \brief pad this space, for backward compatibility reason.*/
int32_t pad_32bit;
/*! \brief deprecated padding space. */
int64_t deprecated_num_pbuffer;
// deprecated. use learner_model_param_->num_output_group.
int32_t deprecated_num_output_group;
/*! \brief size of leaf vector needed in tree */
int32_t size_leaf_vector;
/**
* \brief number of trees
*/
std::int32_t num_trees;
/**
* \brief Number of trees for a forest.
*/
std::int32_t num_parallel_tree;
/*! \brief reserved parameters */
int32_t reserved[32];
int32_t reserved[38];
/*! \brief constructor */
GBTreeModelParam() {
@@ -66,23 +68,14 @@ struct GBTreeModelParam : public dmlc::Parameter<GBTreeModelParam> {
.describe(
"Number of parallel trees constructed during each iteration."
" This option is used to support boosted random forest.");
DMLC_DECLARE_FIELD(size_leaf_vector)
.set_lower_bound(0)
.set_default(0)
.describe("Reserved option for vector tree.");
}
// Swap byte order for all fields. Useful for transporting models between machines with different
// endianness (big endian vs little endian)
inline GBTreeModelParam ByteSwap() const {
GBTreeModelParam ByteSwap() const {
GBTreeModelParam x = *this;
dmlc::ByteSwap(&x.num_trees, sizeof(x.num_trees), 1);
dmlc::ByteSwap(&x.num_parallel_tree, sizeof(x.num_parallel_tree), 1);
dmlc::ByteSwap(&x.deprecated_num_feature, sizeof(x.deprecated_num_feature), 1);
dmlc::ByteSwap(&x.pad_32bit, sizeof(x.pad_32bit), 1);
dmlc::ByteSwap(&x.deprecated_num_pbuffer, sizeof(x.deprecated_num_pbuffer), 1);
dmlc::ByteSwap(&x.deprecated_num_output_group, sizeof(x.deprecated_num_output_group), 1);
dmlc::ByteSwap(&x.size_leaf_vector, sizeof(x.size_leaf_vector), 1);
dmlc::ByteSwap(x.reserved, sizeof(x.reserved[0]), sizeof(x.reserved) / sizeof(x.reserved[0]));
return x;
}
@@ -107,6 +100,9 @@ struct GBTreeModel : public Model {
trees.clear();
param.num_trees = 0;
tree_info.clear();
iteration_indptr.clear();
iteration_indptr.push_back(0);
}
}
@@ -116,22 +112,35 @@ struct GBTreeModel : public Model {
void SaveModel(Json* p_out) const override;
void LoadModel(Json const& p_out) override;
std::vector<std::string> DumpModel(const FeatureMap& fmap, bool with_stats, int32_t n_threads,
std::string format) const {
// Dump every tree as text in the requested format, one string per tree.
[[nodiscard]] std::vector<std::string> DumpModel(const FeatureMap& fmap, bool with_stats,
                                                 int32_t n_threads, std::string format) const {
  std::vector<std::string> out(trees.size());
  // Each tree dump is independent, so run them in parallel.
  common::ParallelFor(trees.size(), n_threads, [&](auto tidx) {
    out[tidx] = trees[tidx]->DumpModel(fmap, with_stats, format);
  });
  return out;
}
void CommitModel(std::vector<std::unique_ptr<RegTree> >&& new_trees,
int bst_group) {
for (auto & new_tree : new_trees) {
/**
* \brief Add trees to the model.
*
* \return The number of new trees.
*/
bst_tree_t CommitModel(TreesOneIter&& new_trees);
// Append one group's trees to the model, tagging each with its group index.
void CommitModelGroup(std::vector<std::unique_ptr<RegTree>>&& new_trees, bst_target_t group_idx) {
  // Moving elements out does not change the vector's size, but record the
  // count up front for clarity.
  auto const n_added = new_trees.size();
  for (auto& tree : new_trees) {
    trees.push_back(std::move(tree));
    tree_info.push_back(group_idx);
  }
  param.num_trees += static_cast<int>(n_added);
}
// Number of completed boosting rounds, derived from the iteration indptr.
[[nodiscard]] std::int32_t BoostedRounds() const {
  // An empty model must carry exactly the leading 0 in the indptr.
  if (trees.empty()) {
    CHECK_EQ(iteration_indptr.size(), 1);
  }
  auto const n_rounds = iteration_indptr.size() - 1;
  return static_cast<std::int32_t>(n_rounds);
}
// base margin
LearnerModelParam const* learner_model_param;
// model parameter
@@ -140,10 +149,19 @@ struct GBTreeModel : public Model {
std::vector<std::unique_ptr<RegTree> > trees;
/*! \brief for the update process, a place to keep the initial trees */
std::vector<std::unique_ptr<RegTree> > trees_to_update;
/*! \brief some information indicator of the tree, reserved */
/**
* \brief Group index for trees.
*/
std::vector<int> tree_info;
/**
* \brief Number of trees accumulated for each iteration.
*/
std::vector<bst_tree_t> iteration_indptr{0};
private:
/**
* \brief Whether the stack contains multi-target tree.
*/
Context const* ctx_;
};
} // namespace gbm

View File

@@ -45,7 +45,7 @@
#include "common/timer.h" // for Monitor
#include "common/version.h" // for Version
#include "dmlc/endian.h" // for ByteSwap, DMLC_IO_NO_ENDIAN_SWAP
#include "xgboost/base.h" // for Args, bst_float, GradientPair, bst_feature_t
#include "xgboost/base.h" // for Args, bst_float, GradientPair, bst_feature_t, ...
#include "xgboost/context.h" // for Context
#include "xgboost/data.h" // for DMatrix, MetaInfo
#include "xgboost/gbm.h" // for GradientBooster
@@ -1247,19 +1247,19 @@ class LearnerImpl : public LearnerIO {
return gbm_->DumpModel(fmap, with_stats, format);
}
Learner* Slice(int32_t begin_layer, int32_t end_layer, int32_t step,
Learner* Slice(bst_layer_t begin, bst_layer_t end, bst_layer_t step,
bool* out_of_bound) override {
this->Configure();
this->CheckModelInitialized();
CHECK_NE(this->learner_model_param_.num_feature, 0);
CHECK_GE(begin_layer, 0);
CHECK_GE(begin, 0);
auto* out_impl = new LearnerImpl({});
out_impl->learner_model_param_.Copy(this->learner_model_param_);
out_impl->ctx_ = this->ctx_;
auto gbm = std::unique_ptr<GradientBooster>(GradientBooster::Create(
this->tparam_.booster, &out_impl->ctx_, &out_impl->learner_model_param_));
this->gbm_->Slice(begin_layer, end_layer, step, gbm.get(), out_of_bound);
this->gbm_->Slice(begin, end, step, gbm.get(), out_of_bound);
out_impl->gbm_ = std::move(gbm);
Json config{Object()};

View File

@@ -287,7 +287,6 @@ void PredictBatchByBlockOfRowsKernel(DataView batch, gbm::GBTreeModel const &mod
linalg::TensorView<float, 2> out_predt) {
auto &thread_temp = *p_thread_temp;
CHECK_EQ(model.param.size_leaf_vector, 0) << "size_leaf_vector is enforced to 0 so far";
// parallel over local batch
const auto nsize = static_cast<bst_omp_uint>(batch.Size());
const int num_feature = model.learner_model_param->num_feature;
@@ -515,7 +514,6 @@ class ColumnSplitHelper {
void PredictBatchKernel(DataView batch, std::vector<bst_float> *out_preds) {
auto const num_group = model_.learner_model_param->num_output_group;
CHECK_EQ(model_.param.size_leaf_vector, 0) << "size_leaf_vector is enforced to 0 so far";
// parallel over local batch
auto const nsize = batch.Size();
auto const num_feature = model_.learner_model_param->num_feature;
@@ -736,8 +734,7 @@ class CPUPredictor : public Predictor {
if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
ntree_limit = static_cast<unsigned>(model.trees.size());
}
out_preds->resize(model.learner_model_param->num_output_group *
(model.param.size_leaf_vector + 1));
out_preds->resize(model.learner_model_param->num_output_group);
auto base_score = model.learner_model_param->BaseScore(ctx_)(0);
// loop over output groups
for (uint32_t gid = 0; gid < model.learner_model_param->num_output_group; ++gid) {

View File

@@ -342,7 +342,6 @@ class DeviceModel {
void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) {
dh::safe_cuda(cudaSetDevice(gpu_id));
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id));
auto& h_tree_segments = tree_segments.HostVector();

View File

@@ -677,9 +677,6 @@ template <typename Partitioner>
void UpdatePredictionCacheImpl(Context const *ctx, RegTree const *p_last_tree,
std::vector<Partitioner> const &partitioner,
linalg::VectorView<float> out_preds) {
CHECK_GT(out_preds.Size(), 0U);
CHECK(p_last_tree);
auto const &tree = *p_last_tree;
CHECK_EQ(out_preds.DeviceIdx(), Context::kCpuId);
size_t n_nodes = p_last_tree->GetNodes().size();
@@ -687,7 +684,7 @@ void UpdatePredictionCacheImpl(Context const *ctx, RegTree const *p_last_tree,
CHECK_EQ(part.Size(), n_nodes);
common::BlockedSpace2d space(
part.Size(), [&](size_t node) { return part[node].Size(); }, 1024);
common::ParallelFor2d(space, ctx->Threads(), [&](size_t nidx, common::Range1d r) {
common::ParallelFor2d(space, ctx->Threads(), [&](bst_node_t nidx, common::Range1d r) {
if (!tree[nidx].IsDeleted() && tree[nidx].IsLeaf()) {
auto const &rowset = part[nidx];
auto leaf_value = tree[nidx].LeafValue();
@@ -698,5 +695,42 @@ void UpdatePredictionCacheImpl(Context const *ctx, RegTree const *p_last_tree,
});
}
}
/**
 * \brief Update cached predictions in-place from the leaf values of the last trained tree,
 *        matrix (multi-target) variant.
 *
 * For a single-target tree this forwards to the vector overload using the first
 * column of `out_preds`. For a multi-target tree, each row that a partitioner
 * assigns to a leaf has that leaf's per-target values added to its prediction row.
 *
 * \param ctx         Context providing the CPU thread count.
 * \param p_last_tree The tree built in the most recent update; must be non-null.
 * \param partitioner Per-page row partitioners mapping tree nodes to row sets.
 * \param out_preds   (n_rows, n_targets) prediction matrix, updated in-place; must be on CPU.
 */
template <typename Partitioner>
void UpdatePredictionCacheImpl(Context const *ctx, RegTree const *p_last_tree,
                               std::vector<Partitioner> const &partitioner,
                               linalg::MatrixView<float> out_preds) {
  CHECK_GT(out_preds.Size(), 0U);
  CHECK(p_last_tree);

  auto const &tree = *p_last_tree;
  if (!tree.IsMultiTarget()) {
    // Single-target tree: delegate to the vector overload on column 0.
    UpdatePredictionCacheImpl(ctx, p_last_tree, partitioner, out_preds.Slice(linalg::All(), 0));
    return;
  }

  auto const *mttree = tree.GetMultiTargetTree();
  auto n_nodes = mttree->Size();
  auto n_targets = tree.NumTargets();
  CHECK_EQ(out_preds.Shape(1), n_targets);
  CHECK_EQ(out_preds.DeviceIdx(), Context::kCpuId);

  // One partitioner per data page; each must cover every node of the tree.
  for (auto &part : partitioner) {
    CHECK_EQ(part.Size(), n_nodes);
    // 2-D space: outer dim is tree nodes, inner dim is the rows of each node's
    // row set, processed in chunks of 1024.
    common::BlockedSpace2d space(
        part.Size(), [&](size_t node) { return part[node].Size(); }, 1024);
    common::ParallelFor2d(space, ctx->Threads(), [&](bst_node_t nidx, common::Range1d r) {
      // Only leaves carry values; interior nodes contribute nothing.
      if (tree.IsLeaf(nidx)) {
        auto const &rowset = part[nidx];
        auto leaf_value = mttree->LeafValue(nidx);
        // Add the leaf's value for each target to every row in this chunk.
        for (std::size_t const *it = rowset.begin + r.begin(); it < rowset.begin + r.end(); ++it) {
          for (std::size_t i = 0; i < n_targets; ++i) {
            out_preds(*it, i) += leaf_value(i);
          }
        }
      }
    });
  }
}
} // namespace xgboost::tree
#endif // XGBOOST_TREE_HIST_EVALUATE_SPLITS_H_

View File

@@ -116,7 +116,7 @@ class GloablApproxBuilder {
return nodes.front();
}
void UpdatePredictionCache(DMatrix const *data, linalg::VectorView<float> out_preds) const {
void UpdatePredictionCache(DMatrix const *data, linalg::MatrixView<float> out_preds) const {
monitor_->Start(__func__);
// Caching prediction seems redundant for approx tree method, as sketching takes up
// majority of training time.
@@ -303,7 +303,7 @@ class GlobalApproxUpdater : public TreeUpdater {
}
}
bool UpdatePredictionCache(const DMatrix *data, linalg::VectorView<float> out_preds) override {
bool UpdatePredictionCache(const DMatrix *data, linalg::MatrixView<float> out_preds) override {
if (data != cached_ || !pimpl_) {
return false;
}

View File

@@ -517,7 +517,7 @@ struct GPUHistMakerDevice {
});
}
bool UpdatePredictionCache(linalg::VectorView<float> out_preds_d, RegTree const* p_tree) {
bool UpdatePredictionCache(linalg::MatrixView<float> out_preds_d, RegTree const* p_tree) {
if (positions.empty()) {
return false;
}
@@ -535,11 +535,12 @@ struct GPUHistMakerDevice {
h_nodes.size() * sizeof(RegTree::Node), cudaMemcpyHostToDevice,
ctx_->CUDACtx()->Stream()));
auto d_nodes = dh::ToSpan(nodes);
CHECK_EQ(out_preds_d.Shape(1), 1);
dh::LaunchN(d_position.size(), ctx_->CUDACtx()->Stream(),
[=] XGBOOST_DEVICE(std::size_t idx) mutable {
bst_node_t nidx = d_position[idx];
auto weight = d_nodes[nidx].LeafValue();
out_preds_d(idx) += weight;
out_preds_d(idx, 0) += weight;
});
return true;
}
@@ -858,7 +859,7 @@ class GPUHistMaker : public TreeUpdater {
}
bool UpdatePredictionCache(const DMatrix* data,
linalg::VectorView<bst_float> p_out_preds) override {
linalg::MatrixView<bst_float> p_out_preds) override {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}

View File

@@ -125,6 +125,7 @@ class MultiTargetHistBuilder {
std::vector<CommonRowPartitioner> partitioner_;
// Pointer to last updated tree, used for update prediction cache.
RegTree const *p_last_tree_{nullptr};
DMatrix const * p_last_fmat_{nullptr};
ObjInfo const *task_{nullptr};
@@ -147,6 +148,7 @@ class MultiTargetHistBuilder {
void InitData(DMatrix *p_fmat, RegTree const *p_tree) {
monitor_->Start(__func__);
p_last_fmat_ = p_fmat;
std::size_t page_id = 0;
bst_bin_t n_total_bins = 0;
partitioner_.clear();
@@ -312,6 +314,19 @@ class MultiTargetHistBuilder {
task_{task} {
monitor_->Init(__func__);
}
/**
 * \brief Refresh cached predictions from the last built multi-target tree.
 *
 * \param data      The DMatrix predictions are requested for; must be the one
 *                  used in the most recent Update() call for the cache to apply.
 * \param out_preds (n_rows, n_targets) prediction matrix updated in-place.
 *
 * \return true when the cache was updated, false when it is not applicable.
 */
bool UpdatePredictionCache(DMatrix const *data, linalg::MatrixView<float> out_preds) const {
  // p_last_fmat_ is only a valid pointer as long as this is called in
  // conjunction with Update(), and the cache only applies to that same matrix.
  bool const applicable = p_last_fmat_ != nullptr && p_last_tree_ != nullptr && data == p_last_fmat_;
  if (!applicable) {
    return false;
  }
  monitor_->Start(__func__);
  CHECK_EQ(out_preds.Size(), data->Info().num_row_ * p_last_tree_->NumTargets());
  UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, out_preds);
  monitor_->Stop(__func__);
  return true;
}
};
class HistBuilder {
@@ -347,7 +362,7 @@ class HistBuilder {
monitor_->Init(__func__);
}
bool UpdatePredictionCache(DMatrix const *data, linalg::VectorView<float> out_preds) const {
bool UpdatePredictionCache(DMatrix const *data, linalg::MatrixView<float> out_preds) const {
// p_last_fmat_ is a valid pointer as long as UpdatePredictionCache() is called in
// conjunction with Update().
if (!p_last_fmat_ || !p_last_tree_ || data != p_last_fmat_) {
@@ -582,12 +597,11 @@ class QuantileHistMaker : public TreeUpdater {
}
}
bool UpdatePredictionCache(const DMatrix *data, linalg::VectorView<float> out_preds) override {
bool UpdatePredictionCache(const DMatrix *data, linalg::MatrixView<float> out_preds) override {
if (p_impl_) {
return p_impl_->UpdatePredictionCache(data, out_preds);
} else if (p_mtimpl_) {
// Not yet supported.
return false;
return p_mtimpl_->UpdatePredictionCache(data, out_preds);
} else {
return false;
}