/*!
 * Copyright by Contributors 2017-2021
 */
#include <dmlc/any.h>
#include <dmlc/omp.h>

#include <cstddef>
#include <limits>
#include <mutex>

#include "xgboost/base.h"
#include "xgboost/data.h"
#include "xgboost/predictor.h"
#include "xgboost/tree_model.h"
#include "xgboost/tree_updater.h"
#include "xgboost/logging.h"
#include "xgboost/host_device_vector.h"

#include "predict_fn.h"
#include "../data/adapter.h"
#include "../common/math.h"
#include "../common/threading_utils.h"
#include "../common/categorical.h"
#include "../gbm/gbtree_model.h"

namespace xgboost {
namespace predictor {

DMLC_REGISTRY_FILE_TAG(cpu_predictor);

template <bool has_missing, bool has_categorical>
bst_node_t GetLeafIndex(RegTree const &tree, const RegTree::FVec &feat,
                        RegTree::CategoricalSplitMatrix const &cats) {
  bst_node_t nid = 0;
  while (!tree[nid].IsLeaf()) {
    unsigned split_index = tree[nid].SplitIndex();
    auto fvalue = feat.GetFvalue(split_index);
    nid = GetNextNode<has_missing, has_categorical>(
        tree[nid], nid, fvalue, has_missing && feat.IsMissing(split_index), cats);
  }
  return nid;
}

bst_float PredValue(const SparsePage::Inst &inst,
                    const std::vector<std::unique_ptr<RegTree>> &trees,
                    const std::vector<int> &tree_info, int bst_group,
                    RegTree::FVec *p_feats, unsigned tree_begin, unsigned tree_end) {
  bst_float psum = 0.0f;
  p_feats->Fill(inst);
  for (size_t i = tree_begin; i < tree_end; ++i) {
    if (tree_info[i] == bst_group) {
      auto const &tree = *trees[i];
      bool has_categorical = tree.HasCategoricalSplit();
      auto cats = tree.GetCategoriesMatrix();
      bst_node_t nidx = -1;
      if (has_categorical) {
        nidx = GetLeafIndex<true, true>(tree, *p_feats, cats);
      } else {
        nidx = GetLeafIndex<true, false>(tree, *p_feats, cats);
      }
      psum += (*trees[i])[nidx].LeafValue();
    }
  }
  p_feats->Drop(inst);
  return psum;
}

template <bool has_categorical>
bst_float PredValueByOneTree(const RegTree::FVec &p_feats, RegTree const &tree,
                             RegTree::CategoricalSplitMatrix const &cats) {
  const bst_node_t leaf = p_feats.HasMissing()
                              ? GetLeafIndex<true, has_categorical>(tree, p_feats, cats)
                              : GetLeafIndex<false, has_categorical>(tree, p_feats, cats);
  return tree[leaf].LeafValue();
}
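// Accumulate the leaf values of every tree in [tree_begin, tree_end) into the output slots of a
// block of rows.  Each tree contributes to the output group recorded in model.tree_info; the
// categorical/numerical dispatch is hoisted out of the per-row loop so the branch is resolved
// once per tree rather than once per prediction.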
void PredictByAllTrees(gbm::GBTreeModel const &model, const size_t tree_begin,
                       const size_t tree_end, std::vector<bst_float> *out_preds,
                       const size_t predict_offset, const size_t num_group,
                       const std::vector<RegTree::FVec> &thread_temp,
                       const size_t offset, const size_t block_size) {
  std::vector<bst_float> &preds = *out_preds;
  for (size_t tree_id = tree_begin; tree_id < tree_end; ++tree_id) {
    const size_t gid = model.tree_info[tree_id];
    auto const &tree = *model.trees[tree_id];
    auto const &cats = tree.GetCategoriesMatrix();
    auto has_categorical = tree.HasCategoricalSplit();

    if (has_categorical) {
      for (size_t i = 0; i < block_size; ++i) {
        preds[(predict_offset + i) * num_group + gid] +=
            PredValueByOneTree<true>(thread_temp[offset + i], tree, cats);
      }
    } else {
      for (size_t i = 0; i < block_size; ++i) {
        preds[(predict_offset + i) * num_group + gid] +=
            PredValueByOneTree<false>(thread_temp[offset + i], tree, cats);
      }
    }
  }
}

template <typename DataView>
void FVecFill(const size_t block_size, const size_t batch_offset, const int num_feature,
              DataView *batch, const size_t fvec_offset, std::vector<RegTree::FVec> *p_feats) {
  for (size_t i = 0; i < block_size; ++i) {
    RegTree::FVec &feats = (*p_feats)[fvec_offset + i];
    if (feats.Size() == 0) {
      feats.Init(num_feature);
    }
    const SparsePage::Inst inst = (*batch)[batch_offset + i];
    feats.Fill(inst);
  }
}

template <typename DataView>
void FVecDrop(const size_t block_size, const size_t batch_offset, DataView *batch,
              const size_t fvec_offset, std::vector<RegTree::FVec> *p_feats) {
  for (size_t i = 0; i < block_size; ++i) {
    RegTree::FVec &feats = (*p_feats)[fvec_offset + i];
    const SparsePage::Inst inst = (*batch)[batch_offset + i];
    feats.Drop(inst);
  }
}

template <size_t kUnrollLen = 8>
struct SparsePageView {
  bst_row_t base_rowid;
  HostSparsePageView view;
  static size_t constexpr kUnroll = kUnrollLen;

  explicit SparsePageView(SparsePage const *p) : base_rowid{p->base_rowid} {
    view = p->GetView();
  }
  SparsePage::Inst operator[](size_t i) { return view[i]; }
  size_t Size() const { return view.Size(); }
};

template <typename Adapter, size_t kUnrollLen = 8>
class AdapterView {
  Adapter *adapter_;
  float missing_;
  common::Span<Entry> workspace_;
  std::vector<size_t> current_unroll_;

 public:
  static size_t constexpr kUnroll = kUnrollLen;

 public:
  explicit AdapterView(Adapter *adapter, float missing, common::Span<Entry> workplace,
                       int32_t n_threads)
      : adapter_{adapter},
        missing_{missing},
        workspace_{workplace},
        current_unroll_(n_threads > 0 ? n_threads : 1, 0) {}
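
  // Materialise row `i` of the adapter batch into this thread's slice of the shared workspace,
  // skipping entries equal to `missing_` or NaN, and return a span over the valid entries.
  // Each thread owns `kUnroll` consecutive row-sized slots and cycles through them, so a returned
  // span stays valid until the same thread has produced `kUnroll` further rows.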
  SparsePage::Inst operator[](size_t i) {
    bst_feature_t columns = adapter_->NumColumns();
    auto const &batch = adapter_->Value();
    auto row = batch.GetLine(i);
    auto t = omp_get_thread_num();
    auto const beg = (columns * kUnroll * t) + (current_unroll_[t] * columns);
    size_t non_missing{beg};
    for (size_t c = 0; c < row.Size(); ++c) {
      auto e = row.GetElement(c);
      if (missing_ != e.value && !common::CheckNAN(e.value)) {
        workspace_[non_missing] = Entry{static_cast<bst_feature_t>(e.column_idx), e.value};
        ++non_missing;
      }
    }
    auto ret = workspace_.subspan(beg, non_missing - beg);
    current_unroll_[t]++;
    if (current_unroll_[t] == kUnroll) {
      current_unroll_[t] = 0;
    }
    return ret;
  }

  size_t Size() const { return adapter_->NumRows(); }

  bst_row_t const static base_rowid = 0;  // NOLINT
};

template <typename DataView, size_t block_of_rows_size>
void PredictBatchByBlockOfRowsKernel(DataView batch, std::vector<bst_float> *out_preds,
                                     gbm::GBTreeModel const &model, int32_t tree_begin,
                                     int32_t tree_end,
                                     std::vector<RegTree::FVec> *p_thread_temp,
                                     int32_t n_threads) {
  auto &thread_temp = *p_thread_temp;
  int32_t const num_group = model.learner_model_param->num_output_group;

  CHECK_EQ(model.param.size_leaf_vector, 0) << "size_leaf_vector is enforced to 0 so far";
  // parallel over local batch
  const auto nsize = static_cast<bst_omp_uint>(batch.Size());
  const int num_feature = model.learner_model_param->num_feature;
  omp_ulong n_blocks = common::DivRoundUp(nsize, block_of_rows_size);

  common::ParallelFor(n_blocks, n_threads, [&](bst_omp_uint block_id) {
    const size_t batch_offset = block_id * block_of_rows_size;
    const size_t block_size = std::min(nsize - batch_offset, block_of_rows_size);
    const size_t fvec_offset = omp_get_thread_num() * block_of_rows_size;

    FVecFill(block_size, batch_offset, num_feature, &batch, fvec_offset, p_thread_temp);
    // process block of rows through all trees to keep cache locality
    PredictByAllTrees(model, tree_begin, tree_end, out_preds, batch_offset + batch.base_rowid,
                      num_group, thread_temp, fvec_offset, block_size);
    FVecDrop(block_size, batch_offset, &batch, fvec_offset, p_thread_temp);
  });
}

float FillNodeMeanValues(RegTree const *tree, bst_node_t nidx, std::vector<float> *mean_values) {
  bst_float result;
  auto &node = (*tree)[nidx];
  auto &node_mean_values = *mean_values;
  if (node.IsLeaf()) {
    result = node.LeafValue();
  } else {
    result = FillNodeMeanValues(tree, node.LeftChild(), mean_values) *
             tree->Stat(node.LeftChild()).sum_hess;
    result += FillNodeMeanValues(tree, node.RightChild(), mean_values) *
              tree->Stat(node.RightChild()).sum_hess;
    result /= tree->Stat(nidx).sum_hess;
  }
  node_mean_values[nidx] = result;
  return result;
}

void FillNodeMeanValues(RegTree const *tree, std::vector<float> *mean_values) {
  size_t num_nodes = tree->param.num_nodes;
  if (mean_values->size() == num_nodes) {
    return;
  }
  mean_values->resize(num_nodes);
  FillNodeMeanValues(tree, 0, mean_values);
}

class CPUPredictor : public Predictor {
 protected:
  // init thread buffers
  static void InitThreadTemp(int nthread, int num_feature, std::vector<RegTree::FVec> *out) {
    int prev_thread_temp_size = out->size();
    if (prev_thread_temp_size < nthread) {
      out->resize(nthread, RegTree::FVec());
    }
  }

  void PredictDMatrix(DMatrix *p_fmat, std::vector<bst_float> *out_preds,
                      gbm::GBTreeModel const &model, int32_t tree_begin,
                      int32_t tree_end) const {
    auto const n_threads = this->ctx_->Threads();
    constexpr double kDensityThresh = .5;
    size_t total =
        std::max(p_fmat->Info().num_row_ * p_fmat->Info().num_col_, static_cast<uint64_t>(1));
    double density =
        static_cast<double>(p_fmat->Info().num_nonzero_) / static_cast<double>(total);
    bool blocked = density > kDensityThresh;

    std::vector<RegTree::FVec> feat_vecs;
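    // One FVec buffer is needed per in-flight row: a dense-ish matrix (density above
    // kDensityThresh) is processed in blocks of kBlockOfRowsSize rows per thread for cache
    // locality, while a sparse one falls back to one row per thread to keep the buffers small.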
    InitThreadTemp(n_threads * (blocked ? kBlockOfRowsSize : 1),
                   model.learner_model_param->num_feature, &feat_vecs);
    for (auto const &batch : p_fmat->GetBatches<SparsePage>()) {
      CHECK_EQ(out_preds->size(),
               p_fmat->Info().num_row_ * model.learner_model_param->num_output_group);
      size_t constexpr kUnroll = 8;
      if (blocked) {
        PredictBatchByBlockOfRowsKernel<SparsePageView<kUnroll>, kBlockOfRowsSize>(
            SparsePageView<kUnroll>{&batch}, out_preds, model, tree_begin, tree_end, &feat_vecs,
            n_threads);
      } else {
        PredictBatchByBlockOfRowsKernel<SparsePageView<kUnroll>, 1>(
            SparsePageView<kUnroll>{&batch}, out_preds, model, tree_begin, tree_end, &feat_vecs,
            n_threads);
      }
    }
  }

 public:
  explicit CPUPredictor(GenericParameter const *generic_param)
      : Predictor::Predictor{generic_param} {}

  void PredictBatch(DMatrix *dmat, PredictionCacheEntry *predts, const gbm::GBTreeModel &model,
                    uint32_t tree_begin, uint32_t tree_end = 0) const override {
    auto *out_preds = &predts->predictions;
    // This is actually already handled in gbm, but a large number of tests rely on the
    // behaviour.
    if (tree_end == 0) {
      tree_end = model.trees.size();
    }
    this->PredictDMatrix(dmat, &out_preds->HostVector(), model, tree_begin, tree_end);
  }

  template <typename Adapter, size_t kBlockSize>
  void DispatchedInplacePredict(dmlc::any const &x, std::shared_ptr<DMatrix> p_m,
                                const gbm::GBTreeModel &model, float missing,
                                PredictionCacheEntry *out_preds, uint32_t tree_begin,
                                uint32_t tree_end) const {
    auto const n_threads = this->ctx_->Threads();
    auto m = dmlc::get<std::shared_ptr<Adapter>>(x);
    CHECK_EQ(m->NumColumns(), model.learner_model_param->num_feature)
        << "Number of columns in data must equal the number of features in the trained model.";
    if (p_m) {
      p_m->Info().num_row_ = m->NumRows();
      this->InitOutPredictions(p_m->Info(), &(out_preds->predictions), model);
    } else {
      MetaInfo info;
      info.num_row_ = m->NumRows();
      this->InitOutPredictions(info, &(out_preds->predictions), model);
    }
    std::vector<Entry> workspace(m->NumColumns() * 8 * n_threads);
    auto &predictions = out_preds->predictions.HostVector();
    std::vector<RegTree::FVec> thread_temp;
    InitThreadTemp(n_threads * kBlockSize, model.learner_model_param->num_feature, &thread_temp);
    PredictBatchByBlockOfRowsKernel<AdapterView<Adapter>, kBlockSize>(
        AdapterView<Adapter>(m.get(), missing, common::Span<Entry>{workspace}, n_threads),
        &predictions, model, tree_begin, tree_end, &thread_temp, n_threads);
  }

  bool InplacePredict(dmlc::any const &x, std::shared_ptr<DMatrix> p_m,
                      const gbm::GBTreeModel &model, float missing,
                      PredictionCacheEntry *out_preds, uint32_t tree_begin,
                      unsigned tree_end) const override {
    if (x.type() == typeid(std::shared_ptr<data::DenseAdapter>)) {
      this->DispatchedInplacePredict<data::DenseAdapter, kBlockOfRowsSize>(
          x, p_m, model, missing, out_preds, tree_begin, tree_end);
    } else if (x.type() == typeid(std::shared_ptr<data::CSRAdapter>)) {
      this->DispatchedInplacePredict<data::CSRAdapter, 1>(
          x, p_m, model, missing, out_preds, tree_begin, tree_end);
    } else if (x.type() == typeid(std::shared_ptr<data::ArrayAdapter>)) {
      this->DispatchedInplacePredict<data::ArrayAdapter, kBlockOfRowsSize>(
          x, p_m, model, missing, out_preds, tree_begin, tree_end);
    } else if (x.type() == typeid(std::shared_ptr<data::CSRArrayAdapter>)) {
      this->DispatchedInplacePredict<data::CSRArrayAdapter, 1>(
          x, p_m, model, missing, out_preds, tree_begin, tree_end);
    } else {
      return false;
    }
    return true;
  }

  void PredictInstance(const SparsePage::Inst &inst, std::vector<bst_float> *out_preds,
                       const gbm::GBTreeModel &model, unsigned ntree_limit) const override {
    std::vector<RegTree::FVec> feat_vecs;
    feat_vecs.resize(1, RegTree::FVec());
    feat_vecs[0].Init(model.learner_model_param->num_feature);
    ntree_limit *= model.learner_model_param->num_output_group;
    if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
      ntree_limit = static_cast<uint32_t>(model.trees.size());
    }
    out_preds->resize(model.learner_model_param->num_output_group *
                      (model.param.size_leaf_vector + 1));
    // loop over output groups
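    // Each group's score is the sum of the leaf values of its trees plus the global base_score;
    // PredValue filters trees by tree_info, so multi-class models fill one slot per class.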
    for (uint32_t gid = 0; gid < model.learner_model_param->num_output_group; ++gid) {
      (*out_preds)[gid] =
          PredValue(inst, model.trees, model.tree_info, gid, &feat_vecs[0], 0, ntree_limit) +
          model.learner_model_param->base_score;
    }
  }

  void PredictLeaf(DMatrix *p_fmat, HostDeviceVector<bst_float> *out_preds,
                   const gbm::GBTreeModel &model, unsigned ntree_limit) const override {
    auto const n_threads = this->ctx_->Threads();
    std::vector<RegTree::FVec> feat_vecs;
    const int num_feature = model.learner_model_param->num_feature;
    InitThreadTemp(n_threads, num_feature, &feat_vecs);
    const MetaInfo &info = p_fmat->Info();
    // number of valid trees
    if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
      ntree_limit = static_cast<uint32_t>(model.trees.size());
    }
    std::vector<bst_float> &preds = out_preds->HostVector();
    preds.resize(info.num_row_ * ntree_limit);
    // start collecting the prediction
    for (const auto &batch : p_fmat->GetBatches<SparsePage>()) {
      // parallel over local batch
      auto page = batch.GetView();
      const auto nsize = static_cast<bst_omp_uint>(batch.Size());
      common::ParallelFor(nsize, n_threads, [&](bst_omp_uint i) {
        const int tid = omp_get_thread_num();
        auto ridx = static_cast<size_t>(batch.base_rowid + i);
        RegTree::FVec &feats = feat_vecs[tid];
        if (feats.Size() == 0) {
          feats.Init(num_feature);
        }
        feats.Fill(page[i]);
        for (unsigned j = 0; j < ntree_limit; ++j) {
          auto const &tree = *model.trees[j];
          auto const &cats = tree.GetCategoriesMatrix();
          bst_node_t nidx = GetLeafIndex<true, true>(tree, feats, cats);
          preds[ridx * ntree_limit + j] = static_cast<bst_float>(nidx);
        }
        feats.Drop(page[i]);
      });
    }
  }

  void PredictContribution(DMatrix *p_fmat, HostDeviceVector<bst_float> *out_contribs,
                           const gbm::GBTreeModel &model, uint32_t ntree_limit,
                           std::vector<bst_float> const *tree_weights, bool approximate,
                           int condition, unsigned condition_feature) const override {
    auto const n_threads = this->ctx_->Threads();
    const int num_feature = model.learner_model_param->num_feature;
    std::vector<RegTree::FVec> feat_vecs;
    InitThreadTemp(n_threads, num_feature, &feat_vecs);
    const MetaInfo &info = p_fmat->Info();
    // number of valid trees
    if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
      ntree_limit = static_cast<uint32_t>(model.trees.size());
    }
    const int ngroup = model.learner_model_param->num_output_group;
    CHECK_NE(ngroup, 0);
    size_t const ncolumns = num_feature + 1;
    CHECK_NE(ncolumns, 0);
    // allocate space for (number of features + bias) times the number of rows
    std::vector<bst_float> &contribs = out_contribs->HostVector();
    contribs.resize(info.num_row_ * ncolumns * model.learner_model_param->num_output_group);
    // make sure contributions are zeroed; we could be reusing a previously allocated buffer
    std::fill(contribs.begin(), contribs.end(), 0);
    // initialize tree node mean values
    std::vector<std::vector<float>> mean_values(ntree_limit);
    common::ParallelFor(ntree_limit, n_threads, [&](bst_omp_uint i) {
      FillNodeMeanValues(model.trees[i].get(), &(mean_values[i]));
    });
    auto base_margin = info.base_margin_.View(GenericParameter::kCpuId);
    // start collecting the contributions
    for (const auto &batch : p_fmat->GetBatches<SparsePage>()) {
      auto page = batch.GetView();
      // parallel over local batch
      const auto nsize = static_cast<bst_omp_uint>(batch.Size());
      common::ParallelFor(nsize, n_threads, [&](bst_omp_uint i) {
        auto row_idx = static_cast<size_t>(batch.base_rowid + i);
        RegTree::FVec &feats = feat_vecs[omp_get_thread_num()];
        if (feats.Size() == 0) {
          feats.Init(num_feature);
        }
        std::vector<bst_float> this_tree_contribs(ncolumns);
        // loop over all classes
        for (int gid = 0; gid < ngroup; ++gid) {
          bst_float *p_contribs = &contribs[(row_idx * ngroup + gid) * ncolumns];
          feats.Fill(page[i]);
          // calculate contributions
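          // Every tree assigned to this group adds its (exact or approximate) SHAP values to
          // p_contribs, scaled by the optional per-tree weight; the last column is reserved for
          // the bias term filled in below.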
          for (unsigned j = 0; j < ntree_limit; ++j) {
            auto *tree_mean_values = &mean_values.at(j);
            std::fill(this_tree_contribs.begin(), this_tree_contribs.end(), 0);
            if (model.tree_info[j] != gid) {
              continue;
            }
            if (!approximate) {
              model.trees[j]->CalculateContributions(feats, tree_mean_values,
                                                     &this_tree_contribs[0], condition,
                                                     condition_feature);
            } else {
              model.trees[j]->CalculateContributionsApprox(feats, tree_mean_values,
                                                           &this_tree_contribs[0]);
            }
            for (size_t ci = 0; ci < ncolumns; ++ci) {
              p_contribs[ci] +=
                  this_tree_contribs[ci] * (tree_weights == nullptr ? 1 : (*tree_weights)[j]);
            }
          }
          feats.Drop(page[i]);
          // add base margin to BIAS
          if (base_margin.Size() != 0) {
            CHECK_EQ(base_margin.Shape(1), ngroup);
            p_contribs[ncolumns - 1] += base_margin(row_idx, gid);
          } else {
            p_contribs[ncolumns - 1] += model.learner_model_param->base_score;
          }
        }
      });
    }
  }

  void PredictInteractionContributions(DMatrix *p_fmat, HostDeviceVector<bst_float> *out_contribs,
                                       const gbm::GBTreeModel &model, unsigned ntree_limit,
                                       std::vector<bst_float> const *tree_weights,
                                       bool approximate) const override {
    const MetaInfo &info = p_fmat->Info();
    const int ngroup = model.learner_model_param->num_output_group;
    size_t const ncolumns = model.learner_model_param->num_feature;
    const unsigned row_chunk = ngroup * (ncolumns + 1) * (ncolumns + 1);
    const unsigned mrow_chunk = (ncolumns + 1) * (ncolumns + 1);
    const unsigned crow_chunk = ngroup * (ncolumns + 1);

    // allocate space for (number of features^2) times the number of rows and tmp off/on contribs
    std::vector<bst_float> &contribs = out_contribs->HostVector();
    contribs.resize(info.num_row_ * ngroup * (ncolumns + 1) * (ncolumns + 1));
    HostDeviceVector<bst_float> contribs_off_hdv(info.num_row_ * ngroup * (ncolumns + 1));
    auto &contribs_off = contribs_off_hdv.HostVector();
    HostDeviceVector<bst_float> contribs_on_hdv(info.num_row_ * ngroup * (ncolumns + 1));
    auto &contribs_on = contribs_on_hdv.HostVector();
    HostDeviceVector<bst_float> contribs_diag_hdv(info.num_row_ * ngroup * (ncolumns + 1));
    auto &contribs_diag = contribs_diag_hdv.HostVector();

    // Compute the difference in effects when conditioning on each of the features on and off
    // see: Axiomatic characterizations of probabilistic and
    //      cardinal-probabilistic interaction indices
    PredictContribution(p_fmat, &contribs_diag_hdv, model, ntree_limit, tree_weights, approximate,
                        0, 0);
    for (size_t i = 0; i < ncolumns + 1; ++i) {
      PredictContribution(p_fmat, &contribs_off_hdv, model, ntree_limit, tree_weights, approximate,
                          -1, i);
      PredictContribution(p_fmat, &contribs_on_hdv, model, ntree_limit, tree_weights, approximate,
                          1, i);

      for (size_t j = 0; j < info.num_row_; ++j) {
        for (int l = 0; l < ngroup; ++l) {
          const unsigned o_offset = j * row_chunk + l * mrow_chunk + i * (ncolumns + 1);
          const unsigned c_offset = j * crow_chunk + l * (ncolumns + 1);
          contribs[o_offset + i] = 0;
          for (size_t k = 0; k < ncolumns + 1; ++k) {
            // fill in the diagonal with additive effects, and off-diagonal with the interactions
            if (k == i) {
              contribs[o_offset + i] += contribs_diag[c_offset + k];
            } else {
              contribs[o_offset + k] =
                  (contribs_on[c_offset + k] - contribs_off[c_offset + k]) / 2.0;
              contribs[o_offset + i] -= contribs[o_offset + k];
            }
          }
        }
      }
    }
  }

 private:
  static size_t constexpr kBlockOfRowsSize = 64;
};

XGBOOST_REGISTER_PREDICTOR(CPUPredictor, "cpu_predictor")
    .describe("Make predictions using CPU.")
    .set_body([](GenericParameter const *generic_param) {
      return new CPUPredictor(generic_param);
    });
}  // namespace predictor
}  // namespace xgboost
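
// Usage sketch (illustrative only, not part of this translation unit): the registration above
// exposes this implementation through the DMLC registry, so callers such as gbm::GBTree obtain
// it roughly as
//
//   std::unique_ptr<Predictor> cpu{Predictor::Create("cpu_predictor", &generic_param)};
//   cpu->PredictBatch(dmat.get(), &cache_entry, model, 0);
//
// where `generic_param`, `dmat`, `cache_entry` and `model` stand for an existing
// GenericParameter, DMatrix, PredictionCacheEntry and trained gbm::GBTreeModel, and the
// prediction cache is assumed to have been sized by the caller beforehand.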