Pass pointer to model parameters. (#5101)
* Pass pointer to model parameters. This PR de-duplicates most of the model parameters, except the one in `tree_model.h`. One difficulty is that `base_score` is a model property but can be changed at runtime by the objective function. Hence, when performing model IO, we need to save the value provided by users instead of the one transformed by the objective. Here we create an immutable version of `LearnerModelParam` that represents the value of the model parameters after configuration.
This commit is contained in:
@@ -49,9 +49,9 @@ class CPUPredictor : public Predictor {
|
||||
void PredLoopInternal(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
|
||||
gbm::GBTreeModel const& model, int32_t tree_begin,
|
||||
int32_t tree_end) {
|
||||
int32_t const num_group = model.param.num_output_group;
|
||||
int32_t const num_group = model.learner_model_param_->num_output_group;
|
||||
const int nthread = omp_get_max_threads();
|
||||
InitThreadTemp(nthread, model.param.num_feature);
|
||||
InitThreadTemp(nthread, model.learner_model_param_->num_feature);
|
||||
std::vector<bst_float>& preds = *out_preds;
|
||||
CHECK_EQ(model.param.size_leaf_vector, 0)
|
||||
<< "size_leaf_vector is enforced to 0 so far";
|
||||
@@ -108,7 +108,7 @@ class CPUPredictor : public Predictor {
|
||||
unsigned ntree_limit) const {
|
||||
CHECK(cache_);
|
||||
if (ntree_limit == 0 ||
|
||||
ntree_limit * model.param.num_output_group >= model.trees.size()) {
|
||||
ntree_limit * model.learner_model_param_->num_output_group >= model.trees.size()) {
|
||||
auto it = cache_->find(dmat);
|
||||
if (it != cache_->end()) {
|
||||
const HostDeviceVector<bst_float>& y = it->second.predictions;
|
||||
@@ -126,8 +126,8 @@ class CPUPredictor : public Predictor {
|
||||
void InitOutPredictions(const MetaInfo& info,
|
||||
HostDeviceVector<bst_float>* out_preds,
|
||||
const gbm::GBTreeModel& model) const {
|
||||
CHECK_NE(model.param.num_output_group, 0);
|
||||
size_t n = model.param.num_output_group * info.num_row_;
|
||||
CHECK_NE(model.learner_model_param_->num_output_group, 0);
|
||||
size_t n = model.learner_model_param_->num_output_group * info.num_row_;
|
||||
const auto& base_margin = info.base_margin_.HostVector();
|
||||
out_preds->Resize(n);
|
||||
std::vector<bst_float>& out_preds_h = out_preds->HostVector();
|
||||
@@ -139,19 +139,19 @@ class CPUPredictor : public Predictor {
|
||||
std::ostringstream oss;
|
||||
oss << "Ignoring the base margin, since it has incorrect length. "
|
||||
<< "The base margin must be an array of length ";
|
||||
if (model.param.num_output_group > 1) {
|
||||
if (model.learner_model_param_->num_output_group > 1) {
|
||||
oss << "[num_class] * [number of data points], i.e. "
|
||||
<< model.param.num_output_group << " * " << info.num_row_
|
||||
<< model.learner_model_param_->num_output_group << " * " << info.num_row_
|
||||
<< " = " << n << ". ";
|
||||
} else {
|
||||
oss << "[number of data points], i.e. " << info.num_row_ << ". ";
|
||||
}
|
||||
oss << "Instead, all data points will use "
|
||||
<< "base_margin = " << model.base_margin;
|
||||
<< "base_score = " << model.learner_model_param_->base_score;
|
||||
LOG(WARNING) << oss.str();
|
||||
}
|
||||
std::fill(out_preds_h.begin(), out_preds_h.end(),
|
||||
model.base_margin);
|
||||
model.learner_model_param_->base_score);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -167,7 +167,7 @@ class CPUPredictor : public Predictor {
|
||||
}
|
||||
this->InitOutPredictions(dmat->Info(), out_preds, model);
|
||||
|
||||
ntree_limit *= model.param.num_output_group;
|
||||
ntree_limit *= model.learner_model_param_->num_output_group;
|
||||
if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
|
||||
ntree_limit = static_cast<unsigned>(model.trees.size());
|
||||
}
|
||||
@@ -198,7 +198,7 @@ class CPUPredictor : public Predictor {
|
||||
InitOutPredictions(e.data->Info(), &(e.predictions), model);
|
||||
PredLoopInternal(e.data.get(), &(e.predictions.HostVector()), model, 0,
|
||||
model.trees.size());
|
||||
} else if (model.param.num_output_group == 1 && updaters->size() > 0 &&
|
||||
} else if (model.learner_model_param_->num_output_group == 1 && updaters->size() > 0 &&
|
||||
num_new_trees == 1 &&
|
||||
updaters->back()->UpdatePredictionCache(e.data.get(),
|
||||
&(e.predictions))) {
|
||||
@@ -215,29 +215,29 @@ class CPUPredictor : public Predictor {
|
||||
const gbm::GBTreeModel& model, unsigned ntree_limit) override {
|
||||
if (thread_temp.size() == 0) {
|
||||
thread_temp.resize(1, RegTree::FVec());
|
||||
thread_temp[0].Init(model.param.num_feature);
|
||||
thread_temp[0].Init(model.learner_model_param_->num_feature);
|
||||
}
|
||||
ntree_limit *= model.param.num_output_group;
|
||||
ntree_limit *= model.learner_model_param_->num_output_group;
|
||||
if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
|
||||
ntree_limit = static_cast<unsigned>(model.trees.size());
|
||||
}
|
||||
out_preds->resize(model.param.num_output_group *
|
||||
out_preds->resize(model.learner_model_param_->num_output_group *
|
||||
(model.param.size_leaf_vector + 1));
|
||||
// loop over output groups
|
||||
for (uint32_t gid = 0; gid < model.param.num_output_group; ++gid) {
|
||||
for (uint32_t gid = 0; gid < model.learner_model_param_->num_output_group; ++gid) {
|
||||
(*out_preds)[gid] =
|
||||
PredValue(inst, model.trees, model.tree_info, gid,
|
||||
&thread_temp[0], 0, ntree_limit) +
|
||||
model.base_margin;
|
||||
model.learner_model_param_->base_score;
|
||||
}
|
||||
}
|
||||
void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
|
||||
const gbm::GBTreeModel& model, unsigned ntree_limit) override {
|
||||
const int nthread = omp_get_max_threads();
|
||||
InitThreadTemp(nthread, model.param.num_feature);
|
||||
InitThreadTemp(nthread, model.learner_model_param_->num_feature);
|
||||
const MetaInfo& info = p_fmat->Info();
|
||||
// number of valid trees
|
||||
ntree_limit *= model.param.num_output_group;
|
||||
ntree_limit *= model.learner_model_param_->num_output_group;
|
||||
if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
|
||||
ntree_limit = static_cast<unsigned>(model.trees.size());
|
||||
}
|
||||
@@ -268,20 +268,20 @@ class CPUPredictor : public Predictor {
|
||||
bool approximate, int condition,
|
||||
unsigned condition_feature) override {
|
||||
const int nthread = omp_get_max_threads();
|
||||
InitThreadTemp(nthread, model.param.num_feature);
|
||||
InitThreadTemp(nthread, model.learner_model_param_->num_feature);
|
||||
const MetaInfo& info = p_fmat->Info();
|
||||
// number of valid trees
|
||||
ntree_limit *= model.param.num_output_group;
|
||||
ntree_limit *= model.learner_model_param_->num_output_group;
|
||||
if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
|
||||
ntree_limit = static_cast<unsigned>(model.trees.size());
|
||||
}
|
||||
const int ngroup = model.param.num_output_group;
|
||||
const int ngroup = model.learner_model_param_->num_output_group;
|
||||
CHECK_NE(ngroup, 0);
|
||||
size_t const ncolumns = model.param.num_feature + 1;
|
||||
size_t const ncolumns = model.learner_model_param_->num_feature + 1;
|
||||
CHECK_NE(ncolumns, 0);
|
||||
// allocate space for (number of features + bias) times the number of rows
|
||||
std::vector<bst_float>& contribs = *out_contribs;
|
||||
contribs.resize(info.num_row_ * ncolumns * model.param.num_output_group);
|
||||
contribs.resize(info.num_row_ * ncolumns * model.learner_model_param_->num_output_group);
|
||||
// make sure contributions is zeroed, we could be reusing a previously
|
||||
// allocated one
|
||||
std::fill(contribs.begin(), contribs.end(), 0);
|
||||
@@ -298,8 +298,8 @@ class CPUPredictor : public Predictor {
|
||||
#pragma omp parallel for schedule(static)
|
||||
for (bst_omp_uint i = 0; i < nsize; ++i) {
|
||||
auto row_idx = static_cast<size_t>(batch.base_rowid + i);
|
||||
std::vector<bst_float> this_tree_contribs(ncolumns);
|
||||
RegTree::FVec& feats = thread_temp[omp_get_thread_num()];
|
||||
std::vector<bst_float> this_tree_contribs(ncolumns);
|
||||
// loop over all classes
|
||||
for (int gid = 0; gid < ngroup; ++gid) {
|
||||
bst_float* p_contribs = &contribs[(row_idx * ngroup + gid) * ncolumns];
|
||||
@@ -326,7 +326,7 @@ class CPUPredictor : public Predictor {
|
||||
if (base_margin.size() != 0) {
|
||||
p_contribs[ncolumns - 1] += base_margin[row_idx * ngroup + gid];
|
||||
} else {
|
||||
p_contribs[ncolumns - 1] += model.base_margin;
|
||||
p_contribs[ncolumns - 1] += model.learner_model_param_->base_score;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -338,8 +338,8 @@ class CPUPredictor : public Predictor {
|
||||
std::vector<bst_float>* tree_weights,
|
||||
bool approximate) override {
|
||||
const MetaInfo& info = p_fmat->Info();
|
||||
const int ngroup = model.param.num_output_group;
|
||||
size_t const ncolumns = model.param.num_feature;
|
||||
const int ngroup = model.learner_model_param_->num_output_group;
|
||||
size_t const ncolumns = model.learner_model_param_->num_feature;
|
||||
const unsigned row_chunk = ngroup * (ncolumns + 1) * (ncolumns + 1);
|
||||
const unsigned mrow_chunk = (ncolumns + 1) * (ncolumns + 1);
|
||||
const unsigned crow_chunk = ngroup * (ncolumns + 1);
|
||||
|
||||
Reference in New Issue
Block a user