Upgrade clang-tidy on CI. (#5469)

* Correct all clang-tidy errors.
* Upgrade clang-tidy to 10 on CI.

Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
Jiaming Yuan authored on 2020-04-05 04:42:29 +08:00; committed by GitHub
parent 30e94ddd04
commit 0012f2ef93
107 changed files with 932 additions and 903 deletions
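
The diffs below apply one mechanical pattern throughout. The commit does not name the checks involved, but the renames are consistent with clang-tidy's readability-identifier-naming under the project's conventions. A minimal sketch (not XGBoost source) of the target style:

// Minimal sketch of the naming style the renames below converge on
// (the driving check is assumed to be readability-identifier-naming):
class Example {
 public:
  int num_feature {0};   // public data member: no trailing underscore
                         //   (learner_model_param_ -> learner_model_param)
  int Bias() const {     // methods and free functions: CamelCase
    return bias_;        //   (bias() -> Bias(), fromJson -> FromJson)
  }

 private:
  int bias_ {0};         // private data member: trailing underscore
};                       //   (param -> param_)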

View File

@@ -53,8 +53,8 @@ class GBLinear : public GradientBooster {
  public:
   explicit GBLinear(LearnerModelParam const* learner_model_param)
       : learner_model_param_{learner_model_param},
-        model_{learner_model_param_},
-        previous_model_{learner_model_param_},
+        model_{learner_model_param},
+        previous_model_{learner_model_param},
         sum_instance_weight_(0),
         sum_weight_complete_(false),
         is_converged_(false) {}
@@ -95,14 +95,14 @@ class GBLinear : public GradientBooster {
   void LoadConfig(Json const& in) override {
     CHECK_EQ(get<String>(in["name"]), "gblinear");
-    fromJson(in["gblinear_train_param"], &param_);
+    FromJson(in["gblinear_train_param"], &param_);
     updater_.reset(LinearUpdater::Create(param_.updater, generic_param_));
     this->updater_->LoadConfig(in["updater"]);
   }

   void SaveConfig(Json* p_out) const override {
     auto& out = *p_out;
     out["name"] = String{"gblinear"};
-    out["gblinear_train_param"] = toJson(param_);
+    out["gblinear_train_param"] = ToJson(param_);
     out["updater"] = Object();
     auto& j_updater = out["updater"];
@@ -140,7 +140,7 @@ class GBLinear : public GradientBooster {
   void PredictInstance(const SparsePage::Inst &inst,
                        std::vector<bst_float> *out_preds,
                        unsigned ntree_limit) override {
-    const int ngroup = model_.learner_model_param_->num_output_group;
+    const int ngroup = model_.learner_model_param->num_output_group;
     for (int gid = 0; gid < ngroup; ++gid) {
       this->Pred(inst, dmlc::BeginPtr(*out_preds), gid,
                  learner_model_param_->base_score);
@@ -161,8 +161,8 @@ class GBLinear : public GradientBooster {
     CHECK_EQ(ntree_limit, 0U)
         << "GBLinear::PredictContribution: ntrees is only valid for gbtree predictor";
     const auto& base_margin = p_fmat->Info().base_margin_.ConstHostVector();
-    const int ngroup = model_.learner_model_param_->num_output_group;
-    const size_t ncolumns = model_.learner_model_param_->num_feature + 1;
+    const int ngroup = model_.learner_model_param->num_output_group;
+    const size_t ncolumns = model_.learner_model_param->num_feature + 1;
     // allocate space for (#features + bias) times #groups times #rows
     std::vector<bst_float>& contribs = *out_contribs;
     contribs.resize(p_fmat->Info().num_row_ * ncolumns * ngroup);
@@ -181,11 +181,11 @@ class GBLinear : public GradientBooster {
         bst_float *p_contribs = &contribs[(row_idx * ngroup + gid) * ncolumns];
         // calculate linear terms' contributions
         for (auto& ins : inst) {
-          if (ins.index >= model_.learner_model_param_->num_feature) continue;
+          if (ins.index >= model_.learner_model_param->num_feature) continue;
           p_contribs[ins.index] = ins.fvalue * model_[ins.index][gid];
         }
         // add base margin to BIAS
-        p_contribs[ncolumns - 1] = model_.bias()[gid] +
+        p_contribs[ncolumns - 1] = model_.Bias()[gid] +
             ((base_margin.size() != 0) ? base_margin[row_idx * ngroup + gid] :
              learner_model_param_->base_score);
       }
@@ -199,10 +199,10 @@ class GBLinear : public GradientBooster {
     std::vector<bst_float>& contribs = *out_contribs;
     // linear models have no interaction effects
-    const size_t nelements = model_.learner_model_param_->num_feature *
-                             model_.learner_model_param_->num_feature;
+    const size_t nelements = model_.learner_model_param->num_feature *
+                             model_.learner_model_param->num_feature;
     contribs.resize(p_fmat->Info().num_row_ * nelements *
-                    model_.learner_model_param_->num_output_group);
+                    model_.learner_model_param->num_output_group);
     std::fill(contribs.begin(), contribs.end(), 0);
   }
@@ -228,7 +228,7 @@ class GBLinear : public GradientBooster {
     std::vector<bst_float> &preds = *out_preds;
     const auto& base_margin = p_fmat->Info().base_margin_.ConstHostVector();
     // start collecting the prediction
-    const int ngroup = model_.learner_model_param_->num_output_group;
+    const int ngroup = model_.learner_model_param->num_output_group;
     preds.resize(p_fmat->Info().num_row_ * ngroup);
     for (const auto &batch : p_fmat->GetBatches<SparsePage>()) {
       // output convention: nrow * k, where nrow is number of rows
@@ -283,9 +283,9 @@ class GBLinear : public GradientBooster {
   void Pred(const SparsePage::Inst &inst, bst_float *preds, int gid,
             bst_float base) {
-    bst_float psum = model_.bias()[gid] + base;
+    bst_float psum = model_.Bias()[gid] + base;
     for (const auto& ins : inst) {
-      if (ins.index >= model_.learner_model_param_->num_feature) continue;
+      if (ins.index >= model_.learner_model_param->num_feature) continue;
       psum += ins.fvalue * model_[ins.index][gid];
     }
     preds[gid] = psum;
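
The Pred helper in the last hunk is plain linear-model arithmetic: the group's bias plus a sparse dot product of feature values and per-group weights. A self-contained sketch of the same computation, with simplified types standing in for the XGBoost ones:

#include <cstddef>
#include <utility>
#include <vector>

// Sketch of the linear scoring in Pred() above: the group's bias plus a
// sparse dot product. Entries whose index falls outside the trained feature
// range are skipped, mirroring the `ins.index >= num_feature` guard.
float LinearScore(const std::vector<std::pair<std::size_t, float>>& inst,
                  const std::vector<float>& weight,  // (num_feature + 1) x num_group
                  std::size_t num_feature, std::size_t num_group,
                  std::size_t gid, float base) {
  float psum = weight[num_feature * num_group + gid] + base;  // bias row is last
  for (const auto& ins : inst) {
    if (ins.first >= num_feature) continue;  // ignore unseen features
    psum += ins.second * weight[ins.first * num_group + gid];
  }
  return psum;
}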

View File

@@ -41,14 +41,14 @@ struct DeprecatedGBLinearModelParam : public dmlc::Parameter<DeprecatedGBLinearM
 class GBLinearModel : public Model {
  private:
   // Deprecated in 1.0.0
-  DeprecatedGBLinearModelParam param;
+  DeprecatedGBLinearModelParam param_;

  public:
-  LearnerModelParam const* learner_model_param_;
+  LearnerModelParam const* learner_model_param;

  public:
   explicit GBLinearModel(LearnerModelParam const* learner_model_param) :
-    learner_model_param_ {learner_model_param} {}
+    learner_model_param {learner_model_param} {}
   void Configure(Args const &cfg) { }

   // weight for each of feature, bias is the last one
@@ -59,8 +59,8 @@ class GBLinearModel : public Model {
       return;
     }
     // bias is the last weight
-    weight.resize((learner_model_param_->num_feature + 1) *
-                  learner_model_param_->num_output_group);
+    weight.resize((learner_model_param->num_feature + 1) *
+                  learner_model_param->num_output_group);
     std::fill(weight.begin(), weight.end(), 0.0f);
   }
@@ -69,52 +69,54 @@ class GBLinearModel : public Model {
   // save the model to file
   void Save(dmlc::Stream *fo) const {
-    fo->Write(&param, sizeof(param));
+    fo->Write(&param_, sizeof(param_));
     fo->Write(weight);
   }
   // load model from file
   void Load(dmlc::Stream *fi) {
-    CHECK_EQ(fi->Read(&param, sizeof(param)), sizeof(param));
+    CHECK_EQ(fi->Read(&param_, sizeof(param_)), sizeof(param_));
     fi->Read(&weight);
   }
   // model bias
-  inline bst_float *bias() {
-    return &weight[learner_model_param_->num_feature *
-                   learner_model_param_->num_output_group];
+  inline bst_float *Bias() {
+    return &weight[learner_model_param->num_feature *
+                   learner_model_param->num_output_group];
   }
-  inline const bst_float *bias() const {
-    return &weight[learner_model_param_->num_feature *
-                   learner_model_param_->num_output_group];
+  inline const bst_float *Bias() const {
+    return &weight[learner_model_param->num_feature *
+                   learner_model_param->num_output_group];
   }
   // get i-th weight
   inline bst_float *operator[](size_t i) {
-    return &weight[i * learner_model_param_->num_output_group];
+    return &weight[i * learner_model_param->num_output_group];
   }
   inline const bst_float *operator[](size_t i) const {
-    return &weight[i * learner_model_param_->num_output_group];
+    return &weight[i * learner_model_param->num_output_group];
   }

   std::vector<std::string> DumpModel(const FeatureMap &fmap, bool with_stats,
                                      std::string format) const {
-    const int ngroup = learner_model_param_->num_output_group;
-    const unsigned nfeature = learner_model_param_->num_feature;
+    const int ngroup = learner_model_param->num_output_group;
+    const unsigned nfeature = learner_model_param->num_feature;

     std::stringstream fo("");
     if (format == "json") {
       fo << "  { \"bias\": [" << std::endl;
       for (int gid = 0; gid < ngroup; ++gid) {
-        if (gid != 0)
+        if (gid != 0) {
           fo << "," << std::endl;
-        fo << "  " << this->bias()[gid];
+        }
+        fo << "  " << this->Bias()[gid];
       }
       fo << std::endl
          << "  ]," << std::endl
          << "  \"weight\": [" << std::endl;
       for (unsigned i = 0; i < nfeature; ++i) {
         for (int gid = 0; gid < ngroup; ++gid) {
-          if (i != 0 || gid != 0)
+          if (i != 0 || gid != 0) {
             fo << "," << std::endl;
+          }
           fo << "  " << (*this)[i][gid];
         }
       }
@@ -122,7 +124,7 @@ class GBLinearModel : public Model {
     } else {
       fo << "bias:\n";
       for (int gid = 0; gid < ngroup; ++gid) {
-        fo << this->bias()[gid] << std::endl;
+        fo << this->Bias()[gid] << std::endl;
       }
       fo << "weight:\n";
       for (unsigned i = 0; i < nfeature; ++i) {
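
The renamed accessors encode the model's flat weight layout: (num_feature + 1) rows of num_output_group entries each, stored contiguously, with the bias occupying the final row. A hedged sketch of just that indexing (simplified, not the XGBoost class):

#include <cstddef>
#include <vector>

// Sketch of the flat layout behind Bias() and operator[] above:
// (num_feature + 1) rows of num_output_group entries, row-major,
// with the bias occupying the final row.
struct LinearWeightsSketch {
  std::vector<float> weight;
  std::size_t num_feature {0};
  std::size_t num_output_group {1};

  void Reset() { weight.assign((num_feature + 1) * num_output_group, 0.0f); }
  // per-group weights of feature i
  float* operator[](std::size_t i) { return &weight[i * num_output_group]; }
  // bias row sits after all feature rows
  float* Bias() { return &weight[num_feature * num_output_group]; }
};

With num_feature = 4 and num_output_group = 3, weight holds 15 floats and Bias() points at offset 12.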

View File

@@ -186,7 +186,7 @@ void GBTree::DoBoost(DMatrix* p_fmat,
                      HostDeviceVector<GradientPair>* in_gpair,
                      PredictionCacheEntry* predt) {
   std::vector<std::vector<std::unique_ptr<RegTree> > > new_trees;
-  const int ngroup = model_.learner_model_param_->num_output_group;
+  const int ngroup = model_.learner_model_param->num_output_group;
   ConfigureWithKnownData(this->cfg_, p_fmat);
   monitor_.Start("BoostNewTrees");
   CHECK_NE(ngroup, 0);
@@ -300,17 +300,17 @@ void GBTree::CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& ne
                          PredictionCacheEntry* predts) {
   monitor_.Start("CommitModel");
   int num_new_trees = 0;
-  for (uint32_t gid = 0; gid < model_.learner_model_param_->num_output_group; ++gid) {
+  for (uint32_t gid = 0; gid < model_.learner_model_param->num_output_group; ++gid) {
     num_new_trees += new_trees[gid].size();
     model_.CommitModel(std::move(new_trees[gid]), gid);
   }
   auto* out = &predts->predictions;
-  if (model_.learner_model_param_->num_output_group == 1 &&
+  if (model_.learner_model_param->num_output_group == 1 &&
       updaters_.size() > 0 &&
       num_new_trees == 1 &&
       out->Size() > 0 &&
       updaters_.back()->UpdatePredictionCache(m, out)) {
-    auto delta = num_new_trees / model_.learner_model_param_->num_output_group;
+    auto delta = num_new_trees / model_.learner_model_param->num_output_group;
     predts->Update(delta);
   }
   monitor_.Stop("CommitModel");
@@ -318,7 +318,7 @@ void GBTree::CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& ne
 void GBTree::LoadConfig(Json const& in) {
   CHECK_EQ(get<String>(in["name"]), "gbtree");
-  fromJson(in["gbtree_train_param"], &tparam_);
+  FromJson(in["gbtree_train_param"], &tparam_);
   int32_t const n_gpus = xgboost::common::AllVisibleGPUs();
   if (n_gpus == 0 && tparam_.predictor == PredictorType::kGPUPredictor) {
     LOG(WARNING)
@@ -347,7 +347,7 @@ void GBTree::LoadConfig(Json const& in) {
 void GBTree::SaveConfig(Json* p_out) const {
   auto& out = *p_out;
   out["name"] = String("gbtree");
-  out["gbtree_train_param"] = toJson(tparam_);
+  out["gbtree_train_param"] = ToJson(tparam_);
   out["updater"] = Object();
   auto& j_updaters = out["updater"];
@@ -495,7 +495,7 @@ class Dart : public GBTree {
     CHECK_EQ(get<String>(in["name"]), "dart");
     auto const& gbtree = in["gbtree"];
     GBTree::LoadConfig(gbtree);
-    fromJson(in["dart_train_param"], &dparam_);
+    FromJson(in["dart_train_param"], &dparam_);
   }

   void SaveConfig(Json* p_out) const override {
     auto& out = *p_out;
@@ -503,7 +503,7 @@ class Dart : public GBTree {
     out["gbtree"] = Object();
     auto& gbtree = out["gbtree"];
     GBTree::SaveConfig(&gbtree);
-    out["dart_train_param"] = toJson(dparam_);
+    out["dart_train_param"] = ToJson(dparam_);
   }

   void PredictBatch(DMatrix* p_fmat,
@@ -511,7 +511,7 @@ class Dart : public GBTree {
                     bool training,
                     unsigned ntree_limit) override {
     DropTrees(training);
-    int num_group = model_.learner_model_param_->num_output_group;
+    int num_group = model_.learner_model_param->num_output_group;
     ntree_limit *= num_group;
     if (ntree_limit == 0 || ntree_limit > model_.trees.size()) {
       ntree_limit = static_cast<unsigned>(model_.trees.size());
@@ -525,7 +525,7 @@ class Dart : public GBTree {
       std::copy(base_margin.begin(), base_margin.end(), out_preds.begin());
     } else {
       std::fill(out_preds.begin(), out_preds.end(),
-                model_.learner_model_param_->base_score);
+                model_.learner_model_param->base_score);
     }
     const int nthread = omp_get_max_threads();
     InitThreadTemp(nthread);
@@ -538,18 +538,18 @@ class Dart : public GBTree {
     DropTrees(false);
     if (thread_temp_.size() == 0) {
       thread_temp_.resize(1, RegTree::FVec());
-      thread_temp_[0].Init(model_.learner_model_param_->num_feature);
+      thread_temp_[0].Init(model_.learner_model_param->num_feature);
     }
-    out_preds->resize(model_.learner_model_param_->num_output_group);
-    ntree_limit *= model_.learner_model_param_->num_output_group;
+    out_preds->resize(model_.learner_model_param->num_output_group);
+    ntree_limit *= model_.learner_model_param->num_output_group;
     if (ntree_limit == 0 || ntree_limit > model_.trees.size()) {
       ntree_limit = static_cast<unsigned>(model_.trees.size());
     }
     // loop over output groups
-    for (uint32_t gid = 0; gid < model_.learner_model_param_->num_output_group; ++gid) {
+    for (uint32_t gid = 0; gid < model_.learner_model_param->num_output_group; ++gid) {
       (*out_preds)[gid] =
           PredValue(inst, gid, &thread_temp_[0], 0, ntree_limit) +
-          model_.learner_model_param_->base_score;
+          model_.learner_model_param->base_score;
     }
   }
@@ -582,7 +582,7 @@ class Dart : public GBTree {
                         int num_group,
                         unsigned tree_begin,
                         unsigned tree_end) {
-    CHECK_EQ(num_group, model_.learner_model_param_->num_output_group);
+    CHECK_EQ(num_group, model_.learner_model_param->num_output_group);
     std::vector<bst_float>& preds = *out_preds;
     CHECK_EQ(model_.param.size_leaf_vector, 0)
         << "size_leaf_vector is enforced to 0 so far";
@@ -635,7 +635,7 @@ class Dart : public GBTree {
                    DMatrix* m,
                    PredictionCacheEntry* predts) override {
     int num_new_trees = 0;
-    for (uint32_t gid = 0; gid < model_.learner_model_param_->num_output_group; ++gid) {
+    for (uint32_t gid = 0; gid < model_.learner_model_param->num_output_group; ++gid) {
       num_new_trees += new_trees[gid].size();
       model_.CommitModel(std::move(new_trees[gid]), gid);
     }
@@ -752,7 +752,7 @@ class Dart : public GBTree {
     if (prev_thread_temp_size < nthread) {
       thread_temp_.resize(nthread, RegTree::FVec());
       for (int i = prev_thread_temp_size; i < nthread; ++i) {
-        thread_temp_[i].Init(model_.learner_model_param_->num_feature);
+        thread_temp_[i].Init(model_.learner_model_param->num_feature);
       }
     }
   }
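
A recurring detail in the Dart hunks above: ntree_limit is user-facing in boosting rounds, while model_.trees stores one tree per output group per round, so the limit is scaled by the group count and clamped to the total tree count. A small sketch of that resolution logic (hypothetical helper name, not an XGBoost function):

#include <cstddef>
#include <cstdint>

// Sketch of the ntree_limit handling repeated above: scale the round-based
// limit by the number of output groups, then clamp; 0 means "use all trees".
std::uint32_t ResolveTreeLimit(std::uint32_t ntree_limit,
                               std::uint32_t num_group,
                               std::size_t total_trees) {
  std::uint32_t limit = ntree_limit * num_group;
  if (limit == 0 || limit > total_trees) {
    limit = static_cast<std::uint32_t>(total_trees);
  }
  return limit;
}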

View File

@@ -195,7 +195,7 @@ class GBTree : public GradientBooster {
   void LoadModel(Json const& in) override;

   bool AllowLazyCheckPoint() const override {
-    return model_.learner_model_param_->num_output_group == 1 ||
+    return model_.learner_model_param->num_output_group == 1 ||
         tparam_.updater_seq.find("distcol") != std::string::npos;
   }
@@ -210,7 +210,7 @@ class GBTree : public GradientBooster {
                  unsigned layer_end = 0) const override {
     CHECK(configured_);
     // From here on, layer becomes concrete trees.
-    bst_group_t groups = model_.learner_model_param_->num_output_group;
+    bst_group_t groups = model_.learner_model_param->num_output_group;
     uint32_t tree_begin = layer_begin * groups * tparam_.num_parallel_tree;
     uint32_t tree_end = layer_end * groups * tparam_.num_parallel_tree;
     if (tree_end == 0 || tree_end > model_.trees.size()) {
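
The slice arithmetic above maps boosting layers to concrete tree indices: each layer contributes groups * num_parallel_tree trees. A sketch of the same math with a worked example (hypothetical helper):

#include <cstdint>
#include <utility>

// Sketch of the layer-to-tree mapping above: layers [layer_begin, layer_end)
// map to tree indices [layer_begin * stride, layer_end * stride), where
// stride = groups * num_parallel_tree. For example, with groups = 3 and
// num_parallel_tree = 1, layers [2, 4) cover trees [6, 12).
std::pair<std::uint32_t, std::uint32_t> LayerToTreeRange(
    std::uint32_t layer_begin, std::uint32_t layer_end,
    std::uint32_t groups, std::uint32_t num_parallel_tree) {
  std::uint32_t stride = groups * num_parallel_tree;
  return {layer_begin * stride, layer_end * stride};
}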

View File

@@ -40,7 +40,7 @@ void GBTreeModel::Load(dmlc::Stream* fi) {
 void GBTreeModel::SaveModel(Json* p_out) const {
   auto& out = *p_out;
   CHECK_EQ(param.num_trees, static_cast<int>(trees.size()));
-  out["gbtree_model_param"] = toJson(param);
+  out["gbtree_model_param"] = ToJson(param);
   std::vector<Json> trees_json;
   size_t t = 0;
   for (auto const& tree : trees) {
@@ -62,7 +62,7 @@ void GBTreeModel::SaveModel(Json* p_out) const {
 }

 void GBTreeModel::LoadModel(Json const& in) {
-  fromJson(in["gbtree_model_param"], &param);
+  FromJson(in["gbtree_model_param"], &param);
   trees.clear();
   trees_to_update.clear();

View File

@@ -65,8 +65,8 @@ struct GBTreeModelParam : public dmlc::Parameter<GBTreeModelParam> {
 struct GBTreeModel : public Model {
  public:
-  explicit GBTreeModel(LearnerModelParam const* learner_model_param) :
-    learner_model_param_{learner_model_param} {}
+  explicit GBTreeModel(LearnerModelParam const* learner_model) :
+    learner_model_param{learner_model} {}
   void Configure(const Args& cfg) {
     // initialize model parameters if not yet been initialized.
     if (trees.size() == 0) {
@@ -109,7 +109,7 @@ struct GBTreeModel : public Model {
   }

   // base margin
-  LearnerModelParam const* learner_model_param_;
+  LearnerModelParam const* learner_model_param;
   // model parameter
   GBTreeModelParam param;
   /*! \brief vector of trees stored in the model */
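
Across all six files, FromJson and ToJson sit in mirror-image SaveConfig/LoadConfig pairs. A hedged sketch of that round-trip pattern, with std::map standing in for xgboost::Json and the comments marking where the real code calls the converters:

#include <map>
#include <string>

// Sketch of the SaveConfig/LoadConfig symmetry the diff touches in every
// booster; std::map is a stand-in for xgboost::Json.
struct BoosterConfigSketch {
  std::string updater_seq {"grow_quantile_histmaker"};

  void SaveConfig(std::map<std::string, std::string>* p_out) const {
    auto& out = *p_out;
    out["name"] = "gbtree";
    out["updater_seq"] = updater_seq;    // real code: out[...] = ToJson(tparam_)
  }
  void LoadConfig(const std::map<std::string, std::string>& in) {
    updater_seq = in.at("updater_seq");  // real code: FromJson(in[...], &tparam_)
  }
};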