Upgrade clang-tidy on CI. (#5469)
* Correct all clang-tidy errors.
* Upgrade clang-tidy to 10 on CI.

Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
--- a/src/linear/coordinate_common.h
+++ b/src/linear/coordinate_common.h
@@ -252,7 +252,7 @@ class CyclicFeatureSelector : public FeatureSelector {
   int NextFeature(int iteration, const gbm::GBLinearModel &model,
                   int group_idx, const std::vector<GradientPair> &gpair,
                   DMatrix *p_fmat, float alpha, float lambda) override {
-    return iteration % model.learner_model_param_->num_feature;
+    return iteration % model.learner_model_param->num_feature;
   }
 };
 
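Most of this patch is one mechanical rename: learner_model_param_ becomes learner_model_param. The convention at work, which clang-tidy's readability-identifier-naming check can enforce, reserves the trailing underscore for private data members; this member is accessed publicly, so the suffix goes. A minimal sketch of the rule, using a hypothetical class rather than code from this patch:

    #include <vector>

    // Hypothetical sketch of the naming rule: private data members keep a
    // trailing underscore, public members do not.
    class Model {
     public:
      int num_feature{0};            // public member: no suffix
     private:
      std::vector<float> weights_;   // private member: trailing underscore
    };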
@@ -266,7 +266,7 @@ class ShuffleFeatureSelector : public FeatureSelector {
              const std::vector<GradientPair> &gpair,
              DMatrix *p_fmat, float alpha, float lambda, int param) override {
     if (feat_index_.size() == 0) {
-      feat_index_.resize(model.learner_model_param_->num_feature);
+      feat_index_.resize(model.learner_model_param->num_feature);
       std::iota(feat_index_.begin(), feat_index_.end(), 0);
     }
     std::shuffle(feat_index_.begin(), feat_index_.end(), common::GlobalRandom());
@@ -275,7 +275,7 @@ class ShuffleFeatureSelector : public FeatureSelector {
   int NextFeature(int iteration, const gbm::GBLinearModel &model,
                   int group_idx, const std::vector<GradientPair> &gpair,
                   DMatrix *p_fmat, float alpha, float lambda) override {
-    return feat_index_[iteration % model.learner_model_param_->num_feature];
+    return feat_index_[iteration % model.learner_model_param->num_feature];
   }
 
  protected:
@@ -291,7 +291,7 @@ class RandomFeatureSelector : public FeatureSelector {
   int NextFeature(int iteration, const gbm::GBLinearModel &model,
                   int group_idx, const std::vector<GradientPair> &gpair,
                   DMatrix *p_fmat, float alpha, float lambda) override {
-    return common::GlobalRandom()() % model.learner_model_param_->num_feature;
+    return common::GlobalRandom()() % model.learner_model_param->num_feature;
   }
 };
 
@@ -310,11 +310,11 @@ class GreedyFeatureSelector : public FeatureSelector {
              const std::vector<GradientPair> &gpair,
              DMatrix *p_fmat, float alpha, float lambda, int param) override {
     top_k_ = static_cast<bst_uint>(param);
-    const bst_uint ngroup = model.learner_model_param_->num_output_group;
+    const bst_uint ngroup = model.learner_model_param->num_output_group;
     if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
     if (counter_.size() == 0) {
       counter_.resize(ngroup);
-      gpair_sums_.resize(model.learner_model_param_->num_feature * ngroup);
+      gpair_sums_.resize(model.learner_model_param->num_feature * ngroup);
     }
     for (bst_uint gid = 0u; gid < ngroup; ++gid) {
       counter_[gid] = 0u;
@@ -327,10 +327,10 @@ class GreedyFeatureSelector : public FeatureSelector {
     // k-th selected feature for a group
     auto k = counter_[group_idx]++;
     // stop after either reaching top-K or going through all the features in a group
-    if (k >= top_k_ || counter_[group_idx] == model.learner_model_param_->num_feature) return -1;
+    if (k >= top_k_ || counter_[group_idx] == model.learner_model_param->num_feature) return -1;
 
-    const int ngroup = model.learner_model_param_->num_output_group;
-    const bst_omp_uint nfeat = model.learner_model_param_->num_feature;
+    const int ngroup = model.learner_model_param->num_output_group;
+    const bst_omp_uint nfeat = model.learner_model_param->num_feature;
     // Calculate univariate gradient sums
     std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.));
     for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
@@ -387,8 +387,8 @@ class ThriftyFeatureSelector : public FeatureSelector {
              DMatrix *p_fmat, float alpha, float lambda, int param) override {
     top_k_ = static_cast<bst_uint>(param);
     if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
-    const bst_uint ngroup = model.learner_model_param_->num_output_group;
-    const bst_omp_uint nfeat = model.learner_model_param_->num_feature;
+    const bst_uint ngroup = model.learner_model_param->num_output_group;
+    const bst_omp_uint nfeat = model.learner_model_param->num_feature;
 
     if (deltaw_.size() == 0) {
       deltaw_.resize(nfeat * ngroup);
@@ -444,9 +444,9 @@ class ThriftyFeatureSelector : public FeatureSelector {
     // k-th selected feature for a group
     auto k = counter_[group_idx]++;
     // stop after either reaching top-N or going through all the features in a group
-    if (k >= top_k_ || counter_[group_idx] == model.learner_model_param_->num_feature) return -1;
+    if (k >= top_k_ || counter_[group_idx] == model.learner_model_param->num_feature) return -1;
     // note that sorted_idx stores the "long" indices
-    const size_t grp_offset = group_idx * model.learner_model_param_->num_feature;
+    const size_t grp_offset = group_idx * model.learner_model_param->num_feature;
     return static_cast<int>(sorted_idx_[grp_offset + k] - grp_offset);
   }
 
--- a/src/linear/updater_coordinate.cc
+++ b/src/linear/updater_coordinate.cc
@@ -36,26 +36,26 @@ class CoordinateUpdater : public LinearUpdater {
 
   void LoadConfig(Json const& in) override {
     auto const& config = get<Object const>(in);
-    fromJson(config.at("linear_train_param"), &tparam_);
-    fromJson(config.at("coordinate_param"), &cparam_);
+    FromJson(config.at("linear_train_param"), &tparam_);
+    FromJson(config.at("coordinate_param"), &cparam_);
   }
   void SaveConfig(Json* p_out) const override {
     auto& out = *p_out;
-    out["linear_train_param"] = toJson(tparam_);
-    out["coordinate_param"] = toJson(cparam_);
+    out["linear_train_param"] = ToJson(tparam_);
+    out["coordinate_param"] = ToJson(cparam_);
   }
 
   void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat,
               gbm::GBLinearModel *model, double sum_instance_weight) override {
     tparam_.DenormalizePenalties(sum_instance_weight);
-    const int ngroup = model->learner_model_param_->num_output_group;
+    const int ngroup = model->learner_model_param->num_output_group;
     // update bias
     for (int group_idx = 0; group_idx < ngroup; ++group_idx) {
       auto grad = GetBiasGradientParallel(group_idx, ngroup,
                                           in_gpair->ConstHostVector(), p_fmat);
       auto dbias = static_cast<float>(tparam_.learning_rate *
                                       CoordinateDeltaBias(grad.first, grad.second));
-      model->bias()[group_idx] += dbias;
+      model->Bias()[group_idx] += dbias;
       UpdateBiasResidualParallel(group_idx, ngroup,
                                  dbias, &in_gpair->HostVector(), p_fmat);
     }
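The fromJson/toJson and bias() renames in the updaters follow the same naming check configured for functions and methods: they are required to be CamelCase, so fromJson becomes FromJson and the bias() accessor becomes Bias(). A short sketch of the pattern, with a hypothetical accessor rather than the real GBLinearModel API:

    #include <vector>

    // Hypothetical sketch: method names are CamelCase under the
    // readability-identifier-naming configuration applied in this patch.
    class LinearModel {
     public:
      std::vector<float>& Bias() { return bias_; }  // was: bias()
     private:
      std::vector<float> bias_;
    };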
@@ -65,7 +65,7 @@ class CoordinateUpdater : public LinearUpdater {
                        tparam_.reg_lambda_denorm, cparam_.top_k);
     // update weights
     for (int group_idx = 0; group_idx < ngroup; ++group_idx) {
-      for (unsigned i = 0U; i < model->learner_model_param_->num_feature; i++) {
+      for (unsigned i = 0U; i < model->learner_model_param->num_feature; i++) {
         int fidx = selector_->NextFeature
           (i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat,
            tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm);
@@ -78,7 +78,7 @@ class CoordinateUpdater : public LinearUpdater {
 
   inline void UpdateFeature(int fidx, int group_idx, std::vector<GradientPair> *in_gpair,
                             DMatrix *p_fmat, gbm::GBLinearModel *model) {
-    const int ngroup = model->learner_model_param_->num_output_group;
+    const int ngroup = model->learner_model_param->num_output_group;
     bst_float &w = (*model)[fidx][group_idx];
     auto gradient =
         GetGradientParallel(group_idx, ngroup, fidx, *in_gpair, p_fmat);
--- a/src/linear/updater_gpu_coordinate.cu
+++ b/src/linear/updater_gpu_coordinate.cu
@@ -44,13 +44,13 @@ class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT
 
   void LoadConfig(Json const& in) override {
     auto const& config = get<Object const>(in);
-    fromJson(config.at("linear_train_param"), &tparam_);
-    fromJson(config.at("coordinate_param"), &coord_param_);
+    FromJson(config.at("linear_train_param"), &tparam_);
+    FromJson(config.at("coordinate_param"), &coord_param_);
   }
   void SaveConfig(Json* p_out) const override {
     auto& out = *p_out;
-    out["linear_train_param"] = toJson(tparam_);
-    out["coordinate_param"] = toJson(coord_param_);
+    out["linear_train_param"] = ToJson(tparam_);
+    out["coordinate_param"] = ToJson(coord_param_);
   }
 
   void LazyInitDevice(DMatrix *p_fmat, const LearnerModelParam &model_param) {
@@ -103,7 +103,7 @@ class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT
               gbm::GBLinearModel *model, double sum_instance_weight) override {
     tparam_.DenormalizePenalties(sum_instance_weight);
     monitor_.Start("LazyInitDevice");
-    this->LazyInitDevice(p_fmat, *(model->learner_model_param_));
+    this->LazyInitDevice(p_fmat, *(model->learner_model_param));
     monitor_.Stop("LazyInitDevice");
 
     monitor_.Start("UpdateGpair");
@@ -122,9 +122,9 @@ class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT
                        tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm,
                        coord_param_.top_k);
     monitor_.Start("UpdateFeature");
-    for (auto group_idx = 0; group_idx < model->learner_model_param_->num_output_group;
+    for (auto group_idx = 0; group_idx < model->learner_model_param->num_output_group;
          ++group_idx) {
-      for (auto i = 0U; i < model->learner_model_param_->num_feature; i++) {
+      for (auto i = 0U; i < model->learner_model_param->num_feature; i++) {
         auto fidx = selector_->NextFeature(
             i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat,
             tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm);
@@ -136,21 +136,21 @@ class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT
   }
 
   void UpdateBias(DMatrix *p_fmat, gbm::GBLinearModel *model) {
-    for (int group_idx = 0; group_idx < model->learner_model_param_->num_output_group;
+    for (int group_idx = 0; group_idx < model->learner_model_param->num_output_group;
          ++group_idx) {
       // Get gradient
       auto grad = GradientPair(0, 0);
       if (learner_param_->gpu_id >= 0) {
-        grad = GetBiasGradient(group_idx, model->learner_model_param_->num_output_group);
+        grad = GetBiasGradient(group_idx, model->learner_model_param->num_output_group);
       }
       auto dbias = static_cast<float>(
           tparam_.learning_rate *
           CoordinateDeltaBias(grad.GetGrad(), grad.GetHess()));
-      model->bias()[group_idx] += dbias;
+      model->Bias()[group_idx] += dbias;
 
       // Update residual
       if (learner_param_->gpu_id >= 0) {
-        UpdateBiasResidual(dbias, group_idx, model->learner_model_param_->num_output_group);
+        UpdateBiasResidual(dbias, group_idx, model->learner_model_param->num_output_group);
       }
     }
   }
@@ -162,7 +162,7 @@ class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT
     // Get gradient
     auto grad = GradientPair(0, 0);
     if (learner_param_->gpu_id >= 0) {
-      grad = GetGradient(group_idx, model->learner_model_param_->num_output_group, fidx);
+      grad = GetGradient(group_idx, model->learner_model_param->num_output_group, fidx);
     }
     auto dw = static_cast<float>(tparam_.learning_rate *
                                  CoordinateDelta(grad.GetGrad(), grad.GetHess(),
@@ -171,7 +171,7 @@ class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT
     w += dw;
 
     if (learner_param_->gpu_id >= 0) {
-      UpdateResidual(dw, group_idx, model->learner_model_param_->num_output_group, fidx);
+      UpdateResidual(dw, group_idx, model->learner_model_param->num_output_group, fidx);
     }
   }
 
@@ -186,7 +186,7 @@ class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT
         counting, f);
     auto perm = thrust::make_permutation_iterator(gpair_.data(), skip);
 
-    return dh::SumReduction(temp_, perm, num_row_);
+    return dh::SumReduction(&temp_, perm, num_row_);
   }
 
   // This needs to be public because of the __device__ lambda.
@@ -214,7 +214,7 @@ class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT
     };  // NOLINT
     thrust::transform_iterator<decltype(f), decltype(counting), GradientPair>
         multiply_iterator(counting, f);
-    return dh::SumReduction(temp_, multiply_iterator, col_size);
+    return dh::SumReduction(&temp_, multiply_iterator, col_size);
   }
 
   // This needs to be public because of the __device__ lambda.
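The dh::SumReduction calls now take the scratch buffer by pointer (&temp_) instead of by non-const reference, which makes the mutation visible at the call site; this is the style that clang-tidy's google-runtime-references check pushes toward. A minimal sketch of the idea with a hypothetical helper, not the real dh:: API:

    #include <vector>

    // Hypothetical sketch: pass out-parameters by pointer so the caller's
    // "&scratch" makes the mutation explicit, as the lint rule prefers.
    double SumWithScratch(std::vector<double>* scratch,
                          const std::vector<double>& values) {
      scratch->assign(values.begin(), values.end());  // scratch is clearly mutated
      double total = 0.0;
      for (double v : *scratch) total += v;
      return total;
    }

At a call site, SumWithScratch(&buffer, xs) reads unambiguously as writing into buffer, where SumWithScratch(buffer, xs) with a reference parameter would not.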
--- a/src/linear/updater_shotgun.cc
+++ b/src/linear/updater_shotgun.cc
@@ -25,18 +25,18 @@ class ShotgunUpdater : public LinearUpdater {
   }
   void LoadConfig(Json const& in) override {
     auto const& config = get<Object const>(in);
-    fromJson(config.at("linear_train_param"), &param_);
+    FromJson(config.at("linear_train_param"), &param_);
   }
   void SaveConfig(Json* p_out) const override {
     auto& out = *p_out;
-    out["linear_train_param"] = toJson(param_);
+    out["linear_train_param"] = ToJson(param_);
   }
 
   void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat,
               gbm::GBLinearModel *model, double sum_instance_weight) override {
     auto &gpair = in_gpair->HostVector();
     param_.DenormalizePenalties(sum_instance_weight);
-    const int ngroup = model->learner_model_param_->num_output_group;
+    const int ngroup = model->learner_model_param->num_output_group;
 
     // update bias
     for (int gid = 0; gid < ngroup; ++gid) {
@@ -44,7 +44,7 @@ class ShotgunUpdater : public LinearUpdater {
                                          in_gpair->ConstHostVector(), p_fmat);
       auto dbias = static_cast<bst_float>(param_.learning_rate *
                                           CoordinateDeltaBias(grad.first, grad.second));
-      model->bias()[gid] += dbias;
+      model->Bias()[gid] += dbias;
       UpdateBiasResidualParallel(gid, ngroup, dbias, &in_gpair->HostVector(), p_fmat);
     }
 