From 6bc9747df579e5e1825e6ad14d26fab7e0f86cca Mon Sep 17 00:00:00 2001
From: vcarpani <38493091+vcarpani@users.noreply.github.com>
Date: Thu, 8 Oct 2020 17:14:59 +0200
Subject: [PATCH] Reduce compile warnings (#6198)

Co-authored-by: Hyunsu Cho
---
 include/xgboost/tree_updater.h  |  3 +++
 src/common/survival_util.h      | 10 ++++++++++
 src/common/transform.h          |  5 ++++-
 src/linear/coordinate_common.h  | 38 +++++++++++++++++++-------------------
 src/objective/multiclass_obj.cu |  3 +++
 src/tree/tree_model.cc          |  2 +-
 6 files changed, 40 insertions(+), 21 deletions(-)

diff --git a/include/xgboost/tree_updater.h b/include/xgboost/tree_updater.h
index a091c81b0..867fd2dc8 100644
--- a/include/xgboost/tree_updater.h
+++ b/include/xgboost/tree_updater.h
@@ -72,6 +72,9 @@ class TreeUpdater : public Configurable {
    */
   virtual bool UpdatePredictionCache(const DMatrix* data,
                                      HostDeviceVector<bst_float>* out_preds) {
+    // Remove unused parameter compiler warning.
+    (void) data;
+    (void) out_preds;
     return false;
   }

diff --git a/src/common/survival_util.h b/src/common/survival_util.h
index d0b09a49f..1a384e8de 100644
--- a/src/common/survival_util.h
+++ b/src/common/survival_util.h
@@ -240,6 +240,9 @@ namespace aft {
 template <>
 XGBOOST_DEVICE inline double
 GetLimitGradAtInfPred(CensoringType censor_type, bool sign, double sigma) {
+  // Remove unused parameter compiler warning.
+  (void) sigma;
+
   switch (censor_type) {
     case CensoringType::kUncensored:
       return sign ? kMinGradient : kMaxGradient;
@@ -288,6 +291,10 @@ GetLimitGradAtInfPred(CensoringType censor_type, bool sign
 template <>
 XGBOOST_DEVICE inline double
 GetLimitHessAtInfPred(CensoringType censor_type, bool sign, double sigma) {
+  // Remove unused parameter compiler warning.
+  (void) sign;
+  (void) sigma;
+
   switch (censor_type) {
     case CensoringType::kUncensored:
     case CensoringType::kRightCensored:
@@ -317,6 +324,9 @@ GetLimitGradAtInfPred(CensoringType censor_type, bool sign,
 template <>
 XGBOOST_DEVICE inline double
 GetLimitHessAtInfPred(CensoringType censor_type, bool sign, double sigma) {
+  // Remove unused parameter compiler warning.
+  (void) sigma;
+
   switch (censor_type) {
     case CensoringType::kUncensored:
     case CensoringType::kRightCensored:
diff --git a/src/common/transform.h b/src/common/transform.h
index fa2d0d379..b3ad7fdb9 100644
--- a/src/common/transform.h
+++ b/src/common/transform.h
@@ -157,7 +157,10 @@ class Transform {
   /*! \brief Dummy funtion defined when compiling for CPU. */
   template <typename std::enable_if<!CompiledWithCuda>::type* = nullptr,
             typename... HDV>
-  void LaunchCUDA(Functor _func, HDV*... _vectors) const {
+  void LaunchCUDA(Functor _func, HDV*...) const {
+    // Remove unused parameter compiler warning.
+    (void) _func;
+
     LOG(FATAL) << "Not part of device code. WITH_CUDA: " << WITH_CUDA();
   }
 #endif  // defined(__CUDACC__)
diff --git a/src/linear/coordinate_common.h b/src/linear/coordinate_common.h
index 45c7a2b4b..c77755fc3 100644
--- a/src/linear/coordinate_common.h
+++ b/src/linear/coordinate_common.h
@@ -220,10 +220,10 @@ class FeatureSelector {
    * \param lambda Regularisation lambda.
    * \param param A parameter with algorithm-dependent use.
    */
-  virtual void Setup(const gbm::GBLinearModel &model,
-                     const std::vector<GradientPair> &gpair,
-                     DMatrix *p_fmat,
-                     float alpha, float lambda, int param) {}
+  virtual void Setup(const gbm::GBLinearModel &,
+                     const std::vector<GradientPair> &,
+                     DMatrix *,
+                     float , float , int ) {}
   /**
    * \brief Select next coordinate to update.
    *
@@ -250,8 +250,8 @@ class FeatureSelector {
 class CyclicFeatureSelector : public FeatureSelector {
  public:
   int NextFeature(int iteration, const gbm::GBLinearModel &model,
-                  int group_idx, const std::vector<GradientPair> &gpair,
-                  DMatrix *p_fmat, float alpha, float lambda) override {
+                  int , const std::vector<GradientPair> &,
+                  DMatrix *, float, float) override {
     return iteration % model.learner_model_param->num_feature;
   }
 };
@@ -263,8 +263,8 @@ class CyclicFeatureSelector : public FeatureSelector {
 class ShuffleFeatureSelector : public FeatureSelector {
  public:
   void Setup(const gbm::GBLinearModel &model,
-             const std::vector<GradientPair> &gpair,
-             DMatrix *p_fmat, float alpha, float lambda, int param) override {
+             const std::vector<GradientPair> &g,
+             DMatrix *, float, float, int) override {
     if (feat_index_.size() == 0) {
       feat_index_.resize(model.learner_model_param->num_feature);
       std::iota(feat_index_.begin(), feat_index_.end(), 0);
@@ -273,8 +273,8 @@ class ShuffleFeatureSelector : public FeatureSelector {
   }

   int NextFeature(int iteration, const gbm::GBLinearModel &model,
-                  int group_idx, const std::vector<GradientPair> &gpair,
-                  DMatrix *p_fmat, float alpha, float lambda) override {
+                  int, const std::vector<GradientPair> &,
+                  DMatrix *, float, float) override {
     return feat_index_[iteration % model.learner_model_param->num_feature];
   }

@@ -288,9 +288,9 @@ class ShuffleFeatureSelector : public FeatureSelector {
  */
 class RandomFeatureSelector : public FeatureSelector {
  public:
-  int NextFeature(int iteration, const gbm::GBLinearModel &model,
-                  int group_idx, const std::vector<GradientPair> &gpair,
-                  DMatrix *p_fmat, float alpha, float lambda) override {
+  int NextFeature(int, const gbm::GBLinearModel &model,
+                  int, const std::vector<GradientPair> &,
+                  DMatrix *, float, float) override {
     return common::GlobalRandom()() % model.learner_model_param->num_feature;
   }
 };
@@ -307,8 +307,8 @@ class RandomFeatureSelector : public FeatureSelector {
 class GreedyFeatureSelector : public FeatureSelector {
  public:
   void Setup(const gbm::GBLinearModel &model,
-             const std::vector<GradientPair> &gpair,
-             DMatrix *p_fmat, float alpha, float lambda, int param) override {
+             const std::vector<GradientPair> &,
+             DMatrix *, float, float, int param) override {
     top_k_ = static_cast<bst_uint>(param);
     const bst_uint ngroup = model.learner_model_param->num_output_group;
     if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
@@ -321,7 +321,7 @@ class GreedyFeatureSelector : public FeatureSelector {
     }
   }

-  int NextFeature(int iteration, const gbm::GBLinearModel &model,
+  int NextFeature(int, const gbm::GBLinearModel &model,
                   int group_idx, const std::vector<GradientPair> &gpair,
                   DMatrix *p_fmat, float alpha, float lambda) override {
     // k-th selected feature for a group
@@ -438,9 +438,9 @@ class ThriftyFeatureSelector : public FeatureSelector {
     }
   }

-  int NextFeature(int iteration, const gbm::GBLinearModel &model,
-                  int group_idx, const std::vector<GradientPair> &gpair,
-                  DMatrix *p_fmat, float alpha, float lambda) override {
+  int NextFeature(int, const gbm::GBLinearModel &model,
+                  int group_idx, const std::vector<GradientPair> &,
+                  DMatrix *, float, float) override {
     // k-th selected feature for a group
     auto k = counter_[group_idx]++;
     // stop after either reaching top-N or going through all the features in a group
diff --git a/src/objective/multiclass_obj.cu b/src/objective/multiclass_obj.cu
index 11c332728..d39c5a460 100644
--- a/src/objective/multiclass_obj.cu
+++ b/src/objective/multiclass_obj.cu
@@ -49,6 +49,9 @@ class SoftmaxMultiClassObj : public ObjFunction {
                    const MetaInfo& info,
                    int iter,
                    HostDeviceVector<GradientPair>* out_gpair) override {
+    // Remove unused parameter compiler warning.
+    (void) iter;
+
     if (info.labels_.Size() == 0) {
       return;
     }
diff --git a/src/tree/tree_model.cc b/src/tree/tree_model.cc
index 0447ed692..120ad757b 100644
--- a/src/tree/tree_model.cc
+++ b/src/tree/tree_model.cc
@@ -787,7 +787,7 @@ void RegTree::LoadCategoricalSplit(Json const& in) {
   if (!categories_nodes.empty()) {
     last_cat_node = get<Integer const>(categories_nodes[cnt]);
   }
-  for (size_t nidx = 0; nidx < param.num_nodes; ++nidx) {
+  for (bst_node_t nidx = 0; nidx < param.num_nodes; ++nidx) {
     if (nidx == last_cat_node) {
       auto j_begin = get<Integer const>(categories_segments[cnt]);
       auto j_end = get<Integer const>(categories_sizes[cnt]) + j_begin;
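Note (illustration, not part of the commit): the patch relies on two standard C++ idioms for silencing -Wunused-parameter. Casting a parameter to void generates no code but counts as a use, keeping the name visible as documentation (tree_updater.h, survival_util.h, transform.h, multiclass_obj.cu); dropping the parameter name altogether also works, since an unnamed parameter can never be flagged as unused (coordinate_common.h). The standalone sketch below shows both idioms side by side; every name in it (Cache, Selector, UpdateCache, NextFeature) is hypothetical and not taken from xgboost. It compiles without warnings under g++ or clang with -Wall -Wextra:

    #include <vector>

    struct Cache {};  // stand-in for a real cache type

    class Selector {
     public:
      // Idiom 1: cast each unused parameter to void. The cast is a no-op,
      // but it counts as a use, so -Wunused-parameter stays quiet and the
      // names remain in the signature as documentation.
      virtual bool UpdateCache(const Cache* data, Cache* out) {
        // Remove unused parameter compiler warning.
        (void) data;
        (void) out;
        return false;
      }

      // Idiom 2: drop the names of the unused parameters. An unnamed
      // parameter cannot be reported as unused; the commented-out names
      // are optional but keep the signature readable.
      virtual int NextFeature(int iteration,
                              const std::vector<float>& /*gpair*/,
                              float /*alpha*/, float /*lambda*/) {
        return iteration;  // only the named parameter is used
      }

      virtual ~Selector() = default;
    };

Since C++17 a named parameter can instead be annotated with [[maybe_unused]], which states the intent directly; the void-cast and unnamed-parameter forms used in the patch have the advantage of working under any C++ standard.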