diff --git a/include/xgboost/gbm.h b/include/xgboost/gbm.h
index ea4987e43..8081e15d0 100644
--- a/include/xgboost/gbm.h
+++ b/include/xgboost/gbm.h
@@ -75,8 +75,9 @@ class GradientBooster : public Model, public Configurable {
    * \param prediction The output prediction cache entry that needs to be updated.
    * the booster may change content of gpair
    */
-  virtual void DoBoost(DMatrix* p_fmat, HostDeviceVector<GradientPair>* in_gpair,
-                       PredictionCacheEntry *prediction) = 0;
+  virtual void DoBoost(DMatrix* p_fmat,
+                       HostDeviceVector<GradientPair>* in_gpair,
+                       PredictionCacheEntry*) = 0;

   /*!
    * \brief generate predictions for given feature matrix
@@ -103,10 +104,10 @@ class GradientBooster : public Model, public Configurable {
    * \param layer_begin (Optional) Begining of boosted tree layer used for prediction.
    * \param layer_end (Optional) End of booster layer. 0 means do not limit trees.
    */
-  virtual void InplacePredict(dmlc::any const &x, float missing,
-                              PredictionCacheEntry *out_preds,
-                              uint32_t layer_begin = 0,
-                              uint32_t layer_end = 0) const {
+  virtual void InplacePredict(dmlc::any const &, float,
+                              PredictionCacheEntry*,
+                              uint32_t,
+                              uint32_t) const {
     LOG(FATAL) << "Inplace predict is not supported by current booster.";
   }
   /*!
diff --git a/include/xgboost/metric.h b/include/xgboost/metric.h
index 8e9d2afe6..faca15504 100644
--- a/include/xgboost/metric.h
+++ b/include/xgboost/metric.h
@@ -41,14 +41,14 @@ class Metric : public Configurable {
    *  override this function to maintain internal configuration
    * \param in JSON object containing the configuration
    */
-  void LoadConfig(Json const& in) override {}
+  void LoadConfig(Json const&) override {}
   /*!
    * \brief Save configuration to JSON object
    * By default, metric has no internal configuration;
    *  override this function to maintain internal configuration
    * \param out pointer to output JSON object
    */
-  void SaveConfig(Json* out) const override {}
+  void SaveConfig(Json*) const override {}

   /*!
    * \brief evaluate a specific metric
diff --git a/include/xgboost/objective.h b/include/xgboost/objective.h
index 1c0942ed0..dcf244987 100644
--- a/include/xgboost/objective.h
+++ b/include/xgboost/objective.h
@@ -53,7 +53,7 @@ class ObjFunction : public Configurable {
    * \brief transform prediction values, this is only called when Prediction is called
    * \param io_preds prediction values, saves to this vector as well
    */
-  virtual void PredTransform(HostDeviceVector<bst_float> *io_preds) {}
+  virtual void PredTransform(HostDeviceVector<bst_float>*) {}
   /*!
    * \brief transform prediction values, this is only called when Eval is called,
diff --git a/src/c_api/c_api.cc b/src/c_api/c_api.cc
index 397f83e69..4e5aff625 100644
--- a/src/c_api/c_api.cc
+++ b/src/c_api/c_api.cc
@@ -240,7 +240,7 @@ XGB_DLL int XGDMatrixFree(DMatrixHandle handle) {
 }

 XGB_DLL int XGDMatrixSaveBinary(DMatrixHandle handle, const char* fname,
-                                int silent) {
+                                int) {
   API_BEGIN();
   CHECK_HANDLE();
   auto dmat = static_cast<std::shared_ptr<DMatrix>*>(handle)->get();
@@ -528,8 +528,8 @@ XGB_DLL int XGBoosterPredictFromDense(BoosterHandle handle, float *values,
                                       xgboost::bst_ulong n_rows,
                                       xgboost::bst_ulong n_cols,
                                       float missing,
-                                      unsigned iteration_begin,
-                                      unsigned iteration_end,
+                                      unsigned,
+                                      unsigned,
                                       char const* c_type,
                                       xgboost::bst_ulong cache_id,
                                       xgboost::bst_ulong *out_len,
@@ -560,8 +560,8 @@ XGB_DLL int XGBoosterPredictFromCSR(BoosterHandle handle,
                                     size_t nelem,
                                     size_t num_col,
                                     float missing,
-                                    unsigned iteration_begin,
-                                    unsigned iteration_end,
+                                    unsigned,
+                                    unsigned,
                                     char const *c_type,
                                     xgboost::bst_ulong cache_id,
                                     xgboost::bst_ulong *out_len,
diff --git a/src/common/column_matrix.h b/src/common/column_matrix.h
index ad5bfe373..2f43b828d 100644
--- a/src/common/column_matrix.h
+++ b/src/common/column_matrix.h
@@ -43,7 +43,7 @@ class Column {
   BinIdxType GetFeatureBinIdx(size_t idx) const { return index_[idx]; }

-  const uint32_t GetBaseIdx() const { return index_base_; }
+  uint32_t GetBaseIdx() const { return index_base_; }

   common::Span<const BinIdxType> GetFeatureBinIdxPtr() const { return index_; }
@@ -179,12 +179,12 @@ class ColumnMatrix {
          but for ColumnMatrix we still have a chance to reduce the memory consumption */
     } else {
       if (bins_type_size_ == kUint8BinsTypeSize) {
-        SetIndex<uint8_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
+        SetIndex<uint8_t>(gmat.index.data<uint32_t>(), gmat, nfeature);
       } else if (bins_type_size_ == kUint16BinsTypeSize) {
-        SetIndex<uint16_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
+        SetIndex<uint16_t>(gmat.index.data<uint32_t>(), gmat, nfeature);
       } else {
         CHECK_EQ(bins_type_size_, kUint32BinsTypeSize);
-        SetIndex<uint32_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
+        SetIndex<uint32_t>(gmat.index.data<uint32_t>(), gmat, nfeature);
       }
     }
   }
@@ -271,7 +271,7 @@ class ColumnMatrix {
   template <typename T>
   inline void SetIndex(uint32_t* index, const GHistIndexMatrix& gmat,
-                       const size_t nrow, const size_t nfeature) {
+                       const size_t nfeature) {
     std::vector<size_t> num_nonzeros;
     num_nonzeros.resize(nfeature);
     std::fill(num_nonzeros.begin(), num_nonzeros.end(), 0);
@@ -311,18 +311,18 @@ class ColumnMatrix {
       rbegin += batch.Size();
     }
   }
-  const BinTypeSize GetTypeSize() const {
+  BinTypeSize GetTypeSize() const {
     return bins_type_size_;
   }

   // This is just an utility function
-  const bool NoMissingValues(const size_t n_elements,
+  bool NoMissingValues(const size_t n_elements,
                              const size_t n_row, const size_t n_features) {
     return n_elements == n_features * n_row;
   }

   // And this returns part of state
-  const bool AnyMissing() const {
+  bool AnyMissing() const {
     return any_missing_;
   }
diff --git a/src/common/hist_util.cc b/src/common/hist_util.cc
index 4ecdec0ba..01ed4bfc4 100644
--- a/src/common/hist_util.cc
+++ b/src/common/hist_util.cc
@@ -29,8 +29,7 @@
 namespace xgboost {
 namespace common {

-void GHistIndexMatrix::ResizeIndex(const size_t rbegin, const SparsePage& batch,
-                                   const size_t n_offsets, const size_t n_index,
+void GHistIndexMatrix::ResizeIndex(const size_t n_index,
                                    const bool isDense) {
   if ((max_num_bins - 1 <= static_cast<int>(std::numeric_limits<uint8_t>::max())) && isDense) {
     index.SetBinTypeSize(kUint8BinsTypeSize);
@@ -119,7 +118,7 @@ void GHistIndexMatrix::Init(DMatrix* p_fmat, int max_bins) {
     const size_t n_offsets = cut.Ptrs().size() - 1;
     const size_t n_index = row_ptr[rbegin + batch.Size()];
-    ResizeIndex(rbegin, batch, n_offsets, n_index, isDense);
+    ResizeIndex(n_index, isDense);

     CHECK_GT(cut.Values().size(), 0U);
diff --git a/src/common/hist_util.h b/src/common/hist_util.h
index 9c14b2455..809bb0e4c 100644
--- a/src/common/hist_util.h
+++ b/src/common/hist_util.h
@@ -273,8 +273,7 @@ struct GHistIndexMatrix {
     }
   }

-  void ResizeIndex(const size_t rbegin, const SparsePage& batch,
-                   const size_t n_offsets, const size_t n_index,
+  void ResizeIndex(const size_t n_index,
                    const bool isDense);

   inline void GetFeatureCounts(size_t* counts) const {
diff --git a/src/common/io.h b/src/common/io.h
index d9544a6d1..70ad11124 100644
--- a/src/common/io.h
+++ b/src/common/io.h
@@ -32,7 +32,7 @@ class PeekableInStream : public dmlc::Stream {
   size_t Read(void* dptr, size_t size) override;
   virtual size_t PeekRead(void* dptr, size_t size);

-  void Write(const void* dptr, size_t size) override {
+  void Write(const void*, size_t) override {
     LOG(FATAL) << "Not implemented";
   }
@@ -60,7 +60,7 @@ class FixedSizeStream : public PeekableInStream {
   size_t Tell() const { return pointer_; }
   void Seek(size_t pos);

-  void Write(const void* dptr, size_t size) override {
+  void Write(const void*, size_t) override {
     LOG(FATAL) << "Not implemented";
   }
diff --git a/src/data/adapter.h b/src/data/adapter.h
index c3981c24f..4d7c924c3 100644
--- a/src/data/adapter.h
+++ b/src/data/adapter.h
@@ -136,8 +136,7 @@ class CSRAdapterBatch : public detail::NoMetaInfo {
     const float* values_;
   };
   CSRAdapterBatch(const size_t* row_ptr, const unsigned* feature_idx,
-                  const float* values, size_t num_rows, size_t num_elements,
-                  size_t num_features)
+                  const float* values, size_t num_rows, size_t, size_t)
       : row_ptr_(row_ptr),
         feature_idx_(feature_idx),
         values_(values),
diff --git a/src/gbm/gblinear.cc b/src/gbm/gblinear.cc
index 728265b81..81de5bea6 100644
--- a/src/gbm/gblinear.cc
+++ b/src/gbm/gblinear.cc
@@ -113,7 +113,7 @@ class GBLinear : public GradientBooster {
   void DoBoost(DMatrix *p_fmat,
                HostDeviceVector<GradientPair> *in_gpair,
-               PredictionCacheEntry* predt) override {
+               PredictionCacheEntry*) override {
     monitor_.Start("DoBoost");
     model_.LazyInitModel();
@@ -128,8 +128,7 @@ class GBLinear : public GradientBooster {
   void PredictBatch(DMatrix *p_fmat,
                     PredictionCacheEntry *predts,
-                    bool training,
-                    unsigned ntree_limit) override {
+                    bool, unsigned ntree_limit) override {
     monitor_.Start("PredictBatch");
     auto* out_preds = &predts->predictions;
     CHECK_EQ(ntree_limit, 0U)
@@ -140,7 +139,7 @@ class GBLinear : public GradientBooster {
   // add base margin
   void PredictInstance(const SparsePage::Inst &inst,
                        std::vector<bst_float> *out_preds,
-                       unsigned ntree_limit) override {
+                       unsigned) override {
     const int ngroup = model_.learner_model_param->num_output_group;
     for (int gid = 0; gid < ngroup; ++gid) {
       this->Pred(inst, dmlc::BeginPtr(*out_preds), gid,
@@ -148,16 +147,15 @@ class GBLinear : public GradientBooster {
     }
   }

-  void PredictLeaf(DMatrix *p_fmat,
-                   std::vector<bst_float> *out_preds,
-                   unsigned ntree_limit) override {
+  void PredictLeaf(DMatrix*,
+                   std::vector<bst_float>*,
+                   unsigned) override {
     LOG(FATAL) << "gblinear does not support prediction of leaf index";
   }

   void PredictContribution(DMatrix* p_fmat,
                            HostDeviceVector<bst_float>* out_contribs,
-                           unsigned ntree_limit, bool approximate, int condition = 0,
-                           unsigned condition_feature = 0) override {
+                           unsigned ntree_limit, bool, int, unsigned) override {
     model_.LazyInitModel();
     CHECK_EQ(ntree_limit, 0U)
         << "GBLinear::PredictContribution: ntrees is only valid for gbtree predictor";
ntrees is only valid for gbtree predictor"; @@ -196,7 +194,7 @@ class GBLinear : public GradientBooster { void PredictInteractionContributions(DMatrix* p_fmat, HostDeviceVector* out_contribs, - unsigned ntree_limit, bool approximate) override { + unsigned, bool) override { std::vector& contribs = out_contribs->HostVector(); // linear models have no interaction effects diff --git a/src/gbm/gblinear_model.h b/src/gbm/gblinear_model.h index f2d0d9a86..6e3e83105 100644 --- a/src/gbm/gblinear_model.h +++ b/src/gbm/gblinear_model.h @@ -95,7 +95,7 @@ class GBLinearModel : public Model { return &weight[i * learner_model_param->num_output_group]; } - std::vector DumpModel(const FeatureMap &fmap, bool with_stats, + std::vector DumpModel(const FeatureMap &, bool, std::string format) const { const int ngroup = learner_model_param->num_output_group; const unsigned nfeature = learner_model_param->num_feature; diff --git a/src/gbm/gbtree.cc b/src/gbm/gbtree.cc index f6679d1a0..1ad22ffea 100644 --- a/src/gbm/gbtree.cc +++ b/src/gbm/gbtree.cc @@ -401,7 +401,7 @@ void GBTree::SaveModel(Json* p_out) const { void GBTree::PredictBatch(DMatrix* p_fmat, PredictionCacheEntry* out_preds, - bool training, + bool, unsigned ntree_limit) { CHECK(configured_); GetPredictor(&out_preds->predictions, p_fmat) @@ -601,8 +601,8 @@ class Dart : public GBTree { void PredictContribution(DMatrix* p_fmat, HostDeviceVector* out_contribs, - unsigned ntree_limit, bool approximate, int condition, - unsigned condition_feature) override { + unsigned ntree_limit, bool approximate, int, + unsigned) override { CHECK(configured_); cpu_predictor_->PredictContribution(p_fmat, out_contribs, model_, ntree_limit, &weight_drop_, approximate); @@ -674,8 +674,7 @@ class Dart : public GBTree { // commit new trees all at once void CommitModel(std::vector>>&& new_trees, - DMatrix* m, - PredictionCacheEntry* predts) override { + DMatrix*, PredictionCacheEntry*) override { int num_new_trees = 0; for (uint32_t gid = 0; gid < model_.learner_model_param->num_output_group; ++gid) { num_new_trees += new_trees[gid].size(); diff --git a/src/gbm/gbtree.h b/src/gbm/gbtree.h index 85f167abc..fbb227d2b 100644 --- a/src/gbm/gbtree.h +++ b/src/gbm/gbtree.h @@ -239,7 +239,7 @@ class GBTree : public GradientBooster { void PredictContribution(DMatrix* p_fmat, HostDeviceVector* out_contribs, unsigned ntree_limit, bool approximate, - int condition, unsigned condition_feature) override { + int, unsigned) override { CHECK(configured_); this->GetPredictor()->PredictContribution( p_fmat, out_contribs, model_, ntree_limit, nullptr, approximate); diff --git a/src/linear/coordinate_common.h b/src/linear/coordinate_common.h index c77755fc3..14f99bb3d 100644 --- a/src/linear/coordinate_common.h +++ b/src/linear/coordinate_common.h @@ -263,7 +263,7 @@ class CyclicFeatureSelector : public FeatureSelector { class ShuffleFeatureSelector : public FeatureSelector { public: void Setup(const gbm::GBLinearModel &model, - const std::vector &g, + const std::vector&, DMatrix *, float, float, int) override { if (feat_index_.size() == 0) { feat_index_.resize(model.learner_model_param->num_feature); diff --git a/src/objective/rank_obj.cu b/src/objective/rank_obj.cu index e7a220dbc..2482f9e95 100644 --- a/src/objective/rank_obj.cu +++ b/src/objective/rank_obj.cu @@ -111,17 +111,17 @@ class PairwiseLambdaWeightComputer { * \param list a list that is sorted by pred score * \param io_pairs record of pairs, containing the pairs to fill in weights */ - static void GetLambdaWeight(const std::vector 
-                              std::vector<LambdaPair> *io_pairs) {}
+  static void GetLambdaWeight(const std::vector<ListEntry>&,
+                              std::vector<LambdaPair>*) {}

   static char const* Name() {
     return "rank:pairwise";
   }

 #if defined(__CUDACC__)
-  PairwiseLambdaWeightComputer(const bst_float *dpreds,
-                               const bst_float *dlabels,
-                               const dh::SegmentSorter<float> &segment_label_sorter) {}
+  PairwiseLambdaWeightComputer(const bst_float*,
+                               const bst_float*,
+                               const dh::SegmentSorter<float>&) {}

   class PairwiseLambdaWeightMultiplier {
    public:
@@ -270,7 +270,7 @@ class NDCGLambdaWeightComputer
   };

   NDCGLambdaWeightComputer(const bst_float *dpreds,
-                           const bst_float *dlabels,
+                           const bst_float*,
                            const dh::SegmentSorter<float> &segment_label_sorter)
       : IndexablePredictionSorter(dpreds, segment_label_sorter),
         dgroup_dcg_(segment_label_sorter.GetNumGroups(), 0.0f),
@@ -293,7 +293,7 @@ class NDCGLambdaWeightComputer
                       group_segments)),
         thrust::make_discard_iterator(),  // We don't care for the group indices
         dgroup_dcg_.begin());  // Sum of the item's DCG values in the group
-    CHECK(end_range.second - dgroup_dcg_.begin() == dgroup_dcg_.size());
+    CHECK(static_cast<size_t>(end_range.second - dgroup_dcg_.begin()) == dgroup_dcg_.size());
   }

   inline const common::Span<const float> GetGroupDcgsSpan() const {
diff --git a/src/objective/regression_loss.h b/src/objective/regression_loss.h
index c6b028914..914a6704f 100644
--- a/src/objective/regression_loss.h
+++ b/src/objective/regression_loss.h
@@ -18,11 +18,11 @@ struct LinearSquareLoss {
   // duplication is necessary, as __device__ specifier
   // cannot be made conditional on template parameter
   XGBOOST_DEVICE static bst_float PredTransform(bst_float x) { return x; }
-  XGBOOST_DEVICE static bool CheckLabel(bst_float x) { return true; }
+  XGBOOST_DEVICE static bool CheckLabel(bst_float) { return true; }
   XGBOOST_DEVICE static bst_float FirstOrderGradient(bst_float predt, bst_float label) {
     return predt - label;
   }
-  XGBOOST_DEVICE static bst_float SecondOrderGradient(bst_float predt, bst_float label) {
+  XGBOOST_DEVICE static bst_float SecondOrderGradient(bst_float, bst_float) {
     return 1.0f;
   }
   template <typename T>
@@ -72,7 +72,7 @@ struct LogisticRegression {
   XGBOOST_DEVICE static bst_float FirstOrderGradient(bst_float predt, bst_float label) {
     return predt - label;
   }
-  XGBOOST_DEVICE static bst_float SecondOrderGradient(bst_float predt, bst_float label) {
+  XGBOOST_DEVICE static bst_float SecondOrderGradient(bst_float predt, bst_float) {
     const float eps = 1e-16f;
     return fmaxf(predt * (1.0f - predt), eps);
   }
@@ -102,7 +102,7 @@ struct PseudoHuberError {
   XGBOOST_DEVICE static bst_float PredTransform(bst_float x) {
     return x;
   }
-  XGBOOST_DEVICE static bool CheckLabel(bst_float label) {
+  XGBOOST_DEVICE static bool CheckLabel(bst_float) {
     return true;
   }
   XGBOOST_DEVICE static bst_float FirstOrderGradient(bst_float predt, bst_float label) {
@@ -144,7 +144,7 @@ struct LogisticRaw : public LogisticRegression {
     predt = common::Sigmoid(predt);
     return predt - label;
   }
-  XGBOOST_DEVICE static bst_float SecondOrderGradient(bst_float predt, bst_float label) {
+  XGBOOST_DEVICE static bst_float SecondOrderGradient(bst_float predt, bst_float) {
     const float eps = 1e-16f;
     predt = common::Sigmoid(predt);
     return fmaxf(predt * (1.0f - predt), eps);
diff --git a/src/objective/regression_obj.cu b/src/objective/regression_obj.cu
index e71e1f035..764850d19 100644
--- a/src/objective/regression_obj.cu
+++ b/src/objective/regression_obj.cu
@@ -52,8 +52,7 @@ class RegLossObj : public ObjFunction {
   }

   void GetGradient(const HostDeviceVector<bst_float>& preds,
-                   const MetaInfo &info,
-                   int iter,
+                   const MetaInfo &info, int,
                    HostDeviceVector<GradientPair>* out_gpair) override {
     CHECK_EQ(preds.Size(), info.labels_.Size())
         << " " << "labels are not correctly provided"
@@ -191,8 +190,7 @@ class PoissonRegression : public ObjFunction {
   }

   void GetGradient(const HostDeviceVector<bst_float>& preds,
-                   const MetaInfo &info,
-                   int iter,
+                   const MetaInfo &info, int,
                    HostDeviceVector<GradientPair> *out_gpair) override {
     CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
     CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
@@ -280,11 +278,10 @@ XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson")
 class CoxRegression : public ObjFunction {
  public:
   void Configure(
-      const std::vector<std::pair<std::string, std::string> > &args) override {}
+      const std::vector<std::pair<std::string, std::string> >&) override {}

   void GetGradient(const HostDeviceVector<bst_float>& preds,
-                   const MetaInfo &info,
-                   int iter,
+                   const MetaInfo &info, int,
                    HostDeviceVector<GradientPair> *out_gpair) override {
     CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
     CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
@@ -379,11 +376,10 @@ XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox")
 class GammaRegression : public ObjFunction {
  public:
   void Configure(
-      const std::vector<std::pair<std::string, std::string> > &args) override {}
+      const std::vector<std::pair<std::string, std::string> >&) override {}

   void GetGradient(const HostDeviceVector<bst_float> &preds,
-                   const MetaInfo &info,
-                   int iter,
+                   const MetaInfo &info, int,
                    HostDeviceVector<GradientPair> *out_gpair) override {
     CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
     CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
@@ -479,8 +475,7 @@ class TweedieRegression : public ObjFunction {
   }

   void GetGradient(const HostDeviceVector<bst_float>& preds,
-                   const MetaInfo &info,
-                   int iter,
+                   const MetaInfo &info, int,
                    HostDeviceVector<GradientPair> *out_gpair) override {
     CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
     CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
diff --git a/src/predictor/gpu_predictor.cu b/src/predictor/gpu_predictor.cu
index 39035a3d8..d8c3e5c06 100644
--- a/src/predictor/gpu_predictor.cu
+++ b/src/predictor/gpu_predictor.cu
@@ -110,9 +110,8 @@ struct SparsePageLoader {

 struct EllpackLoader {
   EllpackDeviceAccessor const& matrix;
-  XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool use_shared,
-                               bst_feature_t num_features, bst_row_t num_rows,
-                               size_t entry_start)
+  XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool,
+                               bst_feature_t, bst_row_t, size_t)
       : matrix{m} {}
   __device__ __forceinline__ float GetElement(size_t ridx, size_t fidx) const {
     auto gidx = matrix.GetBinIndex(ridx, fidx);
@@ -587,7 +586,7 @@ class GPUPredictor : public xgboost::Predictor {
   template <typename Adapter, typename Loader>
   void DispatchedInplacePredict(dmlc::any const &x,
-                                const gbm::GBTreeModel &model, float missing,
+                                const gbm::GBTreeModel &model, float,
                                 PredictionCacheEntry *out_preds,
                                 uint32_t tree_begin, uint32_t tree_end) const {
     auto max_shared_memory_bytes = dh::MaxSharedMemory(this->generic_param_->gpu_id);
@@ -648,9 +647,9 @@ class GPUPredictor : public xgboost::Predictor {
   void PredictContribution(DMatrix* p_fmat,
                            HostDeviceVector<bst_float>* out_contribs,
                            const gbm::GBTreeModel& model, unsigned ntree_limit,
-                           std::vector<bst_float>* tree_weights,
-                           bool approximate, int condition,
-                           unsigned condition_feature) override {
+                           std::vector<bst_float>*,
+                           bool approximate, int,
+                           unsigned) override {
     if (approximate) {
       LOG(FATAL) << "Approximated contribution is not implemented in GPU Predictor.";
     }
@@ -702,7 +701,7 @@
                                        HostDeviceVector<bst_float>* out_contribs,
                                        const gbm::GBTreeModel& model,
                                        unsigned ntree_limit,
-                                       std::vector<bst_float>* tree_weights,
+                                       std::vector<bst_float>*,
                                        bool approximate) override {
     if (approximate) {
       LOG(FATAL) << "[Internal error]: " << __func__
@@ -774,16 +773,16 @@
     }
   }

-  void PredictInstance(const SparsePage::Inst& inst,
-                       std::vector<bst_float>* out_preds,
-                       const gbm::GBTreeModel& model, unsigned ntree_limit) override {
+  void PredictInstance(const SparsePage::Inst&,
+                       std::vector<bst_float>*,
+                       const gbm::GBTreeModel&, unsigned) override {
     LOG(FATAL) << "[Internal error]: " << __func__
                << " is not implemented in GPU Predictor.";
   }

-  void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
-                   const gbm::GBTreeModel& model,
-                   unsigned ntree_limit) override {
+  void PredictLeaf(DMatrix*, std::vector<bst_float>*,
+                   const gbm::GBTreeModel&,
+                   unsigned) override {
     LOG(FATAL) << "[Internal error]: " << __func__
                << " is not implemented in GPU Predictor.";
   }
diff --git a/src/tree/gpu_hist/gradient_based_sampler.cu b/src/tree/gpu_hist/gradient_based_sampler.cu
index eb441f39e..3caa83f13 100644
--- a/src/tree/gpu_hist/gradient_based_sampler.cu
+++ b/src/tree/gpu_hist/gradient_based_sampler.cu
@@ -233,7 +233,7 @@ GradientBasedSample ExternalMemoryUniformSampling::Sample(common::Span
                   position, common::Span position_out,
                   common::Span ridx, common::Span ridx_out,
-                  bst_node_t left_nidx, bst_node_t right_nidx,
+                  bst_node_t left_nidx, bst_node_t,
                   int64_t* d_left_count, cudaStream_t stream) {
   WriteResultsFunctor write_results{left_nidx, position, position_out,
                                     ridx, ridx_out, d_left_count};
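
Note on the pattern applied throughout this patch: C++ permits a function parameter to be left unnamed, and an unnamed parameter is the idiomatic way to match an interface an override must satisfy while silencing -Wunused-parameter. The patch likewise drops top-level const from by-value return types (const BinTypeSize, const bool), which the compiler flags with -Wignored-qualifiers because the qualifier is discarded anyway, and the NDCG hunk adds a static_cast to avoid a signed/unsigned comparison. A minimal self-contained sketch of all three fixes follows; the Booster and StubBooster classes and every name in them are illustrative inventions, not XGBoost's actual API.

#include <cstddef>
#include <cstdio>

// Hypothetical interface, for illustration only.
class Booster {
 public:
  virtual ~Booster() = default;
  // The commented-out name documents the parameter without binding it,
  // so -Wunused-parameter stays quiet even for inline defaults.
  virtual void Predict(const float* data, std::size_t n, int /*ntree_limit*/) = 0;
  // A by-value return drops top-level const, so `const bool` here would
  // only trigger -Wignored-qualifiers; plain bool is the correct form.
  virtual bool Configured() const { return true; }
};

class StubBooster : public Booster {
 public:
  // The last parameter must be accepted to match the base signature, but
  // this override ignores it, so it is simply left unnamed.
  void Predict(const float* data, std::size_t n, int) override {
    // Same idea as the static_cast added in the NDCG hunk: convert the
    // signed pointer difference before comparing with an unsigned size.
    const float* end = data + n;
    if (static_cast<std::size_t>(end - data) == n) {
      std::printf("predicting on %zu values\n", n);
    }
  }
};

int main() {
  StubBooster b;
  float x[3] = {0.1f, 0.2f, 0.3f};
  b.Predict(x, 3, 0);  // the unused ntree_limit argument is accepted and ignored
  return 0;
}

Leaving the parameter unnamed (or keeping the name in a comment, as in int /*ntree_limit*/) is generally preferred over a `(void)arg;` cast in the body, since the signature itself then documents that the argument is intentionally unused and cannot drift out of sync with the implementation.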