Reduce compile warnings (#6198)

Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
vcarpani 2020-10-08 17:14:59 +02:00 committed by GitHub
parent a4ce0eae43
commit 6bc9747df5
6 changed files with 40 additions and 21 deletions

include/xgboost/tree_updater.h

@@ -72,6 +72,9 @@ class TreeUpdater : public Configurable {
    */
   virtual bool UpdatePredictionCache(const DMatrix* data,
                                      HostDeviceVector<bst_float>* out_preds) {
+    // Remove unused parameter compiler warning.
+    (void) data;
+    (void) out_preds;
     return false;
   }
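
The `(void)` cast above is the portable pre-C++17 idiom for marking a parameter as intentionally unused: the cast is an expression with no effect, but it counts as a use, so `-Wunused-parameter` stays quiet. A minimal sketch of the same pattern (illustrative names, not from this commit), assuming a plain `g++ -Wall -Wextra` build:

    // unused_param.cc: compile with g++ -Wall -Wextra -c unused_param.cc
    struct Updater {
      // Default no-op implementation of a virtual hook: the parameters exist
      // for the benefit of overrides, so the base version never reads them.
      virtual bool UpdatePredictionCache(const float* data, float* out_preds) {
        (void) data;       // counts as a "use", silencing -Wunused-parameter
        (void) out_preds;
        return false;
      }
      virtual ~Updater() = default;
    };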

src/common/survival_util.h

@@ -240,6 +240,9 @@ namespace aft {
 template <>
 XGBOOST_DEVICE inline double
 GetLimitGradAtInfPred<NormalDistribution>(CensoringType censor_type, bool sign, double sigma) {
+  // Remove unused parameter compiler warning.
+  (void) sigma;
   switch (censor_type) {
     case CensoringType::kUncensored:
       return sign ? kMinGradient : kMaxGradient;
@@ -288,6 +291,10 @@ GetLimitGradAtInfPred<LogisticDistribution>(CensoringType censor_type, bool sign
 template <>
 XGBOOST_DEVICE inline double
 GetLimitHessAtInfPred<LogisticDistribution>(CensoringType censor_type, bool sign, double sigma) {
+  // Remove unused parameter compiler warning.
+  (void) sign;
+  (void) sigma;
   switch (censor_type) {
     case CensoringType::kUncensored:
     case CensoringType::kRightCensored:
@@ -317,6 +324,9 @@ GetLimitGradAtInfPred<ExtremeDistribution>(CensoringType censor_type, bool sign,
 template <>
 XGBOOST_DEVICE inline double
 GetLimitHessAtInfPred<ExtremeDistribution>(CensoringType censor_type, bool sign, double sigma) {
+  // Remove unused parameter compiler warning.
+  (void) sigma;
   switch (censor_type) {
     case CensoringType::kUncensored:
     case CensoringType::kRightCensored:
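
Had the codebase been able to assume C++17 everywhere, the same warnings could be silenced declaratively with the standard `[[maybe_unused]]` attribute rather than casts; the `(void)` form was presumably kept for compatibility with the project's language standard and compilers. A sketch of the attribute variant (hypothetical function, not the commit's approach):

    // maybe_unused.cc: requires C++17, g++ -std=c++17 -Wall -Wextra -c maybe_unused.cc
    // sigma matters only for some distributions; this variant ignores it.
    double GetLimitGrad(bool sign, [[maybe_unused]] double sigma) {
      return sign ? -1.0 : 1.0;
    }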

src/common/transform.h

@@ -157,7 +157,10 @@ class Transform {
   /*! \brief Dummy function defined when compiling for CPU. */
   template <typename std::enable_if<!CompiledWithCuda>::type* = nullptr,
             typename... HDV>
-  void LaunchCUDA(Functor _func, HDV*... _vectors) const {
+  void LaunchCUDA(Functor _func, HDV*...) const {
+    // Remove unused parameter compiler warning.
+    (void) _func;
     LOG(FATAL) << "Not part of device code. WITH_CUDA: " << WITH_CUDA();
   }
 #endif  // defined(__CUDACC__)
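
Dropping the name from the parameter pack (`HDV*... _vectors` becomes `HDV*...`) is another way to suppress the warning: an unnamed parameter cannot be "unused". A sketch of the same shape (simplified signature, not the real Transform API):

    // unnamed_pack.cc: compile with g++ -Wall -Wextra -c unnamed_pack.cc
    #include <iostream>
    // CPU stub that accepts the same arguments as a CUDA launcher but never
    // touches the vectors; the unnamed pack generates no unused warnings.
    template <typename Functor, typename... HDV>
    void LaunchCUDAStub(Functor _func, HDV*...) {
      (void) _func;  // _func keeps its name for readability, so cast it away
      std::cerr << "Not compiled with CUDA support\n";
    }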

src/linear/coordinate_common.h

@@ -220,10 +220,10 @@ class FeatureSelector {
    * \param lambda Regularisation lambda.
    * \param param A parameter with algorithm-dependent use.
    */
-  virtual void Setup(const gbm::GBLinearModel &model,
-                     const std::vector<GradientPair> &gpair,
-                     DMatrix *p_fmat,
-                     float alpha, float lambda, int param) {}
+  virtual void Setup(const gbm::GBLinearModel &,
+                     const std::vector<GradientPair> &,
+                     DMatrix *,
+                     float, float, int) {}
   /**
    * \brief Select next coordinate to update.
    *
@@ -250,8 +250,8 @@ class FeatureSelector {
 class CyclicFeatureSelector : public FeatureSelector {
  public:
   int NextFeature(int iteration, const gbm::GBLinearModel &model,
-                  int group_idx, const std::vector<GradientPair> &gpair,
-                  DMatrix *p_fmat, float alpha, float lambda) override {
+                  int, const std::vector<GradientPair> &,
+                  DMatrix *, float, float) override {
     return iteration % model.learner_model_param->num_feature;
   }
 };
@@ -263,8 +263,8 @@ class CyclicFeatureSelector : public FeatureSelector {
 class ShuffleFeatureSelector : public FeatureSelector {
  public:
   void Setup(const gbm::GBLinearModel &model,
-             const std::vector<GradientPair> &gpair,
-             DMatrix *p_fmat, float alpha, float lambda, int param) override {
+             const std::vector<GradientPair> &,
+             DMatrix *, float, float, int) override {
     if (feat_index_.size() == 0) {
       feat_index_.resize(model.learner_model_param->num_feature);
       std::iota(feat_index_.begin(), feat_index_.end(), 0);
@@ -273,8 +273,8 @@ class ShuffleFeatureSelector : public FeatureSelector {
   }
   int NextFeature(int iteration, const gbm::GBLinearModel &model,
-                  int group_idx, const std::vector<GradientPair> &gpair,
-                  DMatrix *p_fmat, float alpha, float lambda) override {
+                  int, const std::vector<GradientPair> &,
+                  DMatrix *, float, float) override {
     return feat_index_[iteration % model.learner_model_param->num_feature];
   }
@@ -288,9 +288,9 @@ class ShuffleFeatureSelector : public FeatureSelector {
  */
 class RandomFeatureSelector : public FeatureSelector {
  public:
-  int NextFeature(int iteration, const gbm::GBLinearModel &model,
-                  int group_idx, const std::vector<GradientPair> &gpair,
-                  DMatrix *p_fmat, float alpha, float lambda) override {
+  int NextFeature(int, const gbm::GBLinearModel &model,
+                  int, const std::vector<GradientPair> &,
+                  DMatrix *, float, float) override {
     return common::GlobalRandom()() % model.learner_model_param->num_feature;
   }
 };
@@ -307,8 +307,8 @@ class RandomFeatureSelector : public FeatureSelector {
 class GreedyFeatureSelector : public FeatureSelector {
  public:
   void Setup(const gbm::GBLinearModel &model,
-             const std::vector<GradientPair> &gpair,
-             DMatrix *p_fmat, float alpha, float lambda, int param) override {
+             const std::vector<GradientPair> &,
+             DMatrix *, float, float, int param) override {
     top_k_ = static_cast<bst_uint>(param);
     const bst_uint ngroup = model.learner_model_param->num_output_group;
     if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
@@ -321,7 +321,7 @@ class GreedyFeatureSelector : public FeatureSelector {
     }
   }
-  int NextFeature(int iteration, const gbm::GBLinearModel &model,
+  int NextFeature(int, const gbm::GBLinearModel &model,
                   int group_idx, const std::vector<GradientPair> &gpair,
                   DMatrix *p_fmat, float alpha, float lambda) override {
     // k-th selected feature for a group
@@ -438,9 +438,9 @@ class ThriftyFeatureSelector : public FeatureSelector {
     }
   }
-  int NextFeature(int iteration, const gbm::GBLinearModel &model,
-                  int group_idx, const std::vector<GradientPair> &gpair,
-                  DMatrix *p_fmat, float alpha, float lambda) override {
+  int NextFeature(int, const gbm::GBLinearModel &model,
+                  int group_idx, const std::vector<GradientPair> &,
+                  DMatrix *, float, float) override {
     // k-th selected feature for a group
     auto k = counter_[group_idx]++;
     // stop after either reaching top-N or going through all the features in a group
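
All the selector changes rely on the same rule: parameter names are optional in a function definition, and an override is matched on parameter types alone, so unused parameters can simply lose their names. A condensed sketch (simplified types, not the real FeatureSelector interface):

    // unnamed_override.cc: compile with g++ -Wall -Wextra -c unnamed_override.cc
    #include <vector>
    struct Selector {
      virtual int NextFeature(int iteration, const std::vector<float>& gpair,
                              float alpha, float lambda) = 0;
      virtual ~Selector() = default;
    };
    struct Cyclic : Selector {
      // Same types, fewer names: only the parameters actually read keep theirs,
      // so -Wunused-parameter has nothing to flag and no (void) casts are needed.
      int NextFeature(int iteration, const std::vector<float>&,
                      float, float) override {
        return iteration % 8;
      }
    };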

src/objective/multiclass_obj.cu

@@ -49,6 +49,9 @@ class SoftmaxMultiClassObj : public ObjFunction {
                    const MetaInfo& info,
                    int iter,
                    HostDeviceVector<GradientPair>* out_gpair) override {
+    // Remove unused parameter compiler warning.
+    (void) iter;
     if (info.labels_.Size() == 0) {
       return;
     }

src/tree/tree_model.cc

@@ -787,7 +787,7 @@ void RegTree::LoadCategoricalSplit(Json const& in) {
   if (!categories_nodes.empty()) {
     last_cat_node = get<Integer const>(categories_nodes[cnt]);
   }
-  for (size_t nidx = 0; nidx < param.num_nodes; ++nidx) {
+  for (bst_node_t nidx = 0; nidx < param.num_nodes; ++nidx) {
     if (nidx == last_cat_node) {
       auto j_begin = get<Integer const>(categories_segments[cnt]);
       auto j_end = get<Integer const>(categories_sizes[cnt]) + j_begin;
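
The last change fixes a different warning: `param.num_nodes` is signed (`bst_node_t` is a 32-bit signed integer in XGBoost), so a `size_t` loop index forces a signed/unsigned comparison and triggers `-Wsign-compare`; matching the index type to the bound removes it. A minimal standalone reproduction (not the real RegTree code):

    // sign_compare.cc: compile with g++ -Wall -Wextra -c sign_compare.cc
    #include <cstdint>
    using bst_node_t = std::int32_t;  // XGBoost's node index type
    int CountNodes(bst_node_t num_nodes) {
      int visited = 0;
      // A size_t index here would compare unsigned against the signed
      // num_nodes and trigger -Wsign-compare; bst_node_t matches the bound.
      for (bst_node_t nidx = 0; nidx < num_nodes; ++nidx) {
        ++visited;
      }
      return visited;
    }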