Clean up C++ warnings (#6213)

Igor Moura 2020-10-19 12:02:33 -03:00 committed by GitHub
parent ddf37cca30
commit d1254808d5
20 changed files with 78 additions and 89 deletions

View File

@@ -75,8 +75,9 @@ class GradientBooster : public Model, public Configurable {
   * \param prediction The output prediction cache entry that needs to be updated.
   *        the booster may change content of gpair
   */
-  virtual void DoBoost(DMatrix* p_fmat, HostDeviceVector<GradientPair>* in_gpair,
-                       PredictionCacheEntry *prediction) = 0;
+  virtual void DoBoost(DMatrix* p_fmat,
+                       HostDeviceVector<GradientPair>* in_gpair,
+                       PredictionCacheEntry*) = 0;
  /*!
   * \brief generate predictions for given feature matrix
@@ -103,10 +104,10 @@ class GradientBooster : public Model, public Configurable {
   * \param layer_begin (Optional) Begining of boosted tree layer used for prediction.
   * \param layer_end (Optional) End of booster layer. 0 means do not limit trees.
   */
-  virtual void InplacePredict(dmlc::any const &x, float missing,
-                              PredictionCacheEntry *out_preds,
-                              uint32_t layer_begin = 0,
-                              uint32_t layer_end = 0) const {
+  virtual void InplacePredict(dmlc::any const &, float,
+                              PredictionCacheEntry*,
+                              uint32_t,
+                              uint32_t) const {
     LOG(FATAL) << "Inplace predict is not supported by current booster.";
   }
  /*!
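Most hunks in this commit apply the same fix: a parameter that a given override never reads keeps its type but loses its name, which silences -Wunused-parameter (enabled by -Wextra in GCC and Clang) without touching the virtual interface. A minimal sketch of the pattern, with made-up Base/Stub names rather than the real XGBoost classes:

#include <iostream>

// Hypothetical interface; only the pattern matters, not the names.
class Base {
 public:
  virtual ~Base() = default;
  // Parameter names are kept in the declaration for documentation.
  virtual void Predict(const float* data, int num_rows) = 0;
};

class Stub : public Base {
 public:
  // This override ignores both arguments, so the names are dropped.
  // With -Wall -Wextra, `void Predict(const float* data, int num_rows)`
  // would trigger -Wunused-parameter here; unnamed parameters do not.
  void Predict(const float*, int) override {
    std::cerr << "Predict is not supported by this booster.\n";
  }
};

int main() {
  Stub s;
  s.Predict(nullptr, 0);  // still callable through the unchanged interface
}

Because only the definition's parameter names change, callers and other implementations of the interface are unaffected.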

View File

@@ -41,14 +41,14 @@ class Metric : public Configurable {
   * override this function to maintain internal configuration
   * \param in JSON object containing the configuration
   */
-  void LoadConfig(Json const& in) override {}
+  void LoadConfig(Json const&) override {}
  /*!
   * \brief Save configuration to JSON object
   * By default, metric has no internal configuration;
   * override this function to maintain internal configuration
   * \param out pointer to output JSON object
   */
-  void SaveConfig(Json* out) const override {}
+  void SaveConfig(Json*) const override {}
  /*!
   * \brief evaluate a specific metric

View File

@@ -53,7 +53,7 @@ class ObjFunction : public Configurable {
   * \brief transform prediction values, this is only called when Prediction is called
   * \param io_preds prediction values, saves to this vector as well
   */
-  virtual void PredTransform(HostDeviceVector<bst_float> *io_preds) {}
+  virtual void PredTransform(HostDeviceVector<bst_float>*) {}
  /*!
   * \brief transform prediction values, this is only called when Eval is called,

View File

@@ -240,7 +240,7 @@ XGB_DLL int XGDMatrixFree(DMatrixHandle handle) {
 }
 XGB_DLL int XGDMatrixSaveBinary(DMatrixHandle handle, const char* fname,
-                                int silent) {
+                                int) {
   API_BEGIN();
   CHECK_HANDLE();
   auto dmat = static_cast<std::shared_ptr<DMatrix>*>(handle)->get();
@@ -528,8 +528,8 @@ XGB_DLL int XGBoosterPredictFromDense(BoosterHandle handle, float *values,
                                       xgboost::bst_ulong n_rows,
                                       xgboost::bst_ulong n_cols,
                                       float missing,
-                                      unsigned iteration_begin,
-                                      unsigned iteration_end,
+                                      unsigned,
+                                      unsigned,
                                       char const* c_type,
                                       xgboost::bst_ulong cache_id,
                                       xgboost::bst_ulong *out_len,
@@ -560,8 +560,8 @@ XGB_DLL int XGBoosterPredictFromCSR(BoosterHandle handle,
                                     size_t nelem,
                                     size_t num_col,
                                     float missing,
-                                    unsigned iteration_begin,
-                                    unsigned iteration_end,
+                                    unsigned,
+                                    unsigned,
                                     char const *c_type,
                                     xgboost::bst_ulong cache_id,
                                     xgboost::bst_ulong *out_len,
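The C API hunks are the same idea applied to exported functions: arguments such as silent and the iteration range are presumably kept in the signature so existing callers keep compiling, and only the names are dropped where the implementation ignores them. A hedged sketch with an invented function name, not an XGBoost API:

#include <cstdio>

extern "C" {
// The second argument is retained so existing callers still compile, but this
// implementation never reads it, so it is left unnamed (no -Wunused-parameter).
// `ExampleSave` and the flag slot are illustrative only.
int ExampleSave(const char* fname, int) {
  std::FILE* fp = std::fopen(fname, "wb");
  if (fp == nullptr) { return -1; }
  std::fclose(fp);
  return 0;
}
}  // extern "C"

int main() { return ExampleSave("example.bin", 0); }

Writing the argument as int /*silent*/ would document the retained slot while still avoiding the warning; the diff uses the shorter unnamed form.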

View File

@@ -43,7 +43,7 @@ class Column {
   BinIdxType GetFeatureBinIdx(size_t idx) const { return index_[idx]; }
-  const uint32_t GetBaseIdx() const { return index_base_; }
+  uint32_t GetBaseIdx() const { return index_base_; }
   common::Span<const BinIdxType> GetFeatureBinIdxPtr() const { return index_; }
@@ -179,12 +179,12 @@ class ColumnMatrix {
        but for ColumnMatrix we still have a chance to reduce the memory consumption */
     } else {
       if (bins_type_size_ == kUint8BinsTypeSize) {
-        SetIndex<uint8_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
+        SetIndex<uint8_t>(gmat.index.data<uint32_t>(), gmat, nfeature);
       } else if (bins_type_size_ == kUint16BinsTypeSize) {
-        SetIndex<uint16_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
+        SetIndex<uint16_t>(gmat.index.data<uint32_t>(), gmat, nfeature);
       } else {
         CHECK_EQ(bins_type_size_, kUint32BinsTypeSize);
-        SetIndex<uint32_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
+        SetIndex<uint32_t>(gmat.index.data<uint32_t>(), gmat, nfeature);
       }
     }
   }
@@ -271,7 +271,7 @@ class ColumnMatrix {
   template<typename T>
   inline void SetIndex(uint32_t* index, const GHistIndexMatrix& gmat,
-                       const size_t nrow, const size_t nfeature) {
+                       const size_t nfeature) {
     std::vector<size_t> num_nonzeros;
     num_nonzeros.resize(nfeature);
     std::fill(num_nonzeros.begin(), num_nonzeros.end(), 0);
@@ -311,18 +311,18 @@ class ColumnMatrix {
       rbegin += batch.Size();
     }
   }
-  const BinTypeSize GetTypeSize() const {
+  BinTypeSize GetTypeSize() const {
     return bins_type_size_;
   }
   // This is just an utility function
-  const bool NoMissingValues(const size_t n_elements,
+  bool NoMissingValues(const size_t n_elements,
                        const size_t n_row, const size_t n_features) {
     return n_elements == n_features * n_row;
   }
   // And this returns part of state
-  const bool AnyMissing() const {
+  bool AnyMissing() const {
     return any_missing_;
   }
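The Column and ColumnMatrix hunks fix a different warning: a const qualifier on a by-value return type has no effect, and GCC flags it as -Wignored-qualifiers under -Wextra. A small illustration on a hypothetical class, not the real ColumnMatrix:

#include <cstdint>

class Histogram {
 public:
  // Warns with -Wextra (-Wignored-qualifiers): the returned uint32_t is a
  // copy, so a leading `const` would have no effect on the caller.
  //   const uint32_t BaseIndex() const { return base_index_; }

  // Cleaned-up version, as in the diff: drop the qualifier on the value.
  uint32_t BaseIndex() const { return base_index_; }

  // Same story for bool-returning helpers such as AnyMissing().
  bool AnyMissing() const { return any_missing_; }

 private:
  uint32_t base_index_{0};
  bool any_missing_{false};
};

The trailing const (on the member function itself) is meaningful and stays; only the qualifier on the returned value is dropped.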

View File

@@ -29,8 +29,7 @@
 namespace xgboost {
 namespace common {
-void GHistIndexMatrix::ResizeIndex(const size_t rbegin, const SparsePage& batch,
-                                   const size_t n_offsets, const size_t n_index,
-                                   const bool isDense) {
+void GHistIndexMatrix::ResizeIndex(const size_t n_index,
+                                   const bool isDense) {
   if ((max_num_bins - 1 <= static_cast<int>(std::numeric_limits<uint8_t>::max())) && isDense) {
     index.SetBinTypeSize(kUint8BinsTypeSize);
@@ -119,7 +118,7 @@ void GHistIndexMatrix::Init(DMatrix* p_fmat, int max_bins) {
     const size_t n_offsets = cut.Ptrs().size() - 1;
     const size_t n_index = row_ptr[rbegin + batch.Size()];
-    ResizeIndex(rbegin, batch, n_offsets, n_index, isDense);
+    ResizeIndex(n_index, isDense);
     CHECK_GT(cut.Values().size(), 0U);

View File

@@ -273,8 +273,7 @@ struct GHistIndexMatrix {
     }
   }
-  void ResizeIndex(const size_t rbegin, const SparsePage& batch,
-                   const size_t n_offsets, const size_t n_index,
-                   const bool isDense);
+  void ResizeIndex(const size_t n_index,
+                   const bool isDense);
   inline void GetFeatureCounts(size_t* counts) const {
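ResizeIndex is an ordinary member function rather than a virtual override, so its unused rbegin, batch, and n_offsets arguments could be dropped from the signature entirely: the definition and call site in the previous file, and the declaration above, all change together. A sketch of the same refactoring on an invented helper:

#include <cstddef>
#include <vector>

// Before: `stride` was accepted but never read, producing -Wunused-parameter
// at the definition and misleading callers about what the function needs.
//   void ResizeBuffer(std::vector<int>* buf, std::size_t n, std::size_t stride);

// After: the dead parameter is removed from declaration, definition, and callers.
void ResizeBuffer(std::vector<int>* buf, std::size_t n) {
  buf->resize(n);
}

int main() {
  std::vector<int> buf;
  ResizeBuffer(&buf, 128);  // call site updated to the trimmed signature
  return buf.size() == 128u ? 0 : 1;
}

Removing the parameter outright is only possible because nothing outside the class depends on the old signature; virtual overrides elsewhere in this commit can only drop the names.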

View File

@@ -32,7 +32,7 @@ class PeekableInStream : public dmlc::Stream {
   size_t Read(void* dptr, size_t size) override;
   virtual size_t PeekRead(void* dptr, size_t size);
-  void Write(const void* dptr, size_t size) override {
+  void Write(const void*, size_t) override {
     LOG(FATAL) << "Not implemented";
   }
@@ -60,7 +60,7 @@ class FixedSizeStream : public PeekableInStream {
   size_t Tell() const { return pointer_; }
   void Seek(size_t pos);
-  void Write(const void* dptr, size_t size) override {
+  void Write(const void*, size_t) override {
     LOG(FATAL) << "Not implemented";
   }

View File

@@ -136,8 +136,7 @@ class CSRAdapterBatch : public detail::NoMetaInfo {
     const float* values_;
   };
   CSRAdapterBatch(const size_t* row_ptr, const unsigned* feature_idx,
-                  const float* values, size_t num_rows, size_t num_elements,
-                  size_t num_features)
+                  const float* values, size_t num_rows, size_t, size_t)
       : row_ptr_(row_ptr),
         feature_idx_(feature_idx),
         values_(values),

View File

@@ -113,7 +113,7 @@ class GBLinear : public GradientBooster {
   void DoBoost(DMatrix *p_fmat,
                HostDeviceVector<GradientPair> *in_gpair,
-               PredictionCacheEntry* predt) override {
+               PredictionCacheEntry*) override {
     monitor_.Start("DoBoost");
     model_.LazyInitModel();
@@ -128,8 +128,7 @@ class GBLinear : public GradientBooster {
   void PredictBatch(DMatrix *p_fmat,
                     PredictionCacheEntry *predts,
-                    bool training,
-                    unsigned ntree_limit) override {
+                    bool, unsigned ntree_limit) override {
     monitor_.Start("PredictBatch");
     auto* out_preds = &predts->predictions;
     CHECK_EQ(ntree_limit, 0U)
@@ -140,7 +139,7 @@ class GBLinear : public GradientBooster {
   // add base margin
   void PredictInstance(const SparsePage::Inst &inst,
                        std::vector<bst_float> *out_preds,
-                       unsigned ntree_limit) override {
+                       unsigned) override {
     const int ngroup = model_.learner_model_param->num_output_group;
     for (int gid = 0; gid < ngroup; ++gid) {
       this->Pred(inst, dmlc::BeginPtr(*out_preds), gid,
@@ -148,16 +147,15 @@ class GBLinear : public GradientBooster {
     }
   }
-  void PredictLeaf(DMatrix *p_fmat,
-                   std::vector<bst_float> *out_preds,
-                   unsigned ntree_limit) override {
+  void PredictLeaf(DMatrix*,
+                   std::vector<bst_float>*,
+                   unsigned) override {
     LOG(FATAL) << "gblinear does not support prediction of leaf index";
   }
   void PredictContribution(DMatrix* p_fmat,
                            HostDeviceVector<bst_float>* out_contribs,
-                           unsigned ntree_limit, bool approximate, int condition = 0,
-                           unsigned condition_feature = 0) override {
+                           unsigned ntree_limit, bool, int, unsigned) override {
     model_.LazyInitModel();
     CHECK_EQ(ntree_limit, 0U)
         << "GBLinear::PredictContribution: ntrees is only valid for gbtree predictor";
@@ -196,7 +194,7 @@ class GBLinear : public GradientBooster {
   void PredictInteractionContributions(DMatrix* p_fmat,
                                        HostDeviceVector<bst_float>* out_contribs,
-                                       unsigned ntree_limit, bool approximate) override {
+                                       unsigned, bool) override {
     std::vector<bst_float>& contribs = out_contribs->HostVector();
     // linear models have no interaction effects

View File

@@ -95,7 +95,7 @@ class GBLinearModel : public Model {
     return &weight[i * learner_model_param->num_output_group];
   }
-  std::vector<std::string> DumpModel(const FeatureMap &fmap, bool with_stats,
+  std::vector<std::string> DumpModel(const FeatureMap &, bool,
                                      std::string format) const {
     const int ngroup = learner_model_param->num_output_group;
     const unsigned nfeature = learner_model_param->num_feature;

View File

@@ -401,7 +401,7 @@ void GBTree::SaveModel(Json* p_out) const {
 void GBTree::PredictBatch(DMatrix* p_fmat,
                           PredictionCacheEntry* out_preds,
-                          bool training,
+                          bool,
                           unsigned ntree_limit) {
   CHECK(configured_);
   GetPredictor(&out_preds->predictions, p_fmat)
@@ -601,8 +601,8 @@ class Dart : public GBTree {
   void PredictContribution(DMatrix* p_fmat,
                            HostDeviceVector<bst_float>* out_contribs,
-                           unsigned ntree_limit, bool approximate, int condition,
-                           unsigned condition_feature) override {
+                           unsigned ntree_limit, bool approximate, int,
+                           unsigned) override {
     CHECK(configured_);
     cpu_predictor_->PredictContribution(p_fmat, out_contribs, model_,
                                         ntree_limit, &weight_drop_, approximate);
@@ -674,8 +674,7 @@ class Dart : public GBTree {
   // commit new trees all at once
   void
   CommitModel(std::vector<std::vector<std::unique_ptr<RegTree>>>&& new_trees,
-              DMatrix* m,
-              PredictionCacheEntry* predts) override {
+              DMatrix*, PredictionCacheEntry*) override {
     int num_new_trees = 0;
     for (uint32_t gid = 0; gid < model_.learner_model_param->num_output_group; ++gid) {
       num_new_trees += new_trees[gid].size();

View File

@@ -239,7 +239,7 @@ class GBTree : public GradientBooster {
   void PredictContribution(DMatrix* p_fmat,
                            HostDeviceVector<bst_float>* out_contribs,
                            unsigned ntree_limit, bool approximate,
-                           int condition, unsigned condition_feature) override {
+                           int, unsigned) override {
     CHECK(configured_);
     this->GetPredictor()->PredictContribution(
         p_fmat, out_contribs, model_, ntree_limit, nullptr, approximate);

View File

@@ -263,7 +263,7 @@ class CyclicFeatureSelector : public FeatureSelector {
 class ShuffleFeatureSelector : public FeatureSelector {
  public:
   void Setup(const gbm::GBLinearModel &model,
-             const std::vector<GradientPair> &g,
+             const std::vector<GradientPair>&,
              DMatrix *, float, float, int) override {
     if (feat_index_.size() == 0) {
       feat_index_.resize(model.learner_model_param->num_feature);

View File

@@ -111,17 +111,17 @@ class PairwiseLambdaWeightComputer {
   * \param list a list that is sorted by pred score
   * \param io_pairs record of pairs, containing the pairs to fill in weights
   */
-  static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list,
-                              std::vector<LambdaPair> *io_pairs) {}
+  static void GetLambdaWeight(const std::vector<ListEntry>&,
+                              std::vector<LambdaPair>*) {}
   static char const* Name() {
     return "rank:pairwise";
   }
 #if defined(__CUDACC__)
-  PairwiseLambdaWeightComputer(const bst_float *dpreds,
-                               const bst_float *dlabels,
-                               const dh::SegmentSorter<float> &segment_label_sorter) {}
+  PairwiseLambdaWeightComputer(const bst_float*,
+                               const bst_float*,
+                               const dh::SegmentSorter<float>&) {}
   class PairwiseLambdaWeightMultiplier {
    public:
@@ -270,7 +270,7 @@ class NDCGLambdaWeightComputer
   };
   NDCGLambdaWeightComputer(const bst_float *dpreds,
-                           const bst_float *dlabels,
+                           const bst_float*,
                            const dh::SegmentSorter<float> &segment_label_sorter)
       : IndexablePredictionSorter(dpreds, segment_label_sorter),
         dgroup_dcg_(segment_label_sorter.GetNumGroups(), 0.0f),
@@ -293,7 +293,7 @@ class NDCGLambdaWeightComputer
                           group_segments)),
       thrust::make_discard_iterator(),  // We don't care for the group indices
       dgroup_dcg_.begin());  // Sum of the item's DCG values in the group
-    CHECK(end_range.second - dgroup_dcg_.begin() == dgroup_dcg_.size());
+    CHECK(static_cast<unsigned>(end_range.second - dgroup_dcg_.begin()) == dgroup_dcg_.size());
   }
   inline const common::Span<const float> GetGroupDcgsSpan() const {
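The CHECK change in the last hunk is a -Wsign-compare fix: the iterator difference on the left is a signed std::ptrdiff_t while size() returns an unsigned type, so the difference is cast before the comparison. A self-contained sketch of the same fix, using plain assert in place of XGBoost's CHECK macro:

#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  std::vector<float> values(10, 1.0f);
  auto end = values.begin() + 10;

  // `end - values.begin()` is std::ptrdiff_t (signed); values.size() is
  // std::size_t (unsigned). Comparing them directly triggers -Wsign-compare.
  //   assert(end - values.begin() == values.size());

  // Cast the known-non-negative difference to the unsigned type first,
  // mirroring the static_cast added in the diff above.
  assert(static_cast<std::size_t>(end - values.begin()) == values.size());
  return 0;
}

The cast is safe here because the difference is known to be non-negative; for potentially negative values the comparison itself would need rethinking rather than a cast.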

View File

@@ -18,11 +18,11 @@ struct LinearSquareLoss {
   // duplication is necessary, as __device__ specifier
   // cannot be made conditional on template parameter
   XGBOOST_DEVICE static bst_float PredTransform(bst_float x) { return x; }
-  XGBOOST_DEVICE static bool CheckLabel(bst_float x) { return true; }
+  XGBOOST_DEVICE static bool CheckLabel(bst_float) { return true; }
   XGBOOST_DEVICE static bst_float FirstOrderGradient(bst_float predt, bst_float label) {
     return predt - label;
   }
-  XGBOOST_DEVICE static bst_float SecondOrderGradient(bst_float predt, bst_float label) {
+  XGBOOST_DEVICE static bst_float SecondOrderGradient(bst_float, bst_float) {
     return 1.0f;
   }
   template <typename T>
@@ -72,7 +72,7 @@ struct LogisticRegression {
   XGBOOST_DEVICE static bst_float FirstOrderGradient(bst_float predt, bst_float label) {
     return predt - label;
   }
-  XGBOOST_DEVICE static bst_float SecondOrderGradient(bst_float predt, bst_float label) {
+  XGBOOST_DEVICE static bst_float SecondOrderGradient(bst_float predt, bst_float) {
     const float eps = 1e-16f;
     return fmaxf(predt * (1.0f - predt), eps);
   }
@@ -102,7 +102,7 @@ struct PseudoHuberError {
   XGBOOST_DEVICE static bst_float PredTransform(bst_float x) {
     return x;
   }
-  XGBOOST_DEVICE static bool CheckLabel(bst_float label) {
+  XGBOOST_DEVICE static bool CheckLabel(bst_float) {
     return true;
   }
   XGBOOST_DEVICE static bst_float FirstOrderGradient(bst_float predt, bst_float label) {
@@ -144,7 +144,7 @@ struct LogisticRaw : public LogisticRegression {
     predt = common::Sigmoid(predt);
     return predt - label;
   }
-  XGBOOST_DEVICE static bst_float SecondOrderGradient(bst_float predt, bst_float label) {
+  XGBOOST_DEVICE static bst_float SecondOrderGradient(bst_float predt, bst_float) {
     const float eps = 1e-16f;
     predt = common::Sigmoid(predt);
     return fmaxf(predt * (1.0f - predt), eps);

View File

@@ -52,8 +52,7 @@ class RegLossObj : public ObjFunction {
   }
   void GetGradient(const HostDeviceVector<bst_float>& preds,
-                   const MetaInfo &info,
-                   int iter,
+                   const MetaInfo &info, int,
                    HostDeviceVector<GradientPair>* out_gpair) override {
     CHECK_EQ(preds.Size(), info.labels_.Size())
         << " " << "labels are not correctly provided"
@@ -191,8 +190,7 @@ class PoissonRegression : public ObjFunction {
   }
   void GetGradient(const HostDeviceVector<bst_float>& preds,
-                   const MetaInfo &info,
-                   int iter,
+                   const MetaInfo &info, int,
                    HostDeviceVector<GradientPair> *out_gpair) override {
     CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
     CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
@@ -280,11 +278,10 @@ XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson")
 class CoxRegression : public ObjFunction {
  public:
   void Configure(
-      const std::vector<std::pair<std::string, std::string> > &args) override {}
+      const std::vector<std::pair<std::string, std::string> >&) override {}
   void GetGradient(const HostDeviceVector<bst_float>& preds,
-                   const MetaInfo &info,
-                   int iter,
+                   const MetaInfo &info, int,
                    HostDeviceVector<GradientPair> *out_gpair) override {
     CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
     CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
@@ -379,11 +376,10 @@ XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox")
 class GammaRegression : public ObjFunction {
  public:
   void Configure(
-      const std::vector<std::pair<std::string, std::string> > &args) override {}
+      const std::vector<std::pair<std::string, std::string> >&) override {}
   void GetGradient(const HostDeviceVector<bst_float> &preds,
-                   const MetaInfo &info,
-                   int iter,
+                   const MetaInfo &info, int,
                    HostDeviceVector<GradientPair> *out_gpair) override {
     CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
     CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
@@ -479,8 +475,7 @@ class TweedieRegression : public ObjFunction {
   }
   void GetGradient(const HostDeviceVector<bst_float>& preds,
-                   const MetaInfo &info,
-                   int iter,
+                   const MetaInfo &info, int,
                    HostDeviceVector<GradientPair> *out_gpair) override {
     CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
     CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";

View File

@@ -110,9 +110,8 @@ struct SparsePageLoader {
 struct EllpackLoader {
   EllpackDeviceAccessor const& matrix;
-  XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool use_shared,
-                               bst_feature_t num_features, bst_row_t num_rows,
-                               size_t entry_start)
+  XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool,
+                               bst_feature_t, bst_row_t, size_t)
       : matrix{m} {}
   __device__ __forceinline__ float GetElement(size_t ridx, size_t fidx) const {
     auto gidx = matrix.GetBinIndex(ridx, fidx);
@@ -587,7 +586,7 @@ class GPUPredictor : public xgboost::Predictor {
   template <typename Adapter, typename Loader>
   void DispatchedInplacePredict(dmlc::any const &x,
-                                const gbm::GBTreeModel &model, float missing,
+                                const gbm::GBTreeModel &model, float,
                                 PredictionCacheEntry *out_preds,
                                 uint32_t tree_begin, uint32_t tree_end) const {
     auto max_shared_memory_bytes = dh::MaxSharedMemory(this->generic_param_->gpu_id);
@@ -648,9 +647,9 @@ class GPUPredictor : public xgboost::Predictor {
   void PredictContribution(DMatrix* p_fmat,
                            HostDeviceVector<bst_float>* out_contribs,
                            const gbm::GBTreeModel& model, unsigned ntree_limit,
-                           std::vector<bst_float>* tree_weights,
-                           bool approximate, int condition,
-                           unsigned condition_feature) override {
+                           std::vector<bst_float>*,
+                           bool approximate, int,
+                           unsigned) override {
     if (approximate) {
       LOG(FATAL) << "Approximated contribution is not implemented in GPU Predictor.";
     }
@@ -702,7 +701,7 @@ class GPUPredictor : public xgboost::Predictor {
                                        HostDeviceVector<bst_float>* out_contribs,
                                        const gbm::GBTreeModel& model,
                                        unsigned ntree_limit,
-                                       std::vector<bst_float>* tree_weights,
+                                       std::vector<bst_float>*,
                                        bool approximate) override {
     if (approximate) {
       LOG(FATAL) << "[Internal error]: " << __func__
@@ -774,16 +773,16 @@ class GPUPredictor : public xgboost::Predictor {
     }
   }
-  void PredictInstance(const SparsePage::Inst& inst,
-                       std::vector<bst_float>* out_preds,
-                       const gbm::GBTreeModel& model, unsigned ntree_limit) override {
+  void PredictInstance(const SparsePage::Inst&,
+                       std::vector<bst_float>*,
+                       const gbm::GBTreeModel&, unsigned) override {
     LOG(FATAL) << "[Internal error]: " << __func__
                << " is not implemented in GPU Predictor.";
   }
-  void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
-                   const gbm::GBTreeModel& model,
-                   unsigned ntree_limit) override {
+  void PredictLeaf(DMatrix*, std::vector<bst_float>*,
+                   const gbm::GBTreeModel&,
+                   unsigned) override {
     LOG(FATAL) << "[Internal error]: " << __func__
                << " is not implemented in GPU Predictor.";
   }

View File

@@ -233,7 +233,7 @@ GradientBasedSample ExternalMemoryUniformSampling::Sample(common::Span<GradientP
 GradientBasedSampling::GradientBasedSampling(EllpackPageImpl* page,
                                              size_t n_rows,
-                                             const BatchParam& batch_param,
+                                             const BatchParam&,
                                              float subsample)
     : page_(page),
       subsample_(subsample),

View File

@@ -72,7 +72,7 @@ void RowPartitioner::SortPosition(common::Span<bst_node_t> position,
                                   common::Span<bst_node_t> position_out,
                                   common::Span<RowIndexT> ridx,
                                   common::Span<RowIndexT> ridx_out,
-                                  bst_node_t left_nidx, bst_node_t right_nidx,
+                                  bst_node_t left_nidx, bst_node_t,
                                   int64_t* d_left_count, cudaStream_t stream) {
   WriteResultsFunctor write_results{left_nidx, position, position_out,
                                     ridx, ridx_out, d_left_count};