Reduce compiler warnings on CPU-only build. (#8483)

Jiaming Yuan 2022-11-29 00:04:16 +08:00 committed by GitHub
parent d666ba775e
commit 3fc1046fd3
12 changed files with 69 additions and 83 deletions
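
Most of the changes below apply a single idiom: in CPU-only translation units the GPU entry points are stubs that never read their arguments, which trips -Wunused-parameter, so the stub definitions simply drop the parameter names while the headers and CUDA implementations keep them. A minimal sketch of the pattern, assuming a hypothetical Frobnicate() helper rather than any function touched by this commit:

    // frobnicate.h -- the declaration keeps the names for documentation.
    void Frobnicate(int device, double value);

    // frobnicate.cc -- CPU-only stub: unnamed parameters cannot be reported as unused.
    #if !defined(XGBOOST_USE_CUDA)
    void Frobnicate(int, double) { /* no GPU available, nothing to do */ }
    #endif  // !defined(XGBOOST_USE_CUDA)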

View File

@@ -417,7 +417,7 @@ class EllpackPage {
   size_t Size() const;
   /*! \brief Set the base row id for this page. */
-  void SetBaseRowId(size_t row_id);
+  void SetBaseRowId(std::size_t row_id);
   const EllpackPageImpl* Impl() const { return impl_.get(); }
   EllpackPageImpl* Impl() { return impl_.get(); }

View File

@@ -734,9 +734,7 @@ void MetaInfo::Validate(int32_t device) const {
 }
 #if !defined(XGBOOST_USE_CUDA)
-void MetaInfo::SetInfoFromCUDA(Context const& ctx, StringView key, Json arr) {
-  common::AssertGPUSupport();
-}
+void MetaInfo::SetInfoFromCUDA(Context const&, StringView, Json) { common::AssertGPUSupport(); }
 #endif  // !defined(XGBOOST_USE_CUDA)
 using DMatrixThreadLocal =

View File

@@ -12,7 +12,7 @@ class EllpackPageImpl {};
 EllpackPage::EllpackPage() = default;
-EllpackPage::EllpackPage(DMatrix* dmat, const BatchParam& param) {
+EllpackPage::EllpackPage(DMatrix*, const BatchParam&) {
   LOG(FATAL) << "Internal Error: XGBoost is not compiled with CUDA but "
                 "EllpackPage is required";
 }
@@ -22,7 +22,7 @@ EllpackPage::~EllpackPage() {
                 "EllpackPage is required";
 }
-void EllpackPage::SetBaseRowId(size_t row_id) {
+void EllpackPage::SetBaseRowId(std::size_t) {
   LOG(FATAL) << "Internal Error: XGBoost is not compiled with CUDA but "
                 "EllpackPage is required";
 }

View File

@@ -25,7 +25,7 @@ EllpackPage::EllpackPage(EllpackPage&& that) { std::swap(impl_, that.impl_); }
 size_t EllpackPage::Size() const { return impl_->Size(); }
-void EllpackPage::SetBaseRowId(size_t row_id) { impl_->SetBaseRowId(row_id); }
+void EllpackPage::SetBaseRowId(std::size_t row_id) { impl_->SetBaseRowId(row_id); }
 // Bin each input data entry, store the bin indices in compressed form.
 __global__ void CompressBinEllpackKernel(

View File

@@ -190,7 +190,7 @@ class EllpackPageImpl {
   size_t Size() const;
   /*! \brief Set the base row id for this page. */
-  void SetBaseRowId(size_t row_id) {
+  void SetBaseRowId(std::size_t row_id) {
     base_rowid = row_id;
   }

View File

@@ -19,7 +19,7 @@ const MetaInfo &SparsePageDMatrix::Info() const { return info_; }
 namespace detail {
 // Use device dispatch
-size_t NSamplesDevice(DMatrixProxy *proxy)
+std::size_t NSamplesDevice(DMatrixProxy *)
 #if defined(XGBOOST_USE_CUDA)
 ;  // NOLINT
 #else
@@ -28,7 +28,7 @@ size_t NSamplesDevice(DMatrixProxy *proxy)
   return 0;
 }
 #endif
-size_t NFeaturesDevice(DMatrixProxy *proxy)
+std::size_t NFeaturesDevice(DMatrixProxy *)
 #if defined(XGBOOST_USE_CUDA)
 ;  // NOLINT
 #else
@@ -189,7 +189,7 @@ BatchSet<GHistIndexMatrix> SparsePageDMatrix::GetGradientIndex(const BatchParam
 }
 #if !defined(XGBOOST_USE_CUDA)
-BatchSet<EllpackPage> SparsePageDMatrix::GetEllpackBatches(const BatchParam& param) {
+BatchSet<EllpackPage> SparsePageDMatrix::GetEllpackBatches(const BatchParam &) {
   common::AssertGPUSupport();
   auto begin_iter = BatchIterator<EllpackPage>(ellpack_page_source_);
   return BatchSet<EllpackPage>(BatchIterator<EllpackPage>(begin_iter));

View File

@@ -9,11 +9,11 @@ namespace xgboost {
 namespace data {
 namespace detail {
-size_t NSamplesDevice(DMatrixProxy *proxy) {
+std::size_t NSamplesDevice(DMatrixProxy *proxy) {
   return Dispatch(proxy, [](auto const &value) { return value.NumRows(); });
 }
-size_t NFeaturesDevice(DMatrixProxy *proxy) {
+std::size_t NFeaturesDevice(DMatrixProxy *proxy) {
   return Dispatch(proxy, [](auto const &value) { return value.NumCols(); });
 }
 }  // namespace detail

View File

@@ -191,9 +191,8 @@ void GBTree::ConfigureUpdaters() {
   }
 }
-void GPUCopyGradient(HostDeviceVector<GradientPair> const *in_gpair,
-                     bst_group_t n_groups, bst_group_t group_id,
-                     HostDeviceVector<GradientPair> *out_gpair)
+void GPUCopyGradient(HostDeviceVector<GradientPair> const*, bst_group_t, bst_group_t,
+                     HostDeviceVector<GradientPair>*)
 #if defined(XGBOOST_USE_CUDA)
 ;  // NOLINT
 #else
@@ -627,9 +626,8 @@ GBTree::GetPredictor(HostDeviceVector<float> const *out_pred,
  * \param predts Prediction for current tree.
  * \param tree_w Tree weight.
  */
-void GPUDartPredictInc(common::Span<float> out_predts,
-                       common::Span<float> predts, float tree_w, size_t n_rows,
-                       bst_group_t n_groups, bst_group_t group)
+void GPUDartPredictInc(common::Span<float>, common::Span<float>, float, size_t, bst_group_t,
+                       bst_group_t)
 #if defined(XGBOOST_USE_CUDA)
 ;  // NOLINT
 #else

View File

@@ -343,6 +343,7 @@ void GenericParameter::ConfigureGpuId(bool require_gpu) {
 #else
   // Just set it to CPU, don't think about it.
   this->UpdateAllowUnknown(Args{{"gpu_id", std::to_string(kCpuId)}});
+  (void)(require_gpu);
 #endif  // defined(XGBOOST_USE_CUDA)
   common::SetDevice(this->gpu_id);
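
Here the parameter cannot be left unnamed because the CUDA branch of the same function body still reads it, so the CPU branch discards it explicitly with a cast to void instead. A minimal sketch of that variant, with ConfigureDevice() and use_gpu as hypothetical names:

    void ConfigureDevice(bool use_gpu) {
    #if defined(XGBOOST_USE_CUDA)
      if (use_gpu) {
        // select a CUDA device here
      }
    #else
      (void)(use_gpu);  // CPU-only build: intentionally ignored
    #endif  // defined(XGBOOST_USE_CUDA)
    }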

View File

@@ -390,24 +390,21 @@ XGBOOST_REGISTER_METRIC(EvalAUC, "auc")
     .set_body([](const char*) { return new EvalROCAUC(); });
 #if !defined(XGBOOST_USE_CUDA)
-std::tuple<double, double, double>
-GPUBinaryROCAUC(common::Span<float const> predts, MetaInfo const &info,
-                int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) {
+std::tuple<double, double, double> GPUBinaryROCAUC(common::Span<float const>, MetaInfo const &,
+                                                   std::int32_t,
+                                                   std::shared_ptr<DeviceAUCCache> *) {
   common::AssertGPUSupport();
   return {};
 }
-double GPUMultiClassROCAUC(common::Span<float const> predts,
-                           MetaInfo const &info, int32_t device,
-                           std::shared_ptr<DeviceAUCCache> *cache,
-                           size_t n_classes) {
+double GPUMultiClassROCAUC(common::Span<float const>, MetaInfo const &, std::int32_t,
+                           std::shared_ptr<DeviceAUCCache> *, std::size_t) {
   common::AssertGPUSupport();
   return 0.0;
 }
-std::pair<double, uint32_t>
-GPURankingAUC(common::Span<float const> predts, MetaInfo const &info,
-              int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) {
+std::pair<double, std::uint32_t> GPURankingAUC(common::Span<float const>, MetaInfo const &,
+                                               std::int32_t, std::shared_ptr<DeviceAUCCache> *) {
   common::AssertGPUSupport();
   return {};
 }
@@ -432,8 +429,8 @@ class EvalPRAUC : public EvalAUC<EvalPRAUC> {
     return std::make_tuple(pr, re, auc);
   }
-  double EvalMultiClass(HostDeviceVector<float> const &predts,
-                        MetaInfo const &info, size_t n_classes) {
+  double EvalMultiClass(HostDeviceVector<float> const &predts, MetaInfo const &info,
+                        size_t n_classes) {
     if (tparam_->gpu_id == GenericParameter::kCpuId) {
       auto n_threads = this->tparam_->Threads();
       return MultiClassOVR(predts.ConstHostSpan(), info, n_classes, n_threads,
@@ -472,24 +469,20 @@ XGBOOST_REGISTER_METRIC(AUCPR, "aucpr")
     .set_body([](char const *) { return new EvalPRAUC{}; });
 #if !defined(XGBOOST_USE_CUDA)
-std::tuple<double, double, double>
-GPUBinaryPRAUC(common::Span<float const> predts, MetaInfo const &info,
-               int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) {
+std::tuple<double, double, double> GPUBinaryPRAUC(common::Span<float const>, MetaInfo const &,
+                                                  std::int32_t, std::shared_ptr<DeviceAUCCache> *) {
   common::AssertGPUSupport();
   return {};
 }
-double GPUMultiClassPRAUC(common::Span<float const> predts,
-                          MetaInfo const &info, int32_t device,
-                          std::shared_ptr<DeviceAUCCache> *cache,
-                          size_t n_classes) {
+double GPUMultiClassPRAUC(common::Span<float const>, MetaInfo const &, std::int32_t,
+                          std::shared_ptr<DeviceAUCCache> *, std::size_t) {
   common::AssertGPUSupport();
   return {};
 }
-std::pair<double, uint32_t>
-GPURankingPRAUC(common::Span<float const> predts, MetaInfo const &info,
-                int32_t device, std::shared_ptr<DeviceAUCCache> *cache) {
+std::pair<double, std::uint32_t> GPURankingPRAUC(common::Span<float const>, MetaInfo const &,
+                                                 std::int32_t, std::shared_ptr<DeviceAUCCache> *) {
   common::AssertGPUSupport();
   return {};
 }

View File

@@ -162,9 +162,9 @@ GPUBinaryAUC(common::Span<float const> predts, MetaInfo const &info,
   return std::make_tuple(last.first, last.second, auc);
 }
-std::tuple<double, double, double>
-GPUBinaryROCAUC(common::Span<float const> predts, MetaInfo const &info,
-                int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) {
+std::tuple<double, double, double> GPUBinaryROCAUC(common::Span<float const> predts,
+                                                   MetaInfo const &info, std::int32_t device,
+                                                   std::shared_ptr<DeviceAUCCache> *p_cache) {
   auto &cache = *p_cache;
   InitCacheOnce<false>(predts, p_cache);
@@ -451,10 +451,9 @@ void MultiClassSortedIdx(common::Span<float const> predts,
   dh::SegmentedArgSort<false>(d_predts_t, d_class_ptr, d_sorted_idx);
 }
-double GPUMultiClassROCAUC(common::Span<float const> predts,
-                           MetaInfo const &info, int32_t device,
-                           std::shared_ptr<DeviceAUCCache> *p_cache,
-                           size_t n_classes) {
+double GPUMultiClassROCAUC(common::Span<float const> predts, MetaInfo const &info,
+                           std::int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache,
+                           std::size_t n_classes) {
   auto& cache = *p_cache;
   InitCacheOnce<true>(predts, p_cache);
@@ -480,9 +479,9 @@ struct RankScanItem {
 };
 }  // anonymous namespace
-std::pair<double, uint32_t>
-GPURankingAUC(common::Span<float const> predts, MetaInfo const &info,
-              int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) {
+std::pair<double, std::uint32_t> GPURankingAUC(common::Span<float const> predts,
+                                               MetaInfo const &info, std::int32_t device,
+                                               std::shared_ptr<DeviceAUCCache> *p_cache) {
   auto& cache = *p_cache;
   InitCacheOnce<false>(predts, p_cache);
@@ -600,9 +599,9 @@ GPURankingAUC(common::Span<float const> predts, MetaInfo const &info,
   return std::make_pair(auc, n_valid);
 }
-std::tuple<double, double, double>
-GPUBinaryPRAUC(common::Span<float const> predts, MetaInfo const &info,
-               int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) {
+std::tuple<double, double, double> GPUBinaryPRAUC(common::Span<float const> predts,
+                                                  MetaInfo const &info, std::int32_t device,
+                                                  std::shared_ptr<DeviceAUCCache> *p_cache) {
   auto& cache = *p_cache;
   InitCacheOnce<false>(predts, p_cache);
@@ -640,10 +639,9 @@ GPUBinaryPRAUC(common::Span<float const> predts, MetaInfo const &info,
   return std::make_tuple(1.0, 1.0, auc);
 }
-double GPUMultiClassPRAUC(common::Span<float const> predts,
-                          MetaInfo const &info, int32_t device,
-                          std::shared_ptr<DeviceAUCCache> *p_cache,
-                          size_t n_classes) {
+double GPUMultiClassPRAUC(common::Span<float const> predts, MetaInfo const &info,
+                          std::int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache,
+                          std::size_t n_classes) {
   auto& cache = *p_cache;
   InitCacheOnce<true>(predts, p_cache);
@@ -816,9 +814,9 @@ GPURankingPRAUCImpl(common::Span<float const> predts, MetaInfo const &info,
   return std::make_pair(auc, n_groups - invalid_groups);
 }
-std::pair<double, uint32_t>
-GPURankingPRAUC(common::Span<float const> predts, MetaInfo const &info,
-                int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) {
+std::pair<double, std::uint32_t> GPURankingPRAUC(common::Span<float const> predts,
+                                                 MetaInfo const &info, std::int32_t device,
+                                                 std::shared_ptr<DeviceAUCCache> *p_cache) {
   dh::safe_cuda(cudaSetDevice(device));
   if (predts.empty()) {
     return std::make_pair(0.0, static_cast<uint32_t>(0));

View File

@@ -29,34 +29,32 @@ XGBOOST_DEVICE inline double TrapezoidArea(double x0, double x1, double y0, doub
 struct DeviceAUCCache;
-std::tuple<double, double, double>
-GPUBinaryROCAUC(common::Span<float const> predts, MetaInfo const &info,
-                int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache);
+std::tuple<double, double, double> GPUBinaryROCAUC(common::Span<float const> predts,
+                                                   MetaInfo const &info, std::int32_t device,
+                                                   std::shared_ptr<DeviceAUCCache> *p_cache);
-double GPUMultiClassROCAUC(common::Span<float const> predts,
-                           MetaInfo const &info, int32_t device,
-                           std::shared_ptr<DeviceAUCCache> *cache,
-                           size_t n_classes);
+double GPUMultiClassROCAUC(common::Span<float const> predts, MetaInfo const &info,
+                           std::int32_t device, std::shared_ptr<DeviceAUCCache> *cache,
+                           std::size_t n_classes);
-std::pair<double, uint32_t>
-GPURankingAUC(common::Span<float const> predts, MetaInfo const &info,
-              int32_t device, std::shared_ptr<DeviceAUCCache> *cache);
+std::pair<double, std::uint32_t> GPURankingAUC(common::Span<float const> predts,
+                                               MetaInfo const &info, std::int32_t device,
+                                               std::shared_ptr<DeviceAUCCache> *cache);
 /**********
  * PR AUC *
  **********/
-std::tuple<double, double, double>
-GPUBinaryPRAUC(common::Span<float const> predts, MetaInfo const &info,
-               int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache);
+std::tuple<double, double, double> GPUBinaryPRAUC(common::Span<float const> predts,
+                                                  MetaInfo const &info, std::int32_t device,
+                                                  std::shared_ptr<DeviceAUCCache> *p_cache);
-double GPUMultiClassPRAUC(common::Span<float const> predts,
-                          MetaInfo const &info, int32_t device,
-                          std::shared_ptr<DeviceAUCCache> *cache,
-                          size_t n_classes);
+double GPUMultiClassPRAUC(common::Span<float const> predts, MetaInfo const &info,
+                          std::int32_t device, std::shared_ptr<DeviceAUCCache> *cache,
+                          std::size_t n_classes);
-std::pair<double, uint32_t>
-GPURankingPRAUC(common::Span<float const> predts, MetaInfo const &info,
-                int32_t device, std::shared_ptr<DeviceAUCCache> *cache);
+std::pair<double, std::uint32_t> GPURankingPRAUC(common::Span<float const> predts,
+                                                 MetaInfo const &info, std::int32_t device,
+                                                 std::shared_ptr<DeviceAUCCache> *cache);
 namespace detail {
 XGBOOST_DEVICE inline double CalcH(double fp_a, double fp_b, double tp_a,