diff --git a/include/xgboost/base.h b/include/xgboost/base.h index a5edadb6c..f02d75cdc 100644 --- a/include/xgboost/base.h +++ b/include/xgboost/base.h @@ -271,10 +271,11 @@ class GradientPairInt64 { GradientPairInt64() = default; // Copy constructor if of same value type, marked as default to be trivially_copyable - GradientPairInt64(const GradientPairInt64 &g) = default; + GradientPairInt64(GradientPairInt64 const &g) = default; + GradientPairInt64 &operator=(GradientPairInt64 const &g) = default; - XGBOOST_DEVICE T GetQuantisedGrad() const { return grad_; } - XGBOOST_DEVICE T GetQuantisedHess() const { return hess_; } + XGBOOST_DEVICE [[nodiscard]] T GetQuantisedGrad() const { return grad_; } + XGBOOST_DEVICE [[nodiscard]] T GetQuantisedHess() const { return hess_; } XGBOOST_DEVICE GradientPairInt64 &operator+=(const GradientPairInt64 &rhs) { grad_ += rhs.grad_; @@ -323,17 +324,6 @@ using omp_ulong = dmlc::omp_ulong; // NOLINT using bst_omp_uint = dmlc::omp_uint; // NOLINT /*! \brief Type used for representing version number in binary form.*/ using XGBoostVersionT = int32_t; - -/*! 
- * \brief define compatible keywords in g++ - * Used to support g++-4.6 and g++4.7 - */ -#if DMLC_USE_CXX11 && defined(__GNUC__) && !defined(__clang_version__) -#if __GNUC__ == 4 && __GNUC_MINOR__ < 8 -#define override -#define final -#endif // __GNUC__ == 4 && __GNUC_MINOR__ < 8 -#endif // DMLC_USE_CXX11 && defined(__GNUC__) && !defined(__clang_version__) } // namespace xgboost #endif // XGBOOST_BASE_H_ diff --git a/src/common/math.h b/src/common/math.h index c4d794b5d..be5ff7abd 100644 --- a/src/common/math.h +++ b/src/common/math.h @@ -134,12 +134,6 @@ inline float LogSum(Iterator begin, Iterator end) { return mx + std::log(sum); } -// comparator functions for sorting pairs in descending order -inline static bool CmpFirst(const std::pair<float, unsigned> &a, - const std::pair<float, unsigned> &b) { - return a.first > b.first; -} - // Redefined here to workaround a VC bug that doesn't support overloading for integer // types. template <typename T> diff --git a/src/data/iterative_dmatrix.cu b/src/data/iterative_dmatrix.cu index 1e74cb23c..cf34ca61d 100644 --- a/src/data/iterative_dmatrix.cu +++ b/src/data/iterative_dmatrix.cu @@ -114,7 +114,7 @@ void IterativeDMatrix::InitFromCUDA(Context const* ctx, BatchParam const& p, this->info_.num_row_ = accumulated_rows; this->info_.num_nonzero_ = nnz; - auto init_page = [this, &proxy, &cuts, row_stride, accumulated_rows, get_device]() { + auto init_page = [this, &cuts, row_stride, accumulated_rows, get_device]() { if (!ellpack_) { // Should be put inside the while loop to protect against empty batch. In // that case device id is invalid. 
diff --git a/src/metric/rank_metric.cc b/src/metric/rank_metric.cc index dd9adc017..8df6e585f 100644 --- a/src/metric/rank_metric.cc +++ b/src/metric/rank_metric.cc @@ -68,7 +68,8 @@ struct EvalAMS : public MetricNoCache { const auto &h_preds = preds.ConstHostVector(); common::ParallelFor(ndata, ctx_->Threads(), [&](bst_omp_uint i) { rec[i] = std::make_pair(h_preds[i], i); }); - common::Sort(ctx_, rec.begin(), rec.end(), common::CmpFirst); + common::Sort(ctx_, rec.begin(), rec.end(), + [](auto const& l, auto const& r) { return l.first > r.first; }); auto ntop = static_cast<unsigned>(ratio_ * ndata); if (ntop == 0) ntop = ndata; const double br = 10.0; diff --git a/src/predictor/gpu_predictor.cu b/src/predictor/gpu_predictor.cu index 578fda180..70a5c02d5 100644 --- a/src/predictor/gpu_predictor.cu +++ b/src/predictor/gpu_predictor.cu @@ -344,7 +344,7 @@ class DeviceModel { dh::safe_cuda(cudaSetDevice(gpu_id)); // Copy decision trees to device - tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id)); + tree_segments = HostDeviceVector<size_t>({}, gpu_id); auto& h_tree_segments = tree_segments.HostVector(); h_tree_segments.reserve((tree_end - tree_begin) + 1); size_t sum = 0; @@ -354,10 +354,8 @@ class DeviceModel { h_tree_segments.push_back(sum); } - nodes = std::move(HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(), - gpu_id)); - stats = std::move(HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(), - RTreeNodeStat(), gpu_id)); + nodes = HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(), gpu_id); + stats = HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(), RTreeNodeStat(), gpu_id); auto d_nodes = nodes.DevicePointer(); auto d_stats = stats.DevicePointer(); for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { @@ -371,7 +369,7 @@ class DeviceModel { sizeof(RTreeNodeStat) * src_stats.size(), cudaMemcpyDefault)); } - tree_group = std::move(HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id)); + tree_group = HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id); auto& h_tree_group 
= tree_group.HostVector(); std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size()); @@ -435,7 +433,7 @@ struct ShapSplitCondition { bool is_missing_branch; // Does this instance flow down this path? - XGBOOST_DEVICE bool EvaluateSplit(float x) const { + [[nodiscard]] XGBOOST_DEVICE bool EvaluateSplit(float x) const { // is nan if (isnan(x)) { return is_missing_branch;