Fix clang warnings. (#9447)

- Static function defined in a header, reported as unused due to translation-unit
  visibility.
- Implicitly-generated copy assignment operator is deprecated.
- Unused lambda capture.
- Moving a temporary object prevents copy elision.
Jiaming Yuan 2023-08-09 15:34:45 +08:00 committed by GitHub
parent 819098a48f
commit f05294a6f2
5 changed files with 12 additions and 29 deletions

@@ -271,10 +271,11 @@ class GradientPairInt64 {
   GradientPairInt64() = default;
   // Copy constructor if of same value type, marked as default to be trivially_copyable
-  GradientPairInt64(const GradientPairInt64 &g) = default;
+  GradientPairInt64(GradientPairInt64 const &g) = default;
+  GradientPairInt64 &operator=(GradientPairInt64 const &g) = default;
-  XGBOOST_DEVICE T GetQuantisedGrad() const { return grad_; }
-  XGBOOST_DEVICE T GetQuantisedHess() const { return hess_; }
+  XGBOOST_DEVICE [[nodiscard]] T GetQuantisedGrad() const { return grad_; }
+  XGBOOST_DEVICE [[nodiscard]] T GetQuantisedHess() const { return hess_; }
   XGBOOST_DEVICE GradientPairInt64 &operator+=(const GradientPairInt64 &rhs) {
     grad_ += rhs.grad_;
@@ -323,17 +324,6 @@ using omp_ulong = dmlc::omp_ulong; // NOLINT
 using bst_omp_uint = dmlc::omp_uint; // NOLINT
 /*! \brief Type used for representing version number in binary form.*/
 using XGBoostVersionT = int32_t;
-/*!
- * \brief define compatible keywords in g++
- * Used to support g++-4.6 and g++4.7
- */
-#if DMLC_USE_CXX11 && defined(__GNUC__) && !defined(__clang_version__)
-#if __GNUC__ == 4 && __GNUC_MINOR__ < 8
-#define override
-#define final
-#endif // __GNUC__ == 4 && __GNUC_MINOR__ < 8
-#endif // DMLC_USE_CXX11 && defined(__GNUC__) && !defined(__clang_version__)
 } // namespace xgboost
 #endif // XGBOOST_BASE_H_
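
The copy-assignment line added above addresses the deprecated implicit copy
operator: once a class user-declares a copy constructor, implicit generation of
the copy assignment operator is deprecated, and clang reports the first
assignment (the -Wdeprecated-copy flag name is my assumption about which
diagnostic fired). A minimal sketch of the pattern and the fix; the struct
below is illustrative, not the real GradientPairInt64:

// Before: user-declared (defaulted) copy constructor only; `a = b;` draws a
// deprecated-copy warning because the implicit copy assignment is deprecated.
struct QuantisedBefore {
  QuantisedBefore() = default;
  QuantisedBefore(QuantisedBefore const &) = default;
  int grad{0};
};

// After: both copy operations declared explicitly, matching the hunk above.
struct QuantisedAfter {
  QuantisedAfter() = default;
  QuantisedAfter(QuantisedAfter const &) = default;
  QuantisedAfter &operator=(QuantisedAfter const &) = default;
  int grad{0};
};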

@@ -134,12 +134,6 @@ inline float LogSum(Iterator begin, Iterator end) {
   return mx + std::log(sum);
 }
-// comparator functions for sorting pairs in descending order
-inline static bool CmpFirst(const std::pair<float, unsigned> &a,
-                            const std::pair<float, unsigned> &b) {
-  return a.first > b.first;
-}
 // Redefined here to workaround a VC bug that doesn't support overloading for integer
 // types.
 template <typename T>
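
The removed comparator is the "static function in header" warning: `static`
gives the function internal linkage, so every translation unit that includes
the header gets its own copy, and clang flags the copies as unused in files
that never call it. The single caller now uses a local lambda instead (see the
EvalAMS hunk below). A standalone sketch of that replacement pattern, with
std::sort standing in for common::Sort and an illustrative function name:

#include <algorithm>
#include <utility>
#include <vector>

// Keep the comparator next to its only call site as a lambda instead of a
// header-level static function that other translation units would also see.
inline void SortDescendingByFirst(std::vector<std::pair<float, unsigned>>* rec) {
  std::sort(rec->begin(), rec->end(),
            [](auto const& l, auto const& r) { return l.first > r.first; });
}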

@@ -114,7 +114,7 @@ void IterativeDMatrix::InitFromCUDA(Context const* ctx, BatchParam const& p,
   this->info_.num_row_ = accumulated_rows;
   this->info_.num_nonzero_ = nnz;
-  auto init_page = [this, &proxy, &cuts, row_stride, accumulated_rows, get_device]() {
+  auto init_page = [this, &cuts, row_stride, accumulated_rows, get_device]() {
     if (!ellpack_) {
       // Should be put inside the while loop to protect against empty batch. In
       // that case device id is invalid.
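
Dropping `&proxy` from the capture list is the "unused lambda capture" fix:
clang's -Wunused-lambda-capture reports explicit captures that the lambda body
never touches. A minimal sketch with illustrative names:

#include <cstdio>

int main() {
  int used = 1;
  int unused = 2;
  // clang -Wunused-lambda-capture: 'unused' is captured but never used in the
  // body; the fix is simply to remove it from the capture list.
  auto warns = [used, unused] { return used + 1; };
  auto fixed = [used] { return used + 1; };
  std::printf("%d %d\n", warns(), fixed());
  return 0;
}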

@@ -68,7 +68,8 @@ struct EvalAMS : public MetricNoCache {
     const auto &h_preds = preds.ConstHostVector();
     common::ParallelFor(ndata, ctx_->Threads(),
                         [&](bst_omp_uint i) { rec[i] = std::make_pair(h_preds[i], i); });
-    common::Sort(ctx_, rec.begin(), rec.end(), common::CmpFirst);
+    common::Sort(ctx_, rec.begin(), rec.end(),
+                 [](auto const& l, auto const& r) { return l.first > r.first; });
     auto ntop = static_cast<unsigned>(ratio_ * ndata);
     if (ntop == 0) ntop = ndata;
     const double br = 10.0;

@@ -344,7 +344,7 @@ class DeviceModel {
     dh::safe_cuda(cudaSetDevice(gpu_id));
     // Copy decision trees to device
-    tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id));
+    tree_segments = HostDeviceVector<size_t>({}, gpu_id);
     auto& h_tree_segments = tree_segments.HostVector();
     h_tree_segments.reserve((tree_end - tree_begin) + 1);
     size_t sum = 0;
@@ -354,10 +354,8 @@
       h_tree_segments.push_back(sum);
     }
-    nodes = std::move(HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(),
-                                                      gpu_id));
-    stats = std::move(HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(),
-                                                      RTreeNodeStat(), gpu_id));
+    nodes = HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(), gpu_id);
+    stats = HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(), RTreeNodeStat(), gpu_id);
     auto d_nodes = nodes.DevicePointer();
     auto d_stats = stats.DevicePointer();
     for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
@@ -371,7 +369,7 @@
                               sizeof(RTreeNodeStat) * src_stats.size(), cudaMemcpyDefault));
     }
-    tree_group = std::move(HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id));
+    tree_group = HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id);
     auto& h_tree_group = tree_group.HostVector();
     std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size());
@@ -435,7 +433,7 @@ struct ShapSplitCondition {
   bool is_missing_branch;
   // Does this instance flow down this path?
-  XGBOOST_DEVICE bool EvaluateSplit(float x) const {
+  [[nodiscard]] XGBOOST_DEVICE bool EvaluateSplit(float x) const {
     // is nan
     if (isnan(x)) {
       return is_missing_branch;
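
The std::move calls removed in this file are the "moving a temporary prevents
copy elision" warning: the right-hand side is already a prvalue temporary, so
wrapping it in std::move gains nothing and, in the initialization case, blocks
copy elision (clang's -Wpessimizing-move diagnostic). A minimal sketch with
std::vector standing in for HostDeviceVector:

#include <utility>
#include <vector>

int main() {
  // std::vector<int> v = std::move(std::vector<int>(16));  // pessimizing move
  std::vector<int> v = std::vector<int>(16);                 // elided, no warning

  // Same shape as the assignments fixed above: assign the temporary directly
  // instead of wrapping it in std::move; it is move-assigned either way.
  // v = std::move(std::vector<int>(32));
  v = std::vector<int>(32);
  return static_cast<int>(v.size());
}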