Fix clang warnings. (#9447)

- Static function defined in a header (flagged as unused in translation units
  that include the header without calling it, since `static` limits visibility
  to each translation unit).
- Implicitly-generated copy operator is deprecated.
- Unused lambda capture.
- Moving a temporary object prevents copy elision.
(Short sketches of each warning follow the commit metadata and the relevant hunks below.)
Author: Jiaming Yuan
Date: 2023-08-09 15:34:45 +08:00 (committed by GitHub)
Commit: f05294a6f2 (parent: 819098a48f)
5 changed files with 12 additions and 29 deletions
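
For orientation, here is a minimal, self-contained sketch (hypothetical code, not
from this patch) that reproduces all four warning classes with
`clang++ -std=c++17 -Wall -Wextra -c warnings.cc`:

    // warnings.cc -- hypothetical examples of the four warning classes.
    #include <utility>
    #include <vector>

    // -Wunused-function: a `static` function has internal linkage, so a TU that
    // includes its header but never calls it owns an unused private copy.
    static bool CmpExample(int a, int b) { return a > b; }

    struct P {
      P() = default;
      P(P const&) = default;  // user-declared copy ctor (even though defaulted):
      // generating the implicit copy assignment operator is now deprecated.
    };

    void Assign(P& a, P const& b) {
      a = b;  // -Wdeprecated-copy: uses the deprecated implicit copy assignment
    }

    std::vector<int> Make() {
      std::vector<int> v{1, 2, 3};
      int extra = 0;
      auto size = [&v, extra] { return v.size(); };  // -Wunused-lambda-capture: 'extra'
      (void)size();
      return std::move(v);  // -Wpessimizing-move: blocks NRVO / copy elision
    }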

@@ -134,12 +134,6 @@ inline float LogSum(Iterator begin, Iterator end) {
   return mx + std::log(sum);
 }
-// comparator functions for sorting pairs in descending order
-inline static bool CmpFirst(const std::pair<float, unsigned> &a,
-                            const std::pair<float, unsigned> &b) {
-  return a.first > b.first;
-}
-
 // Redefined here to workaround a VC bug that doesn't support overloading for integer
 // types.
 template <typename T>
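
Why clang complains here: `static` gives the function internal linkage, so every
translation unit that includes the header receives its own private definition, and
any TU that never calls it triggers `-Wunused-function`. A hedged sketch of the
pattern (file and function names hypothetical):

    #include <utility>

    // util.h (hypothetical): internal linkage forces one private copy per TU.
    static bool CmpFirstSketch(std::pair<float, unsigned> const& a,
                               std::pair<float, unsigned> const& b) {
      return a.first > b.first;
    }
    // A TU that includes this header without calling the function gets:
    //   warning: unused function 'CmpFirstSketch' [-Wunused-function]
    // Remedies: remove the helper (what this patch does, inlining a lambda at
    // the sole call site) or drop `static` and keep plain `inline`, which has
    // external linkage and is not flagged.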

@@ -114,7 +114,7 @@ void IterativeDMatrix::InitFromCUDA(Context const* ctx, BatchParam const& p,
   this->info_.num_row_ = accumulated_rows;
   this->info_.num_nonzero_ = nnz;
-  auto init_page = [this, &proxy, &cuts, row_stride, accumulated_rows, get_device]() {
+  auto init_page = [this, &cuts, row_stride, accumulated_rows, get_device]() {
     if (!ellpack_) {
       // Should be put inside the while loop to protect against empty batch. In
       // that case device id is invalid.
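
The `&proxy` capture was never referenced in the lambda body, so clang raises
`-Wunused-lambda-capture`. A minimal reproduction (names hypothetical):

    #include <cstdio>

    int main() {
      int used = 1, proxy = 2;
      // warning: lambda capture 'proxy' is not used [-Wunused-lambda-capture]
      auto f = [used, proxy] { std::printf("%d\n", used); };
      f();
    }

Note that only explicit captures are flagged; a default capture such as `[&]` is
never warned about, so deleting the name from the capture list is the complete fix.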

@@ -68,7 +68,8 @@ struct EvalAMS : public MetricNoCache {
     const auto &h_preds = preds.ConstHostVector();
     common::ParallelFor(ndata, ctx_->Threads(),
                         [&](bst_omp_uint i) { rec[i] = std::make_pair(h_preds[i], i); });
-    common::Sort(ctx_, rec.begin(), rec.end(), common::CmpFirst);
+    common::Sort(ctx_, rec.begin(), rec.end(),
+                 [](auto const& l, auto const& r) { return l.first > r.first; });
     auto ntop = static_cast<unsigned>(ratio_ * ndata);
     if (ntop == 0) ntop = ndata;
     const double br = 10.0;
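
With `common::CmpFirst` gone from the header, its one call site supplies an
equivalent comparator inline. A standalone sketch of the same
descending-by-`first` sort, using `std::sort` in place of `common::Sort`:

    #include <algorithm>
    #include <cstdio>
    #include <utility>
    #include <vector>

    int main() {
      std::vector<std::pair<float, unsigned>> rec{{0.1f, 0}, {0.9f, 1}, {0.5f, 2}};
      // Same descending-by-.first ordering the removed comparator provided.
      std::sort(rec.begin(), rec.end(),
                [](auto const& l, auto const& r) { return l.first > r.first; });
      for (auto const& p : rec) std::printf("%.1f %u\n", p.first, p.second);
      // prints: 0.9 1 / 0.5 2 / 0.1 0
    }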

@@ -344,7 +344,7 @@ class DeviceModel {
     dh::safe_cuda(cudaSetDevice(gpu_id));
     // Copy decision trees to device
-    tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id));
+    tree_segments = HostDeviceVector<size_t>({}, gpu_id);
     auto& h_tree_segments = tree_segments.HostVector();
     h_tree_segments.reserve((tree_end - tree_begin) + 1);
     size_t sum = 0;
@@ -354,10 +354,8 @@ class DeviceModel {
       h_tree_segments.push_back(sum);
     }
-    nodes = std::move(HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(),
-                                                      gpu_id));
-    stats = std::move(HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(),
-                                                      RTreeNodeStat(), gpu_id));
+    nodes = HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(), gpu_id);
+    stats = HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(), RTreeNodeStat(), gpu_id);
     auto d_nodes = nodes.DevicePointer();
     auto d_stats = stats.DevicePointer();
     for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
@@ -371,7 +369,7 @@ class DeviceModel {
                               sizeof(RTreeNodeStat) * src_stats.size(), cudaMemcpyDefault));
     }
-    tree_group = std::move(HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id));
+    tree_group = HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id);
     auto& h_tree_group = tree_group.HostVector();
     std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size());
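
Each removed `std::move` wrapped a temporary: a prvalue already binds to the
move-assignment operator, so the cast is redundant and clang warns that it blocks
copy elision (`-Wpessimizing-move`). A minimal sketch with `std::vector` standing
in for `HostDeviceVector`:

    #include <cstddef>
    #include <utility>
    #include <vector>

    struct ModelSketch {
      std::vector<int> tree_group;
      void Init(std::size_t n) {
        // warning: moving a temporary object prevents copy elision [-Wpessimizing-move]
        tree_group = std::move(std::vector<int>(n, 0));
        // Equivalent and warning-free: the temporary is moved (or elided) anyway.
        tree_group = std::vector<int>(n, 0);
      }
    };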
@@ -435,7 +433,7 @@ struct ShapSplitCondition {
   bool is_missing_branch;
   // Does this instance flow down this path?
-  XGBOOST_DEVICE bool EvaluateSplit(float x) const {
+  [[nodiscard]] XGBOOST_DEVICE bool EvaluateSplit(float x) const {
     // is nan
     if (isnan(x)) {
       return is_missing_branch;
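
The final hunk also adds `[[nodiscard]]`, so clang can flag call sites that
silently drop the branch decision. A hypothetical illustration:

    [[nodiscard]] bool EvaluateSplitSketch(float x) { return x > 0.5f; }

    void Caller(float x) {
      EvaluateSplitSketch(x);  // warning: ignoring return value of function
                               // declared with 'nodiscard' attribute [-Wunused-result]
      if (EvaluateSplitSketch(x)) { /* result consumed: no warning */ }
    }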