From 4d665b3fb02ccb136d43853726d24ab11b5274ef Mon Sep 17 00:00:00 2001
From: Jiaming Yuan
Date: Sat, 4 Mar 2023 05:47:04 +0800
Subject: [PATCH] Restore clang tidy test. (#8861)

---
 .clang-tidy | 2 +-
 cmake/version_config.h.in | 10 +++---
 include/xgboost/cache.h | 7 ++--
 include/xgboost/json.h | 6 ++--
 include/xgboost/json_io.h | 4 +--
 include/xgboost/linalg.h | 37 +++++++++++-----------
 include/xgboost/metric.h | 2 +-
 include/xgboost/version_config.h | 10 +++---
 src/collective/communicator-inl.h | 8 ++---
 src/common/categorical.h | 6 ++--
 src/common/charconv.cc | 20 ++++++------
 src/common/charconv.h | 2 +-
 src/common/compressed_iterator.h | 11 ++++---
 src/common/device_helpers.cuh | 5 +--
 src/common/hist_util.cu | 22 ++++++-------
 src/common/hist_util.cuh | 16 ++++++----
 src/common/io.cc | 2 +-
 src/common/io.h | 7 ++--
 src/common/json.cc | 8 ++---
 src/common/numeric.cc | 2 +-
 src/common/numeric.h | 4 +--
 src/common/quantile.cu | 8 ++---
 src/common/row_set.h | 6 ++--
 src/common/stats.h | 2 +-
 src/data/device_adapter.cuh | 14 ++++----
 src/gbm/gblinear.cc | 11 +++----
 src/gbm/gblinear_model.h | 6 ++--
 src/tree/gpu_hist/evaluate_splits.cu | 2 +-
 src/tree/gpu_hist/histogram.cu | 23 +++++++-------
 src/tree/gpu_hist/row_partitioner.cuh | 2 +-
 src/tree/hist/evaluate_splits.h | 7 ++--
 src/tree/param.h | 11 +++----
 src/tree/tree_model.cc | 4 +--
 src/tree/updater_gpu_hist.cu | 8 ++---
 tests/buildkite/pipeline.yml | 10 +++---
 tests/ci_build/Dockerfile.clang_tidy | 12 +++---
 tests/ci_build/tidy.py | 4 +++
 tests/cpp/c_api/test_c_api.cc | 2 +-
 tests/cpp/common/test_charconv.cc | 2 +-
 tests/cpp/common/test_group_data.cc | 4 +--
 tests/cpp/common/test_hist_util.cu | 2 +-
 tests/cpp/common/test_intrusive_ptr.cc | 4 +--
 tests/cpp/common/test_linalg.cc | 12 +++---
 tests/cpp/common/test_linalg.cu | 6 ++--
 tests/cpp/common/test_span.cc | 4 +--
 tests/cpp/data/test_array_interface.cc | 12 +++---
 tests/cpp/data/test_data.cc | 2 +-
 tests/cpp/data/test_simple_dmatrix.cu | 8 ++---
 tests/cpp/data/test_sparse_page_dmatrix.cc | 2 +-
 tests/cpp/data/test_sparse_page_dmatrix.cu | 9 +++---
 tests/cpp/helpers.cc | 2 +-
 tests/cpp/helpers.h | 10 +++---
 tests/cpp/objective/test_regression_obj.cc | 4 +--
 tests/cpp/tree/test_histmaker.cc | 5 ++-
 tests/cpp/tree/test_prune.cc | 10 +++---
 55 files changed, 216 insertions(+), 205 deletions(-)

diff --git a/.clang-tidy b/.clang-tidy
index 3be1d9e0c..c01182eb4 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -1,4 +1,4 @@
-Checks: 'modernize-*,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,-modernize-avoid-c-arrays,-modernize-use-trailing-return-type,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
+Checks: 'modernize-*,-modernize-use-nodiscard,-modernize-concat-nested-namespaces,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,-modernize-avoid-c-arrays,-modernize-use-trailing-return-type,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
 CheckOptions:
   - { key: readability-identifier-naming.ClassCase, value: CamelCase }
   - { key: readability-identifier-naming.StructCase, value: CamelCase }
diff --git a/cmake/version_config.h.in b/cmake/version_config.h.in
index dfde79a5a..38d64fa9e 100644
--- a/cmake/version_config.h.in
+++ b/cmake/version_config.h.in
@@ -1,11 +1,11 @@
-/*!
- * Copyright 2019 XGBoost contributors
+/**
+ * Copyright 2019-2023 by XGBoost contributors
  */
 #ifndef XGBOOST_VERSION_CONFIG_H_
 #define XGBOOST_VERSION_CONFIG_H_

-#define XGBOOST_VER_MAJOR @xgboost_VERSION_MAJOR@
-#define XGBOOST_VER_MINOR @xgboost_VERSION_MINOR@
-#define XGBOOST_VER_PATCH @xgboost_VERSION_PATCH@
+#define XGBOOST_VER_MAJOR @xgboost_VERSION_MAJOR@ /* NOLINT */
+#define XGBOOST_VER_MINOR @xgboost_VERSION_MINOR@ /* NOLINT */
+#define XGBOOST_VER_PATCH @xgboost_VERSION_PATCH@ /* NOLINT */

 #endif  // XGBOOST_VERSION_CONFIG_H_
diff --git a/include/xgboost/cache.h b/include/xgboost/cache.h
index 423274c50..781f45b1c 100644
--- a/include/xgboost/cache.h
+++ b/include/xgboost/cache.h
@@ -4,7 +4,7 @@
 #ifndef XGBOOST_CACHE_H_
 #define XGBOOST_CACHE_H_

-#include   // CHECK_EQ
+#include   // for CHECK_EQ, CHECK

 #include   // for size_t
 #include   // for weak_ptr, shared_ptr, make_shared
@@ -12,6 +12,7 @@
 #include   // for queue
 #include   // for thread
 #include   // for unordered_map
+#include   // for move
 #include   // for vector

 namespace xgboost {
@@ -32,6 +33,8 @@ class DMatrixCache {

     CacheT const& Value() const { return *value; }
     CacheT& Value() { return *value; }
+
+    Item(std::shared_ptr m, std::shared_ptr v) : ref{m}, value{std::move(v)} {}
   };

   static constexpr std::size_t DefaultSize() { return 32; }
@@ -141,7 +144,7 @@ class DMatrixCache {
     auto it = container_.find(key);
     if (it == container_.cend()) {
       // after the new DMatrix, cache size is at most max_size
-      container_[key] = {m, std::make_shared(args...)};
+      container_.emplace(key, Item{m, std::make_shared(args...)});
       queue_.emplace(key);
     }
     return container_.at(key).value;
diff --git a/include/xgboost/json.h b/include/xgboost/json.h
index 3546e58d1..3b34c2874 100644
--- a/include/xgboost/json.h
+++ b/include/xgboost/json.h
@@ -1,5 +1,5 @@
 /**
- * Copyright by XGBoost Contributors 2019-2023
+ * Copyright 2019-2023 by XGBoost Contributors
  */
 #ifndef XGBOOST_JSON_H_
 #define XGBOOST_JSON_H_
@@ -372,7 +372,7 @@ class Json {
   /*! \brief Use your own JsonWriter. */
   static void Dump(Json json, JsonWriter* writer);

-  Json() : ptr_{new JsonNull} {}
+  Json() = default;

   // number
   explicit Json(JsonNumber number) : ptr_{new JsonNumber(std::move(number))} {}
@@ -462,7 +462,7 @@ class Json {
   IntrusivePtr const& Ptr() const { return ptr_; }

  private:
-  IntrusivePtr ptr_;
+  IntrusivePtr ptr_{new JsonNull};
 };

 /**
diff --git a/include/xgboost/json_io.h b/include/xgboost/json_io.h
index 742231055..e11545b04 100644
--- a/include/xgboost/json_io.h
+++ b/include/xgboost/json_io.h
@@ -22,13 +22,13 @@ namespace detail {
 // static_cast and std::to_string.
 template ::value>* = nullptr>
 std::string CharToStr(Char c) {
-  static_assert(std::is_same::value, "");
+  static_assert(std::is_same::value);
   return std::string{c};
 }

 template ::value>* = nullptr>
 std::string CharToStr(Char c) {
-  static_assert(std::is_same::value, "");
+  static_assert(std::is_same::value);
   return (c <= static_cast(127) ?
std::string{c} : std::to_string(c)); } } // namespace detail diff --git a/include/xgboost/linalg.h b/include/xgboost/linalg.h index cb28f582c..176002225 100644 --- a/include/xgboost/linalg.h +++ b/include/xgboost/linalg.h @@ -52,14 +52,14 @@ struct ArrayInterfaceHandler { template constexpr size_t Offset(S (&strides)[D], size_t n, Head head) { - static_assert(dim < D, ""); + static_assert(dim < D); return n + head * strides[dim]; } template constexpr std::enable_if_t Offset(S (&strides)[D], size_t n, Head head, Tail &&...rest) { - static_assert(dim < D, ""); + static_assert(dim < D); return Offset(strides, n + (head * strides[dim]), std::forward(rest)...); } @@ -193,14 +193,14 @@ LINALG_HD auto UnravelImpl(I idx, common::Span shape) { template void ReshapeImpl(size_t (&out_shape)[D], I s) { - static_assert(dim < D, ""); + static_assert(dim < D); out_shape[dim] = s; } template * = nullptr> void ReshapeImpl(size_t (&out_shape)[D], I &&s, S &&...rest) { - static_assert(dim < D, ""); + static_assert(dim < D); out_shape[dim] = s; ReshapeImpl(out_shape, std::forward(rest)...); } @@ -230,7 +230,8 @@ struct Conjunction : std::true_type {}; template struct Conjunction : B1 {}; template -struct Conjunction : std::conditional_t, B1> {}; +struct Conjunction + : std::conditional_t(B1::value), Conjunction, B1> {}; template using IsAllIntegral = Conjunction>...>; @@ -291,8 +292,8 @@ class TensorView { template LINALG_HD size_t MakeSliceDim(size_t new_shape[D], size_t new_stride[D], detail::RangeTag &&range) const { - static_assert(new_dim < D, ""); - static_assert(old_dim < kDim, ""); + static_assert(new_dim < D); + static_assert(old_dim < kDim); new_stride[new_dim] = stride_[old_dim]; new_shape[new_dim] = range.Size(); assert(static_cast(range.end) <= shape_[old_dim]); @@ -306,8 +307,8 @@ class TensorView { template LINALG_HD size_t MakeSliceDim(size_t new_shape[D], size_t new_stride[D], detail::RangeTag &&range, S &&...slices) const { - static_assert(new_dim < D, ""); - static_assert(old_dim < kDim, ""); + static_assert(new_dim < D); + static_assert(old_dim < kDim); new_stride[new_dim] = stride_[old_dim]; new_shape[new_dim] = range.Size(); assert(static_cast(range.end) <= shape_[old_dim]); @@ -320,8 +321,8 @@ class TensorView { template LINALG_HD size_t MakeSliceDim(size_t new_shape[D], size_t new_stride[D], detail::AllTag) const { - static_assert(new_dim < D, ""); - static_assert(old_dim < kDim, ""); + static_assert(new_dim < D); + static_assert(old_dim < kDim); new_stride[new_dim] = stride_[old_dim]; new_shape[new_dim] = shape_[old_dim]; return 0; @@ -332,8 +333,8 @@ class TensorView { template LINALG_HD size_t MakeSliceDim(size_t new_shape[D], size_t new_stride[D], detail::AllTag, S &&...slices) const { - static_assert(new_dim < D, ""); - static_assert(old_dim < kDim, ""); + static_assert(new_dim < D); + static_assert(old_dim < kDim); new_stride[new_dim] = stride_[old_dim]; new_shape[new_dim] = shape_[old_dim]; return MakeSliceDim(new_shape, new_stride, @@ -343,7 +344,7 @@ class TensorView { template LINALG_HD size_t MakeSliceDim(DMLC_ATTRIBUTE_UNUSED size_t new_shape[D], DMLC_ATTRIBUTE_UNUSED size_t new_stride[D], Index i) const { - static_assert(old_dim < kDim, ""); + static_assert(old_dim < kDim); return stride_[old_dim] * i; } /** @@ -352,7 +353,7 @@ class TensorView { template LINALG_HD std::enable_if_t::value, size_t> MakeSliceDim( size_t new_shape[D], size_t new_stride[D], Index i, S &&...slices) const { - static_assert(old_dim < kDim, ""); + static_assert(old_dim < kDim); auto offset = 
stride_[old_dim] * i; auto res = MakeSliceDim(new_shape, new_stride, std::forward(slices)...); @@ -501,7 +502,7 @@ class TensorView { */ LINALG_HD bool CContiguous() const { StrideT stride; - static_assert(std::is_same::value, ""); + static_assert(std::is_same::value); // It's contiguous if the stride can be calculated from shape. detail::CalcStride(shape_, stride); return common::Span{stride_} == common::Span{stride}; @@ -511,7 +512,7 @@ class TensorView { */ LINALG_HD bool FContiguous() const { StrideT stride; - static_assert(std::is_same::value, ""); + static_assert(std::is_same::value); // It's contiguous if the stride can be calculated from shape. detail::CalcStride(shape_, stride); return common::Span{stride_} == common::Span{stride}; @@ -625,7 +626,7 @@ Json ArrayInterface(TensorView const &t) { array_interface["version"] = 3; char constexpr kT = detail::ArrayInterfaceHandler::TypeChar(); - static_assert(kT != '\0', ""); + static_assert(kT != '\0'); if (DMLC_LITTLE_ENDIAN) { array_interface["typestr"] = String{"<" + (kT + std::to_string(sizeof(T)))}; } else { diff --git a/include/xgboost/metric.h b/include/xgboost/metric.h index 2f67e47dd..3e405cf58 100644 --- a/include/xgboost/metric.h +++ b/include/xgboost/metric.h @@ -28,7 +28,7 @@ struct Context; */ class Metric : public Configurable { protected: - Context const* ctx_; + Context const* ctx_{nullptr}; public: /*! diff --git a/include/xgboost/version_config.h b/include/xgboost/version_config.h index 3eb87e664..8005b8391 100644 --- a/include/xgboost/version_config.h +++ b/include/xgboost/version_config.h @@ -1,11 +1,11 @@ -/*! - * Copyright 2019 XGBoost contributors +/** + * Copyright 2019-2023 by XGBoost contributors */ #ifndef XGBOOST_VERSION_CONFIG_H_ #define XGBOOST_VERSION_CONFIG_H_ -#define XGBOOST_VER_MAJOR 2 -#define XGBOOST_VER_MINOR 0 -#define XGBOOST_VER_PATCH 0 +#define XGBOOST_VER_MAJOR 2 /* NOLINT */ +#define XGBOOST_VER_MINOR 0 /* NOLINT */ +#define XGBOOST_VER_PATCH 0 /* NOLINT */ #endif // XGBOOST_VERSION_CONFIG_H_ diff --git a/src/collective/communicator-inl.h b/src/collective/communicator-inl.h index 7e943ffac..702bda256 100644 --- a/src/collective/communicator-inl.h +++ b/src/collective/communicator-inl.h @@ -1,5 +1,5 @@ -/*! - * Copyright 2022 XGBoost contributors +/** + * Copyright 2022-2023 by XGBoost contributors */ #pragma once #include @@ -9,7 +9,7 @@ namespace xgboost { namespace collective { -/*! +/** * \brief Initialize the collective communicator. 
* * Currently the communicator API is experimental, function signatures may change in the future @@ -210,7 +210,7 @@ inline void Allreduce(uint64_t *send_receive_buffer, size_t count) { template {} && !std::is_same{}> > inline void Allreduce(T *send_receive_buffer, size_t count) { - static_assert(sizeof(T) == sizeof(uint64_t), ""); + static_assert(sizeof(T) == sizeof(uint64_t)); Communicator::Get()->AllReduce(send_receive_buffer, count, DataType::kUInt64, op); } diff --git a/src/common/categorical.h b/src/common/categorical.h index 452aaa8c1..d7e262812 100644 --- a/src/common/categorical.h +++ b/src/common/categorical.h @@ -42,9 +42,9 @@ constexpr inline bst_cat_t OutOfRangeCat() { inline XGBOOST_DEVICE bool InvalidCat(float cat) { constexpr auto kMaxCat = OutOfRangeCat(); - static_assert(static_cast(static_cast(kMaxCat)) == kMaxCat, ""); - static_assert(static_cast(static_cast(kMaxCat + 1)) != kMaxCat + 1, ""); - static_assert(static_cast(kMaxCat + 1) == kMaxCat, ""); + static_assert(static_cast(static_cast(kMaxCat)) == kMaxCat); + static_assert(static_cast(static_cast(kMaxCat + 1)) != kMaxCat + 1); + static_assert(static_cast(kMaxCat + 1) == kMaxCat); return cat < 0 || cat >= kMaxCat; } diff --git a/src/common/charconv.cc b/src/common/charconv.cc index 8be2c0a81..3114a90e3 100644 --- a/src/common/charconv.cc +++ b/src/common/charconv.cc @@ -270,7 +270,9 @@ struct RyuPowLogUtils { */ static uint32_t MulPow5InvDivPow2(const uint32_t m, const uint32_t q, const int32_t j) noexcept(true) { - return MulShift(m, kFloatPow5InvSplit[q], j); + static_assert(sizeof(kFloatPow5InvSplit) == 55 * sizeof(std::uint64_t)); + assert(q < 55); + return MulShift(m, kFloatPow5InvSplit[q], j); // NOLINT } /* @@ -495,12 +497,10 @@ class PowerBaseComputer { static_cast(IEEE754::kFloatBias) - static_cast(IEEE754::kFloatMantissaBits) - static_cast(2); - static_assert(static_cast(1) - - static_cast(IEEE754::kFloatBias) - - static_cast(IEEE754::kFloatMantissaBits) - - static_cast(2) == - -151, - ""); + static_assert(static_cast(1) - static_cast(IEEE754::kFloatBias) - + static_cast(IEEE754::kFloatMantissaBits) - + static_cast(2) == + -151); mantissa_base2 = f.mantissa; } else { base2_range.exponent = static_cast(f.exponent) - IEEE754::kFloatBias - @@ -544,7 +544,7 @@ class RyuPrinter { // Function precondition: v is not a 10-digit number. // (f2s: 9 digits are sufficient for round-tripping.) // (d2fixed: We print 9-digit blocks.) - static_assert(100000000 == Tens(8), ""); + static_assert(100000000 == Tens(8)); assert(v < Tens(9)); if (v >= Tens(8)) { return 9; @@ -911,7 +911,7 @@ from_chars_result FromCharFloatImpl(const char *buffer, const int len, // the bias and also special-case the value 0. int32_t shift = (f_e2 == 0 ? 1 : f_e2) - exp_b2 - IEEE754::kFloatBias - IEEE754::kFloatMantissaBits; - assert(shift >= 0); + assert(shift >= 1); // We need to round up if the exact value is more than 0.5 above the value we // computed. That's equivalent to checking if the last removed bit was 1 and @@ -920,7 +920,7 @@ from_chars_result FromCharFloatImpl(const char *buffer, const int len, // // We need to update trailingZeros given that we have the exact output // exponent ieee_e2 now. 
- trailing_zeros &= (mantissa_b2 & ((1u << (shift - 1)) - 1)) == 0; + trailing_zeros &= (mantissa_b2 & ((1u << (shift - 1)) - 1)) == 0; // NOLINT uint32_t lastRemovedBit = (mantissa_b2 >> (shift - 1)) & 1; bool roundup = (lastRemovedBit != 0) && (!trailing_zeros || (((mantissa_b2 >> shift) & 1) != 0)); diff --git a/src/common/charconv.h b/src/common/charconv.h index b931ed7ce..c37b0bd96 100644 --- a/src/common/charconv.h +++ b/src/common/charconv.h @@ -87,7 +87,7 @@ inline to_chars_result to_chars(char *first, char *last, int64_t value) { // NOL if (value < 0) { *first = '-'; std::advance(first, 1); - unsigned_value = uint64_t(~value) + uint64_t(1); + unsigned_value = static_cast(~value) + static_cast(1); } return detail::ToCharsUnsignedImpl(first, last, unsigned_value); } diff --git a/src/common/compressed_iterator.h b/src/common/compressed_iterator.h index 9f60722fb..5a5b5f252 100644 --- a/src/common/compressed_iterator.h +++ b/src/common/compressed_iterator.h @@ -1,12 +1,13 @@ -/*! - * Copyright 2017 by Contributors +/** + * Copyright 2017-2023 by XGBoost Contributors * \file compressed_iterator.h */ #pragma once #include -#include -#include + #include +#include +#include // for size_t #include "common.h" @@ -36,7 +37,7 @@ static const int kPadding = 4; // Assign padding so we can read slightly off // The number of bits required to represent a given unsigned range inline XGBOOST_DEVICE size_t SymbolBits(size_t num_symbols) { auto bits = std::ceil(log2(static_cast(num_symbols))); - return common::Max(static_cast(bits), size_t(1)); + return common::Max(static_cast(bits), static_cast(1)); } } // namespace detail diff --git a/src/common/device_helpers.cuh b/src/common/device_helpers.cuh index d56965dfe..58300d06c 100644 --- a/src/common/device_helpers.cuh +++ b/src/common/device_helpers.cuh @@ -20,6 +20,7 @@ #include #include +#include // for size_t #include #include #include @@ -178,7 +179,7 @@ inline size_t MaxSharedMemory(int device_idx) { dh::safe_cuda(cudaDeviceGetAttribute (&max_shared_memory, cudaDevAttrMaxSharedMemoryPerBlock, device_idx)); - return size_t(max_shared_memory); + return static_cast(max_shared_memory); } /** @@ -195,7 +196,7 @@ inline size_t MaxSharedMemoryOptin(int device_idx) { dh::safe_cuda(cudaDeviceGetAttribute (&max_shared_memory, cudaDevAttrMaxSharedMemoryPerBlockOptin, device_idx)); - return size_t(max_shared_memory); + return static_cast(max_shared_memory); } inline void CheckComputeCapability() { diff --git a/src/common/hist_util.cu b/src/common/hist_util.cu index 2d3dff054..08ef98ea1 100644 --- a/src/common/hist_util.cu +++ b/src/common/hist_util.cu @@ -1,33 +1,31 @@ -/*! 
- * Copyright 2018~2020 XGBoost contributors +/** + * Copyright 2018~2023 by XGBoost contributors */ - -#include - +#include #include +#include #include #include -#include #include +#include #include #include -#include -#include +#include +#include // for size_t #include #include #include #include +#include "categorical.h" #include "device_helpers.cuh" -#include "hist_util.h" #include "hist_util.cuh" +#include "hist_util.h" #include "math.h" // NOLINT #include "quantile.h" -#include "categorical.h" #include "xgboost/host_device_vector.h" - namespace xgboost { namespace common { @@ -318,7 +316,7 @@ HistogramCuts DeviceSketch(int device, DMatrix* dmat, int max_bins, size_t batch_nnz = batch.data.Size(); auto const& info = dmat->Info(); for (auto begin = 0ull; begin < batch_nnz; begin += sketch_batch_num_elements) { - size_t end = std::min(batch_nnz, size_t(begin + sketch_batch_num_elements)); + size_t end = std::min(batch_nnz, static_cast(begin + sketch_batch_num_elements)); if (has_weights) { bool is_ranking = HostSketchContainer::UseGroup(dmat->Info()); dh::caching_device_vector groups(info.group_ptr_.cbegin(), diff --git a/src/common/hist_util.cuh b/src/common/hist_util.cuh index 7dd62b382..856404107 100644 --- a/src/common/hist_util.cuh +++ b/src/common/hist_util.cuh @@ -1,5 +1,5 @@ -/*! - * Copyright 2020 XGBoost contributors +/** + * Copyright 2020-2023 by XGBoost contributors * * \brief Front end and utilities for GPU based sketching. Works on sliding window * instead of stream. @@ -9,11 +9,13 @@ #include +#include // for size_t + +#include "../data/device_adapter.cuh" +#include "device_helpers.cuh" #include "hist_util.h" #include "quantile.cuh" -#include "device_helpers.cuh" #include "timer.h" -#include "../data/device_adapter.cuh" namespace xgboost { namespace common { @@ -304,7 +306,8 @@ void AdapterDeviceSketch(Batch batch, int num_bins, num_rows, num_cols, std::numeric_limits::max(), device, num_cuts_per_feature, true); for (auto begin = 0ull; begin < batch.Size(); begin += sketch_batch_num_elements) { - size_t end = std::min(batch.Size(), size_t(begin + sketch_batch_num_elements)); + size_t end = + std::min(batch.Size(), static_cast(begin + sketch_batch_num_elements)); ProcessWeightedSlidingWindow(batch, info, num_cuts_per_feature, HostSketchContainer::UseGroup(info), missing, device, num_cols, begin, end, @@ -316,7 +319,8 @@ void AdapterDeviceSketch(Batch batch, int num_bins, num_rows, num_cols, std::numeric_limits::max(), device, num_cuts_per_feature, false); for (auto begin = 0ull; begin < batch.Size(); begin += sketch_batch_num_elements) { - size_t end = std::min(batch.Size(), size_t(begin + sketch_batch_num_elements)); + size_t end = + std::min(batch.Size(), static_cast(begin + sketch_batch_num_elements)); ProcessSlidingWindow(batch, info, device, num_cols, begin, end, missing, sketch_container, num_cuts_per_feature); } diff --git a/src/common/io.cc b/src/common/io.cc index 8405e6604..da3a75d65 100644 --- a/src/common/io.cc +++ b/src/common/io.cc @@ -50,7 +50,7 @@ size_t PeekableInStream::PeekRead(void* dptr, size_t size) { } } -FixedSizeStream::FixedSizeStream(PeekableInStream* stream) : PeekableInStream(stream), pointer_{0} { +FixedSizeStream::FixedSizeStream(PeekableInStream* stream) : PeekableInStream(stream) { size_t constexpr kInitialSize = 4096; size_t size{kInitialSize}, total{0}; buffer_.clear(); diff --git a/src/common/io.h b/src/common/io.h index bcc6c4704..2dd593c60 100644 --- a/src/common/io.h +++ b/src/common/io.h @@ -27,8 +27,7 @@ using MemoryBufferStream = 
rabit::utils::MemoryBufferStream; */ class PeekableInStream : public dmlc::Stream { public: - explicit PeekableInStream(dmlc::Stream* strm) - : strm_(strm), buffer_ptr_(0) {} + explicit PeekableInStream(dmlc::Stream* strm) : strm_(strm) {} size_t Read(void* dptr, size_t size) override; virtual size_t PeekRead(void* dptr, size_t size); @@ -41,7 +40,7 @@ class PeekableInStream : public dmlc::Stream { /*! \brief input stream */ dmlc::Stream *strm_; /*! \brief current buffer pointer */ - size_t buffer_ptr_; + size_t buffer_ptr_{0}; /*! \brief internal buffer */ std::string buffer_; }; @@ -72,7 +71,7 @@ class FixedSizeStream : public PeekableInStream { void Take(std::string* out); private: - size_t pointer_; + size_t pointer_{0}; std::string buffer_; }; diff --git a/src/common/json.cc b/src/common/json.cc index 0fddf87d5..8e2dd05ff 100644 --- a/src/common/json.cc +++ b/src/common/json.cc @@ -710,10 +710,10 @@ void Json::Dump(Json json, JsonWriter* writer) { writer->Save(json); } -static_assert(std::is_nothrow_move_constructible::value, ""); -static_assert(std::is_nothrow_move_constructible::value, ""); -static_assert(std::is_nothrow_move_constructible::value, ""); -static_assert(std::is_nothrow_move_constructible::value, ""); +static_assert(std::is_nothrow_move_constructible::value); +static_assert(std::is_nothrow_move_constructible::value); +static_assert(std::is_nothrow_move_constructible::value); +static_assert(std::is_nothrow_move_constructible::value); Json UBJReader::ParseArray() { auto marker = PeekNextChar(); diff --git a/src/common/numeric.cc b/src/common/numeric.cc index 2a1ca4d44..240e0234a 100644 --- a/src/common/numeric.cc +++ b/src/common/numeric.cc @@ -14,7 +14,7 @@ double Reduce(Context const* ctx, HostDeviceVector const& values) { if (ctx->IsCPU()) { auto const& h_values = values.ConstHostVector(); auto result = cpu_impl::Reduce(ctx, h_values.cbegin(), h_values.cend(), 0.0); - static_assert(std::is_same::value, ""); + static_assert(std::is_same::value); return result; } return cuda_impl::Reduce(ctx, values); diff --git a/src/common/numeric.h b/src/common/numeric.h index 7b52b7ba6..6a1c15fd0 100644 --- a/src/common/numeric.h +++ b/src/common/numeric.h @@ -42,8 +42,8 @@ void RunLengthEncode(Iter begin, Iter end, std::vector* p_out) { */ template void PartialSum(int32_t n_threads, InIt begin, InIt end, T init, OutIt out_it) { - static_assert(std::is_same::value_type>::value, ""); - static_assert(std::is_same::value_type>::value, ""); + static_assert(std::is_same::value_type>::value); + static_assert(std::is_same::value_type>::value); // The number of threads is pegged to the batch size. If the OMP block is parallelized // on anything other than the batch/block size, it should be reassigned auto n = static_cast(std::distance(begin, end)); diff --git a/src/common/quantile.cu b/src/common/quantile.cu index 8f89ed26f..cabdc603b 100644 --- a/src/common/quantile.cu +++ b/src/common/quantile.cu @@ -1,5 +1,5 @@ -/*! 
- * Copyright 2020-2022 by XGBoost Contributors +/** + * Copyright 2020-2023 by XGBoost Contributors */ #include #include @@ -109,7 +109,7 @@ void PruneImpl(common::Span cuts_ptr, template void CopyTo(Span out, Span src) { CHECK_EQ(out.size(), src.size()); - static_assert(std::is_same, std::remove_cv_t>::value, ""); + static_assert(std::is_same, std::remove_cv_t>::value); dh::safe_cuda(cudaMemcpyAsync(out.data(), src.data(), out.size_bytes(), cudaMemcpyDefault)); @@ -143,7 +143,7 @@ common::Span> MergePath( thrust::make_zip_iterator(thrust::make_tuple(b_ind_iter, place_holder)); dh::XGBCachingDeviceAllocator alloc; - static_assert(sizeof(Tuple) == sizeof(SketchEntry), ""); + static_assert(sizeof(Tuple) == sizeof(SketchEntry)); // We reuse the memory for storing merge path. common::Span merge_path{reinterpret_cast(out.data()), out.size()}; // Determine the merge path, 0 if element is from x, 1 if it's from y. diff --git a/src/common/row_set.h b/src/common/row_set.h index 87d5f5287..11f12bda3 100644 --- a/src/common/row_set.h +++ b/src/common/row_set.h @@ -77,14 +77,14 @@ class RowSetCollection { if (row_indices_.empty()) { // edge case: empty instance set constexpr size_t* kBegin = nullptr; constexpr size_t* kEnd = nullptr; - static_assert(kEnd - kBegin == 0, ""); - elem_of_each_node_.emplace_back(Elem(kBegin, kEnd, 0)); + static_assert(kEnd - kBegin == 0); + elem_of_each_node_.emplace_back(kBegin, kEnd, 0); return; } const size_t* begin = dmlc::BeginPtr(row_indices_); const size_t* end = dmlc::BeginPtr(row_indices_) + row_indices_.size(); - elem_of_each_node_.emplace_back(Elem(begin, end, 0)); + elem_of_each_node_.emplace_back(begin, end, 0); } std::vector* Data() { return &row_indices_; } diff --git a/src/common/stats.h b/src/common/stats.h index 639da32ce..2f42a698e 100644 --- a/src/common/stats.h +++ b/src/common/stats.h @@ -49,7 +49,7 @@ float Quantile(Context const* ctx, double alpha, Iter const& begin, Iter const& } auto val = [&](size_t i) { return *(begin + sorted_idx[i]); }; - static_assert(std::is_same::value, ""); + static_assert(std::is_same::value); if (alpha <= (1 / (n + 1))) { return val(0); diff --git a/src/data/device_adapter.cuh b/src/data/device_adapter.cuh index 4a635e92d..56c494dd1 100644 --- a/src/data/device_adapter.cuh +++ b/src/data/device_adapter.cuh @@ -1,12 +1,14 @@ -/*! 
- * Copyright (c) 2019 by Contributors +/** + * Copyright 2019-2023 by XGBoost Contributors * \file device_adapter.cuh */ #ifndef XGBOOST_DATA_DEVICE_ADAPTER_H_ #define XGBOOST_DATA_DEVICE_ADAPTER_H_ +#include // for size_t #include #include #include + #include "../common/device_helpers.cuh" #include "../common/math.h" #include "adapter.h" @@ -205,10 +207,10 @@ size_t GetRowCounts(const AdapterBatchT batch, common::Span offset, } }); dh::XGBCachingDeviceAllocator alloc; - size_t row_stride = dh::Reduce( - thrust::cuda::par(alloc), thrust::device_pointer_cast(offset.data()), - thrust::device_pointer_cast(offset.data()) + offset.size(), size_t(0), - thrust::maximum()); + size_t row_stride = + dh::Reduce(thrust::cuda::par(alloc), thrust::device_pointer_cast(offset.data()), + thrust::device_pointer_cast(offset.data()) + offset.size(), + static_cast(0), thrust::maximum()); return row_stride; } }; // namespace data diff --git a/src/gbm/gblinear.cc b/src/gbm/gblinear.cc index 84e766121..575820758 100644 --- a/src/gbm/gblinear.cc +++ b/src/gbm/gblinear.cc @@ -75,10 +75,7 @@ class GBLinear : public GradientBooster { : GradientBooster{ctx}, learner_model_param_{learner_model_param}, model_{learner_model_param}, - previous_model_{learner_model_param}, - sum_instance_weight_(0), - sum_weight_complete_(false), - is_converged_(false) {} + previous_model_{learner_model_param} {} void Configure(const Args& cfg) override { if (model_.weight.size() == 0) { @@ -344,10 +341,10 @@ class GBLinear : public GradientBooster { GBLinearModel previous_model_; GBLinearTrainParam param_; std::unique_ptr updater_; - double sum_instance_weight_; - bool sum_weight_complete_; + double sum_instance_weight_{}; + bool sum_weight_complete_{false}; common::Monitor monitor_; - bool is_converged_; + bool is_converged_{false}; }; // register the objective functions diff --git a/src/gbm/gblinear_model.h b/src/gbm/gblinear_model.h index 577494f87..80dd1ac04 100644 --- a/src/gbm/gblinear_model.h +++ b/src/gbm/gblinear_model.h @@ -47,12 +47,12 @@ class GBLinearModel : public Model { DeprecatedGBLinearModelParam param_; public: - int32_t num_boosted_rounds; + int32_t num_boosted_rounds{0}; LearnerModelParam const* learner_model_param; public: - explicit GBLinearModel(LearnerModelParam const* learner_model_param) : - num_boosted_rounds{0}, learner_model_param {learner_model_param} {} + explicit GBLinearModel(LearnerModelParam const *learner_model_param) + : learner_model_param{learner_model_param} {} void Configure(Args const &) { } // weight for each of feature, bias is the last one diff --git a/src/tree/gpu_hist/evaluate_splits.cu b/src/tree/gpu_hist/evaluate_splits.cu index 540b9c4fe..c48c8ddf3 100644 --- a/src/tree/gpu_hist/evaluate_splits.cu +++ b/src/tree/gpu_hist/evaluate_splits.cu @@ -97,7 +97,7 @@ class EvaluateSplitAgent { idx += kBlockSize) { local_sum += LoadGpair(node_histogram + idx); } - local_sum = SumReduceT(temp_storage->sum_reduce).Sum(local_sum); + local_sum = SumReduceT(temp_storage->sum_reduce).Sum(local_sum); // NOLINT // Broadcast result from thread 0 return {__shfl_sync(0xffffffff, local_sum.GetQuantisedGrad(), 0), __shfl_sync(0xffffffff, local_sum.GetQuantisedHess(), 0)}; diff --git a/src/tree/gpu_hist/histogram.cu b/src/tree/gpu_hist/histogram.cu index eda9baba9..489c8d6f7 100644 --- a/src/tree/gpu_hist/histogram.cu +++ b/src/tree/gpu_hist/histogram.cu @@ -1,15 +1,15 @@ -/*! 
- * Copyright 2020-2021 by XGBoost Contributors +/** + * Copyright 2020-2023 by XGBoost Contributors */ #include #include #include -#include +#include // uint32_t #include -#include "../../common/device_helpers.cuh" #include "../../common/deterministic.cuh" +#include "../../common/device_helpers.cuh" #include "../../data/ellpack_page.cuh" #include "histogram.cuh" #include "row_partitioner.cuh" @@ -83,7 +83,8 @@ GradientQuantiser::GradientQuantiser(common::Span gpair) { */ to_floating_point_ = histogram_rounding / - T(IntT(1) << (sizeof(typename GradientSumT::ValueT) * 8 - 2)); // keep 1 for sign bit + static_cast(static_cast(1) + << (sizeof(typename GradientSumT::ValueT) * 8 - 2)); // keep 1 for sign bit /** * Factor for converting gradients from floating-point to fixed-point. For * f64: @@ -93,8 +94,8 @@ GradientQuantiser::GradientQuantiser(common::Span gpair) { * rounding is calcuated as exp(m), see the rounding factor calcuation for * details. */ - to_fixed_point_ = - GradientSumT(T(1) / to_floating_point_.GetGrad(), T(1) / to_floating_point_.GetHess()); + to_fixed_point_ = GradientSumT(static_cast(1) / to_floating_point_.GetGrad(), + static_cast(1) / to_floating_point_.GetHess()); } @@ -153,7 +154,8 @@ class HistogramAgent { d_gpair_(d_gpair) {} __device__ void ProcessPartialTileShared(std::size_t offset) { for (std::size_t idx = offset + threadIdx.x; - idx < min(offset + kBlockThreads * kItemsPerTile, n_elements_); idx += kBlockThreads) { + idx < std::min(offset + kBlockThreads * kItemsPerTile, n_elements_); + idx += kBlockThreads) { int ridx = d_ridx_[idx / feature_stride_]; int gidx = matrix_ @@ -295,9 +297,8 @@ void BuildGradientHistogram(CUDAContext const* ctx, EllpackDeviceAccessor const& // Allocate number of blocks such that each block has about kMinItemsPerBlock work // Up to a maximum where the device is saturated - grid_size = - min(grid_size, - unsigned(common::DivRoundUp(items_per_group, kMinItemsPerBlock))); + grid_size = std::min(grid_size, static_cast( + common::DivRoundUp(items_per_group, kMinItemsPerBlock))); dh::LaunchKernel {dim3(grid_size, num_groups), static_cast(kBlockThreads), smem_size, ctx->Stream()} (kernel, matrix, feature_groups, d_ridx, histogram.data(), diff --git a/src/tree/gpu_hist/row_partitioner.cuh b/src/tree/gpu_hist/row_partitioner.cuh index a2519ae6f..f1c420ba0 100644 --- a/src/tree/gpu_hist/row_partitioner.cuh +++ b/src/tree/gpu_hist/row_partitioner.cuh @@ -130,7 +130,7 @@ void SortPositionBatch(common::Span> d_batch_info, std::size_t item_idx; AssignBatch(batch_info_itr, idx, &batch_idx, &item_idx); auto op_res = op(ridx[item_idx], batch_info_itr[batch_idx].data); - return IndexFlagTuple{bst_uint(item_idx), op_res, batch_idx, op_res}; + return IndexFlagTuple{static_cast(item_idx), op_res, batch_idx, op_res}; }); size_t temp_bytes = 0; if (tmp->empty()) { diff --git a/src/tree/hist/evaluate_splits.h b/src/tree/hist/evaluate_splits.h index 41f7183f2..d90eb93f4 100644 --- a/src/tree/hist/evaluate_splits.h +++ b/src/tree/hist/evaluate_splits.h @@ -1,10 +1,11 @@ -/*! 
- * Copyright 2021-2022 by XGBoost Contributors +/** + * Copyright 2021-2023 by XGBoost Contributors */ #ifndef XGBOOST_TREE_HIST_EVALUATE_SPLITS_H_ #define XGBOOST_TREE_HIST_EVALUATE_SPLITS_H_ #include +#include // for size_t #include #include #include @@ -367,7 +368,7 @@ class HistEvaluator { std::copy_n(entries.cbegin(), num_entries, buffer.begin() + num_entries * rank); collective::Allgather(buffer.data(), buffer.size() * sizeof(ExpandEntry)); for (auto worker = 0; worker < world; ++worker) { - for (auto nidx_in_set = 0; nidx_in_set < entries.size(); ++nidx_in_set) { + for (std::size_t nidx_in_set = 0; nidx_in_set < entries.size(); ++nidx_in_set) { entries[nidx_in_set].split.Update(buffer[worker * num_entries + nidx_in_set].split); } } diff --git a/src/tree/param.h b/src/tree/param.h index 3f5e4ec7b..98895e5a2 100644 --- a/src/tree/param.h +++ b/src/tree/param.h @@ -1,5 +1,5 @@ -/*! - * Copyright 2014-2021 by Contributors +/** + * Copyright 2014-2023 by XGBoost Contributors * \file param.h * \brief training parameters, statistics used to support tree construction. * \author Tianqi Chen @@ -238,9 +238,8 @@ XGBOOST_DEVICE inline static T1 ThresholdL1(T1 w, T2 alpha) { // calculate the cost of loss function template -XGBOOST_DEVICE inline T CalcGainGivenWeight(const TrainingParams &p, - T sum_grad, T sum_hess, T w) { - return -(T(2.0) * sum_grad * w + (sum_hess + p.reg_lambda) * common::Sqr(w)); +XGBOOST_DEVICE inline T CalcGainGivenWeight(const TrainingParams &p, T sum_grad, T sum_hess, T w) { + return -(static_cast(2.0) * sum_grad * w + (sum_hess + p.reg_lambda) * common::Sqr(w)); } // calculate weight given the statistics @@ -261,7 +260,7 @@ XGBOOST_DEVICE inline T CalcWeight(const TrainingParams &p, T sum_grad, template XGBOOST_DEVICE inline T CalcGain(const TrainingParams &p, T sum_grad, T sum_hess) { if (sum_hess < p.min_child_weight || sum_hess <= 0.0) { - return T(0.0); + return static_cast(0.0); } if (p.max_delta_step == 0.0f) { if (p.reg_alpha == 0.0f) { diff --git a/src/tree/tree_model.cc b/src/tree/tree_model.cc index 4bd2294d1..55e37a919 100644 --- a/src/tree/tree_model.cc +++ b/src/tree/tree_model.cc @@ -1069,8 +1069,8 @@ bool LoadModelImpl(Json const& in, TreeParam* param, std::vector* split_types = std::remove_reference_t(n_nodes); split_categories_segments = std::remove_reference_t(n_nodes); - static_assert(std::is_integral(lefts, 0))>::value, ""); - static_assert(std::is_floating_point(loss_changes, 0))>::value, ""); + static_assert(std::is_integral(lefts, 0))>::value); + static_assert(std::is_floating_point(loss_changes, 0))>::value); CHECK_EQ(n_nodes, split_categories_segments.size()); // Set node diff --git a/src/tree/updater_gpu_hist.cu b/src/tree/updater_gpu_hist.cu index 0f4d542b0..6345c44d5 100644 --- a/src/tree/updater_gpu_hist.cu +++ b/src/tree/updater_gpu_hist.cu @@ -160,11 +160,11 @@ class DeviceHistogramStorage { if (nidx_map_.find(nidx) != nidx_map_.cend()) { // Fetch from normal cache auto ptr = data_.data().get() + nidx_map_.at(nidx); - return common::Span(reinterpret_cast(ptr), n_bins_); + return {reinterpret_cast(ptr), static_cast(n_bins_)}; } else { // Fetch from overflow auto ptr = overflow_.data().get() + overflow_nidx_map_.at(nidx); - return common::Span(reinterpret_cast(ptr), n_bins_); + return {reinterpret_cast(ptr), static_cast(n_bins_)}; } } }; @@ -334,8 +334,8 @@ struct GPUHistMakerDevice { } bst_feature_t max_active_features = 0; for (auto input : h_node_inputs) { - max_active_features = std::max(max_active_features, - 
                                     bst_feature_t(input.feature_set.size()));
+    max_active_features =
+        std::max(max_active_features, static_cast(input.feature_set.size()));
   }
   dh::safe_cuda(cudaMemcpyAsync(
       d_node_inputs.data().get(), h_node_inputs.data(),
diff --git a/tests/buildkite/pipeline.yml b/tests/buildkite/pipeline.yml
index 2f01c36db..e2a4fcaf2 100644
--- a/tests/buildkite/pipeline.yml
+++ b/tests/buildkite/pipeline.yml
@@ -22,11 +22,11 @@ steps:
       queue: linux-amd64-cpu
   - wait
   #### -------- BUILD --------
-  # - label: ":console: Run clang-tidy"
-  #   command: "tests/buildkite/run-clang-tidy.sh"
-  #   key: run-clang-tidy
-  #   agents:
-  #     queue: linux-amd64-cpu
+  - label: ":console: Run clang-tidy"
+    command: "tests/buildkite/run-clang-tidy.sh"
+    key: run-clang-tidy
+    agents:
+      queue: linux-amd64-cpu
   - wait
   - label: ":console: Build CPU"
     command: "tests/buildkite/build-cpu.sh"
diff --git a/tests/ci_build/Dockerfile.clang_tidy b/tests/ci_build/Dockerfile.clang_tidy
index 3a33a080c..967f24d3c 100644
--- a/tests/ci_build/Dockerfile.clang_tidy
+++ b/tests/ci_build/Dockerfile.clang_tidy
@@ -1,5 +1,5 @@
 ARG CUDA_VERSION_ARG
-FROM nvidia/cuda:$CUDA_VERSION_ARG-devel-ubuntu18.04
+FROM nvidia/cuda:$CUDA_VERSION_ARG-devel-ubuntu20.04
 ARG CUDA_VERSION_ARG

 # Environment
 ENV DEBIAN_FRONTEND noninteractive

 # Install all basic requirements
 RUN \
-    apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub && \
+    apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/3bf863cc.pub && \
     apt-get update && \
     apt-get install -y tar unzip wget git build-essential python3 python3-pip software-properties-common \
         apt-transport-https ca-certificates gnupg-agent && \
     wget -nv -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - && \
-    add-apt-repository -u 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-11 main' && \
+    add-apt-repository -u 'deb http://apt.llvm.org/focal/ llvm-toolchain-focal-15 main' && \
     apt-get update && \
-    apt-get install -y llvm-11 clang-tidy-11 clang-11 && \
+    apt-get install -y llvm-15 clang-tidy-15 clang-15 libomp-15-dev && \
     wget -nv -nc https://cmake.org/files/v3.18/cmake-3.18.0-Linux-x86_64.sh --no-check-certificate && \
     bash cmake-3.18.0-Linux-x86_64.sh --skip-license --prefix=/usr

 # Set default clang-tidy version
 RUN \
-    update-alternatives --install /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-11 100 && \
-    update-alternatives --install /usr/bin/clang clang /usr/bin/clang-11 100
+    update-alternatives --install /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-15 100 && \
+    update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 100

 # Install Python packages
 RUN \
diff --git a/tests/ci_build/tidy.py b/tests/ci_build/tidy.py
index 107e62662..33e153850 100755
--- a/tests/ci_build/tidy.py
+++ b/tests/ci_build/tidy.py
@@ -109,6 +109,10 @@ class ClangTidy(object):
                 continue
             elif components[i] == '-rdynamic':
                 continue
+            elif components[i] == "-Xfatbin=-compress-all":
+                continue
+            elif components[i] == "-forward-unknown-to-host-compiler":
+                continue
             elif (components[i] == '-x' and
                   components[i+1] == 'cu'):
                 # -x cu -> -x cuda
diff --git a/tests/cpp/c_api/test_c_api.cc b/tests/cpp/c_api/test_c_api.cc
index 6b5bc7cb8..675da940c 100644
--- a/tests/cpp/c_api/test_c_api.cc
+++ b/tests/cpp/c_api/test_c_api.cc
@@ -267,7 +267,7 @@ TEST(CAPI, DMatrixSetFeatureName) {
   }

   char const* feat_types [] {"i", "q"};
-  static_assert(sizeof(feat_types)/ sizeof(feat_types[0]) == kCols, "");
+
static_assert(sizeof(feat_types) / sizeof(feat_types[0]) == kCols); XGDMatrixSetStrFeatureInfo(handle, "feature_type", feat_types, kCols); char const **c_out_types; XGDMatrixGetStrFeatureInfo(handle, u8"feature_type", &out_len, diff --git a/tests/cpp/common/test_charconv.cc b/tests/cpp/common/test_charconv.cc index cce48f76f..0e43ea51e 100644 --- a/tests/cpp/common/test_charconv.cc +++ b/tests/cpp/common/test_charconv.cc @@ -128,7 +128,7 @@ TEST(Ryu, Regression) { TestRyu("2E2", 200.0f); TestRyu("3.3554432E7", 3.3554432E7f); - static_assert(1.1920929E-7f == std::numeric_limits::epsilon(), ""); + static_assert(1.1920929E-7f == std::numeric_limits::epsilon()); TestRyu("1.1920929E-7", std::numeric_limits::epsilon()); } diff --git a/tests/cpp/common/test_group_data.cc b/tests/cpp/common/test_group_data.cc index 94bb23e4a..719bc3fc5 100644 --- a/tests/cpp/common/test_group_data.cc +++ b/tests/cpp/common/test_group_data.cc @@ -43,8 +43,8 @@ TEST(GroupData, ParallelGroupBuilder) { builder2.Push(2, Entry(0, 4), 0); builder2.Push(2, Entry(1, 5), 0); - expected_data.emplace_back(Entry(0, 4)); - expected_data.emplace_back(Entry(1, 5)); + expected_data.emplace_back(0, 4); + expected_data.emplace_back(1, 5); expected_offsets.emplace_back(6); EXPECT_EQ(data, expected_data); diff --git a/tests/cpp/common/test_hist_util.cu b/tests/cpp/common/test_hist_util.cu index c9db7f646..45948b711 100644 --- a/tests/cpp/common/test_hist_util.cu +++ b/tests/cpp/common/test_hist_util.cu @@ -143,7 +143,7 @@ void TestMixedSketch() { size_t n_samples = 1000, n_features = 2, n_categories = 3; std::vector data(n_samples * n_features); SimpleLCG gen; - SimpleRealUniformDistribution cat_d{0.0f, float(n_categories)}; + SimpleRealUniformDistribution cat_d{0.0f, static_cast(n_categories)}; SimpleRealUniformDistribution num_d{0.0f, 3.0f}; for (size_t i = 0; i < n_samples * n_features; ++i) { if (i % 2 == 0) { diff --git a/tests/cpp/common/test_intrusive_ptr.cc b/tests/cpp/common/test_intrusive_ptr.cc index a41697f17..5b0747625 100644 --- a/tests/cpp/common/test_intrusive_ptr.cc +++ b/tests/cpp/common/test_intrusive_ptr.cc @@ -13,9 +13,9 @@ class NotCopyConstructible { NotCopyConstructible(NotCopyConstructible&& that) = default; }; static_assert( - !std::is_trivially_copy_constructible::value, ""); + !std::is_trivially_copy_constructible::value); static_assert( - !std::is_trivially_copy_assignable::value, ""); + !std::is_trivially_copy_assignable::value); class ForIntrusivePtrTest { public: diff --git a/tests/cpp/common/test_linalg.cc b/tests/cpp/common/test_linalg.cc index 3da4c482c..bfba591fa 100644 --- a/tests/cpp/common/test_linalg.cc +++ b/tests/cpp/common/test_linalg.cc @@ -1,5 +1,5 @@ -/*! - * Copyright 2021 by XGBoost Contributors +/** + * Copyright 2021-2023 by XGBoost Contributors */ #include #include @@ -108,7 +108,7 @@ TEST(Linalg, TensorView) { // for Slice. 
auto t = MakeTensorView(data, {2, 3, 4}, 0); auto s = t.Slice(1, 2, All()); - static_assert(decltype(s)::kDimension == 1, ""); + static_assert(decltype(s)::kDimension == 1); } { auto t = MakeTensorView(data, {2, 3, 4}, 0); @@ -121,7 +121,7 @@ TEST(Linalg, TensorView) { // range slice auto t = MakeTensorView(data, {2, 3, 4}, 0); auto s = t.Slice(linalg::All(), linalg::Range(1, 3), 2); - static_assert(decltype(s)::kDimension == 2, ""); + static_assert(decltype(s)::kDimension == 2); std::vector sol{6, 10, 18, 22}; auto k = 0; for (size_t i = 0; i < s.Shape(0); ++i) { @@ -136,7 +136,7 @@ TEST(Linalg, TensorView) { // range slice auto t = MakeTensorView(data, {2, 3, 4}, 0); auto s = t.Slice(1, linalg::Range(1, 3), linalg::Range(1, 3)); - static_assert(decltype(s)::kDimension == 2, ""); + static_assert(decltype(s)::kDimension == 2); std::vector sol{17, 18, 21, 22}; auto k = 0; for (size_t i = 0; i < s.Shape(0); ++i) { @@ -151,7 +151,7 @@ TEST(Linalg, TensorView) { // same as no slice. auto t = MakeTensorView(data, {2, 3, 4}, 0); auto s = t.Slice(linalg::All(), linalg::Range(0, 3), linalg::Range(0, 4)); - static_assert(decltype(s)::kDimension == 3, ""); + static_assert(decltype(s)::kDimension == 3); auto all = t.Slice(linalg::All(), linalg::All(), linalg::All()); for (size_t i = 0; i < s.Shape(0); ++i) { for (size_t j = 0; j < s.Shape(1); ++j) { diff --git a/tests/cpp/common/test_linalg.cu b/tests/cpp/common/test_linalg.cu index 14f89774b..ac2b9a581 100644 --- a/tests/cpp/common/test_linalg.cu +++ b/tests/cpp/common/test_linalg.cu @@ -1,5 +1,5 @@ -/*! - * Copyright 2021-2022 by XGBoost Contributors +/** + * Copyright 2021-2023 by XGBoost Contributors */ #include @@ -60,7 +60,7 @@ void TestSlice() { dh::LaunchN(1, [=] __device__(size_t) { auto s = t.Slice(linalg::All(), linalg::Range(0, 3), linalg::Range(0, 4)); auto all = t.Slice(linalg::All(), linalg::All(), linalg::All()); - static_assert(decltype(s)::kDimension == 3, ""); + static_assert(decltype(s)::kDimension == 3); for (size_t i = 0; i < s.Shape(0); ++i) { for (size_t j = 0; j < s.Shape(1); ++j) { for (size_t k = 0; k < s.Shape(2); ++k) { diff --git a/tests/cpp/common/test_span.cc b/tests/cpp/common/test_span.cc index 3ee99c0ae..133fae9fd 100644 --- a/tests/cpp/common/test_span.cc +++ b/tests/cpp/common/test_span.cc @@ -522,9 +522,9 @@ TEST(Span, Empty) { TEST(SpanDeathTest, Empty) { std::vector data(1, 0); ASSERT_TRUE(data.data()); - Span s{data.data(), Span::index_type(0)}; // ok to define 0 size span. + // ok to define 0 size span. + Span s{data.data(), static_cast::index_type>(0)}; EXPECT_DEATH(s[0], ""); // not ok to use it. 
} - } // namespace common } // namespace xgboost diff --git a/tests/cpp/data/test_array_interface.cc b/tests/cpp/data/test_array_interface.cc index 72e5ccc10..7e0484842 100644 --- a/tests/cpp/data/test_array_interface.cc +++ b/tests/cpp/data/test_array_interface.cc @@ -119,13 +119,13 @@ TEST(ArrayInterface, TrivialDim) { } TEST(ArrayInterface, ToDType) { - static_assert(ToDType::kType == ArrayInterfaceHandler::kF4, ""); - static_assert(ToDType::kType == ArrayInterfaceHandler::kF8, ""); + static_assert(ToDType::kType == ArrayInterfaceHandler::kF4); + static_assert(ToDType::kType == ArrayInterfaceHandler::kF8); - static_assert(ToDType::kType == ArrayInterfaceHandler::kU4, ""); - static_assert(ToDType::kType == ArrayInterfaceHandler::kU8, ""); + static_assert(ToDType::kType == ArrayInterfaceHandler::kU4); + static_assert(ToDType::kType == ArrayInterfaceHandler::kU8); - static_assert(ToDType::kType == ArrayInterfaceHandler::kI4, ""); - static_assert(ToDType::kType == ArrayInterfaceHandler::kI8, ""); + static_assert(ToDType::kType == ArrayInterfaceHandler::kI4); + static_assert(ToDType::kType == ArrayInterfaceHandler::kI8); } } // namespace xgboost diff --git a/tests/cpp/data/test_data.cc b/tests/cpp/data/test_data.cc index 7b35c6f6f..c37328192 100644 --- a/tests/cpp/data/test_data.cc +++ b/tests/cpp/data/test_data.cc @@ -21,7 +21,7 @@ TEST(SparsePage, PushCSC) { offset = {0, 1, 4}; for (size_t i = 0; i < offset.back(); ++i) { - data.emplace_back(Entry(i, 0.1f)); + data.emplace_back(i, 0.1f); } SparsePage other; diff --git a/tests/cpp/data/test_simple_dmatrix.cu b/tests/cpp/data/test_simple_dmatrix.cu index 4b020c0a6..04859ed1e 100644 --- a/tests/cpp/data/test_simple_dmatrix.cu +++ b/tests/cpp/data/test_simple_dmatrix.cu @@ -189,8 +189,8 @@ TEST(SimpleCSRSource, FromColumnarSparse) { auto& mask = column_bitfields[0]; mask.resize(8); - for (size_t j = 0; j < mask.size(); ++j) { - mask[j] = ~0; + for (auto && j : mask) { + j = ~0; } // the 2^th entry of first column is invalid // [0 0 0 0 0 1 0 0] @@ -201,8 +201,8 @@ TEST(SimpleCSRSource, FromColumnarSparse) { auto& mask = column_bitfields[1]; mask.resize(8); - for (size_t j = 0; j < mask.size(); ++j) { - mask[j] = ~0; + for (auto && j : mask) { + j = ~0; } // the 19^th entry of second column is invalid // [~0~], [~0~], [0 0 0 0 1 0 0 0] diff --git a/tests/cpp/data/test_sparse_page_dmatrix.cc b/tests/cpp/data/test_sparse_page_dmatrix.cc index 8c2ff9514..24dc40949 100644 --- a/tests/cpp/data/test_sparse_page_dmatrix.cc +++ b/tests/cpp/data/test_sparse_page_dmatrix.cc @@ -96,7 +96,7 @@ void TestRetainPage() { // make sure it's const and the caller can not modify the content of page. 
for (auto& page : m->GetBatches()) { - static_assert(std::is_const>::value, ""); + static_assert(std::is_const>::value); } } diff --git a/tests/cpp/data/test_sparse_page_dmatrix.cu b/tests/cpp/data/test_sparse_page_dmatrix.cu index 64ce0568c..bb562ffb7 100644 --- a/tests/cpp/data/test_sparse_page_dmatrix.cu +++ b/tests/cpp/data/test_sparse_page_dmatrix.cu @@ -1,5 +1,6 @@ -// Copyright by Contributors - +/** + * Copyright 2019-2023 by XGBoost Contributors + */ #include "../../../src/common/compressed_iterator.h" #include "../../../src/data/ellpack_page.cuh" #include "../../../src/data/sparse_page_dmatrix.h" @@ -69,7 +70,7 @@ TEST(SparsePageDMatrix, RetainEllpackPage) { std::vector> iterators; for (auto it = begin; it != end; ++it) { iterators.push_back(it.Page()); - gidx_buffers.emplace_back(HostDeviceVector{}); + gidx_buffers.emplace_back(); gidx_buffers.back().Resize((*it).Impl()->gidx_buffer.Size()); gidx_buffers.back().Copy((*it).Impl()->gidx_buffer); } @@ -87,7 +88,7 @@ TEST(SparsePageDMatrix, RetainEllpackPage) { // make sure it's const and the caller can not modify the content of page. for (auto& page : m->GetBatches({0, 32})) { - static_assert(std::is_const>::value, ""); + static_assert(std::is_const>::value); } // The above iteration clears out all references inside DMatrix. diff --git a/tests/cpp/helpers.cc b/tests/cpp/helpers.cc index fcaffa5c6..ebb56d2d3 100644 --- a/tests/cpp/helpers.cc +++ b/tests/cpp/helpers.cc @@ -186,7 +186,7 @@ SimpleLCG::StateType SimpleLCG::operator()() { SimpleLCG::StateType SimpleLCG::Min() const { return min(); } SimpleLCG::StateType SimpleLCG::Max() const { return max(); } // Make sure it's compile time constant. -static_assert(SimpleLCG::max() - SimpleLCG::min(), ""); +static_assert(SimpleLCG::max() - SimpleLCG::min()); void RandomDataGenerator::GenerateDense(HostDeviceVector *out) const { xgboost::SimpleRealUniformDistribution dist(lower_, upper_); diff --git a/tests/cpp/helpers.h b/tests/cpp/helpers.h index 63ef6ac50..ec1ace796 100644 --- a/tests/cpp/helpers.h +++ b/tests/cpp/helpers.h @@ -46,7 +46,7 @@ class GradientBooster; template Float RelError(Float l, Float r) { - static_assert(std::is_floating_point::value, ""); + static_assert(std::is_floating_point::value); return std::abs(1.0f - l / r); } @@ -164,7 +164,7 @@ class SimpleRealUniformDistribution { ResultT sum_value = 0, r_k = 1; for (size_t k = m; k != 0; --k) { - sum_value += ResultT((*rng)() - rng->Min()) * r_k; + sum_value += static_cast((*rng)() - rng->Min()) * r_k; r_k *= r; } @@ -191,12 +191,10 @@ Json GetArrayInterface(HostDeviceVector *storage, size_t rows, size_t cols) { Json array_interface{Object()}; array_interface["data"] = std::vector(2); if (storage->DeviceCanRead()) { - array_interface["data"][0] = - Integer(reinterpret_cast(storage->ConstDevicePointer())); + array_interface["data"][0] = Integer{reinterpret_cast(storage->ConstDevicePointer())}; array_interface["stream"] = nullptr; } else { - array_interface["data"][0] = - Integer(reinterpret_cast(storage->ConstHostPointer())); + array_interface["data"][0] = Integer{reinterpret_cast(storage->ConstHostPointer())}; } array_interface["data"][1] = Boolean(false); diff --git a/tests/cpp/objective/test_regression_obj.cc b/tests/cpp/objective/test_regression_obj.cc index c5cd2537c..bfb292dc3 100644 --- a/tests/cpp/objective/test_regression_obj.cc +++ b/tests/cpp/objective/test_regression_obj.cc @@ -157,7 +157,7 @@ TEST(Objective, DeclareUnifiedTest(PoissonRegressionGPair)) { ObjFunction::Create("count:poisson", &ctx) }; - 
args.emplace_back(std::make_pair("max_delta_step", "0.1f")); + args.emplace_back("max_delta_step", "0.1f"); obj->Configure(args); CheckObjFunction(obj, @@ -259,7 +259,7 @@ TEST(Objective, DeclareUnifiedTest(TweedieRegressionGPair)) { std::vector> args; std::unique_ptr obj{ObjFunction::Create("reg:tweedie", &ctx)}; - args.emplace_back(std::make_pair("tweedie_variance_power", "1.1f")); + args.emplace_back("tweedie_variance_power", "1.1f"); obj->Configure(args); CheckObjFunction(obj, diff --git a/tests/cpp/tree/test_histmaker.cc b/tests/cpp/tree/test_histmaker.cc index cd49eeec1..809f66c22 100644 --- a/tests/cpp/tree/test_histmaker.cc +++ b/tests/cpp/tree/test_histmaker.cc @@ -1,3 +1,6 @@ +/** + * Copyright 2019-2023 by XGBoost Contributors + */ #include #include #include @@ -18,7 +21,7 @@ std::unique_ptr> GenerateGradients(std::size_t ro xgboost::SimpleLCG gen; xgboost::SimpleRealUniformDistribution dist(0.0f, 1.0f); - for (auto i = 0; i < rows; ++i) { + for (std::size_t i = 0; i < rows; ++i) { auto grad = dist(&gen); auto hess = dist(&gen); h_gradients[i] = GradientPair{grad, hess}; diff --git a/tests/cpp/tree/test_prune.cc b/tests/cpp/tree/test_prune.cc index 52fa58a2d..9dd3ec30a 100644 --- a/tests/cpp/tree/test_prune.cc +++ b/tests/cpp/tree/test_prune.cc @@ -19,10 +19,8 @@ TEST(Updater, Prune) { int constexpr kCols = 16; std::vector> cfg; - cfg.emplace_back(std::pair("num_feature", - std::to_string(kCols))); - cfg.emplace_back(std::pair( - "min_split_loss", "10")); + cfg.emplace_back("num_feature", std::to_string(kCols)); + cfg.emplace_back("min_split_loss", "10"); // These data are just place holders. HostDeviceVector gpair = @@ -73,7 +71,7 @@ TEST(Updater, Prune) { 0, 0.5f, true, 0.3, 0.4, 0.5, /*loss_chg=*/19.0f, 0.0f, /*left_sum=*/0.0f, /*right_sum=*/0.0f); - cfg.emplace_back(std::make_pair("max_depth", "1")); + cfg.emplace_back("max_depth", "1"); pruner->Configure(cfg); pruner->Update(&gpair, p_dmat.get(), position, trees); @@ -83,7 +81,7 @@ TEST(Updater, Prune) { 0, 0.5f, true, 0.3, 0.4, 0.5, /*loss_chg=*/18.0f, 0.0f, /*left_sum=*/0.0f, /*right_sum=*/0.0f); - cfg.emplace_back(std::make_pair("min_split_loss", "0")); + cfg.emplace_back("min_split_loss", "0"); pruner->Configure(cfg); pruner->Update(&gpair, p_dmat.get(), position, trees); ASSERT_EQ(tree.NumExtraNodes(), 2);