Update clang-tidy. (#10730)

- Install cmake using pip.
- Fix compile command generation.
- Clean up the tidy script and remove the need to load the yaml file.
- Fix modernize-type-traits warnings.
- Fix the span class; polymorphism support is dropped.
Jiaming Yuan 2024-08-22 04:12:18 +08:00 committed by GitHub
parent 03bd1183bc
commit cb54374550
34 changed files with 361 additions and 387 deletions
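
Most of the C++ hunks below apply the same mechanical change: pre-C++17 trait spellings such as std::enable_if<...>::type and std::is_same<...>::value become the _t/_v shorthands. A minimal before/after sketch of the pattern (the function names are illustrative, not taken from this commit):

#include <type_traits>

// Pre-C++17 spelling, the kind flagged by the modernize-type-traits check.
template <typename T,
          typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
constexpr T TwiceOld(T v) { return v + v; }

// C++17 alias/variable templates, the form this commit migrates to.
template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
constexpr T Twice(T v) { return v + v; }

static_assert(TwiceOld(2) == 4);
static_assert(Twice(2) == 4);

Both declarations are equivalent; the shorter form simply drops the typename/::type and ::value boilerplate.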

View File

@ -686,8 +686,11 @@ class TCPSocket {
* \return Size of the data actually received; returns -1 if an error occurs.
*/
auto Recv(void *buf, std::size_t len, std::int32_t flags = 0) {
char *_buf = reinterpret_cast<char *>(buf);
char *_buf = static_cast<char *>(buf);
// See https://github.com/llvm/llvm-project/issues/104241 for skipped tidy analysis
// NOLINTBEGIN(clang-analyzer-unix.BlockInCriticalSection)
return recv(handle_, _buf, len, flags);
// NOLINTEND(clang-analyzer-unix.BlockInCriticalSection)
}
/**
* \brief Send a string; the format matches the Python socket wrapper in RABIT.

View File

@ -85,7 +85,7 @@ enum GPUAccess {
template <typename T>
class HostDeviceVector {
static_assert(std::is_standard_layout<T>::value, "HostDeviceVector admits only POD types");
static_assert(std::is_standard_layout_v<T>, "HostDeviceVector admits only POD types");
public:
explicit HostDeviceVector(size_t size = 0, T v = T(), DeviceOrd device = DeviceOrd::CPU());
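
A quick illustration of what std::is_standard_layout_v admits (the types below are hypothetical):

#include <type_traits>

struct Plain { int a; float b; };            // standard layout: accepted
struct Virt { virtual ~Virt() = default; };  // vtable pointer: rejected

static_assert(std::is_standard_layout_v<Plain>);
static_assert(!std::is_standard_layout_v<Virt>);

Note that standard layout is a weaker requirement than POD despite the assertion message: a type with a user-provided constructor still qualifies.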

View File

@ -11,9 +11,8 @@
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <type_traits> // std::enable_if,std::enable_if_t
#include <type_traits> // std::enable_if_t
#include <utility>
#include <vector>
@ -223,6 +222,14 @@ class JsonObject : public Value {
~JsonObject() override = default;
};
namespace detail {
template <typename T, typename U>
using IsSameT = std::enable_if_t<std::is_same_v<std::remove_cv_t<T>, std::remove_cv_t<U>>>;
template <typename T>
using IsF64T = std::enable_if_t<std::is_same_v<T, double>>;
} // namespace detail
class JsonNumber : public Value {
public:
using Float = float;
@ -232,15 +239,11 @@ class JsonNumber : public Value {
public:
JsonNumber() : Value(ValueKind::kNumber) {}
template <typename FloatT,
typename std::enable_if<std::is_same<FloatT, Float>::value>::type* = nullptr>
JsonNumber(FloatT value) : Value(ValueKind::kNumber) { // NOLINT
number_ = value;
}
template <typename FloatT,
typename std::enable_if<std::is_same<FloatT, double>::value>::type* = nullptr>
JsonNumber(FloatT value) : Value{ValueKind::kNumber}, // NOLINT
number_{static_cast<Float>(value)} {}
template <typename FloatT, typename detail::IsSameT<FloatT, Float>* = nullptr>
JsonNumber(FloatT value) : Value(ValueKind::kNumber), number_{value} {} // NOLINT
template <typename FloatT, typename detail::IsF64T<FloatT>* = nullptr>
JsonNumber(FloatT value) // NOLINT
: Value{ValueKind::kNumber}, number_{static_cast<Float>(value)} {}
JsonNumber(JsonNumber const& that) = delete;
JsonNumber(JsonNumber&& that) noexcept : Value{ValueKind::kNumber}, number_{that.number_} {}
@ -258,6 +261,13 @@ class JsonNumber : public Value {
}
};
namespace detail {
template <typename IntT>
using Not32SizeT = std::enable_if_t<std::is_same_v<IntT, std::uint32_t> &&
!std::is_same_v<std::size_t, std::uint32_t>>;
}
class JsonInteger : public Value {
public:
using Int = int64_t;
@ -267,24 +277,18 @@ class JsonInteger : public Value {
public:
JsonInteger() : Value(ValueKind::kInteger) {} // NOLINT
template <typename IntT,
typename std::enable_if<std::is_same<IntT, Int>::value>::type* = nullptr>
JsonInteger(IntT value) : Value(ValueKind::kInteger), integer_{value} {} // NOLINT
template <typename IntT,
typename std::enable_if<std::is_same<IntT, size_t>::value>::type* = nullptr>
JsonInteger(IntT value) : Value(ValueKind::kInteger), // NOLINT
integer_{static_cast<Int>(value)} {}
template <typename IntT,
typename std::enable_if<std::is_same<IntT, int32_t>::value>::type* = nullptr>
JsonInteger(IntT value) : Value(ValueKind::kInteger), // NOLINT
integer_{static_cast<Int>(value)} {}
template <typename IntT,
typename std::enable_if<
std::is_same<IntT, uint32_t>::value &&
!std::is_same<std::size_t, uint32_t>::value>::type * = nullptr>
template <typename IntT, typename detail::IsSameT<IntT, Int>* = nullptr>
JsonInteger(IntT value) : Value(ValueKind::kInteger), integer_{value} {} // NOLINT
template <typename IntT, typename detail::IsSameT<IntT, std::size_t>* = nullptr>
JsonInteger(IntT value) // NOLINT
: Value(ValueKind::kInteger),
integer_{static_cast<Int>(value)} {}
: Value(ValueKind::kInteger), integer_{static_cast<Int>(value)} {}
template <typename IntT, typename detail::IsSameT<IntT, std::int32_t>* = nullptr>
JsonInteger(IntT value) // NOLINT
: Value(ValueKind::kInteger), integer_{static_cast<Int>(value)} {}
template <typename IntT,
typename detail::Not32SizeT<IntT>* = nullptr>
JsonInteger(IntT value) // NOLINT
: Value(ValueKind::kInteger), integer_{static_cast<Int>(value)} {}
JsonInteger(JsonInteger &&that) noexcept
: Value{ValueKind::kInteger}, integer_{that.integer_} {}
@ -325,12 +329,8 @@ class JsonBoolean : public Value {
public:
JsonBoolean() : Value(ValueKind::kBoolean) {} // NOLINT
// Ambiguous with JsonNumber.
template <typename Bool,
typename std::enable_if<
std::is_same<Bool, bool>::value ||
std::is_same<Bool, bool const>::value>::type* = nullptr>
JsonBoolean(Bool value) : // NOLINT
Value(ValueKind::kBoolean), boolean_{value} {}
template <typename Bool, typename detail::IsSameT<std::remove_cv_t<Bool>, bool>* = nullptr>
JsonBoolean(Bool value) : Value(ValueKind::kBoolean), boolean_{value} {} // NOLINT
JsonBoolean(JsonBoolean&& value) noexcept: // NOLINT
Value(ValueKind::kBoolean), boolean_{value.boolean_} {}
@ -506,71 +506,52 @@ bool IsA(Json const& j) {
namespace detail {
// Number
template <typename T,
typename std::enable_if<
std::is_same<T, JsonNumber>::value>::type* = nullptr>
template <typename T, typename std::enable_if_t<std::is_same_v<T, JsonNumber>>* = nullptr>
JsonNumber::Float& GetImpl(T& val) { // NOLINT
return val.GetNumber();
}
template <typename T,
typename std::enable_if<
std::is_same<T, JsonNumber const>::value>::type* = nullptr>
template <typename T, typename std::enable_if_t<std::is_same_v<T, JsonNumber const>>* = nullptr>
JsonNumber::Float const& GetImpl(T& val) { // NOLINT
return val.GetNumber();
}
// Integer
template <typename T,
typename std::enable_if<
std::is_same<T, JsonInteger>::value>::type* = nullptr>
template <typename T, typename std::enable_if_t<std::is_same_v<T, JsonInteger>>* = nullptr>
JsonInteger::Int& GetImpl(T& val) { // NOLINT
return val.GetInteger();
}
template <typename T,
typename std::enable_if<
std::is_same<T, JsonInteger const>::value>::type* = nullptr>
template <typename T, typename std::enable_if_t<std::is_same_v<T, JsonInteger const>>* = nullptr>
JsonInteger::Int const& GetImpl(T& val) { // NOLINT
return val.GetInteger();
}
// String
template <typename T,
typename std::enable_if<
std::is_same<T, JsonString>::value>::type* = nullptr>
template <typename T, typename std::enable_if_t<std::is_same_v<T, JsonString>>* = nullptr>
std::string& GetImpl(T& val) { // NOLINT
return val.GetString();
}
template <typename T,
typename std::enable_if<
std::is_same<T, JsonString const>::value>::type* = nullptr>
template <typename T, typename std::enable_if_t<std::is_same_v<T, JsonString const>>* = nullptr>
std::string const& GetImpl(T& val) { // NOLINT
return val.GetString();
}
// Boolean
template <typename T,
typename std::enable_if<
std::is_same<T, JsonBoolean>::value>::type* = nullptr>
template <typename T, typename std::enable_if_t<std::is_same_v<T, JsonBoolean>>* = nullptr>
bool& GetImpl(T& val) { // NOLINT
return val.GetBoolean();
}
template <typename T,
typename std::enable_if<
std::is_same<T, JsonBoolean const>::value>::type* = nullptr>
typename std::enable_if_t<std::is_same_v<T, JsonBoolean const>>* = nullptr>
bool const& GetImpl(T& val) { // NOLINT
return val.GetBoolean();
}
// Array
template <typename T,
typename std::enable_if<
std::is_same<T, JsonArray>::value>::type* = nullptr>
template <typename T, typename std::enable_if_t<std::is_same_v<T, JsonArray>>* = nullptr>
std::vector<Json>& GetImpl(T& val) { // NOLINT
return val.GetArray();
}
template <typename T,
typename std::enable_if<
std::is_same<T, JsonArray const>::value>::type* = nullptr>
template <typename T, typename std::enable_if_t<std::is_same_v<T, JsonArray const>>* = nullptr>
std::vector<Json> const& GetImpl(T& val) { // NOLINT
return val.GetArray();
}
@ -586,12 +567,11 @@ std::vector<T> const& GetImpl(JsonTypedArray<T, kind> const& val) {
}
// Object
template <typename T, typename std::enable_if<std::is_same<T, JsonObject>::value>::type* = nullptr>
template <typename T, typename std::enable_if_t<std::is_same_v<T, JsonObject>>* = nullptr>
JsonObject::Map& GetImpl(T& val) { // NOLINT
return val.GetObject();
}
template <typename T,
typename std::enable_if<std::is_same<T, JsonObject const>::value>::type* = nullptr>
template <typename T, typename std::enable_if_t<std::is_same_v<T, JsonObject const>>* = nullptr>
JsonObject::Map const& GetImpl(T& val) { // NOLINT
return val.GetObject();
}
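
The detail::IsSameT alias collapses the repeated enable_if pattern used by the constructors above. A self-contained sketch of the same idiom (the Number class is illustrative, not the real JsonNumber):

#include <type_traits>

namespace detail {
// Enabled only when T and U name the same type, ignoring const/volatile.
template <typename T, typename U>
using IsSameT = std::enable_if_t<std::is_same_v<std::remove_cv_t<T>, std::remove_cv_t<U>>>;
}  // namespace detail

class Number {
 public:
  // Participates in overload resolution only for the exact stored type.
  template <typename F, typename detail::IsSameT<F, float>* = nullptr>
  explicit Number(F v) : value_{v} {}

 private:
  float value_;
};

static_assert(std::is_constructible_v<Number, float>);
static_assert(!std::is_constructible_v<Number, double>);  // no implicit narrowing path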

View File

@ -1,5 +1,5 @@
/**
* Copyright 2019-2023, XGBoost Contributors
* Copyright 2019-2024, XGBoost Contributors
*/
#ifndef XGBOOST_JSON_IO_H_
#define XGBOOST_JSON_IO_H_
@ -7,11 +7,8 @@
#include <xgboost/base.h>
#include <xgboost/json.h>
#include <cinttypes>
#include <cstdint> // for int8_t
#include <limits>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
@ -111,7 +108,7 @@ class JsonReader {
};
class JsonWriter {
template <typename T, std::enable_if_t<!std::is_same<Json, T>::value>* = nullptr>
template <typename T, std::enable_if_t<!std::is_same_v<Json, T>>* = nullptr>
void Save(T const& v) {
this->Save(Json{v});
}
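
The !std::is_same_v<Json, T> constraint keeps Json itself out of the template wrapper so the concrete overload handles it; without the guard the wrapper would wrap a Json inside another Json. A minimal sketch under that assumption (Json and Writer here are stand-ins, not the real classes):

#include <type_traits>

struct Json {
  Json() = default;
  explicit Json(int) {}  // stand-in for the real conversion constructors
};

struct Writer {
  void Save(Json const&) { /* actual serialization */ }
  // Convert anything that is not already a Json, then dispatch above.
  template <typename T, std::enable_if_t<!std::is_same_v<Json, T>>* = nullptr>
  void Save(T const& v) { this->Save(Json{v}); }
};

int main() {
  Writer w;
  w.Save(42);      // goes through the converting wrapper
  w.Save(Json{});  // hits the concrete overload directly
}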

View File

@ -43,9 +43,9 @@ namespace detail {
struct ArrayInterfaceHandler {
template <typename T>
static constexpr char TypeChar() {
return (std::is_floating_point<T>::value
return (std::is_floating_point_v<T>
? 'f'
: (std::is_integral<T>::value ? (std::is_signed<T>::value ? 'i' : 'u') : '\0'));
: (std::is_integral_v<T> ? (std::is_signed_v<T> ? 'i' : 'u') : '\0'));
}
};
@ -93,7 +93,7 @@ struct RangeTag {
*/
template <typename T>
constexpr int32_t CalcSliceDim() {
return std::is_same<T, IntTag>::value ? 0 : 1;
return std::is_same_v<T, IntTag> ? 0 : 1;
}
template <typename T, typename... S>
@ -114,7 +114,7 @@ template <typename S>
using RemoveCRType = std::remove_const_t<std::remove_reference_t<S>>;
template <typename S>
using IndexToTag = std::conditional_t<std::is_integral<RemoveCRType<S>>::value, IntTag, S>;
using IndexToTag = std::conditional_t<std::is_integral_v<RemoveCRType<S>>, IntTag, S>;
template <int32_t n, typename Fn>
LINALG_HD constexpr auto UnrollLoop(Fn fn) {
@ -159,7 +159,7 @@ inline LINALG_HD int Popc(uint64_t v) {
template <std::size_t D, typename Head>
LINALG_HD void IndexToArr(std::size_t (&arr)[D], Head head) {
static_assert(std::is_integral<std::remove_reference_t<Head>>::value, "Invalid index type.");
static_assert(std::is_integral_v<std::remove_reference_t<Head>>, "Invalid index type.");
arr[D - 1] = head;
}
@ -169,7 +169,7 @@ LINALG_HD void IndexToArr(std::size_t (&arr)[D], Head head) {
template <std::size_t D, typename Head, typename... Rest>
LINALG_HD void IndexToArr(std::size_t (&arr)[D], Head head, Rest &&...index) {
static_assert(sizeof...(Rest) < D, "Index overflow.");
static_assert(std::is_integral<std::remove_reference_t<Head>>::value, "Invalid index type.");
static_assert(std::is_integral_v<std::remove_reference_t<Head>>, "Invalid index type.");
arr[D - sizeof...(Rest) - 1] = head;
IndexToArr(arr, std::forward<Rest>(index)...);
}
@ -193,7 +193,7 @@ constexpr auto ArrToTuple(T (&arr)[N]) {
template <typename I, std::int32_t D>
LINALG_HD auto UnravelImpl(I idx, common::Span<size_t const, D> shape) {
std::size_t index[D]{0};
static_assert(std::is_signed<decltype(D)>::value,
static_assert(std::is_signed_v<decltype(D)>,
"Don't change the type without changing the for loop.");
auto const sptr = shape.data();
for (int32_t dim = D; --dim > 0;) {
@ -379,7 +379,7 @@ class TensorView {
* \brief Slice dimension for Index tag.
*/
template <size_t old_dim, size_t new_dim, int32_t D, typename Index, typename... S>
LINALG_HD std::enable_if_t<std::is_integral<Index>::value, size_t> MakeSliceDim(
LINALG_HD std::enable_if_t<std::is_integral_v<Index>, size_t> MakeSliceDim(
size_t new_shape[D], size_t new_stride[D], Index i, S &&...slices) const {
static_assert(old_dim < kDim);
auto offset = stride_[old_dim] * i;
@ -547,7 +547,7 @@ class TensorView {
*/
[[nodiscard]] LINALG_HD bool CContiguous() const {
StrideT stride;
static_assert(std::is_same<decltype(stride), decltype(stride_)>::value);
static_assert(std::is_same_v<decltype(stride), decltype(stride_)>);
// It's contiguous if the stride can be calculated from shape.
detail::CalcStride(shape_, stride);
return common::Span<size_t const, kDim>{stride_} == common::Span<size_t const, kDim>{stride};
@ -557,7 +557,7 @@ class TensorView {
*/
[[nodiscard]] LINALG_HD bool FContiguous() const {
StrideT stride;
static_assert(std::is_same<decltype(stride), decltype(stride_)>::value);
static_assert(std::is_same_v<decltype(stride), decltype(stride_)>);
// It's contiguous if the stride can be calculated from shape.
detail::CalcStride<kDim, true>(shape_, stride);
return common::Span<size_t const, kDim>{stride_} == common::Span<size_t const, kDim>{stride};
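
TypeChar in the first hunk maps a C++ type to a one-character typecode at compile time; its behavior can be pinned down with static_asserts. A standalone restatement for illustration:

#include <type_traits>

template <typename T>
constexpr char TypeChar() {
  return std::is_floating_point_v<T>
             ? 'f'
             : (std::is_integral_v<T> ? (std::is_signed_v<T> ? 'i' : 'u') : '\0');
}

static_assert(TypeChar<float>() == 'f');     // floating point
static_assert(TypeChar<int>() == 'i');       // signed integer
static_assert(TypeChar<unsigned>() == 'u');  // unsigned integer
static_assert(TypeChar<void*>() == '\0');    // anything else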

View File

@ -55,7 +55,7 @@ class FieldEntry<EnumClass> : public FieldEntry<int> { \
public: \
FieldEntry() { \
static_assert( \
std::is_same<int, typename std::underlying_type<EnumClass>::type>::value, \
std::is_same_v<int, typename std::underlying_type_t<EnumClass>>, \
"enum class must be backed by int"); \
is_enum_ = true; \
} \

View File

@ -1,5 +1,5 @@
/**
* Copyright 2018-2023, XGBoost contributors
* Copyright 2018-2024, XGBoost contributors
* \brief span class based on ISO++20 span
*
* About NOLINTs in this file:
@ -129,9 +129,8 @@ namespace detail {
* represent ptrdiff_t, which is just int64_t. So we make it deterministic
* here.
*/
using ptrdiff_t = typename std::conditional< // NOLINT
std::is_same<std::ptrdiff_t, std::int64_t>::value,
std::ptrdiff_t, std::int64_t>::type;
using ptrdiff_t = typename std::conditional_t< // NOLINT
std::is_same_v<std::ptrdiff_t, std::int64_t>, std::ptrdiff_t, std::int64_t>;
} // namespace detail
#if defined(_MSC_VER) && _MSC_VER < 1910
@ -169,8 +168,8 @@ class SpanIterator {
span_(_span), index_(_idx) {}
friend SpanIterator<SpanType, true>;
template <bool B, typename std::enable_if<!B && IsConst>::type* = nullptr>
XGBOOST_DEVICE constexpr SpanIterator( // NOLINT
template <bool B, typename std::enable_if_t<!B && IsConst>* = nullptr>
XGBOOST_DEVICE constexpr SpanIterator( // NOLINT
const SpanIterator<SpanType, B>& other_) __span_noexcept
: SpanIterator(other_.span_, other_.index_) {}
@ -303,8 +302,8 @@ struct IsAllowedExtentConversion : public std::integral_constant<
bool, From == To || From == dynamic_extent || To == dynamic_extent> {};
template <class From, class To>
struct IsAllowedElementTypeConversion : public std::integral_constant<
bool, std::is_convertible<From(*)[], To(*)[]>::value> {};
struct IsAllowedElementTypeConversion
: public std::integral_constant<bool, std::is_convertible_v<From (*)[], To (*)[]>> {}; // NOLINT
template <class T>
struct IsSpanOracle : std::false_type {};
@ -313,7 +312,7 @@ template <class T, std::size_t Extent>
struct IsSpanOracle<Span<T, Extent>> : std::true_type {};
template <class T>
struct IsSpan : public IsSpanOracle<typename std::remove_cv<T>::type> {};
struct IsSpan : public IsSpanOracle<typename std::remove_cv_t<T>> {};
// Re-implement std algorithms here to adopt CUDA.
template <typename T>
@ -452,35 +451,34 @@ class Span {
__span_noexcept : size_(N), data_(&arr[0]) {}
template <class Container,
class = typename std::enable_if<
!std::is_const<element_type>::value &&
!detail::IsSpan<Container>::value &&
std::is_convertible<typename Container::pointer, pointer>::value &&
std::is_convertible<typename Container::pointer,
decltype(std::declval<Container>().data())>::value>::type>
Span(Container& _cont) : // NOLINT
size_(_cont.size()), data_(_cont.data()) {
class = typename std::enable_if_t<
!std::is_const_v<element_type> && !detail::IsSpan<Container>::value &&
std::is_convertible_v<typename Container::pointer, pointer> &&
std::is_convertible_v<typename Container::pointer,
decltype(std::declval<Container>().data())>>>
Span(Container& _cont) // NOLINT
: size_(_cont.size()), data_(_cont.data()) {
static_assert(!detail::IsSpan<Container>::value, "Wrong constructor of Span is called.");
}
template <class Container,
class = typename std::enable_if<
std::is_const<element_type>::value &&
!detail::IsSpan<Container>::value &&
std::is_convertible<typename Container::pointer, pointer>::value &&
std::is_convertible<typename Container::pointer,
decltype(std::declval<Container>().data())>::value>::type>
Span(const Container& _cont) : size_(_cont.size()), // NOLINT
data_(_cont.data()) {
class = typename std::enable_if_t<
std::is_const_v<element_type> && !detail::IsSpan<Container>::value &&
std::is_convertible_v<typename Container::pointer, pointer> &&
std::is_convertible_v<typename Container::pointer,
decltype(std::declval<Container>().data())>>>
Span(const Container& _cont) // NOLINT
: size_(_cont.size()), data_(_cont.data()) {
static_assert(!detail::IsSpan<Container>::value, "Wrong constructor of Span is called.");
}
template <class U, std::size_t OtherExtent,
class = typename std::enable_if<
detail::IsAllowedElementTypeConversion<U, T>::value &&
detail::IsAllowedExtentConversion<OtherExtent, Extent>::value>>
XGBOOST_DEVICE constexpr Span(const Span<U, OtherExtent>& _other) // NOLINT
__span_noexcept : size_(_other.size()), data_(_other.data()) {}
class = typename std::enable_if_t<
detail::IsAllowedElementTypeConversion<U, T>::value &&
detail::IsAllowedExtentConversion<OtherExtent, Extent>::value>>
XGBOOST_DEVICE constexpr Span(const Span<U, OtherExtent>& _other) // NOLINT
__span_noexcept : size_(_other.size()),
data_(_other.data()) {}
XGBOOST_DEVICE constexpr Span(const Span& _other)
__span_noexcept : size_(_other.size()), data_(_other.data()) {}
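
These constructor constraints are what drop polymorphism support (see the test_span change further below): element conversion is allowed only when From(*)[] converts to To(*)[], which admits qualification conversions such as T to T const but rejects derived-to-base. The rule in isolation:

#include <type_traits>

struct Base {};
struct Derived : Base {};

// Qualification conversion: Span<int> -> Span<int const> remains legal.
static_assert(std::is_convertible_v<int (*)[], int const (*)[]>);
// No derived-to-base conversion for array types, so Span<Derived> -> Span<Base>
// is now rejected at compile time.
static_assert(!std::is_convertible_v<Derived (*)[], Base (*)[]>);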

View File

@ -82,7 +82,7 @@ class AllreduceFunctor {
}
private:
template <class T, std::enable_if_t<std::is_integral<T>::value>* = nullptr>
template <class T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
void AccumulateBitwise(T* buffer, T const* input, std::size_t size, Op reduce_operation) const {
switch (reduce_operation) {
case Op::kBitwiseAND:
@ -99,7 +99,7 @@ class AllreduceFunctor {
}
}
template <class T, std::enable_if_t<std::is_floating_point<T>::value>* = nullptr>
template <class T, std::enable_if_t<std::is_floating_point_v<T>>* = nullptr>
void AccumulateBitwise(T*, T const*, std::size_t, Op) const {
LOG(FATAL) << "Floating point types do not support bitwise operations.";
}
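
The pair of enable_if_t overloads routes integral types to the bitwise implementation and turns floating-point misuse into a diagnostic. A self-contained sketch of the idiom (names are illustrative, and std::cerr stands in for the LOG(FATAL) call in the real code):

#include <cstddef>
#include <iostream>
#include <type_traits>

template <class T, std::enable_if_t<std::is_integral_v<T>>* = nullptr>
void AccumulateAnd(T* buf, T const* in, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i) buf[i] &= in[i];  // bitwise AND per element
}

template <class T, std::enable_if_t<std::is_floating_point_v<T>>* = nullptr>
void AccumulateAnd(T*, T const*, std::size_t) {
  std::cerr << "Floating point types do not support bitwise operations.\n";
}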

View File

@ -81,8 +81,8 @@ struct AtomicDispatcher<sizeof(uint64_t)> {
// atomicAdd is not defined for size_t.
template <typename T = size_t,
std::enable_if_t<std::is_same<size_t, T>::value &&
!std::is_same<size_t, unsigned long long>::value> * = // NOLINT
std::enable_if_t<std::is_same_v<size_t, T> &&
!std::is_same_v<size_t, unsigned long long>> * = // NOLINT
nullptr>
XGBOOST_DEV_INLINE T atomicAdd(T *addr, T v) { // NOLINT
using Type = typename dh::detail::AtomicDispatcher<sizeof(T)>::Type;
@ -381,7 +381,7 @@ void CopyTo(Src const &src, Dst *dst) {
dst->resize(src.size());
using SVT = std::remove_cv_t<typename Src::value_type>;
using DVT = std::remove_cv_t<typename Dst::value_type>;
static_assert(std::is_same<SVT, DVT>::value,
static_assert(std::is_same_v<SVT, DVT>,
"Host and device containers must have same value type.");
dh::safe_cuda(cudaMemcpyAsync(thrust::raw_pointer_cast(dst->data()), src.data(),
src.size() * sizeof(SVT), cudaMemcpyDefault));

View File

@ -224,11 +224,11 @@ void JsonArray::Save(JsonWriter* writer) const { writer->Visit(this); }
namespace {
// error C2668: 'fpclassify': ambiguous call to overloaded function
template <typename T>
std::enable_if_t<std::is_floating_point<T>::value, bool> IsInfMSVCWar(T v) {
std::enable_if_t<std::is_floating_point_v<T>, bool> IsInfMSVCWar(T v) {
return std::isinf(v);
}
template <typename T>
std::enable_if_t<std::is_integral<T>::value, bool> IsInfMSVCWar(T) {
std::enable_if_t<std::is_integral_v<T>, bool> IsInfMSVCWar(T) {
return false;
}
} // namespace
@ -247,7 +247,7 @@ bool JsonTypedArray<T, kind>::operator==(Value const& rhs) const {
if (vec_.size() != arr.size()) {
return false;
}
if (std::is_same<float, T>::value) {
if (std::is_same_v<float, T>) {
for (size_t i = 0; i < vec_.size(); ++i) {
bool equal{false};
if (common::CheckNAN(vec_[i])) {
@ -693,10 +693,10 @@ void Json::Dump(Json json, JsonWriter* writer) {
writer->Save(json);
}
static_assert(std::is_nothrow_move_constructible<Json>::value);
static_assert(std::is_nothrow_move_constructible<Object>::value);
static_assert(std::is_nothrow_move_constructible<Array>::value);
static_assert(std::is_nothrow_move_constructible<String>::value);
static_assert(std::is_nothrow_move_constructible_v<Json>);
static_assert(std::is_nothrow_move_constructible_v<Object>);
static_assert(std::is_nothrow_move_constructible_v<Array>);
static_assert(std::is_nothrow_move_constructible_v<String>);
Json UBJReader::ParseArray() {
auto marker = PeekNextChar();
@ -887,17 +887,17 @@ template <typename T, Value::ValueKind kind>
void WriteTypedArray(JsonTypedArray<T, kind> const* arr, std::vector<char>* stream) {
stream->emplace_back('[');
stream->push_back('$');
if (std::is_same<T, float>::value) {
if (std::is_same_v<T, float>) {
stream->push_back('d');
} else if (std::is_same_v<T, double>) {
stream->push_back('D');
} else if (std::is_same<T, int8_t>::value) {
} else if (std::is_same_v<T, int8_t>) {
stream->push_back('i');
} else if (std::is_same<T, uint8_t>::value) {
} else if (std::is_same_v<T, uint8_t>) {
stream->push_back('U');
} else if (std::is_same<T, int32_t>::value) {
} else if (std::is_same_v<T, int32_t>) {
stream->push_back('l');
} else if (std::is_same<T, int64_t>::value) {
} else if (std::is_same_v<T, int64_t>) {
stream->push_back('L');
} else {
LOG(FATAL) << "Not implemented";

View File

@ -12,7 +12,7 @@
#include <algorithm> // for max
#include <cmath> // for exp, abs, log, lgamma
#include <limits> // for numeric_limits
#include <type_traits> // for is_floating_point, conditional, is_signed, is_same, declval, enable_if
#include <type_traits> // for is_floating_point_v, conditional, is_signed, is_same, declval
#include <utility> // for pair
namespace xgboost {
@ -43,15 +43,11 @@ XGBOOST_DEVICE inline double Sigmoid(double x) {
*/
template <typename T, typename U>
XGBOOST_DEVICE constexpr bool CloseTo(T a, U b) {
using Casted =
typename std::conditional<
std::is_floating_point<T>::value || std::is_floating_point<U>::value,
double,
typename std::conditional<
std::is_signed<T>::value || std::is_signed<U>::value,
int64_t,
uint64_t>::type>::type;
return std::is_floating_point<Casted>::value ?
using Casted = typename std::conditional_t<
std::is_floating_point_v<T> || std::is_floating_point_v<U>, double,
typename std::conditional_t<std::is_signed_v<T> || std::is_signed_v<U>, std::int64_t,
std::uint64_t>>;
return std::is_floating_point_v<Casted> ?
std::abs(static_cast<Casted>(a) - static_cast<Casted>(b)) < 1e-6 : a == b;
}
@ -65,11 +61,10 @@ XGBOOST_DEVICE constexpr bool CloseTo(T a, U b) {
*/
template <typename Iterator>
XGBOOST_DEVICE inline void Softmax(Iterator start, Iterator end) {
static_assert(std::is_same<bst_float,
typename std::remove_reference<
decltype(std::declval<Iterator>().operator*())>::type
>::value,
"Values should be of type bst_float");
static_assert(
std::is_same_v<
float, typename std::remove_reference_t<decltype(std::declval<Iterator>().operator*())>>,
"Values should be of type bst_float");
bst_float wmax = *start;
for (Iterator i = start+1; i != end; ++i) {
wmax = fmaxf(*i, wmax);
@ -137,9 +132,7 @@ inline float LogSum(Iterator begin, Iterator end) {
// Redefined here to workaround a VC bug that doesn't support overloading for integer
// types.
template <typename T>
XGBOOST_DEVICE typename std::enable_if<
std::numeric_limits<T>::is_integer, bool>::type
CheckNAN(T) {
XGBOOST_DEVICE typename std::enable_if_t<std::numeric_limits<T>::is_integer, bool> CheckNAN(T) {
return false;
}
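
The reworked CloseTo picks a common comparison type with nested conditional_t: double if either operand is floating point, otherwise int64_t if either is signed, otherwise uint64_t. A standalone restatement (Fabs replaces std::abs, which is not constexpr before C++23, so the assertions can run at compile time):

#include <cstdint>
#include <type_traits>

constexpr double Fabs(double v) { return v < 0 ? -v : v; }

template <typename T, typename U>
constexpr bool CloseTo(T a, U b) {
  using Casted = std::conditional_t<
      std::is_floating_point_v<T> || std::is_floating_point_v<U>, double,
      std::conditional_t<std::is_signed_v<T> || std::is_signed_v<U>,
                         std::int64_t, std::uint64_t>>;
  return std::is_floating_point_v<Casted>
             ? Fabs(static_cast<Casted>(a) - static_cast<Casted>(b)) < 1e-6
             : a == b;
}

static_assert(CloseTo(1.0, 1.0 + 1e-7));     // inside the 1e-6 tolerance
static_assert(!CloseTo(1.0, 1.002));         // outside it
static_assert(CloseTo(3, std::int64_t{3}));  // integers compare exactly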

View File

@ -1,9 +1,9 @@
/*!
* Copyright 2022 by XGBoost Contributors
/**
* Copyright 2022-2024, XGBoost Contributors
*/
#include "numeric.h"
#include <type_traits> // std::is_same
#include <type_traits> // std::is_same_v
#include "xgboost/context.h" // Context
#include "xgboost/host_device_vector.h" // HostDeviceVector
@ -16,7 +16,7 @@ double Reduce(Context const* ctx, HostDeviceVector<float> const& values) {
} else {
auto const& h_values = values.ConstHostVector();
auto result = cpu_impl::Reduce(ctx, h_values.cbegin(), h_values.cend(), 0.0);
static_assert(std::is_same<decltype(result), double>::value);
static_assert(std::is_same_v<decltype(result), double>);
return result;
}
}

View File

@ -1,17 +1,18 @@
/**
* Copyright 2022-2023 by XGBoost contributors.
* Copyright 2022-2024, XGBoost contributors.
*/
#ifndef XGBOOST_COMMON_NUMERIC_H_
#define XGBOOST_COMMON_NUMERIC_H_
#include <dmlc/common.h> // OMPException
#include <algorithm> // for std::max
#include <cstddef> // for size_t
#include <cstdint> // for int32_t
#include <iterator> // for iterator_traits
#include <numeric> // for accumulate
#include <vector>
#include <algorithm> // for max
#include <cstddef> // for size_t
#include <cstdint> // for int32_t
#include <iterator> // for iterator_traits
#include <numeric> // for accumulate
#include <type_traits> // for is_same_v
#include <vector> // for vector
#include "common.h" // AssertGPUSupport
#include "threading_utils.h" // MemStackAllocator, DefaultMaxThreads
@ -44,8 +45,8 @@ void RunLengthEncode(Iter begin, Iter end, std::vector<Idx>* p_out) {
*/
template <typename InIt, typename OutIt, typename T>
void PartialSum(int32_t n_threads, InIt begin, InIt end, T init, OutIt out_it) {
static_assert(std::is_same<T, typename std::iterator_traits<InIt>::value_type>::value);
static_assert(std::is_same<T, typename std::iterator_traits<OutIt>::value_type>::value);
static_assert(std::is_same_v<T, typename std::iterator_traits<InIt>::value_type>);
static_assert(std::is_same_v<T, typename std::iterator_traits<OutIt>::value_type>);
// The number of threads is pegged to the batch size. If the OMP block is parallelized
// on anything other than the batch/block size, it should be reassigned
auto n = static_cast<size_t>(std::distance(begin, end));

View File

@ -105,9 +105,9 @@ class TrainingObserver {
/*\brief Observe objects with `XGBoostParameter' type. */
template <typename Parameter,
typename std::enable_if<
std::is_base_of<XGBoostParameter<Parameter>, Parameter>::value>::type* = nullptr>
void Observe(const Parameter &p, std::string name) const {
typename std::enable_if_t<std::is_base_of_v<XGBoostParameter<Parameter>, Parameter>>* =
nullptr>
void Observe(const Parameter& p, std::string name) const {
if (XGBOOST_EXPECT(!kObserve, true)) { return; }
Json obj {toJson(p)};

View File

@ -8,8 +8,9 @@
#include <thrust/transform_scan.h>
#include <thrust/unique.h>
#include <limits> // std::numeric_limits
#include <numeric> // for partial_sum
#include <limits> // for numeric_limits
#include <numeric> // for partial_sum
#include <type_traits> // for is_same_v
#include <utility>
#include "../collective/allgather.h"
@ -108,7 +109,7 @@ void PruneImpl(common::Span<SketchContainer::OffsetT const> cuts_ptr,
template <typename T, typename U>
void CopyTo(Span<T> out, Span<U> src) {
CHECK_EQ(out.size(), src.size());
static_assert(std::is_same<std::remove_cv_t<T>, std::remove_cv_t<T>>::value);
static_assert(std::is_same_v<std::remove_cv_t<T>, std::remove_cv_t<U>>);
dh::safe_cuda(cudaMemcpyAsync(out.data(), src.data(),
out.size_bytes(),
cudaMemcpyDefault));

View File

@ -15,7 +15,7 @@
#include <cstddef> // std::size_t
#include <iterator> // std::distance
#include <limits> // std::numeric_limits
#include <type_traits> // std::is_floating_point,std::iterator_traits
#include <type_traits> // std::is_floating_point_v,std::iterator_traits
#include "algorithm.cuh" // SegmentedArgMergeSort
#include "cuda_context.cuh" // CUDAContext
@ -37,9 +37,9 @@ struct QuantileSegmentOp {
AlphaIt alpha_it;
Span<float> d_results;
static_assert(std::is_floating_point<typename std::iterator_traits<ValIt>::value_type>::value,
static_assert(std::is_floating_point_v<typename std::iterator_traits<ValIt>::value_type>,
"Invalid value for quantile.");
static_assert(std::is_floating_point<typename std::iterator_traits<ValIt>::value_type>::value,
static_assert(std::is_floating_point_v<typename std::iterator_traits<AlphaIt>::value_type>,
"Invalid alpha.");
XGBOOST_DEVICE void operator()(std::size_t seg_idx) {
@ -102,9 +102,9 @@ struct WeightedQuantileSegOp {
Span<float const> d_weight_cdf;
Span<std::size_t const> d_sorted_idx;
Span<float> d_results;
static_assert(std::is_floating_point<typename std::iterator_traits<AlphaIt>::value_type>::value,
static_assert(std::is_floating_point_v<typename std::iterator_traits<AlphaIt>::value_type>,
"Invalid alpha.");
static_assert(std::is_floating_point<typename std::iterator_traits<ValIt>::value_type>::value,
static_assert(std::is_floating_point_v<typename std::iterator_traits<ValIt>::value_type>,
"Invalid value for quantile.");
XGBOOST_DEVICE void operator()(std::size_t seg_idx) {
@ -146,7 +146,7 @@ auto MakeWQSegOp(SegIt seg_it, ValIt val_it, AlphaIt alpha_it, Span<float const>
* std::distance(seg_begin, seg_end) should be equal to n_segments + 1
*/
template <typename SegIt, typename ValIt, typename AlphaIt,
std::enable_if_t<!std::is_floating_point<AlphaIt>::value>* = nullptr>
std::enable_if_t<!std::is_floating_point_v<AlphaIt>>* = nullptr>
void SegmentedQuantile(Context const* ctx, AlphaIt alpha_it, SegIt seg_begin, SegIt seg_end,
ValIt val_begin, ValIt val_end, HostDeviceVector<float>* quantiles) {
dh::device_vector<std::size_t> sorted_idx;
@ -197,8 +197,8 @@ void SegmentedQuantile(Context const* ctx, double alpha, SegIt seg_begin, SegIt
* @param w_begin Iterator for weight for each input element
*/
template <typename SegIt, typename ValIt, typename AlphaIt, typename WIter,
typename std::enable_if_t<!std::is_same<
typename std::iterator_traits<AlphaIt>::value_type, void>::value>* = nullptr>
typename std::enable_if_t<
!std::is_same_v<typename std::iterator_traits<AlphaIt>::value_type, void>>* = nullptr>
void SegmentedWeightedQuantile(Context const* ctx, AlphaIt alpha_it, SegIt seg_beg, SegIt seg_end,
ValIt val_begin, ValIt val_end, WIter w_begin, WIter w_end,
HostDeviceVector<float>* quantiles) {

View File

@ -49,7 +49,7 @@ float Quantile(Context const* ctx, double alpha, Iter const& begin, Iter const&
}
auto val = [&](size_t i) { return *(begin + sorted_idx[i]); };
static_assert(std::is_same<decltype(val(0)), float>::value);
static_assert(std::is_same_v<decltype(val(0)), float>);
if (alpha <= (1 / (n + 1))) {
return val(0);

View File

@ -128,7 +128,7 @@ class Transform {
}
#if defined(__CUDACC__)
template <typename std::enable_if<CompiledWithCuda>::type* = nullptr,
template <typename std::enable_if_t<CompiledWithCuda>* = nullptr,
typename... HDV>
void LaunchCUDA(Functor _func, HDV*... _vectors) const {
UnpackShard(device_, _vectors...);
@ -151,9 +151,8 @@ class Transform {
}
#else
/*! \brief Dummy function defined when compiling for CPU. */
template <typename std::enable_if<!CompiledWithCuda>::type* = nullptr,
typename... HDV>
void LaunchCUDA(Functor _func, HDV*...) const {
template <typename std::enable_if_t<!CompiledWithCuda> * = nullptr, typename... HDV>
void LaunchCUDA(Functor _func, HDV *...) const {
// Remove unused parameter compiler warning.
(void) _func;

View File

@ -12,7 +12,7 @@
#include <limits> // for numeric_limits
#include <map> // for map
#include <string> // for string
#include <type_traits> // for alignment_of, remove_pointer_t, invoke_result_t
#include <type_traits> // for alignment_of_v, remove_pointer_t, invoke_result_t
#include <vector> // for vector
#include "../common/bitfield.h" // for RBitField8
@ -334,7 +334,7 @@ struct ToDType<double> {
};
template <typename T>
struct ToDType<T,
std::enable_if_t<std::is_same<T, long double>::value && sizeof(long double) == 16>> {
std::enable_if_t<std::is_same_v<T, long double> && sizeof(long double) == 16>> {
static constexpr ArrayInterfaceHandler::Type kType = ArrayInterfaceHandler::kF16;
};
// uint
@ -555,7 +555,7 @@ class ArrayInterface {
}
[[nodiscard]] XGBOOST_DEVICE std::size_t ElementAlignment() const {
return this->DispatchCall([](auto *typed_data_ptr) {
return std::alignment_of<std::remove_pointer_t<decltype(typed_data_ptr)>>::value;
return std::alignment_of_v<std::remove_pointer_t<decltype(typed_data_ptr)>>;
});
}
@ -567,9 +567,8 @@ class ArrayInterface {
#if defined(XGBOOST_USE_CUDA)
// No operator defined for half -> size_t
using Type = std::conditional_t<
std::is_same<__half,
std::remove_cv_t<std::remove_pointer_t<decltype(p_values)>>>::value &&
std::is_same<std::size_t, std::remove_cv_t<T>>::value,
std::is_same_v<__half, std::remove_cv_t<std::remove_pointer_t<decltype(p_values)>>> &&
std::is_same_v<std::size_t, std::remove_cv_t<T>>,
unsigned long long, T>; // NOLINT
return static_cast<T>(static_cast<Type>(p_values[offset]));
#else

View File

@ -294,16 +294,14 @@ SimpleDMatrix::SimpleDMatrix(AdapterT* adapter, float missing, int nthread,
IteratorAdapter<DataIterHandle, XGBCallbackDataIterNext, XGBoostBatchCSR>;
// If AdapterT is either IteratorAdapter or FileAdapter type, use the total batch size to
// determine the correct number of rows, as offset_vec may be too short
if (std::is_same<AdapterT, IteratorAdapterT>::value ||
std::is_same<AdapterT, FileAdapter>::value) {
if (std::is_same_v<AdapterT, IteratorAdapterT> || std::is_same_v<AdapterT, FileAdapter>) {
info_.num_row_ = total_batch_size;
// Ensure offset_vec.size() - 1 == [number of rows]
while (offset_vec.size() - 1 < total_batch_size) {
offset_vec.emplace_back(offset_vec.back());
}
} else {
CHECK((std::is_same<AdapterT, CSCAdapter>::value ||
std::is_same<AdapterT, CSCArrayAdapter>::value))
CHECK((std::is_same_v<AdapterT, CSCAdapter> || std::is_same_v<AdapterT, CSCArrayAdapter>))
<< "Expecting CSCAdapter";
info_.num_row_ = offset_vec.size() - 1;
}

View File

@ -344,7 +344,7 @@ class LambdaRankNDCG : public LambdaRankObj<LambdaRankNDCG, ltr::NDCGCache> {
common::Span<double const> discount, bst_group_t g) {
auto delta = [&](auto y_high, auto y_low, std::size_t rank_high, std::size_t rank_low,
bst_group_t g) {
static_assert(std::is_floating_point<decltype(y_high)>::value);
static_assert(std::is_floating_point_v<decltype(y_high)>);
return DeltaNDCG<exp_gain>(y_high, y_low, rank_high, rank_low, inv_IDCG(g), discount);
};
this->CalcLambdaForGroup<unbiased>(iter, g_predt, g_label, w, g_rank, g, delta, g_gpair);

View File

@ -1,5 +1,5 @@
/**
* Copyright 2019-2023, XGBoost contributors
* Copyright 2019-2024, XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/device_vector.h>

View File

@ -41,7 +41,7 @@ XGBOOST_DEVICE float LossChangeMissing(const GradientPairInt64 &scan,
template <int kBlockSize>
class EvaluateSplitAgent {
public:
using ArgMaxT = cub::KeyValuePair<int, float>;
using ArgMaxT = cub::KeyValuePair<std::uint32_t, float>;
using BlockScanT = cub::BlockScan<GradientPairInt64, kBlockSize>;
using MaxReduceT = cub::WarpReduce<ArgMaxT>;
using SumReduceT = cub::WarpReduce<GradientPairInt64>;

View File

@ -1,10 +1,10 @@
/**
* Copyright 2023 by XGBoost Contributors
* Copyright 2023-2024, XGBoost Contributors
*/
#ifndef XGBOOST_TREE_IO_UTILS_H_
#define XGBOOST_TREE_IO_UTILS_H_
#include <string> // for string
#include <type_traits> // for enable_if_t, is_same, conditional_t
#include <type_traits> // for enable_if_t, is_same_v, conditional_t
#include <vector> // for vector
#include "xgboost/json.h" // for Json
@ -23,26 +23,24 @@ using IndexArrayT = std::conditional_t<feature_is_64, I64ArrayT<typed>, I32Array
// typed array, not boolean
template <typename JT, typename T>
std::enable_if_t<!std::is_same<T, Json>::value && !std::is_same<JT, Boolean>::value, T> GetElem(
std::enable_if_t<!std::is_same_v<T, Json> && !std::is_same_v<JT, Boolean>, T> GetElem(
std::vector<T> const& arr, size_t i) {
return arr[i];
}
// typed array boolean
template <typename JT, typename T>
std::enable_if_t<!std::is_same<T, Json>::value && std::is_same<T, uint8_t>::value &&
std::is_same<JT, Boolean>::value,
bool>
std::enable_if_t<
!std::is_same_v<T, Json> && std::is_same_v<T, uint8_t> && std::is_same_v<JT, Boolean>, bool>
GetElem(std::vector<T> const& arr, size_t i) {
return arr[i] == 1;
}
// json array
template <typename JT, typename T>
std::enable_if_t<
std::is_same<T, Json>::value,
std::conditional_t<std::is_same<JT, Integer>::value, int64_t,
std::conditional_t<std::is_same<Boolean, JT>::value, bool, float>>>
std::enable_if_t<std::is_same_v<T, Json>,
std::conditional_t<std::is_same_v<JT, Integer>, int64_t,
std::conditional_t<std::is_same_v<Boolean, JT>, bool, float>>>
GetElem(std::vector<T> const& arr, size_t i) {
if (std::is_same<JT, Boolean>::value && !IsA<Boolean>(arr[i])) {
if (std::is_same_v<JT, Boolean> && !IsA<Boolean>(arr[i])) {
return get<Integer const>(arr[i]) == 1;
}
return get<JT const>(arr[i]);
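
The last overload computes its return type from the tag: a conditional_t chain maps the JSON tag to the C++ type the element decodes to. The mapping in isolation (the tag structs are stand-ins for the real Integer/Boolean/Number types):

#include <cstdint>
#include <type_traits>

struct IntegerTag {};
struct BooleanTag {};
struct NumberTag {};

template <typename JT>
using DecodedT = std::conditional_t<
    std::is_same_v<JT, IntegerTag>, std::int64_t,
    std::conditional_t<std::is_same_v<JT, BooleanTag>, bool, float>>;

static_assert(std::is_same_v<DecodedT<IntegerTag>, std::int64_t>);
static_assert(std::is_same_v<DecodedT<BooleanTag>, bool>);
static_assert(std::is_same_v<DecodedT<NumberTag>, float>);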

View File

@ -12,7 +12,7 @@
#include <iomanip>
#include <limits>
#include <sstream>
#include <type_traits>
#include <type_traits> // for is_floating_point_v
#include "../common/categorical.h" // for GetNodeCats
#include "../common/common.h" // for EscapeU8
@ -35,7 +35,7 @@ namespace {
template <typename Float>
std::enable_if_t<std::is_floating_point_v<Float>, std::string> ToStr(Float value) {
int32_t constexpr kFloatMaxPrecision = std::numeric_limits<float>::max_digits10;
static_assert(std::is_floating_point<Float>::value,
static_assert(std::is_floating_point_v<Float>,
"Use std::to_string instead for non-floating point values.");
std::stringstream ss;
ss << std::setprecision(kFloatMaxPrecision) << value;
@ -45,7 +45,7 @@ std::enable_if_t<std::is_floating_point_v<Float>, std::string> ToStr(Float value
template <typename Float>
std::string ToStr(linalg::VectorView<Float> value, bst_target_t limit) {
int32_t constexpr kFloatMaxPrecision = std::numeric_limits<float>::max_digits10;
static_assert(std::is_floating_point<Float>::value,
static_assert(std::is_floating_point_v<Float>,
"Use std::to_string instead for non-floating point values.");
std::stringstream ss;
ss << std::setprecision(kFloatMaxPrecision);
@ -1091,8 +1091,8 @@ void LoadModelImpl(Json const& in, TreeParam const& param, std::vector<RTreeNode
stats = std::remove_reference_t<decltype(stats)>(n_nodes);
nodes = std::remove_reference_t<decltype(nodes)>(n_nodes);
static_assert(std::is_integral<decltype(GetElem<Integer>(lefts, 0))>::value);
static_assert(std::is_floating_point<decltype(GetElem<Number>(loss_changes, 0))>::value);
static_assert(std::is_integral_v<decltype(GetElem<Integer>(lefts, 0))>);
static_assert(std::is_floating_point_v<decltype(GetElem<Number>(loss_changes, 0))>);
// Set node
for (int32_t i = 0; i < n_nodes; ++i) {

View File

@ -11,20 +11,28 @@ RUN \
apt-get update && \
apt-get install -y wget git python3 python3-pip software-properties-common \
apt-transport-https ca-certificates gnupg-agent && \
apt-get install -y llvm-15 clang-tidy-15 clang-15 libomp-15-dev && \
apt-get install -y cmake
apt-get install -y ninja-build
# Install clang-tidy: https://apt.llvm.org/
RUN \
apt-add-repository "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-19 main" && \
wget -O llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key && \
apt-key add ./llvm-snapshot.gpg.key && \
rm llvm-snapshot.gpg.key && \
apt-get update && \
apt-get install -y clang-tidy-19 clang-19 libomp-19-dev
# Set default clang-tidy version
RUN \
update-alternatives --install /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-15 100 && \
update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 100
update-alternatives --install /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-19 100 && \
update-alternatives --install /usr/bin/clang clang /usr/bin/clang-19 100
RUN \
apt-get install libgtest-dev libgmock-dev -y
# Install Python packages
RUN \
pip3 install pyyaml
pip3 install cmake
ENV GOSU_VERSION=1.10

View File

@ -16,6 +16,8 @@ class LintersPaths:
BLACK = (
# core
"python-package/",
# CI
"tests/ci_build/tidy.py",
# tests
"tests/python/test_config.py",
"tests/python/test_callback.py",
@ -119,6 +121,7 @@ class LintersPaths:
"demo/guide-python/learning_to_rank.py",
"demo/aft_survival/aft_survival_viz_demo.py",
# CI
"tests/ci_build/tidy.py",
"tests/ci_build/lint_python.py",
"tests/ci_build/test_r_package.py",
"tests/ci_build/test_utils.py",

View File

@ -1,4 +1,6 @@
#!/usr/bin/env python
from __future__ import annotations
import argparse
import json
import os
@ -9,20 +11,17 @@ import sys
from multiprocessing import Pool, cpu_count
from time import time
import yaml
def call(args: list[str]) -> tuple[int, int, str, list[str]]:
"""Subprocess run wrapper."""
completed = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
error_msg = completed.stdout.decode("utf-8")
# `workspace` is a name used in the CI container. Normally we should keep the dir
# as `xgboost`.
matched = re.search(
"(workspace|xgboost)/.*(src|tests|include)/.*warning:", error_msg, re.MULTILINE
)
def call(args):
'''Subprocess run wrapper.'''
completed = subprocess.run(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
error_msg = completed.stdout.decode('utf-8')
# `workspace` is a name used in Jenkins CI. Normally we should keep the
# dir as `xgboost`.
matched = re.search('(workspace|xgboost)/.*(src|tests|include)/.*warning:',
error_msg,
re.MULTILINE)
if matched is None:
return_code = 0
else:
@ -30,195 +29,203 @@ def call(args):
return (completed.returncode, return_code, error_msg, args)
class ClangTidy(object):
''' clang tidy wrapper.
class ClangTidy:
"""clang tidy wrapper.
Args:
args: Command line arguments.
cpp_lint: Run linter on C++ source code.
cuda_lint: Run linter on CUDA source code.
use_dmlc_gtest: Whether to use gtest bundled in dmlc-core.
'''
def __init__(self, args):
"""
def __init__(self, args: argparse.Namespace) -> None:
self.cpp_lint = args.cpp
self.cuda_lint = args.cuda
self.use_dmlc_gtest: bool = args.use_dmlc_gtest
self.cuda_archs = args.cuda_archs.copy() if args.cuda_archs else []
if args.tidy_version:
self.exe = 'clang-tidy-' + str(args.tidy_version)
self.exe = "clang-tidy-" + str(args.tidy_version)
else:
self.exe = 'clang-tidy'
self.exe = "clang-tidy"
print('Run linter on CUDA: ', self.cuda_lint)
print('Run linter on C++:', self.cpp_lint)
print('Use dmlc gtest:', self.use_dmlc_gtest)
print('CUDA archs:', ' '.join(self.cuda_archs))
print("Run linter on CUDA: ", self.cuda_lint)
print("Run linter on C++:", self.cpp_lint)
print("Use dmlc gtest:", self.use_dmlc_gtest)
print("CUDA archs:", " ".join(self.cuda_archs))
if not self.cpp_lint and not self.cuda_lint:
raise ValueError('Both --cpp and --cuda are set to 0.')
raise ValueError("Both --cpp and --cuda are set to 0.")
self.root_path = os.path.abspath(os.path.curdir)
print('Project root:', self.root_path)
self.cdb_path = os.path.join(self.root_path, 'cdb')
print("Project root:", self.root_path)
self.cdb_path = os.path.join(self.root_path, "cdb")
def __enter__(self):
def __enter__(self) -> "ClangTidy":
self.start = time()
if os.path.exists(self.cdb_path):
shutil.rmtree(self.cdb_path)
self._generate_cdb()
return self
def __exit__(self, *args):
def __exit__(self, *args: list) -> None:
if os.path.exists(self.cdb_path):
shutil.rmtree(self.cdb_path)
self.end = time()
print('Finish running clang-tidy:', self.end - self.start)
print("Finish running clang-tidy:", self.end - self.start)
def _generate_cdb(self):
'''Run CMake to generate compilation database.'''
def _generate_cdb(self) -> None:
"""Run CMake to generate compilation database."""
os.mkdir(self.cdb_path)
os.chdir(self.cdb_path)
cmake_args = ['cmake', '..', '-DCMAKE_EXPORT_COMPILE_COMMANDS=ON',
'-DGOOGLE_TEST=ON']
cmake_args = [
"cmake",
self.root_path,
"-GNinja", # prevents cmake from using --option-files for include path.
"-DCMAKE_EXPORT_COMPILE_COMMANDS=ON",
"-DGOOGLE_TEST=ON",
"-DCMAKE_CXX_FLAGS='-Wno-clang-diagnostic-deprecated-declarations'",
]
if self.use_dmlc_gtest:
cmake_args.append('-DUSE_DMLC_GTEST=ON')
cmake_args.append("-DUSE_DMLC_GTEST=ON")
else:
cmake_args.append('-DUSE_DMLC_GTEST=OFF')
cmake_args.append("-DUSE_DMLC_GTEST=OFF")
if self.cuda_lint:
cmake_args.extend(['-DUSE_CUDA=ON', '-DUSE_NCCL=ON'])
cmake_args.extend(["-DUSE_CUDA=ON", "-DUSE_NCCL=ON"])
if self.cuda_archs:
arch_list = ';'.join(self.cuda_archs)
cmake_args.append(f'-DGPU_COMPUTE_VER={arch_list}')
arch_list = ";".join(self.cuda_archs)
cmake_args.append(f"-DCMAKE_CUDA_ARCHITECTURES={arch_list}")
subprocess.run(cmake_args)
os.chdir(self.root_path)
def convert_nvcc_command_to_clang(self, command):
'''Convert nvcc flags to corresponding clang flags.'''
def convert_nvcc_command_to_clang(self, command: str) -> str:
"""Convert nvcc flags to corresponding clang flags."""
components = command.split()
compiler: str = components[0]
if compiler.find('nvcc') != -1:
compiler = 'clang++'
if compiler.find("nvcc") != -1:
compiler = "clang++"
components[0] = compiler
# check each component in a command
converted_components = [compiler]
for i in range(1, len(components)):
if components[i] == '-lineinfo':
if components[i] == "-lineinfo":
continue
elif components[i] == '-fuse-ld=gold':
elif components[i] == "-fuse-ld=gold":
continue
elif components[i] == '-rdynamic':
elif components[i] == "-fuse-ld=lld":
continue
elif components[i].find("--default-stream") != -1:
continue
elif components[i] == "-rdynamic":
continue
elif components[i] == "-Xfatbin=-compress-all":
continue
elif components[i] == "-forward-unknown-to-host-compiler":
continue
elif (components[i] == '-x' and
components[i+1] == 'cu'):
elif components[i] == "-x" and components[i + 1] == "cu":
# -x cu -> -x cuda
converted_components.append('-x')
converted_components.append('cuda')
components[i+1] = ''
converted_components.append("-x")
converted_components.append("cuda")
components[i + 1] = ""
continue
elif components[i].find('-Xcompiler') != -1:
elif components[i].find("-Xcompiler") != -1:
continue
elif components[i].find('--expt') != -1:
elif components[i].find("--expt-") != -1:
continue
elif components[i].find('-ccbin') != -1:
elif components[i].find("-ccbin") != -1:
continue
elif components[i].find('--generate-code') != -1:
keyword = 'code=sm'
elif components[i].find("--generate-code") != -1:
keyword = "code=sm"
pos = components[i].find(keyword)
capability = components[i][pos + len(keyword) + 1:
pos + len(keyword) + 3]
capability = components[i][
pos + len(keyword) + 1 : pos + len(keyword) + 3
]
if pos != -1:
converted_components.append(
'--cuda-gpu-arch=sm_' + capability)
elif components[i].find('--std=c++14') != -1:
converted_components.append('-std=c++14')
elif components[i].startswith('-isystem='):
converted_components.extend(components[i].split('='))
converted_components.append("--cuda-gpu-arch=sm_" + capability)
elif components[i].find("--std=c++14") != -1:
converted_components.append("-std=c++14")
elif components[i].startswith("-isystem="):
converted_components.extend(components[i].split("="))
else:
converted_components.append(components[i])
converted_components.append('-isystem /usr/local/cuda/include/')
converted_components.append("-isystem /usr/local/cuda/include/")
command = ''
command = ""
for c in converted_components:
command = command + ' ' + c
command = command + " " + c
command = command.strip()
return command
def _configure_flags(self, path, command):
src = os.path.join(self.root_path, 'src')
src = src.replace('/', '\\/')
include = os.path.join(self.root_path, 'include')
include = include.replace('/', '\\/')
def _configure_flags(self, path: str, command: str) -> list[list[str]]:
src = os.path.join(self.root_path, "src").replace("/", "\\/")
include = os.path.join(self.root_path, "include").replace("/", "\\/")
header_filter = '(' + src + '|' + include + ')'
common_args = [self.exe,
"-header-filter=" + header_filter,
'-config='+self.clang_tidy]
common_args.append(path)
common_args.append('--')
header_filter = "(" + src + "|" + include + ")"
common_args = [
self.exe,
path,
"--header-filter=" + header_filter,
"--config-file=" + self.tidy_file,
]
common_args.append("--")
command = self.convert_nvcc_command_to_clang(command)
command = command.split()[1:] # remove clang/c++/g++
if '-c' in command:
index = command.index('-c')
del command[index+1]
command.remove('-c')
if '-o' in command:
index = command.index('-o')
del command[index+1]
command.remove('-o')
command_split = command.split()[1:] # remove clang/c++/g++
if "-c" in command_split:
index = command_split.index("-c")
del command_split[index + 1]
command_split.remove("-c")
if "-o" in command_split:
index = command_split.index("-o")
del command_split[index + 1]
command_split.remove("-o")
common_args.extend(command)
common_args.extend(command_split)
# Two passes, one for device code another for host code.
if path.endswith('cu'):
if path.endswith("cu"):
args = [common_args.copy(), common_args.copy()]
args[0].append('--cuda-host-only')
args[1].append('--cuda-device-only')
args[0].append("--cuda-host-only")
args[1].append("--cuda-device-only")
else:
args = [common_args.copy()]
for a in args:
a.append('-Wno-unused-command-line-argument')
a.append("-Wno-unused-command-line-argument")
return args
def _configure(self):
'''Load and configure compile_commands and clang_tidy.'''
def _configure(self) -> list[list[str]]:
"""Load and configure compile_commands and clang_tidy."""
def should_lint(path):
if not self.cpp_lint and path.endswith('.cc'):
def should_lint(path: str) -> bool:
if not self.cpp_lint and path.endswith(".cc"):
return False
isxgb = path.find('dmlc-core') == -1
isxgb = path.find("dmlc-core") == -1
isxgb = isxgb and (not path.startswith(self.cdb_path))
if isxgb:
print(path)
return True
return False
cdb_file = os.path.join(self.cdb_path, 'compile_commands.json')
with open(cdb_file, 'r') as fd:
cdb_file = os.path.join(self.cdb_path, "compile_commands.json")
with open(cdb_file, "r") as fd:
self.compile_commands = json.load(fd)
tidy_file = os.path.join(self.root_path, '.clang-tidy')
with open(tidy_file) as fd:
self.clang_tidy = yaml.safe_load(fd)
self.clang_tidy = str(self.clang_tidy)
self.tidy_file = os.path.join(self.root_path, ".clang-tidy")
all_files = []
for entry in self.compile_commands:
path = entry['file']
path = entry["file"]
if should_lint(path):
args = self._configure_flags(path, entry['command'])
args = self._configure_flags(path, entry["command"])
all_files.extend(args)
return all_files
def run(self):
'''Run clang-tidy.'''
def run(self) -> bool:
"""Run clang-tidy."""
all_files = self._configure()
passed = True
BAR = '-'*32
BAR = "-" * 32
with Pool(cpu_count()) as pool:
results = pool.map(call, all_files)
for i, (process_status, tidy_status, msg, args) in enumerate(results):
@ -226,54 +233,50 @@ class ClangTidy(object):
# for cub in thrust is not correct.
if tidy_status == 1:
passed = False
print(BAR, '\n'
'Command args:', ' '.join(args), ', ',
'Process return code:', process_status, ', ',
'Tidy result code:', tidy_status, ', ',
'Message:\n', msg,
BAR, '\n')
print(
BAR,
"\n" "Command args:",
" ".join(args),
", ",
"Process return code:",
process_status,
", ",
"Tidy result code:",
tidy_status,
", ",
"Message:\n",
msg,
BAR,
"\n",
)
if not passed:
print('Errors in `thrust` namespace can be safely ignored.',
'Please address rest of the clang-tidy warnings.')
print(
"Errors in `thrust` namespace can be safely ignored.",
"Please address rest of the clang-tidy warnings.",
)
return passed
def test_tidy(args):
'''See if clang-tidy and our regex is working correctly. There are
many subtleties we need to be careful. For instances:
def test_tidy(args: argparse.Namespace) -> None:
"""See if clang-tidy and our regex is working correctly. There are many subtleties
we need to be careful. Tests here are not thorough, at least we want to guarantee
tidy is not missing anything on the CI.
* Is the string re-directed to pipe encoded as UTF-8? or is it
bytes?
* On Jenkins there's no 'xgboost' directory, are we catching the
right keywords?
* Should we use re.DOTALL?
* Should we use re.MULTILINE?
Tests here are not thorough, at least we want to guarantee tidy is
not missing anything on Jenkins.
'''
"""
root_path = os.path.abspath(os.path.curdir)
tidy_file = os.path.join(root_path, '.clang-tidy')
test_file_path = os.path.join(root_path,
'tests', 'ci_build', 'test_tidy.cc')
tidy_file = os.path.join(root_path, ".clang-tidy")
test_file_path = os.path.join(root_path, "tests", "ci_build", "test_tidy.cc")
with open(tidy_file) as fd:
tidy_config = fd.read()
tidy_config = str(tidy_config)
tidy_config = '-config='+tidy_config
tidy_config = "--config-file=" + tidy_file
if not args.tidy_version:
tidy = 'clang-tidy'
tidy = "clang-tidy"
else:
tidy = 'clang-tidy-' + str(args.tidy_version)
args = [tidy, tidy_config, test_file_path]
(proc_code, tidy_status, error_msg, _) = call(args)
tidy = "clang-tidy-" + str(args.tidy_version)
cmd = [tidy, tidy_config, test_file_path]
(proc_code, tidy_status, error_msg, _) = call(cmd)
assert proc_code == 0
assert tidy_status == 1
print('clang-tidy is working.')
print("clang-tidy is working.")
if __name__ == "__main__":

View File

@ -1,3 +1,6 @@
/**
* Copyright 2020-2024, XGBoost contributors
*/
#include <gtest/gtest.h>
#include <xgboost/intrusive_ptr.h>
@ -12,10 +15,8 @@ class NotCopyConstructible {
NotCopyConstructible &operator=(NotCopyConstructible const &that) = delete;
NotCopyConstructible(NotCopyConstructible&& that) = default;
};
static_assert(
!std::is_trivially_copy_constructible<NotCopyConstructible>::value);
static_assert(
!std::is_trivially_copy_assignable<NotCopyConstructible>::value);
static_assert(!std::is_trivially_copy_constructible_v<NotCopyConstructible>);
static_assert(!std::is_trivially_copy_assignable_v<NotCopyConstructible>);
class ForIntrusivePtrTest {
public:

View File

@ -1,5 +1,5 @@
/**
* Copyright 2018-2023, XGBoost contributors
* Copyright 2018-2024, XGBoost contributors
*/
#include "test_span.h"
@ -174,19 +174,11 @@ TEST(Span, FromFirstLast) {
}
}
struct BaseClass {
virtual void operator()() {}
};
struct DerivedClass : public BaseClass {
void operator()() override {}
};
TEST(Span, FromOther) {
// convert constructor
{
Span<DerivedClass> derived;
Span<BaseClass> base { derived };
Span<int> derived;
Span<int const> base{derived};
ASSERT_EQ(base.size(), derived.size());
ASSERT_EQ(base.data(), derived.data());
}

View File

@ -93,11 +93,11 @@ TEST(Adapter, CSCAdapterColsMoreThanRows) {
// A mock for JVM data iterator.
class CSRIterForTest {
std::vector<float> data_ {1, 2, 3, 4, 5};
std::vector<std::remove_pointer<decltype(std::declval<XGBoostBatchCSR>().index)>::type>
feature_idx_ {0, 1, 0, 1, 1};
std::vector<std::remove_pointer<decltype(std::declval<XGBoostBatchCSR>().offset)>::type>
row_ptr_ {0, 2, 4, 5, 5};
std::vector<float> data_{1, 2, 3, 4, 5};
std::vector<std::remove_pointer_t<decltype(std::declval<XGBoostBatchCSR>().index)>> feature_idx_{
0, 1, 0, 1, 1};
std::vector<std::remove_pointer_t<decltype(std::declval<XGBoostBatchCSR>().offset)>> row_ptr_{
0, 2, 4, 5, 5};
size_t iter_ {0};
public:

View File

@ -49,7 +49,7 @@ void TestSparseDMatrixLoadFile(Context const* ctx) {
1};
Page out;
for (auto const &page : m.GetBatches<Page>(ctx)) {
if (std::is_same<Page, SparsePage>::value) {
if (std::is_same_v<Page, SparsePage>) {
out.Push(page);
} else {
out.PushCSC(page);
@ -89,7 +89,7 @@ void TestRetainPage() {
for (auto it = begin; it != end; ++it) {
iterators.push_back(it.Page());
pages.emplace_back(Page{});
if (std::is_same<Page, SparsePage>::value) {
if (std::is_same_v<Page, SparsePage>) {
pages.back().Push(*it);
} else {
pages.back().PushCSC(*it);
@ -105,7 +105,7 @@ void TestRetainPage() {
// make sure it's const and the caller can not modify the content of page.
for (auto &page : p_fmat->GetBatches<Page>({&ctx})) {
static_assert(std::is_const<std::remove_reference_t<decltype(page)>>::value);
static_assert(std::is_const_v<std::remove_reference_t<decltype(page)>>);
}
}

View File

@ -166,7 +166,7 @@ TEST(SparsePageDMatrix, RetainEllpackPage) {
// make sure it's const and the caller can not modify the content of page.
for (auto& page : m->GetBatches<EllpackPage>(&ctx, param)) {
static_assert(std::is_const<std::remove_reference_t<decltype(page)>>::value);
static_assert(std::is_const_v<std::remove_reference_t<decltype(page)>>);
}
// The above iteration clears out all references inside DMatrix.

View File

@ -62,7 +62,7 @@ void TestPartitioner(bst_target_t n_targets) {
auto ptr = gmat.cut.Ptrs()[split_ind + 1];
float split_value = gmat.cut.Values().at(ptr / 2);
RegTree tree{n_targets, n_features};
if constexpr (std::is_same<ExpandEntry, CPUExpandEntry>::value) {
if constexpr (std::is_same_v<ExpandEntry, CPUExpandEntry>) {
GetSplit(&tree, split_value, &candidates);
} else {
GetMultiSplitForTest(&tree, split_value, &candidates);
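
With if constexpr only the selected branch is instantiated, so GetSplit and GetMultiSplitForTest each need to be valid for just their own ExpandEntry type. The mechanism in isolation (names and types are illustrative):

#include <type_traits>

template <typename Entry>
constexpr int Dispatch() {
  if constexpr (std::is_same_v<Entry, int>) {
    return 1;  // stands in for the single-target path
  } else {
    return 2;  // stands in for the multi-target path
  }
}

static_assert(Dispatch<int>() == 1);
static_assert(Dispatch<float>() == 2);
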
@ -119,7 +119,7 @@ void VerifyColumnSplitPartitioner(bst_target_t n_targets, size_t n_samples,
{
RegTree tree{n_targets, n_features};
CommonRowPartitioner partitioner{&ctx, n_samples, base_rowid, true};
if constexpr (std::is_same<ExpandEntry, CPUExpandEntry>::value) {
if constexpr (std::is_same_v<ExpandEntry, CPUExpandEntry>) {
GetSplit(&tree, min_value, &candidates);
} else {
GetMultiSplitForTest(&tree, min_value, &candidates);
@ -132,7 +132,7 @@ void VerifyColumnSplitPartitioner(bst_target_t n_targets, size_t n_samples,
{
RegTree tree{n_targets, n_features};
CommonRowPartitioner partitioner{&ctx, n_samples, base_rowid, true};
if constexpr (std::is_same<ExpandEntry, CPUExpandEntry>::value) {
if constexpr (std::is_same_v<ExpandEntry, CPUExpandEntry>) {
GetSplit(&tree, mid_value, &candidates);
} else {
GetMultiSplitForTest(&tree, mid_value, &candidates);
@ -187,7 +187,7 @@ void TestColumnSplitPartitioner(bst_target_t n_targets) {
auto ptr = gmat.cut.Ptrs()[split_ind + 1];
mid_value = gmat.cut.Values().at(ptr / 2);
RegTree tree{n_targets, n_features};
if constexpr (std::is_same<ExpandEntry, CPUExpandEntry>::value) {
if constexpr (std::is_same_v<ExpandEntry, CPUExpandEntry>) {
GetSplit(&tree, mid_value, &candidates);
} else {
GetMultiSplitForTest(&tree, mid_value, &candidates);