Restore clang tidy test. (#8861)

commit 4d665b3fb0
parent 2dc22e7aad
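This commit restores the clang-tidy CI job (the Buildkite step is re-enabled and the linter image moves from LLVM 11 on Ubuntu 18.04 to LLVM 15 on Ubuntu 20.04) and fixes the warnings the newer linter raises across the tree. The fixes are a handful of recurring mechanical patterns; as an illustration only — a minimal sketch, not code taken from the repository — they look like this (assuming a C++17 toolchain, which single-argument static_assert requires):

#include <cstddef>
#include <type_traits>

// C++17 drops the mandatory message argument of static_assert:
//   before: static_assert(std::is_integral<int>::value, "");
static_assert(std::is_integral<int>::value);

// Functional-style casts become explicit static_casts
// (google-readability-casting):
//   before: return std::size_t(max_shared_memory);
inline std::size_t ToSize(int max_shared_memory) {
  return static_cast<std::size_t>(max_shared_memory);
}

// Members get in-class default initializers instead of boilerplate
// constructor init lists (modernize-use-default-member-init):
class Example {
  double sum_instance_weight_{0.0};
  bool is_converged_{false};
};

In-class initializers keep the default value next to the declaration, so constructors that merely zeroed members can be removed, as the GBLinear changes in the diff below do.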
@@ -1,4 +1,4 @@
Checks: 'modernize-*,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,-modernize-avoid-c-arrays,-modernize-use-trailing-return-type,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
Checks: 'modernize-*,-modernize-use-nodiscard,-modernize-concat-nested-namespaces,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,-modernize-avoid-c-arrays,-modernize-use-trailing-return-type,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
CheckOptions:
  - { key: readability-identifier-naming.ClassCase, value: CamelCase }
  - { key: readability-identifier-naming.StructCase, value: CamelCase }

@@ -1,11 +1,11 @@
/*!
 * Copyright 2019 XGBoost contributors
/**
 * Copyright 2019-2023 by XGBoost contributors
 */
#ifndef XGBOOST_VERSION_CONFIG_H_
#define XGBOOST_VERSION_CONFIG_H_

#define XGBOOST_VER_MAJOR @xgboost_VERSION_MAJOR@
#define XGBOOST_VER_MINOR @xgboost_VERSION_MINOR@
#define XGBOOST_VER_PATCH @xgboost_VERSION_PATCH@
#define XGBOOST_VER_MAJOR @xgboost_VERSION_MAJOR@ /* NOLINT */
#define XGBOOST_VER_MINOR @xgboost_VERSION_MINOR@ /* NOLINT */
#define XGBOOST_VER_PATCH @xgboost_VERSION_PATCH@ /* NOLINT */

#endif  // XGBOOST_VERSION_CONFIG_H_

@@ -4,7 +4,7 @@
#ifndef XGBOOST_CACHE_H_
#define XGBOOST_CACHE_H_

#include <xgboost/logging.h>  // CHECK_EQ
#include <xgboost/logging.h>  // for CHECK_EQ, CHECK

#include <cstddef>  // for size_t
#include <memory>  // for weak_ptr, shared_ptr, make_shared

@@ -12,6 +12,7 @@
#include <queue>  // for queue
#include <thread>  // for thread
#include <unordered_map>  // for unordered_map
#include <utility>  // for move
#include <vector>  // for vector

namespace xgboost {

@@ -32,6 +33,8 @@ class DMatrixCache {

CacheT const& Value() const { return *value; }
CacheT& Value() { return *value; }

Item(std::shared_ptr<DMatrix> m, std::shared_ptr<CacheT> v) : ref{m}, value{std::move(v)} {}
};

static constexpr std::size_t DefaultSize() { return 32; }

@@ -141,7 +144,7 @@ class DMatrixCache {
auto it = container_.find(key);
if (it == container_.cend()) {
// after the new DMatrix, cache size is at most max_size
container_[key] = {m, std::make_shared<CacheT>(args...)};
container_.emplace(key, Item{m, std::make_shared<CacheT>(args...)});
queue_.emplace(key);
}
return container_.at(key).value;

@@ -1,5 +1,5 @@
/**
 * Copyright by XGBoost Contributors 2019-2023
 * Copyright 2019-2023 by XGBoost Contributors
 */
#ifndef XGBOOST_JSON_H_
#define XGBOOST_JSON_H_

@@ -372,7 +372,7 @@ class Json {
/*! \brief Use your own JsonWriter. */
static void Dump(Json json, JsonWriter* writer);

Json() : ptr_{new JsonNull} {}
Json() = default;

// number
explicit Json(JsonNumber number) : ptr_{new JsonNumber(std::move(number))} {}

@@ -462,7 +462,7 @@ class Json {
IntrusivePtr<Value> const& Ptr() const { return ptr_; }

private:
IntrusivePtr<Value> ptr_;
IntrusivePtr<Value> ptr_{new JsonNull};
};

/**

@@ -22,13 +22,13 @@ namespace detail {
// static_cast and std::to_string.
template <typename Char, std::enable_if_t<std::is_signed<Char>::value>* = nullptr>
std::string CharToStr(Char c) {
static_assert(std::is_same<Char, char>::value, "");
static_assert(std::is_same<Char, char>::value);
return std::string{c};
}

template <typename Char, std::enable_if_t<!std::is_signed<Char>::value>* = nullptr>
std::string CharToStr(Char c) {
static_assert(std::is_same<Char, char>::value, "");
static_assert(std::is_same<Char, char>::value);
return (c <= static_cast<char>(127) ? std::string{c} : std::to_string(c));
}
}  // namespace detail

@@ -52,14 +52,14 @@ struct ArrayInterfaceHandler {

template <size_t dim, typename S, typename Head, size_t D>
constexpr size_t Offset(S (&strides)[D], size_t n, Head head) {
static_assert(dim < D, "");
static_assert(dim < D);
return n + head * strides[dim];
}

template <size_t dim, typename S, size_t D, typename Head, typename... Tail>
constexpr std::enable_if_t<sizeof...(Tail) != 0, size_t> Offset(S (&strides)[D], size_t n,
Head head, Tail &&...rest) {
static_assert(dim < D, "");
static_assert(dim < D);
return Offset<dim + 1>(strides, n + (head * strides[dim]), std::forward<Tail>(rest)...);
}

@@ -193,14 +193,14 @@ LINALG_HD auto UnravelImpl(I idx, common::Span<size_t const, D> shape) {

template <size_t dim, typename I, int32_t D>
void ReshapeImpl(size_t (&out_shape)[D], I s) {
static_assert(dim < D, "");
static_assert(dim < D);
out_shape[dim] = s;
}

template <size_t dim, int32_t D, typename... S, typename I,
std::enable_if_t<sizeof...(S) != 0> * = nullptr>
void ReshapeImpl(size_t (&out_shape)[D], I &&s, S &&...rest) {
static_assert(dim < D, "");
static_assert(dim < D);
out_shape[dim] = s;
ReshapeImpl<dim + 1>(out_shape, std::forward<S>(rest)...);
}

@@ -230,7 +230,8 @@ struct Conjunction : std::true_type {};
template <class B1>
struct Conjunction<B1> : B1 {};
template <class B1, class... Bn>
struct Conjunction<B1, Bn...> : std::conditional_t<bool(B1::value), Conjunction<Bn...>, B1> {};
struct Conjunction<B1, Bn...>
    : std::conditional_t<static_cast<bool>(B1::value), Conjunction<Bn...>, B1> {};

template <typename... Index>
using IsAllIntegral = Conjunction<std::is_integral<std::remove_reference_t<Index>>...>;

@@ -291,8 +292,8 @@ class TensorView {
template <size_t old_dim, size_t new_dim, int32_t D, typename I>
LINALG_HD size_t MakeSliceDim(size_t new_shape[D], size_t new_stride[D],
detail::RangeTag<I> &&range) const {
static_assert(new_dim < D, "");
static_assert(old_dim < kDim, "");
static_assert(new_dim < D);
static_assert(old_dim < kDim);
new_stride[new_dim] = stride_[old_dim];
new_shape[new_dim] = range.Size();
assert(static_cast<decltype(shape_[old_dim])>(range.end) <= shape_[old_dim]);

@@ -306,8 +307,8 @@ class TensorView {
template <size_t old_dim, size_t new_dim, int32_t D, typename I, typename... S>
LINALG_HD size_t MakeSliceDim(size_t new_shape[D], size_t new_stride[D],
detail::RangeTag<I> &&range, S &&...slices) const {
static_assert(new_dim < D, "");
static_assert(old_dim < kDim, "");
static_assert(new_dim < D);
static_assert(old_dim < kDim);
new_stride[new_dim] = stride_[old_dim];
new_shape[new_dim] = range.Size();
assert(static_cast<decltype(shape_[old_dim])>(range.end) <= shape_[old_dim]);

@@ -320,8 +321,8 @@ class TensorView {

template <size_t old_dim, size_t new_dim, int32_t D>
LINALG_HD size_t MakeSliceDim(size_t new_shape[D], size_t new_stride[D], detail::AllTag) const {
static_assert(new_dim < D, "");
static_assert(old_dim < kDim, "");
static_assert(new_dim < D);
static_assert(old_dim < kDim);
new_stride[new_dim] = stride_[old_dim];
new_shape[new_dim] = shape_[old_dim];
return 0;

@@ -332,8 +333,8 @@ class TensorView {
template <size_t old_dim, size_t new_dim, int32_t D, typename... S>
LINALG_HD size_t MakeSliceDim(size_t new_shape[D], size_t new_stride[D], detail::AllTag,
S &&...slices) const {
static_assert(new_dim < D, "");
static_assert(old_dim < kDim, "");
static_assert(new_dim < D);
static_assert(old_dim < kDim);
new_stride[new_dim] = stride_[old_dim];
new_shape[new_dim] = shape_[old_dim];
return MakeSliceDim<old_dim + 1, new_dim + 1, D>(new_shape, new_stride,

@@ -343,7 +344,7 @@ class TensorView {
template <size_t old_dim, size_t new_dim, int32_t D, typename Index>
LINALG_HD size_t MakeSliceDim(DMLC_ATTRIBUTE_UNUSED size_t new_shape[D],
DMLC_ATTRIBUTE_UNUSED size_t new_stride[D], Index i) const {
static_assert(old_dim < kDim, "");
static_assert(old_dim < kDim);
return stride_[old_dim] * i;
}
/**

@@ -352,7 +353,7 @@ class TensorView {
template <size_t old_dim, size_t new_dim, int32_t D, typename Index, typename... S>
LINALG_HD std::enable_if_t<std::is_integral<Index>::value, size_t> MakeSliceDim(
size_t new_shape[D], size_t new_stride[D], Index i, S &&...slices) const {
static_assert(old_dim < kDim, "");
static_assert(old_dim < kDim);
auto offset = stride_[old_dim] * i;
auto res =
MakeSliceDim<old_dim + 1, new_dim, D>(new_shape, new_stride, std::forward<S>(slices)...);

@@ -501,7 +502,7 @@ class TensorView {
*/
LINALG_HD bool CContiguous() const {
StrideT stride;
static_assert(std::is_same<decltype(stride), decltype(stride_)>::value, "");
static_assert(std::is_same<decltype(stride), decltype(stride_)>::value);
// It's contiguous if the stride can be calculated from shape.
detail::CalcStride(shape_, stride);
return common::Span<size_t const, kDim>{stride_} == common::Span<size_t const, kDim>{stride};

@@ -511,7 +512,7 @@ class TensorView {
*/
LINALG_HD bool FContiguous() const {
StrideT stride;
static_assert(std::is_same<decltype(stride), decltype(stride_)>::value, "");
static_assert(std::is_same<decltype(stride), decltype(stride_)>::value);
// It's contiguous if the stride can be calculated from shape.
detail::CalcStride<kDim, true>(shape_, stride);
return common::Span<size_t const, kDim>{stride_} == common::Span<size_t const, kDim>{stride};

@@ -625,7 +626,7 @@ Json ArrayInterface(TensorView<T const, D> const &t) {
array_interface["version"] = 3;

char constexpr kT = detail::ArrayInterfaceHandler::TypeChar<T>();
static_assert(kT != '\0', "");
static_assert(kT != '\0');
if (DMLC_LITTLE_ENDIAN) {
array_interface["typestr"] = String{"<" + (kT + std::to_string(sizeof(T)))};
} else {

@@ -28,7 +28,7 @@ struct Context;
*/
class Metric : public Configurable {
protected:
Context const* ctx_;
Context const* ctx_{nullptr};

public:
/*!

@@ -1,11 +1,11 @@
/*!
 * Copyright 2019 XGBoost contributors
/**
 * Copyright 2019-2023 by XGBoost contributors
 */
#ifndef XGBOOST_VERSION_CONFIG_H_
#define XGBOOST_VERSION_CONFIG_H_

#define XGBOOST_VER_MAJOR 2
#define XGBOOST_VER_MINOR 0
#define XGBOOST_VER_PATCH 0
#define XGBOOST_VER_MAJOR 2 /* NOLINT */
#define XGBOOST_VER_MINOR 0 /* NOLINT */
#define XGBOOST_VER_PATCH 0 /* NOLINT */

#endif  // XGBOOST_VERSION_CONFIG_H_

@@ -1,5 +1,5 @@
/*!
 * Copyright 2022 XGBoost contributors
/**
 * Copyright 2022-2023 by XGBoost contributors
 */
#pragma once
#include <string>

@@ -9,7 +9,7 @@
namespace xgboost {
namespace collective {

/*!
/**
 * \brief Initialize the collective communicator.
 *
 * Currently the communicator API is experimental, function signatures may change in the future

@@ -210,7 +210,7 @@ inline void Allreduce(uint64_t *send_receive_buffer, size_t count) {
template <Operation op, typename T,
typename = std::enable_if_t<std::is_same<size_t, T>{} && !std::is_same<uint64_t, T>{}> >
inline void Allreduce(T *send_receive_buffer, size_t count) {
static_assert(sizeof(T) == sizeof(uint64_t), "");
static_assert(sizeof(T) == sizeof(uint64_t));
Communicator::Get()->AllReduce(send_receive_buffer, count, DataType::kUInt64, op);
}

@@ -42,9 +42,9 @@ constexpr inline bst_cat_t OutOfRangeCat() {

inline XGBOOST_DEVICE bool InvalidCat(float cat) {
constexpr auto kMaxCat = OutOfRangeCat();
static_assert(static_cast<bst_cat_t>(static_cast<float>(kMaxCat)) == kMaxCat, "");
static_assert(static_cast<bst_cat_t>(static_cast<float>(kMaxCat + 1)) != kMaxCat + 1, "");
static_assert(static_cast<float>(kMaxCat + 1) == kMaxCat, "");
static_assert(static_cast<bst_cat_t>(static_cast<float>(kMaxCat)) == kMaxCat);
static_assert(static_cast<bst_cat_t>(static_cast<float>(kMaxCat + 1)) != kMaxCat + 1);
static_assert(static_cast<float>(kMaxCat + 1) == kMaxCat);
return cat < 0 || cat >= kMaxCat;
}

@@ -270,7 +270,9 @@ struct RyuPowLogUtils {
*/
static uint32_t MulPow5InvDivPow2(const uint32_t m, const uint32_t q,
const int32_t j) noexcept(true) {
return MulShift(m, kFloatPow5InvSplit[q], j);
static_assert(sizeof(kFloatPow5InvSplit) == 55 * sizeof(std::uint64_t));
assert(q < 55);
return MulShift(m, kFloatPow5InvSplit[q], j);  // NOLINT
}

/*

@@ -495,12 +497,10 @@ class PowerBaseComputer {
static_cast<int32_t>(IEEE754::kFloatBias) -
static_cast<int32_t>(IEEE754::kFloatMantissaBits) -
static_cast<int32_t>(2);
static_assert(static_cast<int32_t>(1) -
static_cast<int32_t>(IEEE754::kFloatBias) -
static_assert(static_cast<int32_t>(1) - static_cast<int32_t>(IEEE754::kFloatBias) -
static_cast<int32_t>(IEEE754::kFloatMantissaBits) -
static_cast<int32_t>(2) ==
-151,
"");
-151);
mantissa_base2 = f.mantissa;
} else {
base2_range.exponent = static_cast<int32_t>(f.exponent) - IEEE754::kFloatBias -

@@ -544,7 +544,7 @@ class RyuPrinter {
// Function precondition: v is not a 10-digit number.
// (f2s: 9 digits are sufficient for round-tripping.)
// (d2fixed: We print 9-digit blocks.)
static_assert(100000000 == Tens(8), "");
static_assert(100000000 == Tens(8));
assert(v < Tens(9));
if (v >= Tens(8)) {
return 9;

@@ -911,7 +911,7 @@ from_chars_result FromCharFloatImpl(const char *buffer, const int len,
// the bias and also special-case the value 0.
int32_t shift = (f_e2 == 0 ? 1 : f_e2) - exp_b2 - IEEE754::kFloatBias -
IEEE754::kFloatMantissaBits;
assert(shift >= 0);
assert(shift >= 1);

// We need to round up if the exact value is more than 0.5 above the value we
// computed. That's equivalent to checking if the last removed bit was 1 and

@@ -920,7 +920,7 @@ from_chars_result FromCharFloatImpl(const char *buffer, const int len,
//
// We need to update trailingZeros given that we have the exact output
// exponent ieee_e2 now.
trailing_zeros &= (mantissa_b2 & ((1u << (shift - 1)) - 1)) == 0;
trailing_zeros &= (mantissa_b2 & ((1u << (shift - 1)) - 1)) == 0;  // NOLINT
uint32_t lastRemovedBit = (mantissa_b2 >> (shift - 1)) & 1;
bool roundup = (lastRemovedBit != 0) &&
(!trailing_zeros || (((mantissa_b2 >> shift) & 1) != 0));

@@ -87,7 +87,7 @@ inline to_chars_result to_chars(char *first, char *last, int64_t value) { // NOL
if (value < 0) {
*first = '-';
std::advance(first, 1);
unsigned_value = uint64_t(~value) + uint64_t(1);
unsigned_value = static_cast<uint64_t>(~value) + static_cast<uint64_t>(1);
}
return detail::ToCharsUnsignedImpl(first, last, unsigned_value);
}

@@ -1,12 +1,13 @@
/*!
 * Copyright 2017 by Contributors
/**
 * Copyright 2017-2023 by XGBoost Contributors
 * \file compressed_iterator.h
 */
#pragma once
#include <xgboost/base.h>
#include <cmath>
#include <cstddef>

#include <algorithm>
#include <cmath>
#include <cstddef>  // for size_t

#include "common.h"

@@ -36,7 +37,7 @@ static const int kPadding = 4;  // Assign padding so we can read slightly off
// The number of bits required to represent a given unsigned range
inline XGBOOST_DEVICE size_t SymbolBits(size_t num_symbols) {
auto bits = std::ceil(log2(static_cast<double>(num_symbols)));
return common::Max(static_cast<size_t>(bits), size_t(1));
return common::Max(static_cast<size_t>(bits), static_cast<std::size_t>(1));
}
}  // namespace detail

@@ -20,6 +20,7 @@

#include <algorithm>
#include <chrono>
#include <cstddef>  // for size_t
#include <cub/cub.cuh>
#include <cub/util_allocator.cuh>
#include <numeric>

@@ -178,7 +179,7 @@ inline size_t MaxSharedMemory(int device_idx) {
dh::safe_cuda(cudaDeviceGetAttribute
(&max_shared_memory, cudaDevAttrMaxSharedMemoryPerBlock,
device_idx));
return size_t(max_shared_memory);
return static_cast<std::size_t>(max_shared_memory);
}

/**

@@ -195,7 +196,7 @@ inline size_t MaxSharedMemoryOptin(int device_idx) {
dh::safe_cuda(cudaDeviceGetAttribute
(&max_shared_memory, cudaDevAttrMaxSharedMemoryPerBlockOptin,
device_idx));
return size_t(max_shared_memory);
return static_cast<std::size_t>(max_shared_memory);
}

inline void CheckComputeCapability() {

@@ -1,33 +1,31 @@
/*!
 * Copyright 2018~2020 XGBoost contributors
/**
 * Copyright 2018~2023 by XGBoost contributors
 */

#include <xgboost/logging.h>

#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
#include <xgboost/logging.h>

#include <cstddef>  // for size_t
#include <memory>
#include <mutex>
#include <utility>
#include <vector>

#include "categorical.h"
#include "device_helpers.cuh"
#include "hist_util.h"
#include "hist_util.cuh"
#include "hist_util.h"
#include "math.h"  // NOLINT
#include "quantile.h"
#include "categorical.h"
#include "xgboost/host_device_vector.h"

namespace xgboost {
namespace common {

@@ -318,7 +316,7 @@ HistogramCuts DeviceSketch(int device, DMatrix* dmat, int max_bins,
size_t batch_nnz = batch.data.Size();
auto const& info = dmat->Info();
for (auto begin = 0ull; begin < batch_nnz; begin += sketch_batch_num_elements) {
size_t end = std::min(batch_nnz, size_t(begin + sketch_batch_num_elements));
size_t end = std::min(batch_nnz, static_cast<std::size_t>(begin + sketch_batch_num_elements));
if (has_weights) {
bool is_ranking = HostSketchContainer::UseGroup(dmat->Info());
dh::caching_device_vector<uint32_t> groups(info.group_ptr_.cbegin(),

@@ -1,5 +1,5 @@
/*!
 * Copyright 2020 XGBoost contributors
/**
 * Copyright 2020-2023 by XGBoost contributors
 *
 * \brief Front end and utilities for GPU based sketching.  Works on sliding window
 * instead of stream.

@@ -9,11 +9,13 @@

#include <thrust/host_vector.h>

#include <cstddef>  // for size_t

#include "../data/device_adapter.cuh"
#include "device_helpers.cuh"
#include "hist_util.h"
#include "quantile.cuh"
#include "device_helpers.cuh"
#include "timer.h"
#include "../data/device_adapter.cuh"

namespace xgboost {
namespace common {

@@ -304,7 +306,8 @@ void AdapterDeviceSketch(Batch batch, int num_bins,
num_rows, num_cols, std::numeric_limits<size_t>::max(),
device, num_cuts_per_feature, true);
for (auto begin = 0ull; begin < batch.Size(); begin += sketch_batch_num_elements) {
size_t end = std::min(batch.Size(), size_t(begin + sketch_batch_num_elements));
size_t end =
std::min(batch.Size(), static_cast<std::size_t>(begin + sketch_batch_num_elements));
ProcessWeightedSlidingWindow(batch, info,
num_cuts_per_feature,
HostSketchContainer::UseGroup(info), missing, device, num_cols, begin, end,

@@ -316,7 +319,8 @@ void AdapterDeviceSketch(Batch batch, int num_bins,
num_rows, num_cols, std::numeric_limits<size_t>::max(),
device, num_cuts_per_feature, false);
for (auto begin = 0ull; begin < batch.Size(); begin += sketch_batch_num_elements) {
size_t end = std::min(batch.Size(), size_t(begin + sketch_batch_num_elements));
size_t end =
std::min(batch.Size(), static_cast<std::size_t>(begin + sketch_batch_num_elements));
ProcessSlidingWindow(batch, info, device, num_cols, begin, end, missing,
sketch_container, num_cuts_per_feature);
}

@@ -50,7 +50,7 @@ size_t PeekableInStream::PeekRead(void* dptr, size_t size) {
}
}

FixedSizeStream::FixedSizeStream(PeekableInStream* stream) : PeekableInStream(stream), pointer_{0} {
FixedSizeStream::FixedSizeStream(PeekableInStream* stream) : PeekableInStream(stream) {
size_t constexpr kInitialSize = 4096;
size_t size{kInitialSize}, total{0};
buffer_.clear();

@@ -27,8 +27,7 @@ using MemoryBufferStream = rabit::utils::MemoryBufferStream;
*/
class PeekableInStream : public dmlc::Stream {
public:
explicit PeekableInStream(dmlc::Stream* strm)
: strm_(strm), buffer_ptr_(0) {}
explicit PeekableInStream(dmlc::Stream* strm) : strm_(strm) {}

size_t Read(void* dptr, size_t size) override;
virtual size_t PeekRead(void* dptr, size_t size);

@@ -41,7 +40,7 @@ class PeekableInStream : public dmlc::Stream {
/*! \brief input stream */
dmlc::Stream *strm_;
/*! \brief current buffer pointer */
size_t buffer_ptr_;
size_t buffer_ptr_{0};
/*! \brief internal buffer */
std::string buffer_;
};

@@ -72,7 +71,7 @@ class FixedSizeStream : public PeekableInStream {
void Take(std::string* out);

private:
size_t pointer_;
size_t pointer_{0};
std::string buffer_;
};

@@ -710,10 +710,10 @@ void Json::Dump(Json json, JsonWriter* writer) {
writer->Save(json);
}

static_assert(std::is_nothrow_move_constructible<Json>::value, "");
static_assert(std::is_nothrow_move_constructible<Object>::value, "");
static_assert(std::is_nothrow_move_constructible<Array>::value, "");
static_assert(std::is_nothrow_move_constructible<String>::value, "");
static_assert(std::is_nothrow_move_constructible<Json>::value);
static_assert(std::is_nothrow_move_constructible<Object>::value);
static_assert(std::is_nothrow_move_constructible<Array>::value);
static_assert(std::is_nothrow_move_constructible<String>::value);

Json UBJReader::ParseArray() {
auto marker = PeekNextChar();

@@ -14,7 +14,7 @@ double Reduce(Context const* ctx, HostDeviceVector<float> const& values) {
if (ctx->IsCPU()) {
auto const& h_values = values.ConstHostVector();
auto result = cpu_impl::Reduce(ctx, h_values.cbegin(), h_values.cend(), 0.0);
static_assert(std::is_same<decltype(result), double>::value, "");
static_assert(std::is_same<decltype(result), double>::value);
return result;
}
return cuda_impl::Reduce(ctx, values);

@@ -42,8 +42,8 @@ void RunLengthEncode(Iter begin, Iter end, std::vector<Idx>* p_out) {
*/
template <typename InIt, typename OutIt, typename T>
void PartialSum(int32_t n_threads, InIt begin, InIt end, T init, OutIt out_it) {
static_assert(std::is_same<T, typename std::iterator_traits<InIt>::value_type>::value, "");
static_assert(std::is_same<T, typename std::iterator_traits<OutIt>::value_type>::value, "");
static_assert(std::is_same<T, typename std::iterator_traits<InIt>::value_type>::value);
static_assert(std::is_same<T, typename std::iterator_traits<OutIt>::value_type>::value);
// The number of threads is pegged to the batch size. If the OMP block is parallelized
// on anything other than the batch/block size, it should be reassigned
auto n = static_cast<size_t>(std::distance(begin, end));

@@ -1,5 +1,5 @@
/*!
 * Copyright 2020-2022 by XGBoost Contributors
/**
 * Copyright 2020-2023 by XGBoost Contributors
 */
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>

@@ -109,7 +109,7 @@ void PruneImpl(common::Span<SketchContainer::OffsetT const> cuts_ptr,
template <typename T, typename U>
void CopyTo(Span<T> out, Span<U> src) {
CHECK_EQ(out.size(), src.size());
static_assert(std::is_same<std::remove_cv_t<T>, std::remove_cv_t<T>>::value, "");
static_assert(std::is_same<std::remove_cv_t<T>, std::remove_cv_t<T>>::value);
dh::safe_cuda(cudaMemcpyAsync(out.data(), src.data(),
out.size_bytes(),
cudaMemcpyDefault));

@@ -143,7 +143,7 @@ common::Span<thrust::tuple<uint64_t, uint64_t>> MergePath(
thrust::make_zip_iterator(thrust::make_tuple(b_ind_iter, place_holder));

dh::XGBCachingDeviceAllocator<Tuple> alloc;
static_assert(sizeof(Tuple) == sizeof(SketchEntry), "");
static_assert(sizeof(Tuple) == sizeof(SketchEntry));
// We reuse the memory for storing merge path.
common::Span<Tuple> merge_path{reinterpret_cast<Tuple *>(out.data()), out.size()};
// Determine the merge path, 0 if element is from x, 1 if it's from y.

@@ -77,14 +77,14 @@ class RowSetCollection {
if (row_indices_.empty()) {  // edge case: empty instance set
constexpr size_t* kBegin = nullptr;
constexpr size_t* kEnd = nullptr;
static_assert(kEnd - kBegin == 0, "");
elem_of_each_node_.emplace_back(Elem(kBegin, kEnd, 0));
static_assert(kEnd - kBegin == 0);
elem_of_each_node_.emplace_back(kBegin, kEnd, 0);
return;
}

const size_t* begin = dmlc::BeginPtr(row_indices_);
const size_t* end = dmlc::BeginPtr(row_indices_) + row_indices_.size();
elem_of_each_node_.emplace_back(Elem(begin, end, 0));
elem_of_each_node_.emplace_back(begin, end, 0);
}

std::vector<size_t>* Data() { return &row_indices_; }

@@ -49,7 +49,7 @@ float Quantile(Context const* ctx, double alpha, Iter const& begin, Iter const&
}

auto val = [&](size_t i) { return *(begin + sorted_idx[i]); };
static_assert(std::is_same<decltype(val(0)), float>::value, "");
static_assert(std::is_same<decltype(val(0)), float>::value);

if (alpha <= (1 / (n + 1))) {
return val(0);

@@ -1,12 +1,14 @@
/*!
 * Copyright (c) 2019 by Contributors
/**
 * Copyright 2019-2023 by XGBoost Contributors
 * \file device_adapter.cuh
 */
#ifndef XGBOOST_DATA_DEVICE_ADAPTER_H_
#define XGBOOST_DATA_DEVICE_ADAPTER_H_
#include <cstddef>  // for size_t
#include <limits>
#include <memory>
#include <string>

#include "../common/device_helpers.cuh"
#include "../common/math.h"
#include "adapter.h"

@@ -205,10 +207,10 @@ size_t GetRowCounts(const AdapterBatchT batch, common::Span<size_t> offset,
}
});
dh::XGBCachingDeviceAllocator<char> alloc;
size_t row_stride = dh::Reduce(
thrust::cuda::par(alloc), thrust::device_pointer_cast(offset.data()),
thrust::device_pointer_cast(offset.data()) + offset.size(), size_t(0),
thrust::maximum<size_t>());
size_t row_stride =
dh::Reduce(thrust::cuda::par(alloc), thrust::device_pointer_cast(offset.data()),
thrust::device_pointer_cast(offset.data()) + offset.size(),
static_cast<std::size_t>(0), thrust::maximum<size_t>());
return row_stride;
}
};  // namespace data

@@ -75,10 +75,7 @@ class GBLinear : public GradientBooster {
: GradientBooster{ctx},
learner_model_param_{learner_model_param},
model_{learner_model_param},
previous_model_{learner_model_param},
sum_instance_weight_(0),
sum_weight_complete_(false),
is_converged_(false) {}
previous_model_{learner_model_param} {}

void Configure(const Args& cfg) override {
if (model_.weight.size() == 0) {

@@ -344,10 +341,10 @@ class GBLinear : public GradientBooster {
GBLinearModel previous_model_;
GBLinearTrainParam param_;
std::unique_ptr<LinearUpdater> updater_;
double sum_instance_weight_;
bool sum_weight_complete_;
double sum_instance_weight_{};
bool sum_weight_complete_{false};
common::Monitor monitor_;
bool is_converged_;
bool is_converged_{false};
};

// register the objective functions

@@ -47,12 +47,12 @@ class GBLinearModel : public Model {
DeprecatedGBLinearModelParam param_;

public:
int32_t num_boosted_rounds;
int32_t num_boosted_rounds{0};
LearnerModelParam const* learner_model_param;

public:
explicit GBLinearModel(LearnerModelParam const* learner_model_param) :
num_boosted_rounds{0}, learner_model_param {learner_model_param} {}
explicit GBLinearModel(LearnerModelParam const *learner_model_param)
: learner_model_param{learner_model_param} {}
void Configure(Args const &) { }

// weight for each of feature, bias is the last one

@@ -97,7 +97,7 @@ class EvaluateSplitAgent {
idx += kBlockSize) {
local_sum += LoadGpair(node_histogram + idx);
}
local_sum = SumReduceT(temp_storage->sum_reduce).Sum(local_sum);
local_sum = SumReduceT(temp_storage->sum_reduce).Sum(local_sum);  // NOLINT
// Broadcast result from thread 0
return {__shfl_sync(0xffffffff, local_sum.GetQuantisedGrad(), 0),
__shfl_sync(0xffffffff, local_sum.GetQuantisedHess(), 0)};

@@ -1,15 +1,15 @@
/*!
 * Copyright 2020-2021 by XGBoost Contributors
/**
 * Copyright 2020-2023 by XGBoost Contributors
 */
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>

#include <algorithm>
#include <ctgmath>
#include <cstdint>  // uint32_t
#include <limits>

#include "../../common/device_helpers.cuh"
#include "../../common/deterministic.cuh"
#include "../../common/device_helpers.cuh"
#include "../../data/ellpack_page.cuh"
#include "histogram.cuh"
#include "row_partitioner.cuh"

@@ -83,7 +83,8 @@ GradientQuantiser::GradientQuantiser(common::Span<GradientPair const> gpair) {
*/
to_floating_point_ =
histogram_rounding /
T(IntT(1) << (sizeof(typename GradientSumT::ValueT) * 8 - 2));  // keep 1 for sign bit
static_cast<T>(static_cast<IntT>(1)
<< (sizeof(typename GradientSumT::ValueT) * 8 - 2));  // keep 1 for sign bit
/**
 * Factor for converting gradients from floating-point to fixed-point. For
 * f64:

@@ -93,8 +94,8 @@ GradientQuantiser::GradientQuantiser(common::Span<GradientPair const> gpair) {
 * rounding is calcuated as exp(m), see the rounding factor calcuation for
 * details.
 */
to_fixed_point_ =
GradientSumT(T(1) / to_floating_point_.GetGrad(), T(1) / to_floating_point_.GetHess());
to_fixed_point_ = GradientSumT(static_cast<T>(1) / to_floating_point_.GetGrad(),
static_cast<T>(1) / to_floating_point_.GetHess());
}

@@ -153,7 +154,8 @@ class HistogramAgent {
d_gpair_(d_gpair) {}
__device__ void ProcessPartialTileShared(std::size_t offset) {
for (std::size_t idx = offset + threadIdx.x;
idx < min(offset + kBlockThreads * kItemsPerTile, n_elements_); idx += kBlockThreads) {
idx < std::min(offset + kBlockThreads * kItemsPerTile, n_elements_);
idx += kBlockThreads) {
int ridx = d_ridx_[idx / feature_stride_];
int gidx =
matrix_

@@ -295,9 +297,8 @@ void BuildGradientHistogram(CUDAContext const* ctx, EllpackDeviceAccessor const&

// Allocate number of blocks such that each block has about kMinItemsPerBlock work
// Up to a maximum where the device is saturated
grid_size =
min(grid_size,
unsigned(common::DivRoundUp(items_per_group, kMinItemsPerBlock)));
grid_size = std::min(grid_size, static_cast<std::uint32_t>(
common::DivRoundUp(items_per_group, kMinItemsPerBlock)));

dh::LaunchKernel {dim3(grid_size, num_groups), static_cast<uint32_t>(kBlockThreads), smem_size,
ctx->Stream()} (kernel, matrix, feature_groups, d_ridx, histogram.data(),

@@ -130,7 +130,7 @@ void SortPositionBatch(common::Span<const PerNodeData<OpDataT>> d_batch_info,
std::size_t item_idx;
AssignBatch(batch_info_itr, idx, &batch_idx, &item_idx);
auto op_res = op(ridx[item_idx], batch_info_itr[batch_idx].data);
return IndexFlagTuple{bst_uint(item_idx), op_res, batch_idx, op_res};
return IndexFlagTuple{static_cast<bst_uint>(item_idx), op_res, batch_idx, op_res};
});
size_t temp_bytes = 0;
if (tmp->empty()) {

@@ -1,10 +1,11 @@
/*!
 * Copyright 2021-2022 by XGBoost Contributors
/**
 * Copyright 2021-2023 by XGBoost Contributors
 */
#ifndef XGBOOST_TREE_HIST_EVALUATE_SPLITS_H_
#define XGBOOST_TREE_HIST_EVALUATE_SPLITS_H_

#include <algorithm>
#include <cstddef>  // for size_t
#include <limits>
#include <memory>
#include <numeric>

@@ -367,7 +368,7 @@ class HistEvaluator {
std::copy_n(entries.cbegin(), num_entries, buffer.begin() + num_entries * rank);
collective::Allgather(buffer.data(), buffer.size() * sizeof(ExpandEntry));
for (auto worker = 0; worker < world; ++worker) {
for (auto nidx_in_set = 0; nidx_in_set < entries.size(); ++nidx_in_set) {
for (std::size_t nidx_in_set = 0; nidx_in_set < entries.size(); ++nidx_in_set) {
entries[nidx_in_set].split.Update(buffer[worker * num_entries + nidx_in_set].split);
}
}

@@ -1,5 +1,5 @@
/*!
 * Copyright 2014-2021 by Contributors
/**
 * Copyright 2014-2023 by XGBoost Contributors
 * \file param.h
 * \brief training parameters, statistics used to support tree construction.
 * \author Tianqi Chen

@@ -238,9 +238,8 @@ XGBOOST_DEVICE inline static T1 ThresholdL1(T1 w, T2 alpha) {

// calculate the cost of loss function
template <typename TrainingParams, typename T>
XGBOOST_DEVICE inline T CalcGainGivenWeight(const TrainingParams &p,
T sum_grad, T sum_hess, T w) {
return -(T(2.0) * sum_grad * w + (sum_hess + p.reg_lambda) * common::Sqr(w));
XGBOOST_DEVICE inline T CalcGainGivenWeight(const TrainingParams &p, T sum_grad, T sum_hess, T w) {
return -(static_cast<T>(2.0) * sum_grad * w + (sum_hess + p.reg_lambda) * common::Sqr(w));
}

// calculate weight given the statistics

@@ -261,7 +260,7 @@ XGBOOST_DEVICE inline T CalcWeight(const TrainingParams &p, T sum_grad,
template <typename TrainingParams, typename T>
XGBOOST_DEVICE inline T CalcGain(const TrainingParams &p, T sum_grad, T sum_hess) {
if (sum_hess < p.min_child_weight || sum_hess <= 0.0) {
return T(0.0);
return static_cast<T>(0.0);
}
if (p.max_delta_step == 0.0f) {
if (p.reg_alpha == 0.0f) {

@@ -1069,8 +1069,8 @@ bool LoadModelImpl(Json const& in, TreeParam* param, std::vector<RTreeNodeStat>*
split_types = std::remove_reference_t<decltype(split_types)>(n_nodes);
split_categories_segments = std::remove_reference_t<decltype(split_categories_segments)>(n_nodes);

static_assert(std::is_integral<decltype(GetElem<Integer>(lefts, 0))>::value, "");
static_assert(std::is_floating_point<decltype(GetElem<Number>(loss_changes, 0))>::value, "");
static_assert(std::is_integral<decltype(GetElem<Integer>(lefts, 0))>::value);
static_assert(std::is_floating_point<decltype(GetElem<Number>(loss_changes, 0))>::value);
CHECK_EQ(n_nodes, split_categories_segments.size());

// Set node

@@ -160,11 +160,11 @@ class DeviceHistogramStorage {
if (nidx_map_.find(nidx) != nidx_map_.cend()) {
// Fetch from normal cache
auto ptr = data_.data().get() + nidx_map_.at(nidx);
return common::Span<GradientSumT>(reinterpret_cast<GradientSumT*>(ptr), n_bins_);
return {reinterpret_cast<GradientSumT*>(ptr), static_cast<std::size_t>(n_bins_)};
} else {
// Fetch from overflow
auto ptr = overflow_.data().get() + overflow_nidx_map_.at(nidx);
return common::Span<GradientSumT>(reinterpret_cast<GradientSumT*>(ptr), n_bins_);
return {reinterpret_cast<GradientSumT*>(ptr), static_cast<std::size_t>(n_bins_)};
}
}
};

@@ -334,8 +334,8 @@ struct GPUHistMakerDevice {
}
bst_feature_t max_active_features = 0;
for (auto input : h_node_inputs) {
max_active_features = std::max(max_active_features,
bst_feature_t(input.feature_set.size()));
max_active_features =
std::max(max_active_features, static_cast<bst_feature_t>(input.feature_set.size()));
}
dh::safe_cuda(cudaMemcpyAsync(
d_node_inputs.data().get(), h_node_inputs.data(),

@@ -22,11 +22,11 @@ steps:
queue: linux-amd64-cpu
- wait
#### -------- BUILD --------
# - label: ":console: Run clang-tidy"
#   command: "tests/buildkite/run-clang-tidy.sh"
#   key: run-clang-tidy
#   agents:
#     queue: linux-amd64-cpu
- label: ":console: Run clang-tidy"
  command: "tests/buildkite/run-clang-tidy.sh"
  key: run-clang-tidy
  agents:
    queue: linux-amd64-cpu
- wait
- label: ":console: Build CPU"
  command: "tests/buildkite/build-cpu.sh"

@@ -1,5 +1,5 @@
ARG CUDA_VERSION_ARG
FROM nvidia/cuda:$CUDA_VERSION_ARG-devel-ubuntu18.04
FROM nvidia/cuda:$CUDA_VERSION_ARG-devel-ubuntu20.04
ARG CUDA_VERSION_ARG

# Environment

@@ -7,21 +7,21 @@ ENV DEBIAN_FRONTEND noninteractive

# Install all basic requirements
RUN \
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub && \
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/3bf863cc.pub && \
apt-get update && \
apt-get install -y tar unzip wget git build-essential python3 python3-pip software-properties-common \
apt-transport-https ca-certificates gnupg-agent && \
wget -nv -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - && \
add-apt-repository -u 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-11 main' && \
add-apt-repository -u 'deb http://apt.llvm.org/focal/ llvm-toolchain-focal-15 main' && \
apt-get update && \
apt-get install -y llvm-11 clang-tidy-11 clang-11 && \
apt-get install -y llvm-15 clang-tidy-15 clang-15 libomp-15-dev && \
wget -nv -nc https://cmake.org/files/v3.18/cmake-3.18.0-Linux-x86_64.sh --no-check-certificate && \
bash cmake-3.18.0-Linux-x86_64.sh --skip-license --prefix=/usr

# Set default clang-tidy version
RUN \
update-alternatives --install /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-11 100 && \
update-alternatives --install /usr/bin/clang clang /usr/bin/clang-11 100
update-alternatives --install /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-15 100 && \
update-alternatives --install /usr/bin/clang clang /usr/bin/clang-15 100

# Install Python packages
RUN \

@@ -109,6 +109,10 @@ class ClangTidy(object):
continue
elif components[i] == '-rdynamic':
continue
elif components[i] == "-Xfatbin=-compress-all":
continue
elif components[i] == "-forward-unknown-to-host-compiler":
continue
elif (components[i] == '-x' and
components[i+1] == 'cu'):
# -x cu -> -x cuda

@@ -267,7 +267,7 @@ TEST(CAPI, DMatrixSetFeatureName) {
}

char const* feat_types [] {"i", "q"};
static_assert(sizeof(feat_types)/ sizeof(feat_types[0]) == kCols, "");
static_assert(sizeof(feat_types) / sizeof(feat_types[0]) == kCols);
XGDMatrixSetStrFeatureInfo(handle, "feature_type", feat_types, kCols);
char const **c_out_types;
XGDMatrixGetStrFeatureInfo(handle, u8"feature_type", &out_len,

@@ -128,7 +128,7 @@ TEST(Ryu, Regression) {
TestRyu("2E2", 200.0f);
TestRyu("3.3554432E7", 3.3554432E7f);

static_assert(1.1920929E-7f == std::numeric_limits<float>::epsilon(), "");
static_assert(1.1920929E-7f == std::numeric_limits<float>::epsilon());
TestRyu("1.1920929E-7", std::numeric_limits<float>::epsilon());
}

@@ -43,8 +43,8 @@ TEST(GroupData, ParallelGroupBuilder) {
builder2.Push(2, Entry(0, 4), 0);
builder2.Push(2, Entry(1, 5), 0);

expected_data.emplace_back(Entry(0, 4));
expected_data.emplace_back(Entry(1, 5));
expected_data.emplace_back(0, 4);
expected_data.emplace_back(1, 5);
expected_offsets.emplace_back(6);

EXPECT_EQ(data, expected_data);

@@ -143,7 +143,7 @@ void TestMixedSketch() {
size_t n_samples = 1000, n_features = 2, n_categories = 3;
std::vector<float> data(n_samples * n_features);
SimpleLCG gen;
SimpleRealUniformDistribution<float> cat_d{0.0f, float(n_categories)};
SimpleRealUniformDistribution<float> cat_d{0.0f, static_cast<float>(n_categories)};
SimpleRealUniformDistribution<float> num_d{0.0f, 3.0f};
for (size_t i = 0; i < n_samples * n_features; ++i) {
if (i % 2 == 0) {

@@ -13,9 +13,9 @@ class NotCopyConstructible {
NotCopyConstructible(NotCopyConstructible&& that) = default;
};
static_assert(
!std::is_trivially_copy_constructible<NotCopyConstructible>::value, "");
!std::is_trivially_copy_constructible<NotCopyConstructible>::value);
static_assert(
!std::is_trivially_copy_assignable<NotCopyConstructible>::value, "");
!std::is_trivially_copy_assignable<NotCopyConstructible>::value);

class ForIntrusivePtrTest {
public:

@@ -1,5 +1,5 @@
/*!
 * Copyright 2021 by XGBoost Contributors
/**
 * Copyright 2021-2023 by XGBoost Contributors
 */
#include <gtest/gtest.h>
#include <xgboost/context.h>

@@ -108,7 +108,7 @@ TEST(Linalg, TensorView) {
// for Slice.
auto t = MakeTensorView(data, {2, 3, 4}, 0);
auto s = t.Slice(1, 2, All());
static_assert(decltype(s)::kDimension == 1, "");
static_assert(decltype(s)::kDimension == 1);
}
{
auto t = MakeTensorView(data, {2, 3, 4}, 0);

@@ -121,7 +121,7 @@ TEST(Linalg, TensorView) {
// range slice
auto t = MakeTensorView(data, {2, 3, 4}, 0);
auto s = t.Slice(linalg::All(), linalg::Range(1, 3), 2);
static_assert(decltype(s)::kDimension == 2, "");
static_assert(decltype(s)::kDimension == 2);
std::vector<double> sol{6, 10, 18, 22};
auto k = 0;
for (size_t i = 0; i < s.Shape(0); ++i) {

@@ -136,7 +136,7 @@ TEST(Linalg, TensorView) {
// range slice
auto t = MakeTensorView(data, {2, 3, 4}, 0);
auto s = t.Slice(1, linalg::Range(1, 3), linalg::Range(1, 3));
static_assert(decltype(s)::kDimension == 2, "");
static_assert(decltype(s)::kDimension == 2);
std::vector<double> sol{17, 18, 21, 22};
auto k = 0;
for (size_t i = 0; i < s.Shape(0); ++i) {

@@ -151,7 +151,7 @@ TEST(Linalg, TensorView) {
// same as no slice.
auto t = MakeTensorView(data, {2, 3, 4}, 0);
auto s = t.Slice(linalg::All(), linalg::Range(0, 3), linalg::Range(0, 4));
static_assert(decltype(s)::kDimension == 3, "");
static_assert(decltype(s)::kDimension == 3);
auto all = t.Slice(linalg::All(), linalg::All(), linalg::All());
for (size_t i = 0; i < s.Shape(0); ++i) {
for (size_t j = 0; j < s.Shape(1); ++j) {

@@ -1,5 +1,5 @@
/*!
 * Copyright 2021-2022 by XGBoost Contributors
/**
 * Copyright 2021-2023 by XGBoost Contributors
 */
#include <gtest/gtest.h>

@@ -60,7 +60,7 @@ void TestSlice() {
dh::LaunchN(1, [=] __device__(size_t) {
auto s = t.Slice(linalg::All(), linalg::Range(0, 3), linalg::Range(0, 4));
auto all = t.Slice(linalg::All(), linalg::All(), linalg::All());
static_assert(decltype(s)::kDimension == 3, "");
static_assert(decltype(s)::kDimension == 3);
for (size_t i = 0; i < s.Shape(0); ++i) {
for (size_t j = 0; j < s.Shape(1); ++j) {
for (size_t k = 0; k < s.Shape(2); ++k) {

@@ -522,9 +522,9 @@ TEST(Span, Empty) {
TEST(SpanDeathTest, Empty) {
std::vector<float> data(1, 0);
ASSERT_TRUE(data.data());
Span<float> s{data.data(), Span<float>::index_type(0)};  // ok to define 0 size span.
// ok to define 0 size span.
Span<float> s{data.data(), static_cast<Span<float>::index_type>(0)};
EXPECT_DEATH(s[0], "");  // not ok to use it.
}

}  // namespace common
}  // namespace xgboost

@@ -119,13 +119,13 @@ TEST(ArrayInterface, TrivialDim) {
}

TEST(ArrayInterface, ToDType) {
static_assert(ToDType<float>::kType == ArrayInterfaceHandler::kF4, "");
static_assert(ToDType<double>::kType == ArrayInterfaceHandler::kF8, "");
static_assert(ToDType<float>::kType == ArrayInterfaceHandler::kF4);
static_assert(ToDType<double>::kType == ArrayInterfaceHandler::kF8);

static_assert(ToDType<uint32_t>::kType == ArrayInterfaceHandler::kU4, "");
static_assert(ToDType<uint64_t>::kType == ArrayInterfaceHandler::kU8, "");
static_assert(ToDType<uint32_t>::kType == ArrayInterfaceHandler::kU4);
static_assert(ToDType<uint64_t>::kType == ArrayInterfaceHandler::kU8);

static_assert(ToDType<int32_t>::kType == ArrayInterfaceHandler::kI4, "");
static_assert(ToDType<int64_t>::kType == ArrayInterfaceHandler::kI8, "");
static_assert(ToDType<int32_t>::kType == ArrayInterfaceHandler::kI4);
static_assert(ToDType<int64_t>::kType == ArrayInterfaceHandler::kI8);
}
}  // namespace xgboost

@@ -21,7 +21,7 @@ TEST(SparsePage, PushCSC) {

offset = {0, 1, 4};
for (size_t i = 0; i < offset.back(); ++i) {
data.emplace_back(Entry(i, 0.1f));
data.emplace_back(i, 0.1f);
}

SparsePage other;

@@ -189,8 +189,8 @@ TEST(SimpleCSRSource, FromColumnarSparse) {
auto& mask = column_bitfields[0];
mask.resize(8);

for (size_t j = 0; j < mask.size(); ++j) {
mask[j] = ~0;
for (auto && j : mask) {
j = ~0;
}
// the 2^th entry of first column is invalid
// [0 0 0 0 0 1 0 0]

@@ -201,8 +201,8 @@ TEST(SimpleCSRSource, FromColumnarSparse) {
auto& mask = column_bitfields[1];
mask.resize(8);

for (size_t j = 0; j < mask.size(); ++j) {
mask[j] = ~0;
for (auto && j : mask) {
j = ~0;
}
// the 19^th entry of second column is invalid
// [~0~], [~0~], [0 0 0 0 1 0 0 0]

@@ -96,7 +96,7 @@ void TestRetainPage() {

// make sure it's const and the caller can not modify the content of page.
for (auto& page : m->GetBatches<Page>()) {
static_assert(std::is_const<std::remove_reference_t<decltype(page)>>::value, "");
static_assert(std::is_const<std::remove_reference_t<decltype(page)>>::value);
}
}

@@ -1,5 +1,6 @@
// Copyright by Contributors

/**
 * Copyright 2019-2023 by XGBoost Contributors
 */
#include "../../../src/common/compressed_iterator.h"
#include "../../../src/data/ellpack_page.cuh"
#include "../../../src/data/sparse_page_dmatrix.h"

@@ -69,7 +70,7 @@ TEST(SparsePageDMatrix, RetainEllpackPage) {
std::vector<std::shared_ptr<EllpackPage const>> iterators;
for (auto it = begin; it != end; ++it) {
iterators.push_back(it.Page());
gidx_buffers.emplace_back(HostDeviceVector<common::CompressedByteT>{});
gidx_buffers.emplace_back();
gidx_buffers.back().Resize((*it).Impl()->gidx_buffer.Size());
gidx_buffers.back().Copy((*it).Impl()->gidx_buffer);
}

@@ -87,7 +88,7 @@ TEST(SparsePageDMatrix, RetainEllpackPage) {

// make sure it's const and the caller can not modify the content of page.
for (auto& page : m->GetBatches<EllpackPage>({0, 32})) {
static_assert(std::is_const<std::remove_reference_t<decltype(page)>>::value, "");
static_assert(std::is_const<std::remove_reference_t<decltype(page)>>::value);
}

// The above iteration clears out all references inside DMatrix.

@@ -186,7 +186,7 @@ SimpleLCG::StateType SimpleLCG::operator()() {
SimpleLCG::StateType SimpleLCG::Min() const { return min(); }
SimpleLCG::StateType SimpleLCG::Max() const { return max(); }
// Make sure it's compile time constant.
static_assert(SimpleLCG::max() - SimpleLCG::min(), "");
static_assert(SimpleLCG::max() - SimpleLCG::min());

void RandomDataGenerator::GenerateDense(HostDeviceVector<float> *out) const {
xgboost::SimpleRealUniformDistribution<bst_float> dist(lower_, upper_);

@@ -46,7 +46,7 @@ class GradientBooster;

template <typename Float>
Float RelError(Float l, Float r) {
static_assert(std::is_floating_point<Float>::value, "");
static_assert(std::is_floating_point<Float>::value);
return std::abs(1.0f - l / r);
}

@@ -164,7 +164,7 @@ class SimpleRealUniformDistribution {
ResultT sum_value = 0, r_k = 1;

for (size_t k = m; k != 0; --k) {
sum_value += ResultT((*rng)() - rng->Min()) * r_k;
sum_value += static_cast<ResultT>((*rng)() - rng->Min()) * r_k;
r_k *= r;
}

@@ -191,12 +191,10 @@ Json GetArrayInterface(HostDeviceVector<T> *storage, size_t rows, size_t cols) {
Json array_interface{Object()};
array_interface["data"] = std::vector<Json>(2);
if (storage->DeviceCanRead()) {
array_interface["data"][0] =
Integer(reinterpret_cast<int64_t>(storage->ConstDevicePointer()));
array_interface["data"][0] = Integer{reinterpret_cast<int64_t>(storage->ConstDevicePointer())};
array_interface["stream"] = nullptr;
} else {
array_interface["data"][0] =
Integer(reinterpret_cast<int64_t>(storage->ConstHostPointer()));
array_interface["data"][0] = Integer{reinterpret_cast<int64_t>(storage->ConstHostPointer())};
}
array_interface["data"][1] = Boolean(false);

@@ -157,7 +157,7 @@ TEST(Objective, DeclareUnifiedTest(PoissonRegressionGPair)) {
ObjFunction::Create("count:poisson", &ctx)
};

args.emplace_back(std::make_pair("max_delta_step", "0.1f"));
args.emplace_back("max_delta_step", "0.1f");
obj->Configure(args);

CheckObjFunction(obj,

@@ -259,7 +259,7 @@ TEST(Objective, DeclareUnifiedTest(TweedieRegressionGPair)) {
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj{ObjFunction::Create("reg:tweedie", &ctx)};

args.emplace_back(std::make_pair("tweedie_variance_power", "1.1f"));
args.emplace_back("tweedie_variance_power", "1.1f");
obj->Configure(args);

CheckObjFunction(obj,

@@ -1,3 +1,6 @@
/**
 * Copyright 2019-2023 by XGBoost Contributors
 */
#include <gtest/gtest.h>
#include <xgboost/tree_model.h>
#include <xgboost/tree_updater.h>

@@ -18,7 +21,7 @@ std::unique_ptr<HostDeviceVector<GradientPair>> GenerateGradients(std::size_t ro
xgboost::SimpleLCG gen;
xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);

for (auto i = 0; i < rows; ++i) {
for (std::size_t i = 0; i < rows; ++i) {
auto grad = dist(&gen);
auto hess = dist(&gen);
h_gradients[i] = GradientPair{grad, hess};

@@ -19,10 +19,8 @@ TEST(Updater, Prune) {
int constexpr kCols = 16;

std::vector<std::pair<std::string, std::string>> cfg;
cfg.emplace_back(std::pair<std::string, std::string>("num_feature",
std::to_string(kCols)));
cfg.emplace_back(std::pair<std::string, std::string>(
"min_split_loss", "10"));
cfg.emplace_back("num_feature", std::to_string(kCols));
cfg.emplace_back("min_split_loss", "10");

// These data are just place holders.
HostDeviceVector<GradientPair> gpair =

@@ -73,7 +71,7 @@ TEST(Updater, Prune) {
0, 0.5f, true, 0.3, 0.4, 0.5,
/*loss_chg=*/19.0f, 0.0f,
/*left_sum=*/0.0f, /*right_sum=*/0.0f);
cfg.emplace_back(std::make_pair("max_depth", "1"));
cfg.emplace_back("max_depth", "1");
pruner->Configure(cfg);
pruner->Update(&gpair, p_dmat.get(), position, trees);

@@ -83,7 +81,7 @@ TEST(Updater, Prune) {
0, 0.5f, true, 0.3, 0.4, 0.5,
/*loss_chg=*/18.0f, 0.0f,
/*left_sum=*/0.0f, /*right_sum=*/0.0f);
cfg.emplace_back(std::make_pair("min_split_loss", "0"));
cfg.emplace_back("min_split_loss", "0");
pruner->Configure(cfg);
pruner->Update(&gpair, p_dmat.get(), position, trees);
ASSERT_EQ(tree.NumExtraNodes(), 2);