Restore clang tidy test. (#8861)
--- a/include/xgboost/cache.h
+++ b/include/xgboost/cache.h
@@ -4,7 +4,7 @@
 #ifndef XGBOOST_CACHE_H_
 #define XGBOOST_CACHE_H_
 
-#include <xgboost/logging.h>  // CHECK_EQ
+#include <xgboost/logging.h>  // for CHECK_EQ, CHECK
 
 #include <cstddef>  // for size_t
 #include <memory>   // for weak_ptr, shared_ptr, make_shared
@@ -12,6 +12,7 @@
 #include <queue>          // for queue
 #include <thread>         // for thread
 #include <unordered_map>  // for unordered_map
+#include <utility>        // for move
 #include <vector>         // for vector
 
 namespace xgboost {
@@ -32,6 +33,8 @@ class DMatrixCache {
 
     CacheT const& Value() const { return *value; }
     CacheT& Value() { return *value; }
+
+    Item(std::shared_ptr<DMatrix> m, std::shared_ptr<CacheT> v) : ref{m}, value{std::move(v)} {}
   };
 
   static constexpr std::size_t DefaultSize() { return 32; }
@@ -141,7 +144,7 @@ class DMatrixCache {
     auto it = container_.find(key);
     if (it == container_.cend()) {
       // after the new DMatrix, cache size is at most max_size
-      container_[key] = {m, std::make_shared<CacheT>(args...)};
+      container_.emplace(key, Item{m, std::make_shared<CacheT>(args...)});
       queue_.emplace(key);
     }
     return container_.at(key).value;
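The cache.h hunks are one change seen from both ends: the new `Item` constructor exists so that the insertion can switch from `container_[key] = {...}`, which default-constructs an `Item` and then assigns over it, to `emplace`, which builds it in place. A minimal sketch of the difference, using a hypothetical single-member `Item`:

```cpp
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>

struct Item {
  std::shared_ptr<std::string> value;
  // With an explicit constructor, Item no longer needs to be
  // default-constructible just to be stored in the map.
  explicit Item(std::shared_ptr<std::string> v) : value{std::move(v)} {}
};

int main() {
  std::unordered_map<int, Item> cache;
  // cache[0] = Item{...};  // ill-formed: operator[] must default-construct first
  cache.emplace(0, Item{std::make_shared<std::string>("x")});  // constructs in place
  return cache.at(0).value->size() == 1 ? 0 : 1;
}
```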
--- a/include/xgboost/json.h
+++ b/include/xgboost/json.h
@@ -1,5 +1,5 @@
 /**
- * Copyright by XGBoost Contributors 2019-2023
+ * Copyright 2019-2023 by XGBoost Contributors
  */
 #ifndef XGBOOST_JSON_H_
 #define XGBOOST_JSON_H_
@@ -372,7 +372,7 @@ class Json {
   /*! \brief Use your own JsonWriter. */
   static void Dump(Json json, JsonWriter* writer);
 
-  Json() : ptr_{new JsonNull} {}
+  Json() = default;
 
   // number
   explicit Json(JsonNumber number) : ptr_{new JsonNumber(std::move(number))} {}
@@ -462,7 +462,7 @@ class Json {
   IntrusivePtr<Value> const& Ptr() const { return ptr_; }
 
  private:
-  IntrusivePtr<Value> ptr_;
+  IntrusivePtr<Value> ptr_{new JsonNull};
 };
 
 /**
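The two `Json` hunks are likewise a single refactor: the initialization moves from the constructor body into a default member initializer, which lets the default constructor be declared `= default`. A stripped-down sketch of the pattern, with hypothetical stand-in types:

```cpp
#include <memory>

struct Value {};
struct Null : Value {};

// Before: a handwritten constructor repeating what an initializer can say.
class JsonBefore {
 public:
  JsonBefore() : ptr_{std::make_shared<Null>()} {}

 private:
  std::shared_ptr<Value> ptr_;
};

// After: the member carries its own default; any constructor that does not
// mention ptr_ picks it up, and the default constructor can be defaulted.
class JsonAfter {
 public:
  JsonAfter() = default;

 private:
  std::shared_ptr<Value> ptr_{std::make_shared<Null>()};
};

int main() {
  JsonBefore a;
  JsonAfter b;
  (void)a;
  (void)b;
  return 0;
}
```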
--- a/include/xgboost/linalg.h
+++ b/include/xgboost/linalg.h
@@ -22,13 +22,13 @@ namespace detail {
 // static_cast and std::to_string.
 template <typename Char, std::enable_if_t<std::is_signed<Char>::value>* = nullptr>
 std::string CharToStr(Char c) {
-  static_assert(std::is_same<Char, char>::value, "");
+  static_assert(std::is_same<Char, char>::value);
   return std::string{c};
 }
 
 template <typename Char, std::enable_if_t<!std::is_signed<Char>::value>* = nullptr>
 std::string CharToStr(Char c) {
-  static_assert(std::is_same<Char, char>::value, "");
+  static_assert(std::is_same<Char, char>::value);
   return (c <= static_cast<char>(127) ? std::string{c} : std::to_string(c));
 }
 }  // namespace detail
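Most of the churn in this header is the same mechanical fix: since C++17 the message argument of `static_assert` is optional, so the empty `""` adds nothing and presumably trips a lint. Both forms below are equivalent under C++17:

```cpp
// The empty message is pure noise; C++17 lets it be dropped entirely.
static_assert(sizeof(char) == 1, "");  // old style with empty message
static_assert(sizeof(char) == 1);     // C++17 style used throughout this commit
```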
@@ -52,14 +52,14 @@ struct ArrayInterfaceHandler {
 
 template <size_t dim, typename S, typename Head, size_t D>
 constexpr size_t Offset(S (&strides)[D], size_t n, Head head) {
-  static_assert(dim < D, "");
+  static_assert(dim < D);
   return n + head * strides[dim];
 }
 
 template <size_t dim, typename S, size_t D, typename Head, typename... Tail>
 constexpr std::enable_if_t<sizeof...(Tail) != 0, size_t> Offset(S (&strides)[D], size_t n,
                                                                 Head head, Tail &&...rest) {
-  static_assert(dim < D, "");
+  static_assert(dim < D);
   return Offset<dim + 1>(strides, n + (head * strides[dim]), std::forward<Tail>(rest)...);
 }
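For context, `Offset` folds a multi-dimensional index against the strides array, one dimension per recursive instantiation, yielding the flat offset `sum(index[k] * stride[k])`. A small worked example of the same arithmetic, with values chosen purely for illustration:

```cpp
#include <cstddef>

int main() {
  // Strides of a 3x4 row-major matrix: advancing one row skips 4 elements.
  std::size_t strides[] = {4, 1};
  // Element (2, 3) lives at flat offset 2*4 + 3*1 = 11; the variadic
  // Offset above unrolls exactly this sum at compile time.
  std::size_t offset = 2 * strides[0] + 3 * strides[1];
  return offset == 11 ? 0 : 1;
}
```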
@@ -193,14 +193,14 @@ LINALG_HD auto UnravelImpl(I idx, common::Span<size_t const, D> shape) {
 
 template <size_t dim, typename I, int32_t D>
 void ReshapeImpl(size_t (&out_shape)[D], I s) {
-  static_assert(dim < D, "");
+  static_assert(dim < D);
   out_shape[dim] = s;
 }
 
 template <size_t dim, int32_t D, typename... S, typename I,
           std::enable_if_t<sizeof...(S) != 0> * = nullptr>
 void ReshapeImpl(size_t (&out_shape)[D], I &&s, S &&...rest) {
-  static_assert(dim < D, "");
+  static_assert(dim < D);
   out_shape[dim] = s;
   ReshapeImpl<dim + 1>(out_shape, std::forward<S>(rest)...);
 }
@@ -230,7 +230,8 @@ struct Conjunction : std::true_type {};
 template <class B1>
 struct Conjunction<B1> : B1 {};
 template <class B1, class... Bn>
-struct Conjunction<B1, Bn...> : std::conditional_t<bool(B1::value), Conjunction<Bn...>, B1> {};
+struct Conjunction<B1, Bn...>
+    : std::conditional_t<static_cast<bool>(B1::value), Conjunction<Bn...>, B1> {};
 
 template <typename... Index>
 using IsAllIntegral = Conjunction<std::is_integral<std::remove_reference_t<Index>>...>;
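`Conjunction` mirrors `std::conjunction`: instantiation short-circuits, deriving from the first trait whose `value` is false instead of evaluating the rest. Only the cast style changes in this hunk. A self-contained sketch of the trait and the `IsAllIntegral` alias built on it:

```cpp
#include <type_traits>

template <class...>
struct Conjunction : std::true_type {};
template <class B1>
struct Conjunction<B1> : B1 {};
template <class B1, class... Bn>
struct Conjunction<B1, Bn...>
    : std::conditional_t<static_cast<bool>(B1::value), Conjunction<Bn...>, B1> {};

template <typename... Index>
using IsAllIntegral = Conjunction<std::is_integral<std::remove_reference_t<Index>>...>;

static_assert(IsAllIntegral<int, long, unsigned&>::value);  // references are stripped first
static_assert(!IsAllIntegral<int, double>::value);          // double ends the chain early
```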
@@ -291,8 +292,8 @@ class TensorView {
   template <size_t old_dim, size_t new_dim, int32_t D, typename I>
   LINALG_HD size_t MakeSliceDim(size_t new_shape[D], size_t new_stride[D],
                                 detail::RangeTag<I> &&range) const {
-    static_assert(new_dim < D, "");
-    static_assert(old_dim < kDim, "");
+    static_assert(new_dim < D);
+    static_assert(old_dim < kDim);
     new_stride[new_dim] = stride_[old_dim];
     new_shape[new_dim] = range.Size();
     assert(static_cast<decltype(shape_[old_dim])>(range.end) <= shape_[old_dim]);
@@ -306,8 +307,8 @@ class TensorView {
   template <size_t old_dim, size_t new_dim, int32_t D, typename I, typename... S>
   LINALG_HD size_t MakeSliceDim(size_t new_shape[D], size_t new_stride[D],
                                 detail::RangeTag<I> &&range, S &&...slices) const {
-    static_assert(new_dim < D, "");
-    static_assert(old_dim < kDim, "");
+    static_assert(new_dim < D);
+    static_assert(old_dim < kDim);
     new_stride[new_dim] = stride_[old_dim];
     new_shape[new_dim] = range.Size();
     assert(static_cast<decltype(shape_[old_dim])>(range.end) <= shape_[old_dim]);
@@ -320,8 +321,8 @@ class TensorView {
 
   template <size_t old_dim, size_t new_dim, int32_t D>
   LINALG_HD size_t MakeSliceDim(size_t new_shape[D], size_t new_stride[D], detail::AllTag) const {
-    static_assert(new_dim < D, "");
-    static_assert(old_dim < kDim, "");
+    static_assert(new_dim < D);
+    static_assert(old_dim < kDim);
     new_stride[new_dim] = stride_[old_dim];
     new_shape[new_dim] = shape_[old_dim];
     return 0;
@@ -332,8 +333,8 @@ class TensorView {
   template <size_t old_dim, size_t new_dim, int32_t D, typename... S>
   LINALG_HD size_t MakeSliceDim(size_t new_shape[D], size_t new_stride[D], detail::AllTag,
                                 S &&...slices) const {
-    static_assert(new_dim < D, "");
-    static_assert(old_dim < kDim, "");
+    static_assert(new_dim < D);
+    static_assert(old_dim < kDim);
     new_stride[new_dim] = stride_[old_dim];
     new_shape[new_dim] = shape_[old_dim];
     return MakeSliceDim<old_dim + 1, new_dim + 1, D>(new_shape, new_stride,
@@ -343,7 +344,7 @@ class TensorView {
   template <size_t old_dim, size_t new_dim, int32_t D, typename Index>
   LINALG_HD size_t MakeSliceDim(DMLC_ATTRIBUTE_UNUSED size_t new_shape[D],
                                 DMLC_ATTRIBUTE_UNUSED size_t new_stride[D], Index i) const {
-    static_assert(old_dim < kDim, "");
+    static_assert(old_dim < kDim);
     return stride_[old_dim] * i;
   }
   /**
@@ -352,7 +353,7 @@ class TensorView {
   template <size_t old_dim, size_t new_dim, int32_t D, typename Index, typename... S>
   LINALG_HD std::enable_if_t<std::is_integral<Index>::value, size_t> MakeSliceDim(
       size_t new_shape[D], size_t new_stride[D], Index i, S &&...slices) const {
-    static_assert(old_dim < kDim, "");
+    static_assert(old_dim < kDim);
     auto offset = stride_[old_dim] * i;
     auto res =
         MakeSliceDim<old_dim + 1, new_dim, D>(new_shape, new_stride, std::forward<S>(slices)...);
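All six `MakeSliceDim` overloads above receive the same `static_assert` cleanup. As background on what they compute: a `RangeTag` or `AllTag` argument keeps a dimension, copying its stride and narrowing or keeping its shape, while an integral index drops the dimension and contributes `stride_[old_dim] * i` to the data offset. A sketch of that arithmetic for slicing one row out of a 3x4 view (the `Slice` call in the comment is illustrative, not taken from this diff):

```cpp
#include <cstddef>

int main() {
  // A 3x4 row-major view: shape {3, 4}, strides {4, 1}.
  std::size_t shape[] = {3, 4};
  std::size_t stride[] = {4, 1};
  // Something like view.Slice(1, linalg::All()) yields a 1-D view of row 1:
  // the integral index drops dimension 0 and offsets into the data, while
  // the All tag keeps dimension 1 unchanged.
  std::size_t offset = stride[0] * 1;   // 4 elements past the start
  std::size_t row_shape = shape[1];     // 4 elements in the row
  std::size_t row_stride = stride[1];   // contiguous within the row
  return (offset == 4 && row_shape == 4 && row_stride == 1) ? 0 : 1;
}
```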
@@ -501,7 +502,7 @@ class TensorView {
    */
   LINALG_HD bool CContiguous() const {
     StrideT stride;
-    static_assert(std::is_same<decltype(stride), decltype(stride_)>::value, "");
+    static_assert(std::is_same<decltype(stride), decltype(stride_)>::value);
     // It's contiguous if the stride can be calculated from shape.
     detail::CalcStride(shape_, stride);
     return common::Span<size_t const, kDim>{stride_} == common::Span<size_t const, kDim>{stride};
@@ -511,7 +512,7 @@ class TensorView {
    */
   LINALG_HD bool FContiguous() const {
     StrideT stride;
-    static_assert(std::is_same<decltype(stride), decltype(stride_)>::value, "");
+    static_assert(std::is_same<decltype(stride), decltype(stride_)>::value);
     // It's contiguous if the stride can be calculated from shape.
     detail::CalcStride<kDim, true>(shape_, stride);
     return common::Span<size_t const, kDim>{stride_} == common::Span<size_t const, kDim>{stride};
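`CContiguous` and `FContiguous` both work by recomputing what the strides would be for a packed layout and comparing against the stored ones. A sketch of the row-major (C order) recomputation:

```cpp
#include <cstddef>

int main() {
  // Recompute C-order strides from a shape; a view is C-contiguous exactly
  // when its stored strides match this recomputation.
  std::size_t shape[] = {2, 3, 4};
  std::size_t stride[3];
  std::size_t acc = 1;
  for (int i = 2; i >= 0; --i) {  // innermost dimension varies fastest
    stride[i] = acc;
    acc *= shape[i];
  }
  // For shape {2, 3, 4} the packed strides are {12, 4, 1}.
  return (stride[0] == 12 && stride[1] == 4 && stride[2] == 1) ? 0 : 1;
}
```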
@@ -625,7 +626,7 @@ Json ArrayInterface(TensorView<T const, D> const &t) {
   array_interface["version"] = 3;
 
   char constexpr kT = detail::ArrayInterfaceHandler::TypeChar<T>();
-  static_assert(kT != '\0', "");
+  static_assert(kT != '\0');
   if (DMLC_LITTLE_ENDIAN) {
     array_interface["typestr"] = String{"<" + (kT + std::to_string(sizeof(T)))};
   } else {
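The typestr being assembled follows the NumPy array-interface convention: a byte-order character, a type character, then the item size in bytes. A sketch of the same string arithmetic, with the order and kind hardcoded for illustration:

```cpp
#include <string>

int main() {
  // Array-interface typestr = byte order + type char + item size in bytes.
  char const order = '<';  // '<' marks a little-endian host
  char const kind = 'f';   // what TypeChar<float>() would produce
  // char + std::string prepends the char, mirroring the expression above.
  std::string typestr = order + (kind + std::to_string(sizeof(float)));
  return typestr == "<f4" ? 0 : 1;  // 4-byte float on mainstream platforms
}
```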
--- a/include/xgboost/metric.h
+++ b/include/xgboost/metric.h
@@ -28,7 +28,7 @@ struct Context;
  */
 class Metric : public Configurable {
  protected:
-  Context const* ctx_;
+  Context const* ctx_{nullptr};
 
  public:
  /*!
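The metric.h change is the same in-class-initializer idea as the `Json` one: an uninitialized pointer member stays indeterminate until every constructor remembers to set it, while `{nullptr}` gives it a well-defined default. A sketch with a stand-in `Context`:

```cpp
struct Context {};

class Metric {
 protected:
  // The in-class initializer makes the default state well-defined and
  // silences member-initialization lints.
  Context const* ctx_{nullptr};

 public:
  bool HasContext() const { return ctx_ != nullptr; }
};

int main() {
  Metric m;
  return m.HasContext() ? 1 : 0;  // ctx_ is reliably null here
}
```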
--- a/include/xgboost/version_config.h
+++ b/include/xgboost/version_config.h
@@ -1,11 +1,11 @@
-/*!
- * Copyright 2019 XGBoost contributors
+/**
+ * Copyright 2019-2023 by XGBoost contributors
  */
 #ifndef XGBOOST_VERSION_CONFIG_H_
 #define XGBOOST_VERSION_CONFIG_H_
 
-#define XGBOOST_VER_MAJOR 2
-#define XGBOOST_VER_MINOR 0
-#define XGBOOST_VER_PATCH 0
+#define XGBOOST_VER_MAJOR 2 /* NOLINT */
+#define XGBOOST_VER_MINOR 0 /* NOLINT */
+#define XGBOOST_VER_PATCH 0 /* NOLINT */
 
 #endif  // XGBOOST_VERSION_CONFIG_H_
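The `NOLINT` markers suppress clang-tidy on those lines; the diff does not record which check fired, though version macros commonly trip cppcoreguidelines-macro-usage. A bare `NOLINT` silences every diagnostic on the line, while naming a check keeps the suppression narrow:

```cpp
// Bare NOLINT: suppresses all clang-tidy diagnostics on this line.
#define MY_VER_MAJOR 2 /* NOLINT */
// Scoped NOLINT: suppresses only the named check (hypothetical example).
#define MY_VER_MINOR 0 /* NOLINT(cppcoreguidelines-macro-usage) */
```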