finish gbtree.cu porting

amdsc21 2023-03-08 21:09:56 +01:00
parent cdd7794641
commit 7e1b06417b
4 changed files with 21 additions and 5 deletions


@@ -530,17 +530,17 @@ class TensorView {
   /**
    * \brief Number of items in the tensor.
    */
-  LINALG_HD [[nodiscard]] std::size_t Size() const { return size_; }
+  LINALG_HD std::size_t Size() const { return size_; }
   /**
    * \brief Whether this is a contiguous array, both C and F contiguous returns true.
    */
-  LINALG_HD [[nodiscard]] bool Contiguous() const {
+  LINALG_HD bool Contiguous() const {
     return data_.size() == this->Size() || this->CContiguous() || this->FContiguous();
   }
   /**
    * \brief Whether it's a c-contiguous array.
    */
-  LINALG_HD [[nodiscard]] bool CContiguous() const {
+  LINALG_HD bool CContiguous() const {
     StrideT stride;
     static_assert(std::is_same<decltype(stride), decltype(stride_)>::value);
     // It's contiguous if the stride can be calculated from shape.
@@ -550,7 +550,7 @@ class TensorView {
   /**
    * \brief Whether it's a f-contiguous array.
    */
-  LINALG_HD [[nodiscard]] bool FContiguous() const {
+  LINALG_HD bool FContiguous() const {
     StrideT stride;
     static_assert(std::is_same<decltype(stride), decltype(stride_)>::value);
     // It's contiguous if the stride can be calculated from shape.


@@ -114,7 +114,7 @@ namespace common {
 #define HIP_KERNEL_CHECK(cond)            \
   do {                                    \
     if (XGBOOST_EXPECT(!(cond), false)) { \
-      __trap();                           \
+      __builtin_trap();                   \
     }                                     \
   } while (0)
@@ -122,10 +122,17 @@ namespace common {
 #define __ASSERT_STR_HELPER(x) #x
+#if 0 /* need to fix __assert_fail, without __host__ */
 #define HIP_KERNEL_CHECK(cond)  \
   (XGBOOST_EXPECT((cond), true) \
        ? static_cast<void>(0)   \
        : __assert_fail(__ASSERT_STR_HELPER((cond)), __FILE__, __LINE__, __PRETTY_FUNCTION__))
+#else
+#define HIP_KERNEL_CHECK(cond)  \
+  (XGBOOST_EXPECT((cond), true) \
+       ? static_cast<void>(0)   \
+       : __builtin_trap())
+#endif
 #endif  // defined(_MSC_VER)
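
For context: after this change both branches of HIP_KERNEL_CHECK fall back to __builtin_trap(), and the __assert_fail-based variant is parked behind #if 0 until a device-usable declaration is available. A minimal usage sketch, assuming a HIP toolchain and that XGBOOST_EXPECT wraps __builtin_expect; the kernel below is illustrative and not part of this commit:

// Sketch only: redefines the macros locally so the snippet compiles standalone.
#include <hip/hip_runtime.h>

#ifndef XGBOOST_EXPECT
#define XGBOOST_EXPECT(cond, ret) __builtin_expect((cond), (ret))
#endif

#define HIP_KERNEL_CHECK(cond)  \
  (XGBOOST_EXPECT((cond), true) ? static_cast<void>(0) : __builtin_trap())

// Illustrative kernel: aborts the launch via __builtin_trap() if the invariant fails.
__global__ void ScaleNonNegative(float const* in, float* out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    HIP_KERNEL_CHECK(in[i] >= 0.0f);  // device-side assertion
    out[i] = in[i] * 2.0f;
  }
}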


@@ -1,7 +1,13 @@
 /*!
  * Copyright 2021 by Contributors
  */
+#if defined(XGBOOST_USE_CUDA)
 #include "../common/device_helpers.cuh"
+#elif defined(XGBOOST_USE_HIP)
+#include "../common/device_helpers.hip.h"
+#endif
 #include "xgboost/context.h"
 #include "xgboost/linalg.h"
 #include "xgboost/span.h"


@@ -0,0 +1,3 @@
+#include "gbtree.cu"