add hip tests

amdsc21 2023-03-11 00:38:16 +01:00
parent e961016e71
commit 204d0c9a53
18 changed files with 76 additions and 20 deletions

View File

@@ -13,6 +13,11 @@ if (USE_CUDA)
list(APPEND TEST_SOURCES ${CUDA_TEST_SOURCES})
endif (USE_CUDA)
if (USE_HIP)
file(GLOB_RECURSE HIP_TEST_SOURCES "*.hip")
list(APPEND TEST_SOURCES ${HIP_TEST_SOURCES})
endif (USE_HIP)
if (USE_HIP)
file(GLOB_RECURSE HIP_TEST_SOURCES "*.cu")
list(APPEND TEST_SOURCES ${HIP_TEST_SOURCES})
@@ -43,6 +48,11 @@ if (USE_HIP AND PLUGIN_RMM)
target_include_directories(testxgboost PRIVATE ${HIP_INCLUDE_DIRS})
endif (USE_HIP AND PLUGIN_RMM)
if (USE_HIP AND PLUGIN_RMM)
find_package(HIP)
target_include_directories(testxgboost PRIVATE ${HIP_INCLUDE_DIRS})
endif (USE_HIP AND PLUGIN_RMM)
target_include_directories(testxgboost
PRIVATE
${GTEST_INCLUDE_DIRS}

View File

@@ -623,13 +623,27 @@ class RMMAllocator {
int n_gpu;
RMMAllocator() : n_gpu(common::AllVisibleGPUs()) {
int current_device;
#if defined(XGBOOST_USE_CUDA)
CHECK_EQ(cudaGetDevice(&current_device), cudaSuccess);
#elif defined(XGBOOST_USE_HIP)
CHECK_EQ(hipGetDevice(&current_device), hipSuccess);
#endif
for (int i = 0; i < n_gpu; ++i) {
#if defined(XGBOOST_USE_CUDA)
CHECK_EQ(cudaSetDevice(i), cudaSuccess);
#elif defined(XGBOOST_USE_HIP)
CHECK_EQ(hipSetDevice(i), hipSuccess);
#endif
cuda_mr.push_back(std::make_unique<CUDAMemoryResource>());
pool_mr.push_back(std::make_unique<PoolMemoryResource>(cuda_mr[i].get()));
}
#if defined(XGBOOST_USE_CUDA)
CHECK_EQ(cudaSetDevice(current_device), cudaSuccess);
#elif defined(XGBOOST_USE_HIP)
CHECK_EQ(hipSetDevice(current_device), hipSuccess);
#endif
}
~RMMAllocator() = default;
};
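The constructor now repeats the same #if defined(XGBOOST_USE_CUDA) / #elif defined(XGBOOST_USE_HIP) guard around every runtime call. A minimal sketch of one way to collapse that duplication, assuming only the standard CUDA/HIP runtime calls already used above; the helper names GetCurrentDeviceOrDie and SetDeviceOrDie are hypothetical and not part of this patch:

// Hypothetical helpers wrapping the device-selection calls used in RMMAllocator.
// cudaGetDevice/hipGetDevice take an int* and cudaSetDevice/hipSetDevice take an
// int; each pair returns a status code, so one guarded wrapper covers both backends.
// CHECK_EQ is the same logging macro used in the constructor above.
inline int GetCurrentDeviceOrDie() {
  int device = 0;
#if defined(XGBOOST_USE_CUDA)
  CHECK_EQ(cudaGetDevice(&device), cudaSuccess);
#elif defined(XGBOOST_USE_HIP)
  CHECK_EQ(hipGetDevice(&device), hipSuccess);
#endif
  return device;
}

inline void SetDeviceOrDie(int device) {
#if defined(XGBOOST_USE_CUDA)
  CHECK_EQ(cudaSetDevice(device), cudaSuccess);
#elif defined(XGBOOST_USE_HIP)
  CHECK_EQ(hipSetDevice(device), hipSuccess);
#endif
}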

View File

@@ -26,13 +26,13 @@
#include "filesystem.h" // dmlc::TemporaryDirectory
#include "xgboost/linalg.h"
#if defined(__CUDACC__) || defined(__HIP_PLATFORM_AMD__)
#define DeclareUnifiedTest(name) GPU ## name
#else
#define DeclareUnifiedTest(name) name
#endif
#if defined(__CUDACC__) || defined(__HIP_PLATFORM_AMD__)
#define GPUIDX 0
#else
#define GPUIDX -1
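With DeclareUnifiedTest and GPUIDX, one test source compiled by the host compiler registers a CPU test, while the same source compiled as a .cu (or, after this change, under the HIP toolchain, where __HIP_PLATFORM_AMD__ is defined) registers a GPU-prefixed test targeting device 0. A rough sketch of the usage pattern, assuming the usual gtest and helpers.h includes; the test body below is illustrative, not a file from this commit:

// Host-compiled: registers Metric.SomeMetric and GPUIDX expands to -1 (CPU context).
// Compiled as CUDA/HIP: registers GPUMetric.SomeMetric and GPUIDX expands to 0.
TEST(DeclareUnifiedTest(Metric), SomeMetric) {
  auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
  std::unique_ptr<xgboost::Metric> metric{xgboost::Metric::Create("rmse", &ctx)};
  ASSERT_TRUE(metric != nullptr);
  // ... evaluate the metric; the same assertions run on CPU or GPU depending on
  // which toolchain compiled this translation unit.
}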
@@ -294,7 +294,7 @@ class RandomDataGenerator {
std::shared_ptr<DMatrix> GenerateDMatrix(bool with_label = false, bool float_label = true,
size_t classes = 1) const;
#if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
std::shared_ptr<DMatrix> GenerateDeviceDMatrix();
#endif
std::shared_ptr<DMatrix> GenerateQuantileDMatrix();

View File

@@ -1,9 +1,9 @@
#if defined(__CUDACC__) || defined(__HIP_PLATFORM_AMD__)
#include "../../src/data/ellpack_page.cuh"
#endif
namespace xgboost {
#if defined(__CUDACC__) || defined(__HIP_PLATFORM_AMD__)
namespace {
class HistogramCutsWrapper : public common::HistogramCuts {
public:

View File

@@ -0,0 +1,4 @@
#if defined(XGBOOST_USE_HIP)
#include "test_linear.cu"
#endif
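The new .hip sources in this commit are thin wrappers: when XGBOOST_USE_HIP is defined they re-include the corresponding .cu test so the HIP toolchain builds the same test body, and otherwise they compile to an empty translation unit. The same pattern, sketched for a hypothetical test_example.cu (the file name is illustrative only):

// test_example.hip -- reuse the existing CUDA test source when building for HIP.
#if defined(XGBOOST_USE_HIP)
#include "test_example.cu"
#endif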

View File

@@ -0,0 +1,4 @@
#if defined(XGBOOST_USE_HIP)
#include "test_auc.cu"
#endif

View File

@@ -0,0 +1,4 @@
#if defined(XGBOOST_USE_HIP)
#include "test_elementwise_metric.cu"
#endif

View File

@@ -0,0 +1,4 @@
#if defined(XGBOOST_USE_HIP)
#include "test_multiclass_metric.cu"
#endif

View File

@@ -3,7 +3,7 @@
#include "../helpers.h"
#if !defined(__CUDACC__) && !defined(__HIP_PLATFORM_AMD__)
TEST(Metric, AMS) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
EXPECT_ANY_THROW(xgboost::Metric::Create("ams", &ctx));

View File

@@ -0,0 +1,4 @@
#if defined(XGBOOST_USE_HIP)
#include "test_rank_metric.cu"
#endif

View File

@@ -0,0 +1,4 @@
#if defined(XGBOOST_USE_HIP)
#include "test_survival_metric.cu"
#endif

View File

@@ -146,7 +146,11 @@ TEST(GpuPredictor, LesserFeatures) {
// Very basic test of empty model
TEST(GPUPredictor, ShapStump) {
#if defined(XGBOOST_USE_CUDA)
cudaSetDevice(0);
#elif defined(XGBOOST_USE_HIP)
hipSetDevice(0);
#endif
Context ctx;
ctx.gpu_id = 0;

View File

@@ -0,0 +1,4 @@
#if defined(XGBOOST_USE_HIP)
#include "test_gpu_predictor.cu"
#endif

View File

@@ -170,7 +170,7 @@ void TestPredictionWithLesserFeatures(std::string predictor_name) {
auto m_invalid = RandomDataGenerator(kRows, kTrainCols + 1, 0.5).GenerateDMatrix(false);
ASSERT_THROW({learner->Predict(m_invalid, false, &prediction, 0, 0);}, dmlc::Error);
#if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
HostDeviceVector<float> from_cpu;
learner->SetParam("predictor", "cpu_predictor");
learner->Predict(m_test, false, &from_cpu, 0, 0);
@@ -184,7 +184,7 @@ void TestPredictionWithLesserFeatures(std::string predictor_name) {
for (size_t i = 0; i < h_cpu.size(); ++i) {
ASSERT_NEAR(h_cpu[i], h_gpu[i], kRtEps);
}
#endif // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
}
void GBTreeModelForTest(gbm::GBTreeModel *model, uint32_t split_ind,

View File

@@ -266,7 +266,7 @@ TEST(Learner, BinaryModelIO) {
ASSERT_EQ(config_str.find("WARNING"), std::string::npos);
}
#if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
// Tests for automatic GPU configuration.
TEST(Learner, GPUConfiguration) {
using Arg = std::pair<std::string, std::string>;
@@ -325,7 +325,7 @@ TEST(Learner, GPUConfiguration) {
ASSERT_EQ(learner->Ctx()->gpu_id, 0);
}
}
#endif // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
TEST(Learner, Seed) {
auto m = RandomDataGenerator{10, 10, 0}.GenerateDMatrix();

View File

@@ -116,9 +116,9 @@ TEST_F(TestL1MultiTarget, Exact) { this->RunTest("exact"); }
TEST_F(TestL1MultiTarget, Approx) { this->RunTest("approx"); }
#if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
TEST_F(TestL1MultiTarget, GpuHist) { this->RunTest("gpu_hist"); }
#endif // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
TEST(MultiStrategy, Configure) {
auto p_fmat = RandomDataGenerator{12ul, 3ul, 0.0}.GenerateDMatrix();

View File

@@ -338,7 +338,7 @@ TEST_F(SerializationTest, CPUCoordDescent) {
fmap_, p_dmat_);
}
#if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
TEST_F(SerializationTest, GpuHist) {
TestLearnerSerialization({{"booster", "gbtree"},
{"seed", "0"},
@@ -416,7 +416,7 @@ TEST_F(SerializationTest, GPUCoordDescent) {
{"updater", "gpu_coord_descent"}},
fmap_, p_dmat_);
}
#endif // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
class L1SerializationTest : public SerializationTest {};
@@ -447,7 +447,7 @@ TEST_F(L1SerializationTest, Hist) {
fmap_, p_dmat_);
}
#if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
TEST_F(L1SerializationTest, GpuHist) {
TestLearnerSerialization({{"booster", "gbtree"},
{"objective", "reg:absoluteerror"},
@@ -456,7 +456,7 @@ TEST_F(L1SerializationTest, GpuHist) {
{"tree_method", "gpu_hist"}},
fmap_, p_dmat_);
}
#endif // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
class LogitSerializationTest : public SerializationTest {
protected:
@@ -542,7 +542,7 @@ TEST_F(LogitSerializationTest, CPUCoordDescent) {
fmap_, p_dmat_);
}
#if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
TEST_F(LogitSerializationTest, GpuHist) {
TestLearnerSerialization({{"booster", "gbtree"},
{"objective", "binary:logistic"},
@@ -578,7 +578,7 @@ TEST_F(LogitSerializationTest, GPUCoordDescent) {
{"updater", "gpu_coord_descent"}},
fmap_, p_dmat_);
}
#endif // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
class MultiClassesSerializationTest : public SerializationTest {
protected:
@@ -684,7 +684,7 @@ TEST_F(MultiClassesSerializationTest, CPUCoordDescent) {
fmap_, p_dmat_);
}
#if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
TEST_F(MultiClassesSerializationTest, GpuHist) {
TestLearnerSerialization({{"booster", "gbtree"},
{"num_class", std::to_string(kClasses)},
@@ -731,5 +731,5 @@ TEST_F(MultiClassesSerializationTest, GPUCoordDescent) {
{"updater", "gpu_coord_descent"}},
fmap_, p_dmat_);
}
#endif // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
} // namespace xgboost