more tests
This commit is contained in:
parent 204d0c9a53
commit 332f6a89a9
@@ -0,0 +1,4 @@

#if defined(XGBOOST_USE_HIP)
#include "test_nccl_device_communicator.cu"
#endif
@@ -22,8 +22,13 @@ TEST(ArrayInterface, Stream) {
HostDeviceVector<float> storage;
auto arr_str = RandomDataGenerator{kRows, kCols, 0}.GenerateArrayInterface(&storage);

#if defined(XGBOOST_USE_CUDA)
cudaStream_t stream;
cudaStreamCreate(&stream);
#elif defined(XGBOOST_USE_HIP)
hipStream_t stream;
hipStreamCreate(&stream);
#endif

auto j_arr = Json::Load(StringView{arr_str});
j_arr["stream"] = Integer(reinterpret_cast<int64_t>(stream));
@@ -37,19 +42,35 @@ TEST(ArrayInterface, Stream) {
auto t = out[0];
CHECK_GE(t, dur);

#if defined(XGBOOST_USE_CUDA)
cudaStreamDestroy(stream);
#elif defined(XGBOOST_USE_HIP)
hipStreamDestroy(stream);
#endif
}

TEST(ArrayInterface, Ptr) {
std::vector<float> h_data(10);
ASSERT_FALSE(ArrayInterfaceHandler::IsCudaPtr(h_data.data()));
#if defined(XGBOOST_USE_CUDA)
dh::safe_cuda(cudaGetLastError());
#elif defined(XGBOOST_USE_HIP)
dh::safe_cuda(hipGetLastError());
#endif

dh::device_vector<float> d_data(10);
ASSERT_TRUE(ArrayInterfaceHandler::IsCudaPtr(d_data.data().get()));
#if defined(XGBOOST_USE_CUDA)
dh::safe_cuda(cudaGetLastError());
#elif defined(XGBOOST_USE_HIP)
dh::safe_cuda(hipGetLastError());
#endif

ASSERT_FALSE(ArrayInterfaceHandler::IsCudaPtr(nullptr));
#if defined(XGBOOST_USE_CUDA)
dh::safe_cuda(cudaGetLastError());
#elif defined(XGBOOST_USE_HIP)
dh::safe_cuda(hipGetLastError());
#endif
}
} // namespace xgboost

@@ -0,0 +1,4 @@

#if defined(XGBOOST_USE_HIP)
#include "test_array_interface.cu"
#endif
@@ -6,7 +6,13 @@
#include "../../../src/common/timer.h"
#include "../helpers.h"
#include <thrust/device_vector.h>

#if defined(XGBOOST_USE_CUDA)
#include "../../../src/data/device_adapter.cuh"
#elif defined(XGBOOST_USE_HIP)
#include "../../../src/data/device_adapter.hip.h"
#endif

#include "test_array_interface.h"
using namespace xgboost; // NOLINT

@@ -44,7 +50,12 @@ void TestCudfAdapter()
KERNEL_CHECK(element.value == element.row_idx * 2.0f);
}
});

#if defined(XGBOOST_USE_CUDA)
dh::safe_cuda(cudaDeviceSynchronize());
#elif defined(XGBOOST_USE_HIP)
dh::safe_cuda(hipDeviceSynchronize());
#endif
});
}

@@ -0,0 +1,4 @@

#if defined(XGBOOST_USE_HIP)
#include "test_device_adapter.cu"
#endif
@@ -223,7 +223,11 @@ TEST(EllpackPage, Compact) {

dh::LaunchN(kCols, ReadRowFunction(impl->GetDeviceAccessor(0),
current_row, row_d.data().get()));
#if defined(XGBOOST_USE_CUDA)
dh::safe_cuda(cudaDeviceSynchronize());
#elif defined(XGBOOST_USE_HIP)
dh::safe_cuda(hipDeviceSynchronize());
#endif
thrust::copy(row_d.begin(), row_d.end(), row.begin());

dh::LaunchN(kCols,

@@ -0,0 +1,4 @@

#if defined(XGBOOST_USE_HIP)
#include "test_ellpack_page.cu"
#endif
@@ -0,0 +1,4 @@

#if defined(XGBOOST_USE_HIP)
#include "test_ellpack_page_raw_format.cu"
#endif
@@ -133,7 +133,7 @@ TEST(GradientIndex, PushBatch) {
test(0.9f);
}

#if defined(XGBOOST_USE_CUDA)
#if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)

namespace {
class GHistIndexMatrixTest : public testing::TestWithParam<std::tuple<float, float>> {
@@ -207,6 +207,6 @@ INSTANTIATE_TEST_SUITE_P(GHistIndexMatrix, GHistIndexMatrixTest,
std::make_tuple(.5f, .6), // sparse columns
std::make_tuple(.6f, .4))); // dense columns

#endif // defined(XGBOOST_USE_CUDA)
#endif // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
} // namespace data
} // namespace xgboost

@@ -3,7 +3,12 @@
*/
#include <gtest/gtest.h>

#if defined(XGBOOST_USE_CUDA)
#include "../../../src/data/device_adapter.cuh"
#elif defined(XGBOOST_USE_HIP)
#include "../../../src/data/device_adapter.hip.h"
#endif

#include "../../../src/data/ellpack_page.cuh"
#include "../../../src/data/iterative_dmatrix.h"
#include "../helpers.h"

@@ -0,0 +1,4 @@

#if defined(XGBOOST_USE_HIP)
#include "test_iterative_dmatrix.cu"
#endif
@@ -258,7 +258,7 @@ TEST(MetaInfo, Validate) {
EXPECT_THROW(info.SetInfo(ctx, "group", groups.data(), xgboost::DataType::kUInt32, groups.size()),
dmlc::Error);

#if defined(XGBOOST_USE_CUDA)
#if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
info.group_ptr_.clear();
labels.resize(info.num_row_);
info.SetInfo(ctx, "label", labels.data(), xgboost::DataType::kFloat32, info.num_row_);
@@ -271,7 +271,7 @@ TEST(MetaInfo, Validate) {
std::string arr_interface_str{ArrayInterfaceStr(
xgboost::linalg::MakeVec(d_groups.ConstDevicePointer(), d_groups.Size(), 0))};
EXPECT_THROW(info.SetInfo(ctx, "group", xgboost::StringView{arr_interface_str}), dmlc::Error);
#endif // defined(XGBOOST_USE_CUDA)
#endif // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
}

TEST(MetaInfo, HostExtend) {

@@ -6,7 +6,12 @@
#include <xgboost/data.h>
#include <xgboost/json.h>

#if defined(XGBOOST_USE_CUDA)
#include "../../../src/common/device_helpers.cuh"
#elif defined(XGBOOST_USE_HIP)
#include "../../../src/common/device_helpers.hip.h"
#endif

#include "test_array_interface.h"
#include "test_metainfo.h"

@@ -43,7 +48,12 @@ std::string PrepareData(std::string typestr, thrust::device_vector<T>* out, cons
}

TEST(MetaInfo, FromInterface) {
#if defined(XGBOOST_USE_CUDA)
cudaSetDevice(0);
#elif defined(XGBOOST_USE_HIP)
hipSetDevice(0);
#endif

Context ctx;
thrust::device_vector<float> d_data;

@@ -87,7 +97,12 @@ TEST(MetaInfo, GPUStridedData) {
}

TEST(MetaInfo, Group) {
#if defined(XGBOOST_USE_CUDA)
cudaSetDevice(0);
#elif defined(XGBOOST_USE_HIP)
hipSetDevice(0);
#endif

MetaInfo info;
Context ctx;

@@ -141,7 +156,12 @@ TEST(MetaInfo, GPUQid) {


TEST(MetaInfo, DeviceExtend) {
#if defined(XGBOOST_USE_CUDA)
dh::safe_cuda(cudaSetDevice(0));
#elif defined(XGBOOST_USE_HIP)
dh::safe_cuda(hipSetDevice(0));
#endif

size_t const kRows = 100;
MetaInfo lhs, rhs;
Context ctx;

@@ -0,0 +1,4 @@

#if defined(XGBOOST_USE_HIP)
#include "test_metainfo.cu"
#endif
@@ -2,7 +2,13 @@
#include <xgboost/host_device_vector.h>
#include <memory>
#include "../helpers.h"

#if defined(XGBOOST_USE_CUDA)
#include "../../../src/data/device_adapter.cuh"
#elif defined(XGBOOST_USE_HIP)
#include "../../../src/data/device_adapter.hip.h"
#endif

#include "../../../src/data/proxy_dmatrix.h"

namespace xgboost {

@@ -0,0 +1,4 @@

#if defined(XGBOOST_USE_HIP)
#include "test_proxy_dmatrix.cu"
#endif
@@ -3,7 +3,13 @@
#include "../../../src/data/simple_dmatrix.h"

#include <thrust/sequence.h>

#if defined(XGBOOST_USE_CUDA)
#include "../../../src/data/device_adapter.cuh"
#elif defined(XGBOOST_USE_HIP)
#include "../../../src/data/device_adapter.hip.h"
#endif

#include "../helpers.h"
#include "test_array_interface.h"
#include "../../../src/data/array_interface.h"
@@ -109,8 +115,14 @@ TEST(SimpleDMatrix, FromColumnarWithEmptyRows) {
auto& data = columns_data[i];
data.resize(kRows);
thrust::sequence(data.begin(), data.end(), 0);

#if defined(XGBOOST_USE_CUDA)
dh::safe_cuda(cudaDeviceSynchronize());
dh::safe_cuda(cudaGetLastError());
#elif defined(XGBOOST_USE_HIP)
dh::safe_cuda(hipDeviceSynchronize());
dh::safe_cuda(hipGetLastError());
#endif

ASSERT_EQ(data.size(), kRows);

@@ -0,0 +1,4 @@

#if defined(XGBOOST_USE_HIP)
#include "test_simple_dmatrix.cu"
#endif
@@ -0,0 +1,4 @@

#if defined(XGBOOST_USE_HIP)
#include "test_sparse_page_dmatrix.cu"
#endif
@@ -0,0 +1,4 @@

#if defined(XGBOOST_USE_HIP)
#include "test_federated_adapter.cu"
#endif
@@ -0,0 +1,4 @@

#if defined(XGBOOST_USE_HIP)
#include "test_driver.cu"
#endif
@@ -0,0 +1,4 @@

#if defined(XGBOOST_USE_HIP)
#include "test_evaluate_splits.cu"
#endif
@@ -0,0 +1,4 @@

#if defined(XGBOOST_USE_HIP)
#include "test_gradient_based_sampler.cu"
#endif
@@ -40,9 +40,15 @@ void TestDeterministicHistogram(bool is_dense, int shm_size) {
quantiser);

std::vector<GradientPairInt64> histogram_h(num_bins);
#if defined(XGBOOST_USE_CUDA)
dh::safe_cuda(cudaMemcpy(histogram_h.data(), d_histogram.data(),
num_bins * sizeof(GradientPairInt64),
cudaMemcpyDeviceToHost));
#elif defined(XGBOOST_USE_HIP)
dh::safe_cuda(hipMemcpy(histogram_h.data(), d_histogram.data(),
num_bins * sizeof(GradientPairInt64),
hipMemcpyDeviceToHost));
#endif

for (size_t i = 0; i < kRounds; ++i) {
dh::device_vector<GradientPairInt64> new_histogram(num_bins);
@@ -54,9 +60,15 @@ void TestDeterministicHistogram(bool is_dense, int shm_size) {
d_new_histogram, quantiser);

std::vector<GradientPairInt64> new_histogram_h(num_bins);
#if defined(XGBOOST_USE_CUDA)
dh::safe_cuda(cudaMemcpy(new_histogram_h.data(), d_new_histogram.data(),
num_bins * sizeof(GradientPairInt64),
cudaMemcpyDeviceToHost));
#elif defined(XGBOOST_USE_HIP)
dh::safe_cuda(hipMemcpy(new_histogram_h.data(), d_new_histogram.data(),
num_bins * sizeof(GradientPairInt64),
hipMemcpyDeviceToHost));
#endif
for (size_t j = 0; j < new_histogram_h.size(); ++j) {
ASSERT_EQ(new_histogram_h[j].GetQuantisedGrad(), histogram_h[j].GetQuantisedGrad());
ASSERT_EQ(new_histogram_h[j].GetQuantisedHess(), histogram_h[j].GetQuantisedHess());
@@ -76,9 +88,15 @@ void TestDeterministicHistogram(bool is_dense, int shm_size) {
dh::ToSpan(baseline), quantiser);

std::vector<GradientPairInt64> baseline_h(num_bins);
#if defined(XGBOOST_USE_CUDA)
dh::safe_cuda(cudaMemcpy(baseline_h.data(), baseline.data().get(),
num_bins * sizeof(GradientPairInt64),
cudaMemcpyDeviceToHost));
#elif defined(XGBOOST_USE_HIP)
dh::safe_cuda(hipMemcpy(baseline_h.data(), baseline.data().get(),
num_bins * sizeof(GradientPairInt64),
hipMemcpyDeviceToHost));
#endif

for (size_t i = 0; i < baseline.size(); ++i) {
EXPECT_NEAR(baseline_h[i].GetQuantisedGrad(), histogram_h[i].GetQuantisedGrad(),

@@ -0,0 +1,4 @@

#if defined(XGBOOST_USE_HIP)
#include "test_histogram.cu"
#endif
@@ -67,9 +67,16 @@ void TestSortPositionBatch(const std::vector<int>& ridx_in, const std::vector<Se
h_batch_info[i] = {segments.at(i), 0};
total_rows += segments.at(i).Size();
}

#if defined(XGBOOST_USE_CUDA)
dh::safe_cuda(cudaMemcpyAsync(d_batch_info.data().get(), h_batch_info.data(),
h_batch_info.size() * sizeof(PerNodeData<int>), cudaMemcpyDefault,
nullptr));
#elif defined(XGBOOST_USE_HIP)
dh::safe_cuda(hipMemcpyAsync(d_batch_info.data().get(), h_batch_info.data(),
h_batch_info.size() * sizeof(PerNodeData<int>), hipMemcpyDefault,
nullptr));
#endif
dh::device_vector<int8_t> tmp;
SortPositionBatch<uint32_t, decltype(op), int>(dh::ToSpan(d_batch_info), dh::ToSpan(ridx),
dh::ToSpan(ridx_tmp), dh::ToSpan(counts),

@@ -0,0 +1,4 @@

#if defined(XGBOOST_USE_HIP)
#include "test_row_partitioner.cu"
#endif
@@ -10,7 +10,12 @@
#include <set>
#include "../../../src/tree/constraints.cuh"
#include "../../../src/tree/param.h"

#if defined(XGBOOST_USE_CUDA)
#include "../../../src/common/device_helpers.cuh"
#elif defined(XGBOOST_USE_HIP)
#include "../../../src/common/device_helpers.hip.h"
#endif

namespace xgboost {
namespace {

@@ -0,0 +1,4 @@

#if defined(XGBOOST_USE_HIP)
#include "test_constraints.cu"
#endif
@@ -37,12 +37,12 @@ TEST(InitEstimation, FitStump) {
TestFitStump(&ctx);
}

#if defined(XGBOOST_USE_CUDA)
#if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
TEST(InitEstimation, GPUFitStump) {
Context ctx;
ctx.UpdateAllowUnknown(Args{{"gpu_id", "0"}});
TestFitStump(&ctx);
}
#endif // defined(XGBOOST_USE_CUDA)
#endif // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
} // namespace tree
} // namespace xgboost

@@ -25,7 +25,11 @@
namespace xgboost::tree {
TEST(GpuHist, DeviceHistogram) {
// Ensures that node allocates correctly after reaching `kStopGrowingSize`.
#if defined(XGBOOST_USE_CUDA)
dh::safe_cuda(cudaSetDevice(0));
#elif defined(XGBOOST_USE_HIP)
dh::safe_cuda(hipSetDevice(0));
#endif
constexpr size_t kNBins = 128;
constexpr int kNNodes = 4;
constexpr size_t kStopGrowing = kNNodes * kNBins * 2u;
@@ -120,8 +124,14 @@ void TestBuildHist(bool use_shared_memory_histograms) {
auto node_histogram = d_hist.GetNodeHistogram(0);
// d_hist.data stored in float, not gradient pair
thrust::host_vector<GradientPairInt64> h_result(node_histogram.size());

#if defined(XGBOOST_USE_CUDA)
dh::safe_cuda(cudaMemcpy(h_result.data(), node_histogram.data(), node_histogram.size_bytes(),
cudaMemcpyDeviceToHost));
#elif defined(XGBOOST_USE_HIP)
dh::safe_cuda(hipMemcpy(h_result.data(), node_histogram.data(), node_histogram.size_bytes(),
hipMemcpyDeviceToHost));
#endif

std::vector<GradientPairPrecise> solution = GetHostHistGpair();
for (size_t i = 0; i < h_result.size(); ++i) {

@@ -0,0 +1,4 @@

#if defined(XGBOOST_USE_HIP)
#include "test_gpu_hist.cu"
#endif
@@ -18,10 +18,10 @@ TEST(Updater, HasNodePosition) {
up.reset(TreeUpdater::Create("grow_quantile_histmaker", &ctx, &task));
ASSERT_TRUE(up->HasNodePosition());

#if defined(XGBOOST_USE_CUDA)
#if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
ctx.gpu_id = 0;
up.reset(TreeUpdater::Create("grow_gpu_hist", &ctx, &task));
ASSERT_TRUE(up->HasNodePosition());
#endif // defined(XGBOOST_USE_CUDA)
#endif // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
}
} // namespace xgboost

@@ -106,7 +106,7 @@ TEST_F(TestPredictionCache, Approx) { this->RunTest("grow_histmaker"); }

TEST_F(TestPredictionCache, Hist) { this->RunTest("grow_quantile_histmaker"); }

#if defined(XGBOOST_USE_CUDA)
#if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
TEST_F(TestPredictionCache, GpuHist) { this->RunTest("grow_gpu_hist"); }
#endif // defined(XGBOOST_USE_CUDA)
#endif // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
} // namespace xgboost

@@ -111,7 +111,7 @@ TEST_F(RegenTest, Mixed) {
ASSERT_EQ(n, this->Iter() + 1);
}

#if defined(XGBOOST_USE_CUDA)
#if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
TEST_F(RegenTest, GpuHist) {
auto n = this->TestTreeMethod<EllpackPage>("gpu_hist", "reg:squarederror");
ASSERT_EQ(n, 1);
@@ -121,5 +121,5 @@ TEST_F(RegenTest, GpuHist) {
n = this->TestTreeMethod<EllpackPage>("hist", "reg:logistic");
ASSERT_EQ(n, 2);
}
#endif // defined(XGBOOST_USE_CUDA)
#endif // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
} // namespace xgboost

@@ -146,12 +146,12 @@ TEST_F(TestGrowPolicy, Hist) {
this->TestCombination("hist");
}

#if defined(XGBOOST_USE_CUDA)
#if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
TEST_F(TestGrowPolicy, GpuHist) {
this->TestTreeGrowPolicy("gpu_hist", "depthwise");
this->TestTreeGrowPolicy("gpu_hist", "lossguide");

this->TestCombination("gpu_hist");
}
#endif // defined(XGBOOST_USE_CUDA)
#endif // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
} // namespace xgboost

@@ -52,9 +52,9 @@ class UpdaterTreeStatTest : public ::testing::Test {
}
};

#if defined(XGBOOST_USE_CUDA)
#if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
TEST_F(UpdaterTreeStatTest, GpuHist) { this->RunTest("grow_gpu_hist"); }
#endif // defined(XGBOOST_USE_CUDA)
#endif // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)

TEST_F(UpdaterTreeStatTest, Hist) { this->RunTest("grow_quantile_histmaker"); }

@@ -124,9 +124,9 @@ TEST_F(UpdaterEtaTest, Exact) { this->RunTest("grow_colmaker"); }

TEST_F(UpdaterEtaTest, Approx) { this->RunTest("grow_histmaker"); }

#if defined(XGBOOST_USE_CUDA)
#if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
TEST_F(UpdaterEtaTest, GpuHist) { this->RunTest("grow_gpu_hist"); }
#endif // defined(XGBOOST_USE_CUDA)
#endif // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)

class TestMinSplitLoss : public ::testing::Test {
std::shared_ptr<DMatrix> dmat_;
@@ -194,7 +194,7 @@ class TestMinSplitLoss : public ::testing::Test {
TEST_F(TestMinSplitLoss, Approx) { this->RunTest("grow_histmaker"); }

TEST_F(TestMinSplitLoss, Hist) { this->RunTest("grow_quantile_histmaker"); }
#if defined(XGBOOST_USE_CUDA)
#if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
TEST_F(TestMinSplitLoss, GpuHist) { this->RunTest("grow_gpu_hist"); }
#endif // defined(XGBOOST_USE_CUDA)
#endif // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
} // namespace xgboost
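
The hunks above repeat one compile-time dispatch pattern: every CUDA runtime call is mirrored by its HIP equivalent behind XGBOOST_USE_CUDA / XGBOOST_USE_HIP guards, and each new HIP-only translation unit simply re-includes the existing .cu test. A minimal standalone sketch of that pattern (an illustration only, not part of the commit; the runtime headers are assumed):

// Illustration only: the CUDA/HIP dispatch pattern used throughout these tests.
#if defined(XGBOOST_USE_CUDA)
#include <cuda_runtime.h>
#elif defined(XGBOOST_USE_HIP)
#include <hip/hip_runtime.h>
#endif

int main() {
#if defined(XGBOOST_USE_CUDA)
  cudaStream_t stream;      // CUDA toolchain: CUDA runtime types and calls.
  cudaStreamCreate(&stream);
  cudaStreamDestroy(stream);
#elif defined(XGBOOST_USE_HIP)
  hipStream_t stream;       // HIP toolchain: identical logic, HIP spelling.
  hipStreamCreate(&stream);
  hipStreamDestroy(stream);
#endif
  return 0;
}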