Remove MGPU cpp tests. (#8276)

Co-authored-by: Hyunsu Philip Cho <chohyu01@cs.washington.edu>
This commit is contained in:
Jiaming Yuan
2022-09-27 21:18:23 +08:00
committed by GitHub
parent fcab51aa82
commit 6d1452074a
10 changed files with 52 additions and 104 deletions

View File

@@ -11,13 +11,14 @@
namespace xgboost {
namespace common {
void SetDevice(int device) {
namespace {
// Selects the active CUDA device for the calling thread.  The requested
// ordinal is wrapped into the range of visible devices (device % n_devices)
// so a test asking for device 1 still runs on a single-GPU machine.
void SetDeviceForTest(int device) {
int n_devices;
dh::safe_cuda(cudaGetDeviceCount(&n_devices));
device %= n_devices;
dh::safe_cuda(cudaSetDevice(device));
}
} // namespace
struct HostDeviceVectorSetDeviceHandler {
template <typename Functor>
@@ -57,7 +58,7 @@ void InitHostDeviceVector(size_t n, int device, HostDeviceVector<int> *v) {
void PlusOne(HostDeviceVector<int> *v) {
int device = v->DeviceIdx();
SetDevice(device);
SetDeviceForTest(device);
thrust::transform(dh::tcbegin(*v), dh::tcend(*v), dh::tbegin(*v),
[=]__device__(unsigned int a){ return a + 1; });
ASSERT_TRUE(v->DeviceCanWrite());
@@ -68,7 +69,7 @@ void CheckDevice(HostDeviceVector<int>* v,
unsigned int first,
GPUAccess access) {
ASSERT_EQ(v->Size(), size);
SetDevice(v->DeviceIdx());
SetDeviceForTest(v->DeviceIdx());
ASSERT_TRUE(thrust::equal(dh::tcbegin(*v), dh::tcend(*v),
thrust::make_counting_iterator(first)));
@@ -182,16 +183,5 @@ TEST(HostDeviceVector, Empty) {
ASSERT_FALSE(another.Empty());
ASSERT_TRUE(vec.Empty());
}
TEST(HostDeviceVector, MGPU_Basic) { // NOLINT
  // Exercises HostDeviceVector on a non-default device ordinal; this only
  // makes sense with at least two visible GPUs, so bail out otherwise.
  if (AllVisibleGPUs() < 2) {
    LOG(WARNING) << "Not testing in multi-gpu environment.";
    return;
  }
  size_t n_elements = 1001;
  int target_device = 1;  // the second visible GPU
  TestHostDeviceVector(n_elements, target_device);
}
} // namespace common
} // namespace xgboost

View File

@@ -1,35 +0,0 @@
/*!
* Copyright 2018-2022 by XGBoost Contributors
* \brief This converts all tests from CPU to GPU.
*/
#include "test_transform_range.cc"
namespace xgboost {
namespace common {
TEST(Transform, MGPU_SpecifiedGpuId) { // NOLINT
  // Runs TestTransformRange on an explicitly chosen GPU and verifies the
  // output round-trips back to the host unchanged.
  if (AllVisibleGPUs() < 2) {
    LOG(WARNING) << "Not testing in multi-gpu environment.";
    return;
  }
  // Device ordinals start at 0; pick the second visible GPU.
  int const device = 1;
  int const size = 256;
  std::vector<bst_float> host_input(size);
  std::iota(host_input.begin(), host_input.end(), 0);
  std::vector<bst_float> expected(size);
  std::iota(expected.begin(), expected.end(), 0);
  std::vector<bst_float> host_output(size);
  const HostDeviceVector<bst_float> in_vec{host_input, device};
  HostDeviceVector<bst_float> out_vec{host_output, device};
  ASSERT_NO_THROW(Transform<>::Init(TestTransformRange<bst_float>{}, Range{0, size},
                                    common::OmpGetNumThreads(0), device)
                      .Eval(&out_vec, &in_vec));
  std::vector<bst_float> result = out_vec.HostVector();
  ASSERT_TRUE(std::equal(expected.begin(), expected.end(), result.begin()));
}
} // namespace common
} // namespace xgboost

View File

@@ -84,29 +84,3 @@ TEST(Metric, DeclareUnifiedTest(MultiClassLogLoss)) {
TestMultiClassLogLoss(GPUIDX);
xgboost::CheckDeterministicMetricMultiClass(xgboost::StringView{"mlogloss"}, GPUIDX);
}
#if defined(__CUDACC__)
namespace xgboost {
namespace common {
// Runs the multi-class error and log-loss metric checks on GPU 0 and GPU 1.
TEST(Metric, MGPU_MultiClassError) {
  if (AllVisibleGPUs() < 2) {
    LOG(WARNING) << "Not testing in multi-gpu environment.";
    return;
  }
  // Same call sequence as before: both error checks, then both log-loss checks.
  for (int device : {0, 1}) {
    TestMultiClassError(device);
  }
  for (int device : {0, 1}) {
    TestMultiClassLogLoss(device);
  }
}
} // namespace common
} // namespace xgboost
#endif // defined(__CUDACC__)

View File

@@ -172,7 +172,7 @@ TEST(CpuPredictor, InplacePredict) {
std::string arr_str;
Json::Dump(array_interface, &arr_str);
x->SetArrayData(arr_str.data());
TestInplacePrediction(x, "cpu_predictor", kRows, kCols, -1);
TestInplacePrediction(x, "cpu_predictor", kRows, kCols, Context::kCpuId);
}
{
@@ -189,7 +189,7 @@ TEST(CpuPredictor, InplacePredict) {
Json::Dump(col_interface, &col_str);
std::shared_ptr<data::DMatrixProxy> x{new data::DMatrixProxy};
x->SetCSRData(rptr_str.data(), col_str.data(), data_str.data(), kCols, true);
TestInplacePrediction(x, "cpu_predictor", kRows, kCols, -1);
TestInplacePrediction(x, "cpu_predictor", kRows, kCols, Context::kCpuId);
}
}

View File

@@ -140,26 +140,10 @@ TEST(GPUPredictor, InplacePredictCuDF) {
TestInplacePrediction(p_fmat, "gpu_predictor", kRows, kCols, 0);
}
TEST(GPUPredictor, MGPU_InplacePredict) { // NOLINT
  // In-place prediction with data resident on GPU 1: predicting on that same
  // device must succeed, while asking for GPU 0 must raise dmlc::Error.
  if (xgboost::common::AllVisibleGPUs() <= 1) {
    LOG(WARNING) << "GPUPredictor.MGPU_InplacePredict is skipped.";
    return;
  }
  constexpr size_t kRows = 128;
  constexpr size_t kCols = 64;
  RandomDataGenerator generator(kRows, kCols, 0.5);
  generator.Device(1);  // place the generated data on the second GPU
  HostDeviceVector<float> data;
  std::string array_interface = generator.GenerateArrayInterface(&data);
  auto proxy = std::make_shared<data::DMatrixProxy>();
  proxy->SetCUDAArray(array_interface.c_str());
  std::shared_ptr<DMatrix> p_fmat = proxy;
  TestInplacePrediction(p_fmat, "gpu_predictor", kRows, kCols, 1);
  EXPECT_THROW(TestInplacePrediction(p_fmat, "gpu_predictor", kRows, kCols, 0), dmlc::Error);
}
// Presumably checks that gpu_predictor copes with input data that has fewer
// features than the trained model expects — helper is defined elsewhere;
// confirm against TestPredictionWithLesserFeatures.
TEST(GpuPredictor, LesserFeatures) {
TestPredictionWithLesserFeatures("gpu_predictor");
}
// Very basic test of empty model
TEST(GPUPredictor, ShapStump) {
cudaSetDevice(0);