Remove internal use of gpu_id. (#9568)

Jiaming Yuan
2023-09-20 23:29:51 +08:00
committed by GitHub
parent 38ac52dd87
commit 8c676c889d
121 changed files with 1012 additions and 1044 deletions

View File

@@ -65,7 +65,7 @@ TEST(GBTree, PredictionCache) {
   gbtree.Configure({{"tree_method", "hist"}});
   auto p_m = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix();
-  linalg::Matrix<GradientPair> gpair({kRows}, ctx.Ordinal());
+  linalg::Matrix<GradientPair> gpair({kRows}, ctx.Device());
   gpair.Data()->Copy(GenerateRandomGradients(kRows));
   PredictionCacheEntry out_predictions;
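
For readers outside the tree: every hunk in this commit replaces a raw integer ordinal (gpu_id, Context::kCpuId, or a bare 0/-1) with a typed DeviceOrd handle. Below is a minimal sketch of the shape that handle appears to have, inferred only from the calls visible in this diff (DeviceOrd::CPU(), DeviceOrd::CUDA(0), IsCPU()); the real definition lives in include/xgboost/context.h and differs in detail.

#include <cstdint>
#include <iostream>
#include <string>

// Sketch only: approximates the typed device handle this commit migrates to.
// Factory names and predicates mirror the diff; everything else is assumed.
struct DeviceOrdSketch {
  enum class Type : std::int8_t { kCPU, kCUDA };
  Type device{Type::kCPU};
  std::int16_t ordinal{-1};  // CUDA device index; meaningless for CPU

  static DeviceOrdSketch CPU() { return {Type::kCPU, -1}; }
  static DeviceOrdSketch CUDA(std::int16_t i) { return {Type::kCUDA, i}; }

  bool IsCPU() const { return device == Type::kCPU; }
  bool IsCUDA() const { return device == Type::kCUDA; }

  // Mirrors the "device" parameter strings seen near the end of this diff.
  std::string Name() const {
    return IsCPU() ? "cpu" : "cuda:" + std::to_string(ordinal);
  }
};

int main() {
  auto d = DeviceOrdSketch::CUDA(0);
  std::cout << d.Name() << '\n';  // prints "cuda:0"
}

The payoff over a bare std::int32_t is that "CPU" stops being the magic value -1 and device checks become self-describing predicates, which is exactly the substitution the remaining hunks perform.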
@@ -156,7 +156,7 @@ TEST(GBTree, ChoosePredictor) {
   // pull data into device.
   data.HostVector();
-  data.SetDevice(0);
+  data.SetDevice(DeviceOrd::CUDA(0));
   data.DeviceSpan();
   ASSERT_FALSE(data.HostCanWrite());
@@ -215,7 +215,7 @@ TEST(GBTree, ChooseTreeMethod) {
   }
   learner->Configure();
   for (std::int32_t i = 0; i < 3; ++i) {
-    linalg::Matrix<GradientPair> gpair{{Xy->Info().num_row_}, Context::kCpuId};
+    linalg::Matrix<GradientPair> gpair{{Xy->Info().num_row_}, DeviceOrd::CPU()};
     gpair.Data()->Copy(GenerateRandomGradients(Xy->Info().num_row_));
     learner->BoostOneIter(0, Xy, &gpair);
   }
@@ -400,7 +400,7 @@ class Dart : public testing::TestWithParam<char const*> {
     if (device == "GPU") {
       ctx = MakeCUDACtx(0);
     }
-    auto rng = RandomDataGenerator(kRows, kCols, 0).Device(ctx.gpu_id);
+    auto rng = RandomDataGenerator(kRows, kCols, 0).Device(ctx.Device());
     auto array_str = rng.GenerateArrayInterface(&data);
     auto p_mat = GetDMatrixFromData(data.HostVector(), kRows, kCols);
@@ -710,7 +710,7 @@ TEST(GBTree, InplacePredictionError) {
   auto test_qdm_err = [&](std::string booster, Context const* ctx) {
     std::shared_ptr<DMatrix> p_fmat;
     bst_bin_t max_bins = 16;
-    auto rng = RandomDataGenerator{n_samples, n_features, 0.5f}.Device(ctx->gpu_id).Bins(max_bins);
+    auto rng = RandomDataGenerator{n_samples, n_features, 0.5f}.Device(ctx->Device()).Bins(max_bins);
     if (ctx->IsCPU()) {
       p_fmat = rng.GenerateQuantileDMatrix(true);
     } else {
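
Two of the hunks above change RandomDataGenerator's fluent Device() setter from taking an integer ordinal to taking the typed handle. Here is a hypothetical, self-contained rendering of that builder pattern; ToyDataGen and Dev are illustrations for this write-up, not XGBoost API.

#include <cstdint>
#include <iostream>
#include <string>

// Minimal stand-in for the typed device handle sketched earlier.
struct Dev {
  std::int16_t ordinal{-1};  // -1 = CPU here, purely for brevity
  std::string Name() const {
    return ordinal < 0 ? "cpu" : "cuda:" + std::to_string(ordinal);
  }
};

// Hypothetical builder mirroring RandomDataGenerator's fluent setters:
// after this commit, Device() accepts the handle rather than a raw int.
class ToyDataGen {
  Dev device_{};
  std::int32_t bins_{256};

 public:
  ToyDataGen& Device(Dev d) { device_ = d; return *this; }
  ToyDataGen& Bins(std::int32_t b) { bins_ = b; return *this; }
  void Generate() const {
    std::cout << "generate on " << device_.Name() << ", " << bins_ << " bins\n";
  }
};

int main() {
  // Call shape matches the updated test sites: .Device(ctx->Device()).Bins(16)
  ToyDataGen{}.Device(Dev{0}).Bins(16).Generate();
}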

View File

@@ -22,7 +22,7 @@ void TestInplaceFallback(Context const* ctx) {
   bst_feature_t n_features{32};
   HostDeviceVector<float> X_storage;
   // use a different device than the learner
-  std::int32_t data_ordinal = ctx->IsCPU() ? 0 : -1;
+  auto data_ordinal = ctx->IsCPU() ? DeviceOrd::CUDA(0) : DeviceOrd::CPU();
   auto X = RandomDataGenerator{n_samples, n_features, 0.0}
                .Device(data_ordinal)
                .GenerateArrayInterface(&X_storage);
@@ -30,7 +30,7 @@ void TestInplaceFallback(Context const* ctx) {
   auto y = RandomDataGenerator{n_samples, 1u, 0.0}.GenerateArrayInterface(&y_storage);
   std::shared_ptr<DMatrix> Xy;
-  if (data_ordinal == Context::kCpuId) {
+  if (data_ordinal.IsCPU()) {
     auto X_adapter = data::ArrayAdapter{StringView{X}};
     Xy.reset(DMatrix::Create(&X_adapter, std::numeric_limits<float>::quiet_NaN(), ctx->Threads()));
   } else {
@@ -49,7 +49,7 @@ void TestInplaceFallback(Context const* ctx) {
   std::shared_ptr<DMatrix> p_m{new data::DMatrixProxy};
   auto proxy = std::dynamic_pointer_cast<data::DMatrixProxy>(p_m);
-  if (data_ordinal == Context::kCpuId) {
+  if (data_ordinal.IsCPU()) {
     proxy->SetArrayData(StringView{X});
   } else {
     proxy->SetCUDAArray(X.c_str());
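
The fallback test above deliberately places the data on the opposite device from the learner, then branches on IsCPU() instead of comparing against the old kCpuId sentinel. A self-contained sketch of that selection-plus-dispatch logic; the type and function names are stand-ins for DeviceOrd and DMatrixProxy, not the real API.

#include <iostream>
#include <string>

struct Dev {
  enum Kind { kCPU, kCUDA } kind{kCPU};
  int ordinal{-1};
  bool IsCPU() const { return kind == kCPU; }
};

// Stand-in for the DMatrixProxy branches above: host data goes through the
// array-interface path, device data through the CUDA-array path.
void SetProxyData(Dev d, std::string const& array_interface) {
  if (d.IsCPU()) {
    std::cout << "SetArrayData(" << array_interface << ")\n";
  } else {
    std::cout << "SetCUDAArray(" << array_interface << ")\n";
  }
}

int main() {
  bool learner_on_cpu = true;
  // "use a different device than the learner", as the test comment says
  Dev data_dev = learner_on_cpu ? Dev{Dev::kCUDA, 0} : Dev{Dev::kCPU, -1};
  SetProxyData(data_dev, "{\"shape\": [128, 32]}");
}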
@@ -64,7 +64,7 @@ void TestInplaceFallback(Context const* ctx) {
   // test when the contexts match
   Context new_ctx = *proxy->Ctx();
-  ASSERT_NE(new_ctx.gpu_id, ctx->gpu_id);
+  ASSERT_NE(new_ctx.Ordinal(), ctx->Ordinal());
   learner->SetParam("device", new_ctx.DeviceName());
   HostDeviceVector<float>* out_predt_1{nullptr};
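
This last hunk swaps direct gpu_id field access for the Ordinal() accessor, after which the test re-points the learner at the data's device via DeviceName(). A toy version of that flow, with a stand-in for the parts of Context the hunk touches; not the real type.

#include <cassert>
#include <string>

// Stand-in for Context: just the two accessors used in the hunk above.
struct CtxSketch {
  int ordinal{-1};  // -1 for CPU, >= 0 for a CUDA device
  int Ordinal() const { return ordinal; }
  std::string DeviceName() const {
    return ordinal < 0 ? "cpu" : "cuda:" + std::to_string(ordinal);
  }
};

int main() {
  CtxSketch learner_ctx{-1};  // learner configured for the CPU
  CtxSketch data_ctx{0};      // incoming data lives on cuda:0
  // Mirrors ASSERT_NE(new_ctx.Ordinal(), ctx->Ordinal()) above.
  assert(data_ctx.Ordinal() != learner_ctx.Ordinal());
  // Mirrors learner->SetParam("device", new_ctx.DeviceName()).
  std::string device_param = data_ctx.DeviceName();  // "cuda:0"
  (void)device_param;
}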