Merge branch 'master' into sync-condition-2023Oct11

This commit is contained in:
Hui Liu
2023-10-27 10:09:37 -07:00
6 changed files with 283 additions and 52 deletions

View File

@@ -45,4 +45,97 @@ TEST(Context, ErrorInit) {
ASSERT_NE(msg.find("foo"), std::string::npos);
}
}
TEST(Context, SYCL) {
  Context ctx;

  // Expected dispatch result: DispatchDevice takes (cpu, cuda, sycl)
  // branches, so every SYCL device must select the third lambda.
  constexpr std::int32_t kSyclFlag{2};

  // Parse `device`, then verify the parsed DeviceOrd, its ordinal, which
  // DispatchDevice branch fires, and the operator<< string form.
  // (Each gtest ASSERT_* is legal here because the lambda returns void.)
  auto check_device = [&](std::string const& device, DeviceOrd expected,
                          std::int32_t ordinal, std::string const& repr) {
    ctx.UpdateAllowUnknown(Args{{"device", device}});
    ASSERT_EQ(ctx.Device(), expected);
    ASSERT_EQ(ctx.Ordinal(), ordinal);
    std::int32_t flag{0};
    ctx.DispatchDevice([&] { flag = -1; }, [&] { flag = 1; }, [&] { flag = 2; });
    ASSERT_EQ(flag, kSyclFlag);
    std::stringstream ss;
    ss << ctx.Device();
    ASSERT_EQ(ss.str(), repr);
  };

  // Default SYCL device
  check_device("sycl", DeviceOrd::SyclDefault(), -1, "sycl:-1");
  // SYCL device with idx
  check_device("sycl:42", DeviceOrd::SyclDefault(42), 42, "sycl:42");
  // SYCL cpu
  check_device("sycl:cpu", DeviceOrd::SyclCPU(), -1, "sycl:cpu:-1");
  // SYCL cpu with idx
  check_device("sycl:cpu:42", DeviceOrd::SyclCPU(42), 42, "sycl:cpu:42");
  // SYCL gpu
  check_device("sycl:gpu", DeviceOrd::SyclGPU(), -1, "sycl:gpu:-1");
  // SYCL gpu with idx
  check_device("sycl:gpu:42", DeviceOrd::SyclGPU(42), 42, "sycl:gpu:42");
}
} // namespace xgboost

View File

@@ -251,10 +251,10 @@ def test_gpu_transform(spark_diabetes_dataset) -> None:
model: SparkXGBRegressorModel = regressor.fit(train_df)
# The model trained with GPUs, and transform with GPU configurations.
assert model._gpu_transform()
assert model._run_on_gpu()
model.set_device("cpu")
assert not model._gpu_transform()
assert not model._run_on_gpu()
# without error
cpu_rows = model.transform(test_df).select("prediction").collect()
@@ -263,11 +263,11 @@ def test_gpu_transform(spark_diabetes_dataset) -> None:
# The model trained with CPUs. Even with GPU configurations,
# still prefer transforming with CPUs
assert not model._gpu_transform()
assert not model._run_on_gpu()
# Set gpu transform explicitly.
model.set_device("cuda")
assert model._gpu_transform()
assert model._run_on_gpu()
# without error
gpu_rows = model.transform(test_df).select("prediction").collect()

View File

@@ -888,6 +888,22 @@ class TestPySparkLocal:
clf = SparkXGBClassifier(device="cuda")
clf._validate_params()
def test_gpu_params(self) -> None:
    """_run_on_gpu() is False for a default estimator and True whenever any
    GPU-selecting parameter (device / tree_method / use_gpu) is supplied."""
    assert not SparkXGBClassifier()._run_on_gpu()
    gpu_selecting_params = [
        {"device": "cuda", "tree_method": "hist"},
        {"device": "cuda"},
        {"tree_method": "gpu_hist"},
        {"use_gpu": True},
    ]
    for params in gpu_selecting_params:
        assert SparkXGBClassifier(**params)._run_on_gpu()
def test_gpu_transform(self, clf_data: ClfData) -> None:
"""local mode"""
classifier = SparkXGBClassifier(device="cpu")
@@ -898,23 +914,23 @@ class TestPySparkLocal:
model.write().overwrite().save(path)
# The model trained with CPU, transform defaults to cpu
assert not model._gpu_transform()
assert not model._run_on_gpu()
# without error
model.transform(clf_data.cls_df_test).collect()
model.set_device("cuda")
assert model._gpu_transform()
assert model._run_on_gpu()
model_loaded = SparkXGBClassifierModel.load(path)
# The model trained with CPU, transform defaults to cpu
assert not model_loaded._gpu_transform()
assert not model_loaded._run_on_gpu()
# without error
model_loaded.transform(clf_data.cls_df_test).collect()
model_loaded.set_device("cuda")
assert model_loaded._gpu_transform()
assert model_loaded._run_on_gpu()
class XgboostLocalTest(SparkTestCase):