Unify test helpers for creating ctx. (#9274)

Jiaming Yuan 2023-06-10 03:35:22 +08:00 committed by GitHub
parent ea0deeca68
commit 152e2fb072
36 changed files with 161 additions and 169 deletions
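
In short, this commit removes the ad-hoc CreateEmptyGenericParam(gpu_id) helper from the C++ test suite and standardizes on two idioms: a default-constructed Context for CPU-only tests, and MakeCUDACtx(device) wherever a CUDA device (or the build-dependent GPUIDX index) is involved. The following is a condensed before/after sketch paraphrased from the hunks below; the function name and include paths are illustrative, not taken from the commit.

#include <xgboost/context.h>  // for Context
#include "../helpers.h"       // test helpers (MakeCUDACtx, GPUIDX); path as seen from a tests/cpp subdirectory

namespace xgboost {
inline void CtxCreationPattern() {  // illustrative only
  // Before: Context ctx = CreateEmptyGenericParam(Context::kCpuId);  // CPU
  //         Context ctx = CreateEmptyGenericParam(0);                // CUDA device 0
  Context cpu_ctx;                        // after: CPU-only tests use a plain Context
  Context cuda_ctx = MakeCUDACtx(0);      // after: GPU tests request CUDA device 0
  Context unified = MakeCUDACtx(GPUIDX);  // after: shared CPU/GPU tests pick the device per build
  (void)cpu_ctx; (void)cuda_ctx; (void)unified;
}
}  // namespace xgboost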

View File

@ -11,7 +11,7 @@
#include "../../../src/common/algorithm.cuh"
#include "../../../src/common/device_helpers.cuh"
#include "../helpers.h" // CreateEmptyGenericParam
#include "../helpers.h" // MakeCUDACtx
namespace xgboost {
namespace common {
@ -83,7 +83,7 @@ TEST(Algorithm, GpuArgSort) {
TEST(Algorithm, SegmentedSequence) {
dh::device_vector<std::size_t> idx(16);
dh::device_vector<std::size_t> ptr(3);
Context ctx = CreateEmptyGenericParam(0);
Context ctx = MakeCUDACtx(0);
ptr[0] = 0;
ptr[1] = 4;
ptr[2] = idx.size();

View File

@ -14,7 +14,7 @@ TEST(DenseColumn, Test) {
int32_t max_num_bins[] = {static_cast<int32_t>(std::numeric_limits<uint8_t>::max()) + 1,
static_cast<int32_t>(std::numeric_limits<uint16_t>::max()) + 1,
static_cast<int32_t>(std::numeric_limits<uint16_t>::max()) + 2};
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
BinTypeSize last{kUint8BinsTypeSize};
for (int32_t max_num_bin : max_num_bins) {
auto dmat = RandomDataGenerator(100, 10, 0.0).GenerateDMatrix();
@ -63,7 +63,7 @@ TEST(SparseColumn, Test) {
int32_t max_num_bins[] = {static_cast<int32_t>(std::numeric_limits<uint8_t>::max()) + 1,
static_cast<int32_t>(std::numeric_limits<uint16_t>::max()) + 1,
static_cast<int32_t>(std::numeric_limits<uint16_t>::max()) + 2};
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
for (int32_t max_num_bin : max_num_bins) {
auto dmat = RandomDataGenerator(100, 1, 0.85).GenerateDMatrix();
GHistIndexMatrix gmat{&ctx, dmat.get(), max_num_bin, 0.5f, false};
@ -92,7 +92,7 @@ TEST(DenseColumnWithMissing, Test) {
int32_t max_num_bins[] = {static_cast<int32_t>(std::numeric_limits<uint8_t>::max()) + 1,
static_cast<int32_t>(std::numeric_limits<uint16_t>::max()) + 1,
static_cast<int32_t>(std::numeric_limits<uint16_t>::max()) + 2};
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
for (int32_t max_num_bin : max_num_bins) {
auto dmat = RandomDataGenerator(100, 1, 0.5).GenerateDMatrix();
GHistIndexMatrix gmat(&ctx, dmat.get(), max_num_bin, 0.2, false);

View File

@ -156,7 +156,7 @@ TEST(CutsBuilder, SearchGroupInd) {
}
TEST(HistUtil, DenseCutsCategorical) {
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
int categorical_sizes[] = {2, 6, 8, 12};
int num_bins = 256;
int sizes[] = {25, 100, 1000};
@ -177,7 +177,7 @@ TEST(HistUtil, DenseCutsCategorical) {
}
TEST(HistUtil, DenseCutsAccuracyTest) {
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100};
int num_columns = 5;
@ -195,7 +195,7 @@ TEST(HistUtil, DenseCutsAccuracyTestWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
@ -218,7 +218,7 @@ void TestQuantileWithHessian(bool use_sorted) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {1000, 1500};
int num_columns = 5;
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
@ -257,7 +257,7 @@ TEST(HistUtil, DenseCutsExternalMemory) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
dmlc::TemporaryDirectory tmpdir;
@ -278,7 +278,7 @@ TEST(HistUtil, IndexBinBound) {
kUint32BinsTypeSize};
size_t constexpr kRows = 100;
size_t constexpr kCols = 10;
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
size_t bin_id = 0;
for (auto max_bin : bin_sizes) {
auto p_fmat = RandomDataGenerator(kRows, kCols, 0).GenerateDMatrix();
@ -303,7 +303,7 @@ TEST(HistUtil, IndexBinData) {
static_cast<uint64_t>(std::numeric_limits<uint16_t>::max()) + 2 };
size_t constexpr kRows = 100;
size_t constexpr kCols = 10;
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
for (auto max_bin : kBinSizes) {
auto p_fmat = RandomDataGenerator(kRows, kCols, 0).GenerateDMatrix();
@ -331,7 +331,7 @@ void TestSketchFromWeights(bool with_group) {
size_t constexpr kRows = 300, kCols = 20, kBins = 256;
size_t constexpr kGroups = 10;
auto m = RandomDataGenerator{kRows, kCols, 0}.Device(0).GenerateDMatrix();
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
common::HistogramCuts cuts = SketchOnDMatrix(&ctx, m.get(), kBins);
MetaInfo info;
@ -397,7 +397,7 @@ TEST(HistUtil, SketchFromWeights) {
}
TEST(HistUtil, SketchCategoricalFeatures) {
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
TestCategoricalSketch(1000, 256, 32, false, [&ctx](DMatrix* p_fmat, int32_t num_bins) {
return SketchOnDMatrix(&ctx, p_fmat, num_bins);
});

View File

@ -310,7 +310,7 @@ TEST(HistUtil, AdapterDeviceSketch) {
data::CupyAdapter adapter(str);
auto device_cuts = MakeUnweightedCutsForTest(adapter, num_bins, missing);
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
auto host_cuts = GetHostCuts(&ctx, &adapter, num_bins, missing);
EXPECT_EQ(device_cuts.Values(), host_cuts.Values());

View File

@ -302,7 +302,7 @@ namespace {
void TestSameOnAllWorkers() {
auto const world = collective::GetWorldSize();
constexpr size_t kRows = 1000, kCols = 100;
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
RunWithSeedsAndBins(
kRows, [=, &ctx](int32_t seed, size_t n_bins, MetaInfo const&) {

View File

@ -21,7 +21,7 @@
#include "../../../src/data/adapter.h" // for SparsePageAdapterBatch
#include "../../../src/data/gradient_index.h" // for GHistIndexMatrix
#include "../../../src/tree/param.h" // for TrainParam
#include "../helpers.h" // for CreateEmptyGenericParam, GenerateRandomCa...
#include "../helpers.h" // for GenerateRandomCategoricalSingleColumn...
#include "xgboost/base.h" // for bst_bin_t
#include "xgboost/context.h" // for Context
#include "xgboost/host_device_vector.h" // for HostDeviceVector
@ -29,7 +29,7 @@
namespace xgboost {
namespace data {
TEST(GradientIndex, ExternalMemory) {
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
std::unique_ptr<DMatrix> dmat = CreateSparsePageDMatrix(10000);
std::vector<size_t> base_rowids;
std::vector<float> hessian(dmat->Info().num_row_, 1);
@ -58,7 +58,7 @@ TEST(GradientIndex, FromCategoricalBasic) {
size_t max_bins = 8;
auto x = GenerateRandomCategoricalSingleColumn(kRows, kCats);
auto m = GetDMatrixFromData(x, kRows, 1);
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
auto &h_ft = m->Info().feature_types.HostVector();
h_ft.resize(kCols, FeatureType::kCategorical);

View File

@ -67,7 +67,7 @@ void TestSparseDMatrixLoadFile(Context const* ctx) {
}
TEST(SparsePageDMatrix, LoadFile) {
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
TestSparseDMatrixLoadFile<SparsePage>(&ctx);
TestSparseDMatrixLoadFile<CSCPage>(&ctx);
TestSparseDMatrixLoadFile<SortedCSCPage>(&ctx);
@ -77,7 +77,7 @@ TEST(SparsePageDMatrix, LoadFile) {
template <typename Page>
void TestRetainPage() {
auto m = CreateSparsePageDMatrix(10000);
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
auto batches = m->GetBatches<Page>(&ctx);
auto begin = batches.begin();
auto end = batches.end();
@ -145,7 +145,7 @@ TEST(SparsePageDMatrix, ColAccess) {
const std::string tmp_file = tempdir.path + "/simple.libsvm";
CreateSimpleTestData(tmp_file);
xgboost::DMatrix *dmat = xgboost::DMatrix::Load(UriSVM(tmp_file, tmp_file));
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
// Loop over the batches and assert the data is as expected
size_t iter = 0;
@ -224,7 +224,7 @@ TEST(SparsePageDMatrix, ColAccessBatches) {
// Create multiple sparse pages
std::unique_ptr<xgboost::DMatrix> dmat{xgboost::CreateSparsePageDMatrix(kEntries)};
ASSERT_EQ(dmat->Ctx()->Threads(), AllThreadsForTest());
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
for (auto const &page : dmat->GetBatches<xgboost::CSCPage>(&ctx)) {
ASSERT_EQ(dmat->Info().num_col_, page.Size());
}

View File

@ -108,7 +108,7 @@ TEST(SparsePageDMatrix, RetainEllpackPage) {
}
TEST(SparsePageDMatrix, EllpackPageContent) {
auto ctx = CreateEmptyGenericParam(0);
auto ctx = MakeCUDACtx(0);
constexpr size_t kRows = 6;
constexpr size_t kCols = 2;
constexpr size_t kPageSize = 1;

View File

@ -382,13 +382,6 @@ std::unique_ptr<GradientBooster> CreateTrainedGBM(std::string name, Args kwargs,
LearnerModelParam const* learner_model_param,
Context const* generic_param);
inline Context CreateEmptyGenericParam(int gpu_id) {
xgboost::Context tparam;
std::vector<std::pair<std::string, std::string>> args{{"gpu_id", std::to_string(gpu_id)}};
tparam.Init(args);
return tparam;
}
inline std::unique_ptr<HostDeviceVector<GradientPair>> GenerateGradients(
std::size_t rows, bst_target_t n_targets = 1) {
auto p_gradients = std::make_unique<HostDeviceVector<GradientPair>>(rows * n_targets);
@ -407,9 +400,14 @@ inline std::unique_ptr<HostDeviceVector<GradientPair>> GenerateGradients(
}
/**
* \brief Make a context that uses CUDA.
* \brief Make a context that uses CUDA if device >= 0.
*/
inline Context MakeCUDACtx(std::int32_t device) { return Context{}.MakeCUDA(device); }
inline Context MakeCUDACtx(std::int32_t device) {
if (device == Context::kCpuId) {
return Context{};
}
return Context{}.MakeCUDA(device);
}
inline HostDeviceVector<GradientPair> GenerateRandomGradients(const size_t n_rows,
float lower= 0.0f, float upper = 1.0f) {
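
The hunk above is the core of the change: CreateEmptyGenericParam is deleted and MakeCUDACtx gains a Context::kCpuId branch, so one helper now covers both CPU and CUDA test contexts. That branch is what lets the MakeCUDACtx(GPUIDX) calls in the unified CPU/GPU test sources below behave correctly on CPU-only builds. The snippet that follows is a hypothetical gtest case, not part of the commit, sketching the expected dispatch under the assumption that Context::MakeCUDA records the device ordinal in gpu_id.

#include <gtest/gtest.h>
#include <xgboost/context.h>  // for Context
#include "../helpers.h"       // for MakeCUDACtx

namespace xgboost {
TEST(Helpers, MakeCUDACtxDispatch) {  // illustrative only
  // Passing kCpuId (-1) falls back to a default CPU context.
  Context cpu = MakeCUDACtx(Context::kCpuId);
  ASSERT_EQ(cpu.gpu_id, Context::kCpuId);
  // A non-negative ordinal requests that CUDA device.
  Context gpu = MakeCUDACtx(0);
  ASSERT_EQ(gpu.gpu_id, 0);
}
}  // namespace xgboost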

View File

@ -12,19 +12,19 @@
namespace xgboost {
inline void TestUpdaterJsonIO(std::string updater_str) {
auto runtime = xgboost::CreateEmptyGenericParam(GPUIDX);
Context ctx{MakeCUDACtx(GPUIDX)};
Json config_0 {Object() };
{
auto updater = std::unique_ptr<xgboost::LinearUpdater>(
xgboost::LinearUpdater::Create(updater_str, &runtime));
auto updater =
std::unique_ptr<xgboost::LinearUpdater>(xgboost::LinearUpdater::Create(updater_str, &ctx));
updater->Configure({{"eta", std::to_string(3.14)}});
updater->SaveConfig(&config_0);
}
{
auto updater = std::unique_ptr<xgboost::LinearUpdater>(
xgboost::LinearUpdater::Create(updater_str, &runtime));
auto updater =
std::unique_ptr<xgboost::LinearUpdater>(xgboost::LinearUpdater::Create(updater_str, &ctx));
updater->LoadConfig(config_0);
Json config_1 { Object() };
updater->SaveConfig(&config_1);

View File

@ -17,7 +17,7 @@ TEST(Linear, Shotgun) {
auto p_fmat = xgboost::RandomDataGenerator(kRows, kCols, 0).GenerateDMatrix();
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
LearnerModelParam mparam{MakeMP(kCols, .5, 1)};
{
@ -49,7 +49,7 @@ TEST(Linear, coordinate) {
auto p_fmat = xgboost::RandomDataGenerator(kRows, kCols, 0).GenerateDMatrix();
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
LearnerModelParam mparam{MakeMP(kCols, .5, 1)};
auto updater = std::unique_ptr<xgboost::LinearUpdater>(

View File

@ -13,7 +13,7 @@ TEST(Linear, GPUCoordinate) {
size_t constexpr kCols = 10;
auto mat = xgboost::RandomDataGenerator(kRows, kCols, 0).GenerateDMatrix();
auto ctx = CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(0);
LearnerModelParam mparam{MakeMP(kCols, .5, 1)};
auto updater = std::unique_ptr<xgboost::LinearUpdater>(

View File

@ -11,7 +11,7 @@ namespace xgboost {
namespace metric {
inline void VerifyBinaryAUC(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<Metric> uni_ptr{Metric::Create("auc", &ctx)};
Metric* metric = uni_ptr.get();
ASSERT_STREQ(metric->Name(), "auc");
@ -54,7 +54,7 @@ inline void VerifyBinaryAUC(DataSplitMode data_split_mode = DataSplitMode::kRow)
}
inline void VerifyMultiClassAUC(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<Metric> uni_ptr{Metric::Create("auc", &ctx)};
auto metric = uni_ptr.get();
@ -115,7 +115,7 @@ inline void VerifyMultiClassAUC(DataSplitMode data_split_mode = DataSplitMode::k
}
inline void VerifyRankingAUC(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<Metric> metric{Metric::Create("auc", &ctx)};
// single group
@ -149,7 +149,7 @@ inline void VerifyRankingAUC(DataSplitMode data_split_mode = DataSplitMode::kRow
}
inline void VerifyPRAUC(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
xgboost::Metric* metric = xgboost::Metric::Create("aucpr", &ctx);
ASSERT_STREQ(metric->Name(), "aucpr");
@ -186,7 +186,7 @@ inline void VerifyPRAUC(DataSplitMode data_split_mode = DataSplitMode::kRow) {
}
inline void VerifyMultiClassPRAUC(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<Metric> metric{Metric::Create("aucpr", &ctx)};
@ -210,7 +210,7 @@ inline void VerifyMultiClassPRAUC(DataSplitMode data_split_mode = DataSplitMode:
}
inline void VerifyRankingPRAUC(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<Metric> metric{Metric::Create("aucpr", &ctx)};

View File

@ -15,7 +15,7 @@ namespace xgboost {
namespace metric {
inline void CheckDeterministicMetricElementWise(StringView name, int32_t device) {
auto ctx = CreateEmptyGenericParam(device);
auto ctx = MakeCUDACtx(device);
std::unique_ptr<Metric> metric{Metric::Create(name.c_str(), &ctx)};
HostDeviceVector<float> predts;
@ -46,7 +46,7 @@ inline void CheckDeterministicMetricElementWise(StringView name, int32_t device)
}
inline void VerifyRMSE(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
xgboost::Metric * metric = xgboost::Metric::Create("rmse", &ctx);
metric->Configure({});
ASSERT_STREQ(metric->Name(), "rmse");
@ -75,7 +75,7 @@ inline void VerifyRMSE(DataSplitMode data_split_mode = DataSplitMode::kRow) {
}
inline void VerifyRMSLE(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
xgboost::Metric * metric = xgboost::Metric::Create("rmsle", &ctx);
metric->Configure({});
ASSERT_STREQ(metric->Name(), "rmsle");
@ -104,7 +104,7 @@ inline void VerifyRMSLE(DataSplitMode data_split_mode = DataSplitMode::kRow) {
}
inline void VerifyMAE(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
xgboost::Metric * metric = xgboost::Metric::Create("mae", &ctx);
metric->Configure({});
ASSERT_STREQ(metric->Name(), "mae");
@ -133,7 +133,7 @@ inline void VerifyMAE(DataSplitMode data_split_mode = DataSplitMode::kRow) {
}
inline void VerifyMAPE(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
xgboost::Metric * metric = xgboost::Metric::Create("mape", &ctx);
metric->Configure({});
ASSERT_STREQ(metric->Name(), "mape");
@ -162,7 +162,7 @@ inline void VerifyMAPE(DataSplitMode data_split_mode = DataSplitMode::kRow) {
}
inline void VerifyMPHE(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<xgboost::Metric> metric{xgboost::Metric::Create("mphe", &ctx)};
metric->Configure({});
ASSERT_STREQ(metric->Name(), "mphe");
@ -197,7 +197,7 @@ inline void VerifyMPHE(DataSplitMode data_split_mode = DataSplitMode::kRow) {
}
inline void VerifyLogLoss(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
xgboost::Metric * metric = xgboost::Metric::Create("logloss", &ctx);
metric->Configure({});
ASSERT_STREQ(metric->Name(), "logloss");
@ -230,7 +230,7 @@ inline void VerifyLogLoss(DataSplitMode data_split_mode = DataSplitMode::kRow) {
}
inline void VerifyError(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
xgboost::Metric * metric = xgboost::Metric::Create("error", &ctx);
metric->Configure({});
ASSERT_STREQ(metric->Name(), "error");
@ -292,7 +292,7 @@ inline void VerifyError(DataSplitMode data_split_mode = DataSplitMode::kRow) {
}
inline void VerifyPoissonNegLogLik(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
xgboost::Metric * metric = xgboost::Metric::Create("poisson-nloglik", &ctx);
metric->Configure({});
ASSERT_STREQ(metric->Name(), "poisson-nloglik");
@ -332,7 +332,7 @@ inline void VerifyMultiRMSE(DataSplitMode data_split_mode = DataSplitMode::kRow)
HostDeviceVector<float> predt(n_samples * n_targets, 0);
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<Metric> metric{Metric::Create("rmse", &ctx)};
metric->Configure({});
@ -347,7 +347,7 @@ inline void VerifyMultiRMSE(DataSplitMode data_split_mode = DataSplitMode::kRow)
}
inline void VerifyQuantile(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<Metric> metric{Metric::Create("quantile", &ctx)};
HostDeviceVector<float> predts{0.1f, 0.9f, 0.1f, 0.9f};

View File

@ -2,10 +2,10 @@
#include <xgboost/metric.h>
#include "../helpers.h"
namespace xgboost {
TEST(Metric, UnknownMetric) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
xgboost::Metric * metric = nullptr;
auto ctx = MakeCUDACtx(GPUIDX);
xgboost::Metric* metric = nullptr;
EXPECT_ANY_THROW(metric = xgboost::Metric::Create("unknown_name", &ctx));
EXPECT_NO_THROW(metric = xgboost::Metric::Create("rmse", &ctx));
if (metric) {
@ -18,3 +18,4 @@ TEST(Metric, UnknownMetric) {
delete metric;
}
}
} // namespace xgboost

View File

@ -8,7 +8,7 @@ namespace xgboost {
namespace metric {
inline void CheckDeterministicMetricMultiClass(StringView name, int32_t device) {
auto ctx = CreateEmptyGenericParam(device);
auto ctx = MakeCUDACtx(device);
std::unique_ptr<Metric> metric{Metric::Create(name.c_str(), &ctx)};
HostDeviceVector<float> predts;
@ -45,7 +45,7 @@ inline void CheckDeterministicMetricMultiClass(StringView name, int32_t device)
}
inline void TestMultiClassError(int device, DataSplitMode data_split_mode) {
auto ctx = xgboost::CreateEmptyGenericParam(device);
auto ctx = MakeCUDACtx(device);
ctx.gpu_id = device;
xgboost::Metric * metric = xgboost::Metric::Create("merror", &ctx);
metric->Configure({});
@ -66,7 +66,7 @@ inline void VerifyMultiClassError(DataSplitMode data_split_mode = DataSplitMode:
}
inline void TestMultiClassLogLoss(int device, DataSplitMode data_split_mode) {
auto ctx = xgboost::CreateEmptyGenericParam(device);
auto ctx = MakeCUDACtx(device);
ctx.gpu_id = device;
xgboost::Metric * metric = xgboost::Metric::Create("mlogloss", &ctx);
metric->Configure({});

View File

@ -22,7 +22,7 @@ namespace metric {
#if !defined(__CUDACC__)
TEST(Metric, AMS) {
auto ctx = CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
EXPECT_ANY_THROW(Metric::Create("ams", &ctx));
Metric* metric = Metric::Create("ams@0.5f", &ctx);
ASSERT_STREQ(metric->Name(), "ams@0.5");

View File

@ -20,7 +20,7 @@
namespace xgboost::metric {
inline void VerifyPrecision(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<xgboost::Metric> metric{Metric::Create("pre", &ctx)};
ASSERT_STREQ(metric->Name(), "pre");
EXPECT_NEAR(GetMetricEval(metric.get(), {0, 1}, {0, 1}, {}, {}, data_split_mode), 0.5, 1e-7);
@ -44,7 +44,7 @@ inline void VerifyPrecision(DataSplitMode data_split_mode = DataSplitMode::kRow)
}
inline void VerifyNDCG(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
Metric * metric = xgboost::Metric::Create("ndcg", &ctx);
ASSERT_STREQ(metric->Name(), "ndcg");
EXPECT_ANY_THROW(GetMetricEval(metric, {0, 1}, {}, {}, {}, data_split_mode));
@ -102,7 +102,7 @@ inline void VerifyNDCG(DataSplitMode data_split_mode = DataSplitMode::kRow) {
}
inline void VerifyMAP(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
Metric * metric = xgboost::Metric::Create("map", &ctx);
ASSERT_STREQ(metric->Name(), "map");
EXPECT_NEAR(GetMetricEval(metric, {0, 1}, {0, 1}, {}, {}, data_split_mode), 1, kRtEps);
@ -150,7 +150,7 @@ inline void VerifyMAP(DataSplitMode data_split_mode = DataSplitMode::kRow) {
}
inline void VerifyNDCGExpGain(DataSplitMode data_split_mode = DataSplitMode::kRow) {
Context ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
auto p_fmat = xgboost::RandomDataGenerator{0, 0, 0}.GenerateDMatrix();
MetaInfo& info = p_fmat->Info();

View File

@ -31,7 +31,7 @@ TEST_F(DeclareUnifiedDistributedTest(MetricTest), IntervalRegressionAccuracyColu
// Test configuration of AFT metric
TEST(AFTNegLogLikMetric, DeclareUnifiedTest(Configuration)) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<Metric> metric(Metric::Create("aft-nloglik", &ctx));
metric->Configure({{"aft_loss_distribution", "normal"}, {"aft_loss_distribution_scale", "10"}});

View File

@ -13,7 +13,7 @@
namespace xgboost {
namespace common {
inline void CheckDeterministicMetricElementWise(StringView name, int32_t device) {
auto ctx = CreateEmptyGenericParam(device);
auto ctx = MakeCUDACtx(device);
std::unique_ptr<Metric> metric{Metric::Create(name.c_str(), &ctx)};
metric->Configure(Args{});
@ -48,7 +48,7 @@ inline void CheckDeterministicMetricElementWise(StringView name, int32_t device)
}
inline void VerifyAFTNegLogLik(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
/**
* Test aggregate output from the AFT metric over a small test data set.
@ -79,7 +79,7 @@ inline void VerifyAFTNegLogLik(DataSplitMode data_split_mode = DataSplitMode::kR
}
inline void VerifyIntervalRegressionAccuracy(DataSplitMode data_split_mode = DataSplitMode::kRow) {
auto ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
auto p_fmat = EmptyDMatrix();
MetaInfo& info = p_fmat->Info();

View File

@ -16,7 +16,7 @@ namespace xgboost {
namespace common {
TEST(Objective, DeclareUnifiedTest(AFTObjConfiguration)) {
auto ctx = CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<ObjFunction> objective(ObjFunction::Create("survival:aft", &ctx));
objective->Configure({ {"aft_loss_distribution", "logistic"},
{"aft_loss_distribution_scale", "5"} });
@ -77,7 +77,7 @@ static inline void CheckGPairOverGridPoints(
}
TEST(Objective, DeclareUnifiedTest(AFTObjGPairUncensoredLabels)) {
auto ctx = CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<ObjFunction> obj(ObjFunction::Create("survival:aft", &ctx));
CheckGPairOverGridPoints(obj.get(), 100.0f, 100.0f, "normal",
@ -101,7 +101,7 @@ TEST(Objective, DeclareUnifiedTest(AFTObjGPairUncensoredLabels)) {
}
TEST(Objective, DeclareUnifiedTest(AFTObjGPairLeftCensoredLabels)) {
auto ctx = CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<ObjFunction> obj(ObjFunction::Create("survival:aft", &ctx));
CheckGPairOverGridPoints(obj.get(), 0.0f, 20.0f, "normal",
@ -122,7 +122,7 @@ TEST(Objective, DeclareUnifiedTest(AFTObjGPairLeftCensoredLabels)) {
}
TEST(Objective, DeclareUnifiedTest(AFTObjGPairRightCensoredLabels)) {
auto ctx = CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<ObjFunction> obj(ObjFunction::Create("survival:aft", &ctx));
CheckGPairOverGridPoints(obj.get(), 60.0f, std::numeric_limits<float>::infinity(), "normal",
@ -146,7 +146,7 @@ TEST(Objective, DeclareUnifiedTest(AFTObjGPairRightCensoredLabels)) {
}
TEST(Objective, DeclareUnifiedTest(AFTObjGPairIntervalCensoredLabels)) {
auto ctx = CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<ObjFunction> obj(ObjFunction::Create("survival:aft", &ctx));
CheckGPairOverGridPoints(obj.get(), 16.0f, 200.0f, "normal",

View File

@ -4,14 +4,12 @@
#include <limits>
#include "../helpers.h"
namespace xgboost {
TEST(Objective, DeclareUnifiedTest(HingeObj)) {
xgboost::Context ctx = xgboost::CreateEmptyGenericParam(GPUIDX);
std::unique_ptr<xgboost::ObjFunction> obj {
xgboost::ObjFunction::Create("binary:hinge", &ctx)
};
Context ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<ObjFunction> obj{ObjFunction::Create("binary:hinge", &ctx)};
xgboost::bst_float eps = std::numeric_limits<xgboost::bst_float>::min();
float eps = std::numeric_limits<xgboost::bst_float>::min();
CheckObjFunction(obj,
{-1.0f, -0.5f, 0.5f, 1.0f, -1.0f, -0.5f, 0.5f, 1.0f},
{ 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f},
@ -27,3 +25,4 @@ TEST(Objective, DeclareUnifiedTest(HingeObj)) {
ASSERT_NO_THROW(obj->DefaultEvalMetric());
}
} // namespace xgboost

View File

@ -9,7 +9,7 @@
namespace xgboost {
TEST(Objective, DeclareUnifiedTest(SoftmaxMultiClassObjGPair)) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
std::vector<std::pair<std::string, std::string>> args {{"num_class", "3"}};
std::unique_ptr<ObjFunction> obj {
ObjFunction::Create("multi:softmax", &ctx)
@ -36,7 +36,7 @@ TEST(Objective, DeclareUnifiedTest(SoftmaxMultiClassObjGPair)) {
}
TEST(Objective, DeclareUnifiedTest(SoftmaxMultiClassBasic)) {
auto ctx = CreateEmptyGenericParam(GPUIDX);
auto ctx = MakeCUDACtx(GPUIDX);
std::vector<std::pair<std::string, std::string>> args{
std::pair<std::string, std::string>("num_class", "3")};
@ -57,7 +57,7 @@ TEST(Objective, DeclareUnifiedTest(SoftmaxMultiClassBasic)) {
}
TEST(Objective, DeclareUnifiedTest(SoftprobMultiClassBasic)) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
std::vector<std::pair<std::string, std::string>> args {
std::pair<std::string, std::string>("num_class", "3")};

View File

@ -10,11 +10,11 @@
#include <memory> // std::unique_ptr
#include <vector> // std::vector
#include "../helpers.h" // CheckConfigReload,CreateEmptyGenericParam,DeclareUnifiedTest
#include "../helpers.h" // CheckConfigReload,MakeCUDACtx,DeclareUnifiedTest
namespace xgboost {
TEST(Objective, DeclareUnifiedTest(Quantile)) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
{
Args args{{"quantile_alpha", "[0.6, 0.8]"}};
@ -37,7 +37,7 @@ TEST(Objective, DeclareUnifiedTest(Quantile)) {
}
TEST(Objective, DeclareUnifiedTest(QuantileIntercept)) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
Args args{{"quantile_alpha", "[0.6, 0.8]"}};
std::unique_ptr<ObjFunction> obj{ObjFunction::Create("reg:quantileerror", &ctx)};
obj->Configure(args);

View File

@ -17,7 +17,7 @@
namespace xgboost {
TEST(Objective, DeclareUnifiedTest(LinearRegressionGPair)) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj{ObjFunction::Create("reg:squarederror", &ctx)};
@ -39,7 +39,7 @@ TEST(Objective, DeclareUnifiedTest(LinearRegressionGPair)) {
}
TEST(Objective, DeclareUnifiedTest(SquaredLog)) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj{ObjFunction::Create("reg:squaredlogerror", &ctx)};
@ -62,7 +62,7 @@ TEST(Objective, DeclareUnifiedTest(SquaredLog)) {
}
TEST(Objective, DeclareUnifiedTest(PseudoHuber)) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
Args args;
std::unique_ptr<ObjFunction> obj{ObjFunction::Create("reg:pseudohubererror", &ctx)};
@ -91,7 +91,7 @@ TEST(Objective, DeclareUnifiedTest(PseudoHuber)) {
}
TEST(Objective, DeclareUnifiedTest(LogisticRegressionGPair)) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj{ObjFunction::Create("reg:logistic", &ctx)};
@ -107,7 +107,7 @@ TEST(Objective, DeclareUnifiedTest(LogisticRegressionGPair)) {
}
TEST(Objective, DeclareUnifiedTest(LogisticRegressionBasic)) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj{ObjFunction::Create("reg:logistic", &ctx)};
@ -136,7 +136,7 @@ TEST(Objective, DeclareUnifiedTest(LogisticRegressionBasic)) {
}
TEST(Objective, DeclareUnifiedTest(LogisticRawGPair)) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj {
ObjFunction::Create("binary:logitraw", &ctx)
@ -152,7 +152,7 @@ TEST(Objective, DeclareUnifiedTest(LogisticRawGPair)) {
}
TEST(Objective, DeclareUnifiedTest(PoissonRegressionGPair)) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj {
ObjFunction::Create("count:poisson", &ctx)
@ -176,7 +176,7 @@ TEST(Objective, DeclareUnifiedTest(PoissonRegressionGPair)) {
}
TEST(Objective, DeclareUnifiedTest(PoissonRegressionBasic)) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj {
ObjFunction::Create("count:poisson", &ctx)
@ -205,7 +205,7 @@ TEST(Objective, DeclareUnifiedTest(PoissonRegressionBasic)) {
}
TEST(Objective, DeclareUnifiedTest(GammaRegressionGPair)) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj {
ObjFunction::Create("reg:gamma", &ctx)
@ -227,7 +227,7 @@ TEST(Objective, DeclareUnifiedTest(GammaRegressionGPair)) {
}
TEST(Objective, DeclareUnifiedTest(GammaRegressionBasic)) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj{ObjFunction::Create("reg:gamma", &ctx)};
@ -256,7 +256,7 @@ TEST(Objective, DeclareUnifiedTest(GammaRegressionBasic)) {
}
TEST(Objective, DeclareUnifiedTest(TweedieRegressionGPair)) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj{ObjFunction::Create("reg:tweedie", &ctx)};
@ -280,7 +280,7 @@ TEST(Objective, DeclareUnifiedTest(TweedieRegressionGPair)) {
#if defined(__CUDACC__)
TEST(Objective, CPU_vs_CUDA) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
ObjFunction* obj = ObjFunction::Create("reg:squarederror", &ctx);
HostDeviceVector<GradientPair> cpu_out_preds;
@ -331,7 +331,7 @@ TEST(Objective, CPU_vs_CUDA) {
#endif
TEST(Objective, DeclareUnifiedTest(TweedieRegressionBasic)) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj{ObjFunction::Create("reg:tweedie", &ctx)};
@ -360,7 +360,7 @@ TEST(Objective, DeclareUnifiedTest(TweedieRegressionBasic)) {
// CoxRegression not implemented in GPU code, no need for testing.
#if !defined(__CUDACC__)
TEST(Objective, CoxRegressionGPair) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj{ObjFunction::Create("survival:cox", &ctx)};
@ -375,7 +375,7 @@ TEST(Objective, CoxRegressionGPair) {
#endif
TEST(Objective, DeclareUnifiedTest(AbsoluteError)) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
std::unique_ptr<ObjFunction> obj{ObjFunction::Create("reg:absoluteerror", &ctx)};
obj->Configure({});
CheckConfigReload(obj, "reg:absoluteerror");
@ -419,7 +419,7 @@ TEST(Objective, DeclareUnifiedTest(AbsoluteError)) {
}
TEST(Objective, DeclareUnifiedTest(AbsoluteErrorLeaf)) {
Context ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx = MakeCUDACtx(GPUIDX);
bst_target_t constexpr kTargets = 3, kRows = 16;
std::unique_ptr<ObjFunction> obj{ObjFunction::Create("reg:absoluteerror", &ctx)};
obj->Configure({});

View File

@ -4,12 +4,10 @@
#include "../helpers.h"
namespace xgboost {
TEST(Plugin, ExampleObjective) {
xgboost::Context ctx = CreateEmptyGenericParam(GPUIDX);
xgboost::Context ctx = MakeCUDACtx(GPUIDX);
auto* obj = xgboost::ObjFunction::Create("mylogistic", &ctx);
ASSERT_EQ(obj->DefaultEvalMetric(), std::string{"logloss"});
delete obj;
}
} // namespace xgboost

View File

@ -12,7 +12,7 @@
namespace xgboost {
TEST(Plugin, OneAPIPredictorBasic) {
auto lparam = CreateEmptyGenericParam(0);
auto lparam = MakeCUDACtx(0);
std::unique_ptr<Predictor> oneapi_predictor =
std::unique_ptr<Predictor>(Predictor::Create("oneapi_predictor", &lparam));
@ -82,7 +82,7 @@ TEST(Plugin, OneAPIPredictorExternalMemory) {
dmlc::TemporaryDirectory tmpdir;
std::string filename = tmpdir.path + "/big.libsvm";
std::unique_ptr<DMatrix> dmat = CreateSparsePageDMatrix(12, 64, filename);
auto lparam = CreateEmptyGenericParam(0);
auto lparam = MakeCUDACtx(0);
std::unique_ptr<Predictor> oneapi_predictor =
std::unique_ptr<Predictor>(Predictor::Create("oneapi_predictor", &lparam));

View File

@ -9,7 +9,7 @@
namespace xgboost {
TEST(Plugin, LinearRegressionGPairOneAPI) {
Context tparam = CreateEmptyGenericParam(0);
Context tparam = MakeCUDACtx(0);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj {
@ -33,7 +33,7 @@ TEST(Plugin, LinearRegressionGPairOneAPI) {
}
TEST(Plugin, SquaredLogOneAPI) {
Context tparam = CreateEmptyGenericParam(0);
Context tparam = MakeCUDACtx(0);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj { ObjFunction::Create("reg:squaredlogerror_oneapi", &tparam) };
@ -56,7 +56,7 @@ TEST(Plugin, SquaredLogOneAPI) {
}
TEST(Plugin, LogisticRegressionGPairOneAPI) {
Context tparam = CreateEmptyGenericParam(0);
Context tparam = MakeCUDACtx(0);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj { ObjFunction::Create("reg:logistic_oneapi", &tparam) };
@ -72,7 +72,7 @@ TEST(Plugin, LogisticRegressionGPairOneAPI) {
}
TEST(Plugin, LogisticRegressionBasicOneAPI) {
Context lparam = CreateEmptyGenericParam(0);
Context lparam = MakeCUDACtx(0);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj {
ObjFunction::Create("reg:logistic_oneapi", &lparam)
@ -103,7 +103,7 @@ TEST(Plugin, LogisticRegressionBasicOneAPI) {
}
TEST(Plugin, LogisticRawGPairOneAPI) {
Context lparam = CreateEmptyGenericParam(0);
Context lparam = MakeCUDACtx(0);
std::vector<std::pair<std::string, std::string>> args;
std::unique_ptr<ObjFunction> obj {
ObjFunction::Create("binary:logitraw_oneapi", &lparam)
@ -120,7 +120,7 @@ TEST(Plugin, LogisticRawGPairOneAPI) {
}
TEST(Plugin, CPUvsOneAPI) {
Context ctx = CreateEmptyGenericParam(0);
Context ctx = MakeCUDACtx(0);
ObjFunction * obj_cpu =
ObjFunction::Create("reg:squarederror", &ctx);
@ -140,8 +140,8 @@ TEST(Plugin, CPUvsOneAPI) {
}
auto& info = pdmat->Info();
info.labels_.Resize(kRows);
auto& h_labels = info.labels_.HostVector();
info.labels.Reshape(kRows, 1);
auto& h_labels = info.labels.Data()->HostVector();
for (size_t i = 0; i < h_labels.size(); ++i) {
h_labels[i] = 1 / static_cast<float>(i+1);
}

View File

@ -20,16 +20,15 @@ namespace xgboost {
namespace {
void TestBasic(DMatrix* dmat) {
auto lparam = CreateEmptyGenericParam(GPUIDX);
Context ctx;
std::unique_ptr<Predictor> cpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor", &lparam));
std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor", &ctx));
size_t const kRows = dmat->Info().num_row_;
size_t const kCols = dmat->Info().num_col_;
LearnerModelParam mparam{MakeMP(kCols, .0, 1)};
Context ctx;
ctx.UpdateAllowUnknown(Args{});
gbm::GBTreeModel model = CreateTestModel(&mparam, &ctx);

View File

@ -19,8 +19,8 @@ namespace xgboost {
namespace predictor {
TEST(GPUPredictor, Basic) {
auto cpu_lparam = CreateEmptyGenericParam(-1);
auto gpu_lparam = CreateEmptyGenericParam(0);
auto cpu_lparam = MakeCUDACtx(-1);
auto gpu_lparam = MakeCUDACtx(0);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &gpu_lparam));
@ -84,7 +84,7 @@ TEST(GPUPredictor, EllpackTraining) {
}
TEST(GPUPredictor, ExternalMemoryTest) {
auto lparam = CreateEmptyGenericParam(0);
auto lparam = MakeCUDACtx(0);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &lparam));
gpu_predictor->Configure({});
@ -157,7 +157,7 @@ TEST(GPUPredictor, ShapStump) {
trees.push_back(std::unique_ptr<RegTree>(new RegTree));
model.CommitModelGroup(std::move(trees), 0);
auto gpu_lparam = CreateEmptyGenericParam(0);
auto gpu_lparam = MakeCUDACtx(0);
std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("gpu_predictor", &gpu_lparam));
gpu_predictor->Configure({});
@ -185,8 +185,8 @@ TEST(GPUPredictor, Shap) {
trees[0]->ExpandNode(0, 0, 0.5, true, 1.0, -1.0, 1.0, 0.0, 5.0, 2.0, 3.0);
model.CommitModelGroup(std::move(trees), 0);
auto gpu_lparam = CreateEmptyGenericParam(0);
auto cpu_lparam = CreateEmptyGenericParam(-1);
auto gpu_lparam = MakeCUDACtx(0);
auto cpu_lparam = MakeCUDACtx(-1);
std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("gpu_predictor", &gpu_lparam));
std::unique_ptr<Predictor> cpu_predictor = std::unique_ptr<Predictor>(
@ -220,7 +220,7 @@ TEST(GPUPredictor, CategoricalPredictLeaf) {
TEST(GPUPredictor, PredictLeafBasic) {
size_t constexpr kRows = 5, kCols = 5;
auto dmat = RandomDataGenerator(kRows, kCols, 0).Device(0).GenerateDMatrix();
auto lparam = CreateEmptyGenericParam(GPUIDX);
auto lparam = MakeCUDACtx(GPUIDX);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &lparam));
gpu_predictor->Configure({});

View File

@ -37,10 +37,10 @@ void TestPredictionFromGradientIndex(std::string name, size_t rows, size_t cols,
constexpr size_t kClasses { 3 };
LearnerModelParam mparam{MakeMP(cols, .5, kClasses)};
auto lparam = CreateEmptyGenericParam(0);
auto cuda_ctx = MakeCUDACtx(0);
std::unique_ptr<Predictor> predictor =
std::unique_ptr<Predictor>(Predictor::Create(name, &lparam));
std::unique_ptr<Predictor>(Predictor::Create(name, &cuda_ctx));
predictor->Configure({});
Context ctx;

View File

@ -25,7 +25,7 @@ void InitRowPartitionForTest(common::RowSetCollection *row_set, size_t n_samples
} // anonymous namespace
void TestAddHistRows(bool is_distributed) {
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
std::vector<CPUExpandEntry> nodes_for_explicit_hist_build_;
std::vector<CPUExpandEntry> nodes_for_subtraction_trick_;
int starting_index = std::numeric_limits<int>::max();
@ -74,7 +74,7 @@ TEST(CPUHistogram, AddRows) {
void TestSyncHist(bool is_distributed) {
size_t constexpr kNRows = 8, kNCols = 16;
int32_t constexpr kMaxBins = 4;
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
std::vector<CPUExpandEntry> nodes_for_explicit_hist_build_;
std::vector<CPUExpandEntry> nodes_for_subtraction_trick_;
@ -229,7 +229,7 @@ TEST(CPUHistogram, SyncHist) {
void TestBuildHistogram(bool is_distributed, bool force_read_by_column, bool is_col_split) {
size_t constexpr kNRows = 8, kNCols = 16;
int32_t constexpr kMaxBins = 4;
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
auto p_fmat =
RandomDataGenerator(kNRows, kNCols, 0.8).Seed(3).GenerateDMatrix();
if (is_col_split) {
@ -330,7 +330,7 @@ void TestHistogramCategorical(size_t n_categories, bool force_read_by_column) {
auto x = GenerateRandomCategoricalSingleColumn(kRows, n_categories);
auto cat_m = GetDMatrixFromData(x, kRows, 1);
cat_m->Info().feature_types.HostVector().push_back(FeatureType::kCategorical);
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
BatchParam batch_param{0, static_cast<int32_t>(kBins)};
@ -475,7 +475,7 @@ void TestHistogramExternalMemory(Context const *ctx, BatchParam batch_param, boo
TEST(CPUHistogram, ExternalMemory) {
int32_t constexpr kBins = 256;
auto ctx = CreateEmptyGenericParam(Context::kCpuId);
Context ctx;
TestHistogramExternalMemory(&ctx, BatchParam{kBins, common::Span<float>{}, false}, true, false);
TestHistogramExternalMemory(&ctx, BatchParam{kBins, common::Span<float>{}, false}, true, true);

View File

@ -91,7 +91,7 @@ void TestBuildHist(bool use_shared_memory_histograms) {
auto page = BuildEllpackPage(kNRows, kNCols);
BatchParam batch_param{};
Context ctx{CreateEmptyGenericParam(0)};
Context ctx{MakeCUDACtx(0)};
GPUHistMakerDevice<GradientSumT> maker(&ctx, page.get(), {}, kNRows, param, kNCols, kNCols,
batch_param);
xgboost::SimpleLCG gen;
@ -169,7 +169,7 @@ void TestHistogramIndexImpl() {
int constexpr kNRows = 1000, kNCols = 10;
// Build 2 matrices and build a histogram maker with that
Context ctx(CreateEmptyGenericParam(0));
Context ctx(MakeCUDACtx(0));
ObjInfo task{ObjInfo::kRegression};
tree::GPUHistMaker hist_maker{&ctx, &task}, hist_maker_ext{&ctx, &task};
std::unique_ptr<DMatrix> hist_maker_dmat(
@ -262,7 +262,7 @@ TEST(GpuHist, UniformSampling) {
// Build a tree using the in-memory DMatrix.
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
Context ctx(CreateEmptyGenericParam(0));
Context ctx(MakeCUDACtx(0));
UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
// Build another tree using sampling.
RegTree tree_sampling;
@ -292,7 +292,7 @@ TEST(GpuHist, GradientBasedSampling) {
// Build a tree using the in-memory DMatrix.
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
Context ctx(CreateEmptyGenericParam(0));
Context ctx(MakeCUDACtx(0));
UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
// Build another tree using sampling.
@ -327,7 +327,7 @@ TEST(GpuHist, ExternalMemory) {
// Build a tree using the in-memory DMatrix.
RegTree tree;
Context ctx(CreateEmptyGenericParam(0));
Context ctx(MakeCUDACtx(0));
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
// Build another tree using multiple ELLPACK pages.
@ -365,7 +365,7 @@ TEST(GpuHist, ExternalMemoryWithSampling) {
// Build a tree using the in-memory DMatrix.
auto rng = common::GlobalRandom();
Context ctx(CreateEmptyGenericParam(0));
Context ctx(MakeCUDACtx(0));
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, kSubsample, kSamplingMethod, kRows);
@ -386,7 +386,7 @@ TEST(GpuHist, ExternalMemoryWithSampling) {
}
TEST(GpuHist, ConfigIO) {
Context ctx(CreateEmptyGenericParam(0));
Context ctx(MakeCUDACtx(0));
ObjInfo task{ObjInfo::kRegression};
std::unique_ptr<TreeUpdater> updater{TreeUpdater::Create("grow_gpu_hist", &ctx, &task)};
updater->Configure(Args{});
@ -404,7 +404,7 @@ TEST(GpuHist, ConfigIO) {
}
TEST(GpuHist, MaxDepth) {
Context ctx(CreateEmptyGenericParam(0));
Context ctx(MakeCUDACtx(0));
size_t constexpr kRows = 16;
size_t constexpr kCols = 4;
auto p_mat = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix();

View File

@ -29,7 +29,7 @@ TEST(Updater, Prune) {
std::shared_ptr<DMatrix> p_dmat {
RandomDataGenerator{32, 10, 0}.GenerateDMatrix() };
auto ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx;
// prepare tree
RegTree tree = RegTree{1u, kCols};

View File

@ -29,7 +29,7 @@ TEST(Updater, Refresh) {
{"reg_lambda", "1"}};
RegTree tree = RegTree{1u, kCols};
auto ctx = CreateEmptyGenericParam(GPUIDX);
Context ctx;
std::vector<RegTree*> trees{&tree};
ObjInfo task{ObjInfo::kRegression};

View File

@ -33,8 +33,7 @@ class UpdaterTreeStatTest : public ::testing::Test {
ObjInfo task{ObjInfo::kRegression};
param.Init(Args{});
Context ctx(updater == "grow_gpu_hist" ? CreateEmptyGenericParam(0)
: CreateEmptyGenericParam(Context::kCpuId));
Context ctx(updater == "grow_gpu_hist" ? MakeCUDACtx(0) : MakeCUDACtx(Context::kCpuId));
auto up = std::unique_ptr<TreeUpdater>{TreeUpdater::Create(updater, &ctx, &task)};
up->Configure(Args{});
RegTree tree{1u, kCols};
@ -79,8 +78,7 @@ class UpdaterEtaTest : public ::testing::Test {
void RunTest(std::string updater) {
ObjInfo task{ObjInfo::kClassification};
Context ctx(updater == "grow_gpu_hist" ? CreateEmptyGenericParam(0)
: CreateEmptyGenericParam(Context::kCpuId));
Context ctx(updater == "grow_gpu_hist" ? MakeCUDACtx(0) : MakeCUDACtx(Context::kCpuId));
float eta = 0.4;
auto up_0 = std::unique_ptr<TreeUpdater>{TreeUpdater::Create(updater, &ctx, &task)};
@ -156,8 +154,7 @@ class TestMinSplitLoss : public ::testing::Test {
param.UpdateAllowUnknown(args);
ObjInfo task{ObjInfo::kRegression};
Context ctx(updater == "grow_gpu_hist" ? CreateEmptyGenericParam(0)
: CreateEmptyGenericParam(Context::kCpuId));
Context ctx{MakeCUDACtx(updater == "grow_gpu_hist" ? 0 : Context::kCpuId)};
auto up = std::unique_ptr<TreeUpdater>{TreeUpdater::Create(updater, &ctx, &task)};
up->Configure({});