[backport] Fix device dispatch for linear updater. (#9507) (#9532)

Jiaming Yuan 2023-08-29 15:10:43 +08:00 committed by GitHub
parent 4301558a57
commit a0d3573c74
6 changed files with 80 additions and 35 deletions


@@ -329,7 +329,7 @@ Parameters for Linear Booster (``booster=gblinear``)
   - Choice of algorithm to fit linear model

     - ``shotgun``: Parallel coordinate descent algorithm based on shotgun algorithm. Uses 'hogwild' parallelism and therefore produces a nondeterministic solution on each run.
-    - ``coord_descent``: Ordinary coordinate descent algorithm. Also multithreaded but still produces a deterministic solution.
+    - ``coord_descent``: Ordinary coordinate descent algorithm. Also multithreaded but still produces a deterministic solution. When the ``device`` parameter is set to ``cuda`` or ``gpu``, a GPU variant would be used.

 * ``feature_selector`` [default= ``cyclic``]
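A minimal usage sketch of the behaviour documented above (not part of this diff): it mirrors the C++ test added at the end of this commit and assumes the test-only RandomDataGenerator helper. With device set to "cuda" or "gpu", the coord_descent updater is dispatched to its GPU variant.

// Sketch: select coordinate descent and let `device` pick the CPU or GPU variant.
#include <xgboost/base.h>     // for Args
#include <xgboost/learner.h>  // for Learner

#include <cstdint>  // for int32_t
#include <memory>   // for unique_ptr
#include <string>   // for string

#include "../helpers.h"  // test-only helper providing RandomDataGenerator

namespace xgboost {
void TrainLinear(std::string const &device) {  // device is "cpu", "cuda", or "gpu"
  auto p_fmat = RandomDataGenerator{10, 10, 0.0f}.GenerateDMatrix(true);
  std::unique_ptr<Learner> learner{Learner::Create({p_fmat})};
  learner->SetParams(
      Args{{"booster", "gblinear"}, {"updater", "coord_descent"}, {"device", device}});
  for (std::int32_t iter = 0; iter < 3; ++iter) {
    learner->UpdateOneIter(iter, p_fmat);  // runs the dispatched linear updater
  }
}
}  // namespace xgboost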


@@ -1,5 +1,5 @@
-/*!
- * Copyright 2020 by Contributors
+/**
+ * Copyright 2020-2023, XGBoost Contributors
  * \file global_config.h
  * \brief Global configuration for XGBoost
  * \author Hyunsu Cho
@@ -7,24 +7,22 @@
 #ifndef XGBOOST_GLOBAL_CONFIG_H_
 #define XGBOOST_GLOBAL_CONFIG_H_

-#include <xgboost/parameter.h>
-
-#include <vector>
-#include <string>
+#include <dmlc/thread_local.h>  // for ThreadLocalStore
+#include <xgboost/parameter.h>  // for XGBoostParameter
+
+#include <cstdint>  // for int32_t

 namespace xgboost {
-class Json;

 struct GlobalConfiguration : public XGBoostParameter<GlobalConfiguration> {
-  int verbosity { 1 };
-  bool use_rmm { false };
+  std::int32_t verbosity{1};
+  bool use_rmm{false};
   DMLC_DECLARE_PARAMETER(GlobalConfiguration) {
     DMLC_DECLARE_FIELD(verbosity)
         .set_range(0, 3)
         .set_default(1)  // shows only warning
         .describe("Flag to print out detailed breakdown of runtime.");
-    DMLC_DECLARE_FIELD(use_rmm)
-        .set_default(false)
-        .describe("Whether to use RAPIDS Memory Manager to allocate GPU memory in XGBoost");
+    DMLC_DECLARE_FIELD(use_rmm).set_default(false).describe(
+        "Whether to use RAPIDS Memory Manager to allocate GPU memory in XGBoost");
   }
 };
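A small sketch of how these declared fields are consumed (not part of this diff). It assumes the GlobalConfigThreadLocalStore alias that the new test below relies on, plus the standard dmlc parameter machinery behind XGBoostParameter.

// Sketch: filling the global configuration from key/value args on the current thread.
#include <xgboost/base.h>           // for Args
#include <xgboost/global_config.h>  // for GlobalConfiguration, GlobalConfigThreadLocalStore

namespace xgboost {
void BumpVerbosity() {
  auto *cfg = GlobalConfigThreadLocalStore::Get();    // thread-local GlobalConfiguration
  cfg->UpdateAllowUnknown(Args{{"verbosity", "3"}});  // validated against set_range(0, 3)
  // cfg->verbosity now reads 3 for code on this thread, e.g. enabling LOG(DEBUG) output,
  // which is how the test below observes the updater dispatch.
}
}  // namespace xgboost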


@@ -1,5 +1,5 @@
-/*!
- * Copyright 2014-2022 by XGBoost Contributors
+/**
+ * Copyright 2014-2023, XGBoost Contributors
  * \file gblinear.cc
  * \brief Implementation of Linear booster, with L1/L2 regularization: Elastic Net
  *        the update rule is parallel coordinate descent (shotgun)
@@ -26,9 +26,9 @@
 #include "../common/timer.h"
 #include "../common/common.h"
 #include "../common/threading_utils.h"
+#include "../common/error_msg.h"

-namespace xgboost {
-namespace gbm {
+namespace xgboost::gbm {

 DMLC_REGISTRY_FILE_TAG(gblinear);
@@ -83,7 +83,16 @@ class GBLinear : public GradientBooster {
     }
     param_.UpdateAllowUnknown(cfg);
     param_.CheckGPUSupport();
-    updater_.reset(LinearUpdater::Create(param_.updater, ctx_));
+    if (param_.updater == "gpu_coord_descent") {
+      LOG(WARNING) << error::DeprecatedFunc("gpu_coord_descent", "2.0.0",
+                                            R"(device="cuda", updater="coord_descent")");
+    }
+    if (param_.updater == "coord_descent" && ctx_->IsCUDA()) {
+      updater_.reset(LinearUpdater::Create("gpu_coord_descent", ctx_));
+    } else {
+      updater_.reset(LinearUpdater::Create(param_.updater, ctx_));
+    }
     updater_->Configure(cfg);
     monitor_.Init("GBLinear");
   }
@@ -354,5 +363,4 @@ XGBOOST_REGISTER_GBM(GBLinear, "gblinear")
     .set_body([](LearnerModelParam const* booster_config, Context const* ctx) {
       return new GBLinear(booster_config, ctx);
     });
-}  // namespace gbm
-}  // namespace xgboost
+}  // namespace xgboost::gbm
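To make the dispatch above concrete, a hedged usage sketch that is not part of this diff: the commented-out line shows the updater value that now only triggers the deprecation warning, while the active lines show the device-based selection that the warning recommends.

// Sketch: migrating from the deprecated explicit GPU updater to device-based dispatch.
#include <xgboost/base.h>     // for Args
#include <xgboost/learner.h>  // for Learner

namespace xgboost {
void ConfigureLinearBooster(Learner *learner) {
  // Deprecated since 2.0.0; emits the DeprecatedFunc warning shown above:
  // learner->SetParams(Args{{"booster", "gblinear"}, {"updater", "gpu_coord_descent"}});

  // Preferred: keep the updater device-neutral and choose the device instead;
  // with device="cuda" the branch above creates "gpu_coord_descent" internally.
  learner->SetParams(
      Args{{"booster", "gblinear"}, {"updater", "coord_descent"}, {"device", "cuda"}});
  learner->Configure();
}
}  // namespace xgboost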


@@ -9,8 +9,7 @@
 #include "coordinate_common.h"
 #include "xgboost/json.h"

-namespace xgboost {
-namespace linear {
+namespace xgboost::linear {

 DMLC_REGISTER_PARAMETER(CoordinateParam);
 DMLC_REGISTRY_FILE_TAG(updater_coordinate);
@@ -39,8 +38,9 @@ class CoordinateUpdater : public LinearUpdater {
     FromJson(config.at("linear_train_param"), &tparam_);
     FromJson(config.at("coordinate_param"), &cparam_);
   }
-  void SaveConfig(Json* p_out) const override {
-    auto& out = *p_out;
+  void SaveConfig(Json *p_out) const override {
+    LOG(DEBUG) << "Save config for CPU updater.";
+    auto &out = *p_out;
     out["linear_train_param"] = ToJson(tparam_);
     out["coordinate_param"] = ToJson(cparam_);
   }
@@ -99,5 +99,4 @@ class CoordinateUpdater : public LinearUpdater {
 XGBOOST_REGISTER_LINEAR_UPDATER(CoordinateUpdater, "coord_descent")
     .describe("Update linear model according to coordinate descent algorithm.")
     .set_body([]() { return new CoordinateUpdater(); });
-}  // namespace linear
-}  // namespace xgboost
+}  // namespace xgboost::linear
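A short sketch of the save/load round trip these methods implement (not part of this diff); the header paths and the Context argument are assumptions inferred from the surrounding code. The new LOG(DEBUG) line is what lets the test at the end of this commit confirm which updater (CPU or GPU) was actually created.

// Sketch: round-tripping the updater configuration through JSON.
#include <xgboost/context.h>         // for Context (assumed header)
#include <xgboost/json.h>            // for Json, Object
#include <xgboost/linear_updater.h>  // for LinearUpdater (assumed header)

#include <memory>  // for unique_ptr

namespace xgboost {
void RoundTripUpdaterConfig(Context const *ctx) {
  std::unique_ptr<LinearUpdater> updater{LinearUpdater::Create("coord_descent", ctx)};
  Json config{Object{}};
  updater->SaveConfig(&config);  // logs "Save config for CPU updater." at DEBUG verbosity
  updater->LoadConfig(config);   // restores linear_train_param and coordinate_param
}
}  // namespace xgboost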


@@ -15,8 +15,7 @@
 #include "../common/timer.h"
 #include "./param.h"

-namespace xgboost {
-namespace linear {
+namespace xgboost::linear {

 DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate);
@@ -29,7 +28,7 @@ DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate);
 class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT
  public:
   // set training parameter
-  void Configure(Args const& args) override {
+  void Configure(Args const &args) override {
     tparam_.UpdateAllowUnknown(args);
     coord_param_.UpdateAllowUnknown(args);
     selector_.reset(FeatureSelector::Create(tparam_.feature_selector));
@@ -41,8 +40,9 @@ class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT
     FromJson(config.at("linear_train_param"), &tparam_);
     FromJson(config.at("coordinate_param"), &coord_param_);
   }
-  void SaveConfig(Json* p_out) const override {
-    auto& out = *p_out;
+  void SaveConfig(Json *p_out) const override {
+    LOG(DEBUG) << "Save config for GPU updater.";
+    auto &out = *p_out;
     out["linear_train_param"] = ToJson(tparam_);
     out["coordinate_param"] = ToJson(coord_param_);
   }
@@ -101,10 +101,9 @@ class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT
     monitor_.Stop("LazyInitDevice");

     monitor_.Start("UpdateGpair");
-    auto &in_gpair_host = in_gpair->ConstHostVector();
     // Update gpair
     if (ctx_->gpu_id >= 0) {
-      this->UpdateGpair(in_gpair_host);
+      this->UpdateGpair(in_gpair->ConstHostVector());
     }
     monitor_.Stop("UpdateGpair");
@@ -249,5 +248,4 @@ XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent")
     .describe(
         "Update linear model according to coordinate descent algorithm. GPU "
         "accelerated.")
     .set_body([]() { return new GPUCoordinateUpdater(); });
-}  // namespace linear
-}  // namespace xgboost
+}  // namespace xgboost::linear


@@ -0,0 +1,42 @@
+/**
+ * Copyright 2023, XGBoost Contributors
+ */
+#include <gtest/gtest.h>
+#include <xgboost/global_config.h>  // for GlobalConfigThreadLocalStore
+#include <xgboost/json.h>           // for Json, Object
+#include <xgboost/learner.h>        // for Learner
+
+#include <algorithm>  // for transform
+#include <string>     // for string
+#include <utility>    // for swap
+
+#include "../helpers.h"  // for RandomDataGenerator
+
+namespace xgboost {
+TEST(GBlinear, DispatchUpdater) {
+  auto verbosity = 3;
+  std::swap(GlobalConfigThreadLocalStore::Get()->verbosity, verbosity);
+
+  auto test = [](std::string device) {
+    auto p_fmat = RandomDataGenerator{10, 10, 0.0f}.GenerateDMatrix(true);
+    std::unique_ptr<Learner> learner{Learner::Create({p_fmat})};
+    learner->SetParams(
+        Args{{"booster", "gblinear"}, {"updater", "coord_descent"}, {"device", device}});
+    learner->Configure();
+    for (std::int32_t iter = 0; iter < 3; ++iter) {
+      learner->UpdateOneIter(iter, p_fmat);
+    }
+
+    Json config{Object{}};
+    ::testing::internal::CaptureStderr();
+    learner->SaveConfig(&config);
+    auto str = ::testing::internal::GetCapturedStderr();
+    std::transform(device.cbegin(), device.cend(), device.begin(),
+                   [](char c) { return std::toupper(c); });
+    ASSERT_NE(str.find(device), std::string::npos);
+  };
+  test("cpu");
+  test("gpu");
+  std::swap(GlobalConfigThreadLocalStore::Get()->verbosity, verbosity);
+}
+}  // namespace xgboost