Avoid calling CUDA code on CPU for linear model. (#7154)
This commit is contained in:
@@ -1,5 +1,5 @@
 /*!
- * Copyright 2014-2020 by Contributors
+ * Copyright 2014-2021 by Contributors
  * \file gblinear.cc
  * \brief Implementation of Linear booster, with L1/L2 regularization: Elastic Net
  * the update rule is parallel coordinate descent (shotgun)
@@ -37,6 +37,17 @@ struct GBLinearTrainParam : public XGBoostParameter<GBLinearTrainParam> {
   std::string updater;
   float tolerance;
   size_t max_row_perbatch;
+
+  void CheckGPUSupport() {
+    auto n_gpus = common::AllVisibleGPUs();
+    if (n_gpus == 0 && this->updater == "gpu_coord_descent") {
+      common::AssertGPUSupport();
+      this->UpdateAllowUnknown(Args{{"updater", "coord_descent"}});
+      LOG(WARNING) << "Loading configuration on a CPU only machine. Changing "
+                      "updater to `coord_descent`.";
+    }
+  }
+
   DMLC_DECLARE_PARAMETER(GBLinearTrainParam) {
     DMLC_DECLARE_FIELD(updater)
         .set_default("shotgun")
@@ -74,12 +85,10 @@ class GBLinear : public GradientBooster {
       model_.Configure(cfg);
     }
     param_.UpdateAllowUnknown(cfg);
+    param_.CheckGPUSupport();
     updater_.reset(LinearUpdater::Create(param_.updater, generic_param_));
     updater_->Configure(cfg);
     monitor_.Init("GBLinear");
-    if (param_.updater == "gpu_coord_descent") {
-      common::AssertGPUSupport();
-    }
   }
 
   int32_t BoostedRounds() const override {
@@ -110,6 +119,7 @@ class GBLinear : public GradientBooster {
   void LoadConfig(Json const& in) override {
     CHECK_EQ(get<String>(in["name"]), "gblinear");
     FromJson(in["gblinear_train_param"], &param_);
+    param_.CheckGPUSupport();
     updater_.reset(LinearUpdater::Create(param_.updater, generic_param_));
     this->updater_->LoadConfig(in["updater"]);
   }
Reference in New Issue
Block a user