[BREAKING] prevent multi-gpu usage (#4749)
* prevent multi-GPU usage
* fix distributed test
* combine GPU predictor tests
* set upper bound on n_gpus
This commit is contained in:
@@ -40,10 +40,10 @@ struct GenericParameter : public dmlc::Parameter<GenericParameter> {
         .describe("The primary GPU device ordinal.");
     DMLC_DECLARE_FIELD(n_gpus)
         .set_default(0)
-        .set_lower_bound(-1)
-        .describe("Deprecated, please use distributed training with one "
-                  "process per GPU. "
-                  "Number of GPUs to use for multi-gpu algorithms.");
+        .set_range(0, 1)
+        .describe("Deprecated. Single process multi-GPU training is no longer supported. "
+                  "Please switch to distributed training with one process per GPU. "
+                  "This can be done using Dask or Spark.");
   }
 };
 }  // namespace xgboost
||||
Reference in New Issue
Block a user