Add GPU support to NVFlare demo (#9552)
This commit is contained in:
@@ -85,8 +85,8 @@ shutdown server
|
||||
## Training with GPUs
|
||||
|
||||
To run the Federated Learning demo with GPUs, make sure your machine has at least 2 GPUs.
|
||||
Build XGBoost with the federated learning plugin enabled along with CUDA, but with NCCL
|
||||
turned off (see the [README](../../plugin/federated/README.md)).
|
||||
Build XGBoost with the federated learning plugin enabled along with CUDA
|
||||
(see the [README](../../plugin/federated/README.md)).
|
||||
|
||||
Modify `config/config_fed_client.json` and set `use_gpus` to `true`, then repeat the steps
|
||||
Modify `../config/config_fed_client.json` and set `use_gpus` to `true`, then repeat the steps
|
||||
above.
|
||||
|
||||
@@ -67,7 +67,7 @@ class XGBoostTrainer(Executor):
|
||||
dtest = xgb.DMatrix('agaricus.txt.test?format=libsvm')
|
||||
|
||||
# Specify parameters via map, definition are same as c++ version
|
||||
param = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'}
|
||||
param = {'tree_method': 'hist', 'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'}
|
||||
if self._use_gpus:
|
||||
self.log_info(fl_ctx, f'Training with GPU {rank}')
|
||||
param['device'] = f"cuda:{rank}"
|
||||
|
||||
Reference in New Issue
Block a user