Add GPU support to NVFlare demo (#9552)

parent 3b9e5909fb
commit 0f35493b65

@@ -85,8 +85,8 @@ shutdown server
 ## Training with GPUs
 
 To demo with Federated Learning using GPUs, make sure your machine has at least 2 GPUs.
-Build XGBoost with the federated learning plugin enabled along with CUDA, but with NCCL
-turned off (see the [README](../../plugin/federated/README.md)).
+Build XGBoost with the federated learning plugin enabled along with CUDA
+(see the [README](../../plugin/federated/README.md)).
 
-Modify `config/config_fed_client.json` and set `use_gpus` to `true`, then repeat the steps
+Modify `../config/config_fed_client.json` and set `use_gpus` to `true`, then repeat the steps
 above.
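As an aside on the last change: `use_gpus` is an executor argument in the NVFlare client config, so the edit is a one-key change. A minimal sketch of applying it from Python instead of a text editor, assuming the flag sits under `executors[0].executor.args` as in typical NVFlare client configs (the demo's exact nesting may differ):

```python
import json

# Path as referenced in the README; adjust to your working directory.
path = '../config/config_fed_client.json'
with open(path) as f:
    cfg = json.load(f)

# Assumption: the demo stores executor arguments under
# executors[0].executor.args, the usual NVFlare client layout.
cfg['executors'][0]['executor']['args']['use_gpus'] = True

with open(path, 'w') as f:
    json.dump(cfg, f, indent=2)
```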

@@ -67,7 +67,7 @@ class XGBoostTrainer(Executor):
         dtest = xgb.DMatrix('agaricus.txt.test?format=libsvm')
 
         # Specify parameters via map, definition are same as c++ version
-        param = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'}
+        param = {'tree_method': 'hist', 'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'}
         if self._use_gpus:
             self.log_info(fl_ctx, f'Training with GPU {rank}')
             param['device'] = f"cuda:{rank}"
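This hunk is the whole GPU story for the horizontal trainer: `tree_method` stays `hist` and `device` selects the GPU by rank. A standalone sketch of the same call shape outside the federated harness, assuming the `agaricus` demo files are present and substituting rank 0 for the per-client rank:

```python
import xgboost as xgb

rank = 0  # each NVFlare client would use its own rank here
dtrain = xgb.DMatrix('agaricus.txt.train?format=libsvm')
dtest = xgb.DMatrix('agaricus.txt.test?format=libsvm')

param = {'tree_method': 'hist', 'max_depth': 2, 'eta': 1,
         'objective': 'binary:logistic'}
param['device'] = f'cuda:{rank}'  # pin this worker to its own GPU

# Mirrors the trainer's call; drop 'device' to fall back to CPU hist.
bst = xgb.train(param, dtrain, num_boost_round=2,
                evals=[(dtest, 'eval'), (dtrain, 'train')])
```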

@@ -56,4 +56,9 @@ shutdown server
 
 ## Training with GPUs
 
-Currently GPUs are not yet supported by vertical federated XGBoost.
+To demo with Vertical Federated Learning using GPUs, make sure your machine has at least 2 GPUs.
+Build XGBoost with the federated learning plugin enabled along with CUDA
+(see the [README](../../plugin/federated/README.md)).
+
+Modify `../config/config_fed_client.json` and set `use_gpus` to `true`, then repeat the steps
+above.
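Since both READMEs now assume a CUDA-enabled federated build, a quick sanity check before launching the demo is to inspect the build flags of the installed package. A sketch using `xgboost.build_info()`; the exact key names can differ between releases:

```python
import xgboost as xgb

info = xgb.build_info()
print(info)  # inspect the CUDA / federated build flags

# Key name is an assumption; check the printed dict on your build.
assert info.get('USE_CUDA'), 'this XGBoost build was compiled without CUDA'
```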

@@ -77,13 +77,15 @@ class XGBoostTrainer(Executor):
             'gamma': 1.0,
             'max_depth': 8,
             'min_child_weight': 100,
-            'tree_method': 'approx',
+            'tree_method': 'hist',
             'grow_policy': 'depthwise',
             'objective': 'binary:logistic',
             'eval_metric': 'auc',
         }
         if self._use_gpus:
-            self.log_info(fl_ctx, 'GPUs are not currently supported by vertical federated XGBoost')
+            self.log_info(fl_ctx, f'Training with GPU {rank}')
+            param['device'] = f"cuda:{rank}"
 
         # specify validations set to watch performance
         watchlist = [(dtest, "eval"), (dtrain, "train")]
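For context on where `self._use_gpus` comes from: NVFlare constructs the executor from the client config and passes the `args` entries as keyword arguments, so the flag arrives in `__init__`. A sketch of that plumbing with an assumed signature (the demo's real constructor also takes connection settings):

```python
from nvflare.apis.executor import Executor


class XGBoostTrainer(Executor):
    # Signature is illustrative; the demo's trainer additionally takes
    # the server address, world size, and certificate paths.
    def __init__(self, use_gpus: bool = False):
        super().__init__()
        # Gates the per-rank GPU selection seen in the diff above:
        # param['device'] = f"cuda:{rank}"
        self._use_gpus = use_gpus
```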