[fed] Fixes for the encrypted GRPC backend. (#10503)
@@ -34,6 +34,8 @@ class LintersPaths:
         "tests/python/test_with_pandas.py",
         "tests/python-gpu/",
         "tests/python-sycl/",
+        "tests/test_distributed/test_federated/",
+        "tests/test_distributed/test_gpu_federated/",
         "tests/test_distributed/test_with_dask/",
         "tests/test_distributed/test_gpu_with_dask/",
         "tests/test_distributed/test_with_spark/",
@@ -94,6 +96,8 @@ class LintersPaths:
         "tests/python-gpu/load_pickle.py",
         "tests/python-gpu/test_gpu_training_continuation.py",
         "tests/python/test_model_io.py",
+        "tests/test_distributed/test_federated/",
+        "tests/test_distributed/test_gpu_federated/",
         "tests/test_distributed/test_with_spark/test_data.py",
         "tests/test_distributed/test_gpu_with_spark/test_data.py",
         "tests/test_distributed/test_gpu_with_dask/test_gpu_with_dask.py",
@@ -70,6 +70,7 @@ case "$suite" in
     pytest -v -s -rxXs --fulltrace --durations=0 -m "mgpu" ${args} tests/python-gpu
     pytest -v -s -rxXs --fulltrace --durations=0 -m "mgpu" ${args} tests/test_distributed/test_gpu_with_dask
     pytest -v -s -rxXs --fulltrace --durations=0 -m "mgpu" ${args} tests/test_distributed/test_gpu_with_spark
+    pytest -v -s -rxXs --fulltrace --durations=0 -m "mgpu" ${args} tests/test_distributed/test_gpu_federated
     unset_pyspark_envs
     uninstall_xgboost
     set +x
@@ -84,6 +85,7 @@ case "$suite" in
     pytest -v -s -rxXs --fulltrace --durations=0 ${args} tests/python
     pytest -v -s -rxXs --fulltrace --durations=0 ${args} tests/test_distributed/test_with_dask
     pytest -v -s -rxXs --fulltrace --durations=0 ${args} tests/test_distributed/test_with_spark
+    pytest -v -s -rxXs --fulltrace --durations=0 ${args} tests/test_distributed/test_federated
     unset_pyspark_envs
     uninstall_xgboost
     set +x
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-set -e
-
-rm -f ./*.model* ./agaricus* ./*.pem
-
-world_size=$(nvidia-smi -L | wc -l)
-
-# Generate server and client certificates.
-openssl req -x509 -newkey rsa:2048 -days 7 -nodes -keyout server-key.pem -out server-cert.pem -subj "/C=US/CN=localhost"
-openssl req -x509 -newkey rsa:2048 -days 7 -nodes -keyout client-key.pem -out client-cert.pem -subj "/C=US/CN=localhost"
-
-# Split train and test files manually to simulate a federated environment.
-split -n l/"${world_size}" -d ../../../demo/data/agaricus.txt.train agaricus.txt.train-
-split -n l/"${world_size}" -d ../../../demo/data/agaricus.txt.test agaricus.txt.test-
-
-python test_federated.py "${world_size}"
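The deleted runtests-federated.sh handled the test setup: self-signed certificates via openssl and one data shard per worker via split. That setup presumably now lives inside the shared test helper. A minimal sketch of an equivalent setup in Python; setup_test_artifacts is an assumed name, not the actual xgboost.testing.federated API, and it requires openssl and split on PATH:

# Hypothetical Python equivalent of the deleted shell setup; it mirrors the
# openssl/split commands from the removed script.
import subprocess


def setup_test_artifacts(world_size: int) -> None:
    # Self-signed server and client certificates, as in the removed script.
    for name in ("server", "client"):
        subprocess.run(
            [
                "openssl", "req", "-x509", "-newkey", "rsa:2048", "-days", "7",
                "-nodes", "-keyout", f"{name}-key.pem", "-out", f"{name}-cert.pem",
                "-subj", "/C=US/CN=localhost",
            ],
            check=True,
        )
    # One shard per worker to simulate a federated environment.
    for part in ("train", "test"):
        subprocess.run(
            [
                "split", "-n", f"l/{world_size}", "-d",
                f"../../../demo/data/agaricus.txt.{part}",
                f"agaricus.txt.{part}-",
            ],
            check=True,
        )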
@@ -1,86 +1,8 @@
-#!/usr/bin/python
-import multiprocessing
-import sys
-import time
+import pytest
 
-import xgboost as xgb
-import xgboost.federated
-
-SERVER_KEY = 'server-key.pem'
-SERVER_CERT = 'server-cert.pem'
-CLIENT_KEY = 'client-key.pem'
-CLIENT_CERT = 'client-cert.pem'
+from xgboost.testing.federated import run_federated_learning
 
 
-def run_server(port: int, world_size: int, with_ssl: bool) -> None:
-    if with_ssl:
-        xgboost.federated.run_federated_server(port, world_size, SERVER_KEY, SERVER_CERT,
-                                               CLIENT_CERT)
-    else:
-        xgboost.federated.run_federated_server(port, world_size)
-
-
-def run_worker(port: int, world_size: int, rank: int, with_ssl: bool, with_gpu: bool) -> None:
-    communicator_env = {
-        'xgboost_communicator': 'federated',
-        'federated_server_address': f'localhost:{port}',
-        'federated_world_size': world_size,
-        'federated_rank': rank
-    }
-    if with_ssl:
-        communicator_env['federated_server_cert'] = SERVER_CERT
-        communicator_env['federated_client_key'] = CLIENT_KEY
-        communicator_env['federated_client_cert'] = CLIENT_CERT
-
-    # Always call this before using distributed module
-    with xgb.collective.CommunicatorContext(**communicator_env):
-        # Load file, file will not be sharded in federated mode.
-        dtrain = xgb.DMatrix('agaricus.txt.train-%02d?format=libsvm' % rank)
-        dtest = xgb.DMatrix('agaricus.txt.test-%02d?format=libsvm' % rank)
-
-        # Specify parameters via map, definition are same as c++ version
-        param = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'}
-        if with_gpu:
-            param['tree_method'] = 'hist'
-            param['device'] = f"cuda:{rank}"
-
-        # Specify validations set to watch performance
-        watchlist = [(dtest, 'eval'), (dtrain, 'train')]
-        num_round = 20
-
-        # Run training, all the features in training API is available.
-        bst = xgb.train(param, dtrain, num_round, evals=watchlist,
-                        early_stopping_rounds=2)
-
-        # Save the model, only ask process 0 to save the model.
-        if xgb.collective.get_rank() == 0:
-            bst.save_model("test.model.json")
-            xgb.collective.communicator_print("Finished training\n")
-
-
-def run_federated(with_ssl: bool = True, with_gpu: bool = False) -> None:
-    port = 9091
-    world_size = int(sys.argv[1])
-
-    server = multiprocessing.Process(target=run_server, args=(port, world_size, with_ssl))
-    server.start()
-    time.sleep(1)
-    if not server.is_alive():
-        raise Exception("Error starting Federated Learning server")
-
-    workers = []
-    for rank in range(world_size):
-        worker = multiprocessing.Process(target=run_worker,
-                                         args=(port, world_size, rank, with_ssl, with_gpu))
-        workers.append(worker)
-        worker.start()
-    for worker in workers:
-        worker.join()
-    server.terminate()
-
-
-if __name__ == '__main__':
-    run_federated(with_ssl=True, with_gpu=False)
-    run_federated(with_ssl=False, with_gpu=False)
-    run_federated(with_ssl=True, with_gpu=True)
-    run_federated(with_ssl=False, with_gpu=True)
+@pytest.mark.parametrize("with_ssl", [True, False])
+def test_federated_learning(with_ssl: bool) -> None:
+    run_federated_learning(with_ssl, False, __file__)
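The inline orchestration removed above (one server process plus world_size worker processes, each training inside a CommunicatorContext) is consolidated into xgboost.testing.federated.run_federated_learning. A hedged reconstruction of what that helper plausibly does, based on the removed code; the real helper's internals may differ, e.g. in how it picks world_size, uses test_path, and calls run_federated_server:

# Sketch only; assumptions: fixed port and world size, insecure server shown
# for brevity. With with_ssl=True the helper would pass the PEM files to the
# server and add the federated_* certificate keys to the worker env.
import multiprocessing
import time

import xgboost as xgb
import xgboost.federated


def _worker(port: int, world_size: int, rank: int, with_gpu: bool) -> None:
    env = {
        "xgboost_communicator": "federated",
        "federated_server_address": f"localhost:{port}",
        "federated_world_size": world_size,
        "federated_rank": rank,
    }
    # The communicator context must wrap any use of the collective module.
    with xgb.collective.CommunicatorContext(**env):
        # Each worker loads its own pre-split shard; files are not sharded
        # automatically in federated mode.
        dtrain = xgb.DMatrix(f"agaricus.txt.train-{rank:02d}?format=libsvm")
        param = {"max_depth": 2, "eta": 1, "objective": "binary:logistic"}
        if with_gpu:
            param["tree_method"] = "hist"
            param["device"] = f"cuda:{rank}"
        bst = xgb.train(param, dtrain, num_boost_round=20)
        if xgb.collective.get_rank() == 0:
            bst.save_model("test.model.json")


def run_federated_learning(with_ssl: bool, with_gpu: bool, test_path: str) -> None:
    # The real helper presumably uses test_path to locate the data shards.
    port, world_size = 9091, 2
    server = multiprocessing.Process(
        target=xgboost.federated.run_federated_server, args=(port, world_size)
    )
    server.start()
    time.sleep(1)

    workers = [
        multiprocessing.Process(target=_worker, args=(port, world_size, rank, with_gpu))
        for rank in range(world_size)
    ]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    server.terminate()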
@@ -0,0 +1,9 @@
+import pytest
+
+from xgboost.testing.federated import run_federated_learning
+
+
+@pytest.mark.parametrize("with_ssl", [True, False])
+@pytest.mark.mgpu
+def test_federated_learning(with_ssl: bool) -> None:
+    run_federated_learning(with_ssl, True, __file__)
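The @pytest.mark.mgpu marker on this new GPU test ties into the CI change above: the multi-GPU suite filters with -m "mgpu" and now collects tests/test_distributed/test_gpu_federated, while the CPU suite runs the unmarked variant in tests/test_distributed/test_federated.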