[breaking] Bump Python requirement to 3.10. (#10434)
- Bump the Python requirement. - Fix type hints. - Use loky to avoid deadlock. - Work around a cupy-numpy compatibility issue on Windows caused by the `safe` casting rule. - Simplify the repartitioning logic to avoid dask errors.
This commit is contained in:
@@ -425,8 +425,8 @@ class TestModels:
|
||||
np.testing.assert_allclose(merged, single, atol=1e-6)
|
||||
|
||||
@pytest.mark.skipif(**tm.no_sklearn())
|
||||
@pytest.mark.parametrize("booster", ["gbtree", "dart"])
|
||||
def test_slice(self, booster):
|
||||
@pytest.mark.parametrize("booster_name", ["gbtree", "dart"])
|
||||
def test_slice(self, booster_name: str) -> None:
|
||||
from sklearn.datasets import make_classification
|
||||
|
||||
num_classes = 3
|
||||
@@ -442,7 +442,7 @@ class TestModels:
|
||||
"num_parallel_tree": num_parallel_tree,
|
||||
"subsample": 0.5,
|
||||
"num_class": num_classes,
|
||||
"booster": booster,
|
||||
"booster": booster_name,
|
||||
"objective": "multi:softprob",
|
||||
},
|
||||
num_boost_round=num_boost_round,
|
||||
@@ -452,6 +452,8 @@ class TestModels:
|
||||
|
||||
assert len(booster.get_dump()) == total_trees
|
||||
|
||||
assert booster[...].num_boosted_rounds() == num_boost_round
|
||||
|
||||
self.run_slice(
|
||||
booster, dtrain, num_parallel_tree, num_classes, num_boost_round, False
|
||||
)
|
||||
|
||||
@@ -1,44 +1,46 @@
|
||||
import multiprocessing
|
||||
import socket
|
||||
import sys
|
||||
from threading import Thread
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
from loky import get_reusable_executor
|
||||
|
||||
import xgboost as xgb
|
||||
from xgboost import RabitTracker, build_info, federated
|
||||
from xgboost import testing as tm
|
||||
|
||||
|
||||
def run_rabit_worker(rabit_env, world_size):
|
||||
def run_rabit_worker(rabit_env: dict, world_size: int) -> int:
|
||||
with xgb.collective.CommunicatorContext(**rabit_env):
|
||||
assert xgb.collective.get_world_size() == world_size
|
||||
assert xgb.collective.is_distributed()
|
||||
assert xgb.collective.get_processor_name() == socket.gethostname()
|
||||
ret = xgb.collective.broadcast("test1234", 0)
|
||||
assert str(ret) == "test1234"
|
||||
ret = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM)
|
||||
assert np.array_equal(ret, np.asarray([2, 4, 6]))
|
||||
reduced = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM)
|
||||
assert np.array_equal(reduced, np.asarray([2, 4, 6]))
|
||||
return 0
|
||||
|
||||
|
||||
@pytest.mark.skipif(**tm.no_loky())
|
||||
def test_rabit_communicator() -> None:
|
||||
world_size = 2
|
||||
tracker = RabitTracker(host_ip="127.0.0.1", n_workers=world_size)
|
||||
tracker.start()
|
||||
workers = []
|
||||
for _ in range(world_size):
|
||||
worker = multiprocessing.Process(
|
||||
target=run_rabit_worker, args=(tracker.worker_args(), world_size)
|
||||
)
|
||||
workers.append(worker)
|
||||
worker.start()
|
||||
for worker in workers:
|
||||
worker.join()
|
||||
assert worker.exitcode == 0
|
||||
with get_reusable_executor(max_workers=world_size) as pool:
|
||||
for _ in range(world_size):
|
||||
worker = pool.submit(
|
||||
run_rabit_worker, rabit_env=tracker.worker_args(), world_size=world_size
|
||||
)
|
||||
workers.append(worker)
|
||||
|
||||
for worker in workers:
|
||||
assert worker.result() == 0
|
||||
|
||||
|
||||
def run_federated_worker(port: int, world_size: int, rank: int) -> None:
|
||||
def run_federated_worker(port: int, world_size: int, rank: int) -> int:
|
||||
with xgb.collective.CommunicatorContext(
|
||||
dmlc_communicator="federated",
|
||||
federated_server_address=f"localhost:{port}",
|
||||
@@ -52,30 +54,28 @@ def run_federated_worker(port: int, world_size: int, rank: int) -> None:
|
||||
assert str(bret) == "test1234"
|
||||
aret = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM)
|
||||
assert np.array_equal(aret, np.asarray([2, 4, 6]))
|
||||
return 0
|
||||
|
||||
|
||||
@pytest.mark.skipif(**tm.skip_win())
|
||||
@pytest.mark.skipif(**tm.no_loky())
|
||||
def test_federated_communicator():
|
||||
if not build_info()["USE_FEDERATED"]:
|
||||
pytest.skip("XGBoost not built with federated learning enabled")
|
||||
|
||||
port = 9091
|
||||
world_size = 2
|
||||
tracker = multiprocessing.Process(
|
||||
target=federated.run_federated_server,
|
||||
kwargs={"port": port, "n_workers": world_size, "blocking": False},
|
||||
)
|
||||
tracker.start()
|
||||
if not tracker.is_alive():
|
||||
raise Exception("Error starting Federated Learning server")
|
||||
with get_reusable_executor(max_workers=world_size+1) as pool:
|
||||
kwargs={"port": port, "n_workers": world_size, "blocking": False}
|
||||
tracker = pool.submit(federated.run_federated_server, **kwargs)
|
||||
if not tracker.running():
|
||||
raise RuntimeError("Error starting Federated Learning server")
|
||||
|
||||
workers = []
|
||||
for rank in range(world_size):
|
||||
worker = multiprocessing.Process(
|
||||
target=run_federated_worker, args=(port, world_size, rank)
|
||||
)
|
||||
workers.append(worker)
|
||||
worker.start()
|
||||
for worker in workers:
|
||||
worker.join()
|
||||
assert worker.exitcode == 0
|
||||
workers = []
|
||||
for rank in range(world_size):
|
||||
worker = pool.submit(
|
||||
run_federated_worker, port=port, world_size=world_size, rank=rank
|
||||
)
|
||||
workers.append(worker)
|
||||
for worker in workers:
|
||||
assert worker.result() == 0
|
||||
|
||||
Reference in New Issue
Block a user