[CI] Add timeout for distributed GPU tests. (#9917)

parent b807f3e30c
commit 6a5f6ba694
@@ -5,9 +5,13 @@ import pytest
 
 from xgboost import testing as tm
 
+pytestmark = [
+    pytest.mark.skipif(**tm.no_dask()),
+    pytest.mark.skipif(**tm.no_dask_cuda()),
+    tm.timeout(60),
+]
+
 
-@pytest.mark.skipif(**tm.no_dask())
-@pytest.mark.skipif(**tm.no_dask_cuda())
 @pytest.mark.skipif(**tm.no_cupy())
 @pytest.mark.mgpu
 def test_dask_training():
@@ -16,8 +20,6 @@ def test_dask_training():
     subprocess.check_call(cmd)
 
 
-@pytest.mark.skipif(**tm.no_dask_cuda())
-@pytest.mark.skipif(**tm.no_dask())
 @pytest.mark.mgpu
 def test_dask_sklearn_demo():
     script = os.path.join(tm.demo_dir(__file__), "dask", "sklearn_gpu_training.py")
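The two hunks above (the dask GPU demo tests, going by the identifiers) replace per-test @pytest.mark.skipif decorators with a module-level pytestmark list and add tm.timeout(60), so a hung multi-GPU test fails after a minute instead of stalling the CI job. tm.timeout is plausibly a thin wrapper over the marker from the pytest-timeout plugin; a minimal sketch of such a helper (not the actual xgboost implementation) could look like:

import pytest

def timeout(sec, *args, enable=True, **kwargs):
    # Hypothetical sketch of a tm.timeout-style helper: forward to the
    # pytest-timeout plugin's marker. The enable switch makes it easy to
    # lift the limit when stepping through a test under a debugger
    # (pytest-timeout treats a budget of 0 as "no timeout").
    if enable:
        return pytest.mark.timeout(sec, *args, **kwargs)
    return pytest.mark.timeout(0, *args, **kwargs)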
@@ -1,4 +1,4 @@
-"""Copyright 2019-2022 XGBoost contributors"""
+"""Copyright 2019-2023, XGBoost contributors"""
 import asyncio
 import json
 from collections import OrderedDict
@@ -18,6 +18,7 @@ from xgboost.testing.params import hist_parameter_strategy
 pytestmark = [
     pytest.mark.skipif(**tm.no_dask()),
     pytest.mark.skipif(**tm.no_dask_cuda()),
+    tm.timeout(60),
 ]
 
 from ..test_with_dask.test_with_dask import generate_array
@@ -629,6 +630,7 @@ def test_nccl_load(local_cuda_client: Client, tree_method: str) -> None:
     def run(wid: int) -> None:
         # FIXME(jiamingy): https://github.com/dmlc/xgboost/issues/9147
         from xgboost.core import _LIB, _register_log_callback
+
         _register_log_callback(_LIB)
 
         with CommunicatorContext(**args):
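Marks collected in a module-level pytestmark list apply to every test in that file, which is why the single tm.timeout(60) entry added above covers the whole dask test module. A toy illustration (hypothetical test names, assuming the helper resolves to pytest-timeout's marker):

import pytest

pytestmark = [pytest.mark.timeout(60)]  # budget for every test in this module

def test_quick():
    # Finishes well inside the 60-second budget.
    assert 1 + 1 == 2

@pytest.mark.timeout(5)
def test_tighter():
    # A function-level mark sits closer to the test than pytestmark,
    # so pytest-timeout applies the 5-second budget here instead.
    assert True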
@@ -2,7 +2,10 @@ import pytest
 
 from xgboost import testing as tm
 
-pytestmark = pytest.mark.skipif(**tm.no_spark())
+pytestmark = [
+    pytest.mark.skipif(**tm.no_spark()),
+    tm.timeout(120),
+]
 
 from ..test_with_spark.test_data import run_dmatrix_ctor
 
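The spark data tests get a larger 120-second budget. To sanity-check that such a budget actually terminates a stuck test rather than letting CI hang, a throwaway module like the following (assuming pytest-timeout is installed) fails after roughly two seconds instead of sleeping out the full minute:

import time

import pytest

pytestmark = [pytest.mark.timeout(2)]

def test_hang():
    # Deliberately exceeds the 2-second budget; pytest-timeout aborts
    # the test and reports a failure instead of blocking the run.
    time.sleep(60)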
@@ -8,7 +8,10 @@ import sklearn
 
 from xgboost import testing as tm
 
-pytestmark = pytest.mark.skipif(**tm.no_spark())
+pytestmark = [
+    pytest.mark.skipif(**tm.no_spark()),
+    tm.timeout(240),
+]
 
 from pyspark.ml.linalg import Vectors
 from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
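The budgets scale with how heavy each suite is: 60 seconds for the dask modules, 120 for the spark DMatrix-construction tests, and 240 for the spark GPU training tests above. If tm.timeout does delegate to pytest-timeout, a global fallback could equally be set on the command line, e.g. pytest --timeout=300, with the per-module marks taking precedence.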