diff --git a/tests/ci_build/test_gpu.sh b/tests/ci_build/test_gpu.sh
index dbef1bdde..cbbe86254 100755
--- a/tests/ci_build/test_gpu.sh
+++ b/tests/ci_build/test_gpu.sh
@@ -4,6 +4,6 @@ set -e
 cd python-package
 python setup.py install --user
 cd ..
-python -m nose --attr='!slow' tests/python-gpu/
+python -m nose -v --attr='!slow' tests/python-gpu/
 ./testxgboost
 
diff --git a/tests/python-gpu/test_gpu_prediction.py b/tests/python-gpu/test_gpu_prediction.py
index 07e86d8de..db50cdb46 100644
--- a/tests/python-gpu/test_gpu_prediction.py
+++ b/tests/python-gpu/test_gpu_prediction.py
@@ -49,7 +49,7 @@ class TestGPUPredict(unittest.TestCase):
     # Test case for a bug where multiple batch predictions made on a test set produce incorrect results
     def test_multi_predict(self):
         from sklearn.datasets import make_regression
-        from sklearn.cross_validation import train_test_split
+        from sklearn.model_selection import train_test_split
 
         n = 1000
         X, y = make_regression(n, random_state=rng)
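Note: sklearn.cross_validation was deprecated in scikit-learn 0.18 and removed in 0.20, which is why this patch moves every import over to sklearn.model_selection instead of keeping fallbacks. A minimal standalone sketch of the renamed train_test_split import, mirroring the test above (illustrative only, not part of the patch; the random_state value is arbitrary):

    from sklearn.datasets import make_regression
    from sklearn.model_selection import train_test_split  # was: sklearn.cross_validation

    # Same shape of data as test_multi_predict uses.
    X, y = make_regression(n_samples=1000, random_state=7)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=7)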
diff --git a/tests/python/test_with_sklearn.py b/tests/python/test_with_sklearn.py
index b184b3952..67b7033c0 100644
--- a/tests/python/test_with_sklearn.py
+++ b/tests/python/test_with_sklearn.py
@@ -22,21 +22,13 @@ class TemporaryDirectory(object):
 def test_binary_classification():
     tm._skip_if_no_sklearn()
     from sklearn.datasets import load_digits
-    try:
-        from sklearn.model_selection import KFold
-    except:
-        from sklearn.cross_validation import KFold
+    from sklearn.model_selection import KFold
 
     digits = load_digits(2)
     y = digits['target']
     X = digits['data']
-    try:
-        kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
-    except TypeError:  # sklearn.model_selection.KFold uses n_split
-        kf = KFold(
-            n_splits=2, shuffle=True, random_state=rng
-        ).split(np.arange(y.shape[0]))
-    for train_index, test_index in kf:
+    kf = KFold(n_splits=2, shuffle=True, random_state=rng)
+    for train_index, test_index in kf.split(X, y):
         xgb_model = xgb.XGBClassifier().fit(X[train_index], y[train_index])
         preds = xgb_model.predict(X[test_index])
         labels = y[test_index]
@@ -48,10 +40,7 @@ def test_binary_classification():
 def test_multiclass_classification():
     tm._skip_if_no_sklearn()
     from sklearn.datasets import load_iris
-    try:
-        from sklearn.cross_validation import KFold
-    except:
-        from sklearn.model_selection import KFold
+    from sklearn.model_selection import KFold
 
     def check_pred(preds, labels):
         err = sum(1 for i in range(len(preds))
@@ -61,8 +50,8 @@ def test_multiclass_classification():
     iris = load_iris()
     y = iris['target']
     X = iris['data']
-    kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
-    for train_index, test_index in kf:
+    kf = KFold(n_splits=2, shuffle=True, random_state=rng)
+    for train_index, test_index in kf.split(X, y):
         xgb_model = xgb.XGBClassifier().fit(X[train_index], y[train_index])
         preds = xgb_model.predict(X[test_index])
         # test other params in XGBClassifier().fit
@@ -111,13 +100,13 @@ def test_boston_housing_regression():
     tm._skip_if_no_sklearn()
     from sklearn.metrics import mean_squared_error
     from sklearn.datasets import load_boston
-    from sklearn.cross_validation import KFold
+    from sklearn.model_selection import KFold
 
     boston = load_boston()
     y = boston['target']
     X = boston['data']
-    kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
-    for train_index, test_index in kf:
+    kf = KFold(n_splits=2, shuffle=True, random_state=rng)
+    for train_index, test_index in kf.split(X, y):
         xgb_model = xgb.XGBRegressor().fit(X[train_index], y[train_index])
         preds = xgb_model.predict(X[test_index])
 
@@ -135,7 +124,7 @@ def test_boston_housing_regression():
 
 def test_parameter_tuning():
     tm._skip_if_no_sklearn()
-    from sklearn.grid_search import GridSearchCV
+    from sklearn.model_selection import GridSearchCV
     from sklearn.datasets import load_boston
 
     boston = load_boston()
@@ -143,7 +132,8 @@ def test_parameter_tuning():
     X = boston['data']
     xgb_model = xgb.XGBRegressor()
     clf = GridSearchCV(xgb_model, {'max_depth': [2, 4, 6],
-                                   'n_estimators': [50, 100, 200]}, verbose=1)
+                                   'n_estimators': [50, 100, 200]},
+                       cv=3, verbose=1, iid=True)
     clf.fit(X, y)
     assert clf.best_score_ < 0.7
     assert clf.best_params_ == {'n_estimators': 100, 'max_depth': 4}
@@ -153,7 +143,7 @@ def test_regression_with_custom_objective():
     tm._skip_if_no_sklearn()
     from sklearn.metrics import mean_squared_error
     from sklearn.datasets import load_boston
-    from sklearn.cross_validation import KFold
+    from sklearn.model_selection import KFold
 
     def objective_ls(y_true, y_pred):
         grad = (y_pred - y_true)
@@ -163,8 +153,8 @@ def test_regression_with_custom_objective():
     boston = load_boston()
     y = boston['target']
     X = boston['data']
-    kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
-    for train_index, test_index in kf:
+    kf = KFold(n_splits=2, shuffle=True, random_state=rng)
+    for train_index, test_index in kf.split(X, y):
         xgb_model = xgb.XGBRegressor(objective=objective_ls).fit(
             X[train_index], y[train_index]
         )
@@ -186,7 +176,7 @@ def test_regression_with_custom_objective():
 def test_classification_with_custom_objective():
     tm._skip_if_no_sklearn()
     from sklearn.datasets import load_digits
-    from sklearn.cross_validation import KFold
+    from sklearn.model_selection import KFold
 
     def logregobj(y_true, y_pred):
         y_pred = 1.0 / (1.0 + np.exp(-y_pred))
@@ -197,8 +187,8 @@ def test_classification_with_custom_objective():
     digits = load_digits(2)
     y = digits['target']
     X = digits['data']
-    kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
-    for train_index, test_index in kf:
+    kf = KFold(n_splits=2, shuffle=True, random_state=rng)
+    for train_index, test_index in kf.split(X, y):
         xgb_model = xgb.XGBClassifier(objective=logregobj)
         xgb_model.fit(X[train_index], y[train_index])
         preds = xgb_model.predict(X[test_index])
@@ -225,10 +215,11 @@ def test_classification_with_custom_objective():
 def test_sklearn_api():
     tm._skip_if_no_sklearn()
     from sklearn.datasets import load_iris
-    from sklearn.cross_validation import train_test_split
+    from sklearn.model_selection import train_test_split
 
     iris = load_iris()
-    tr_d, te_d, tr_l, te_l = train_test_split(iris.data, iris.target, train_size=120)
+    tr_d, te_d, tr_l, te_l = train_test_split(iris.data, iris.target,
+                                              train_size=120, test_size=0.2)
     classifier = xgb.XGBClassifier(booster='gbtree', n_estimators=10)
     classifier.fit(tr_d, tr_l)
 
@@ -242,7 +233,7 @@ def test_sklearn_api_gblinear():
     tm._skip_if_no_sklearn()
     from sklearn.datasets import load_iris
-    from sklearn.cross_validation import train_test_split
+    from sklearn.model_selection import train_test_split
 
     iris = load_iris()
     tr_d, te_d, tr_l, te_l = train_test_split(iris.data, iris.target, train_size=120)
 
@@ -476,23 +467,15 @@ def test_validation_weights_xgbclassifier():
 def test_save_load_model():
     tm._skip_if_no_sklearn()
     from sklearn.datasets import load_digits
-    try:
-        from sklearn.model_selection import KFold
-    except:
-        from sklearn.cross_validation import KFold
+    from sklearn.model_selection import KFold
 
     digits = load_digits(2)
     y = digits['target']
     X = digits['data']
-    try:
-        kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
-    except TypeError:  # sklearn.model_selection.KFold uses n_split
-        kf = KFold(
-            n_splits=2, shuffle=True, random_state=rng
-        ).split(np.arange(y.shape[0]))
+    kf = KFold(n_splits=2, shuffle=True, random_state=rng)
     with TemporaryDirectory() as tempdir:
         model_path = os.path.join(tempdir, 'digits.model')
-        for train_index, test_index in kf:
+        for train_index, test_index in kf.split(X, y):
             xgb_model = xgb.XGBClassifier().fit(X[train_index], y[train_index])
             xgb_model.save_model(model_path)
             xgb_model = xgb.XGBModel()
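Note: the KFold rewrite above is the core of this change. The removed sklearn.cross_validation.KFold was constructed with the sample count plus n_folds and was itself iterable; sklearn.model_selection.KFold takes n_splits and yields index pairs from its split() method, so the try/except shims are no longer needed. A minimal standalone sketch of the new API (illustrative only, not part of the patch):

    import numpy as np
    from sklearn.model_selection import KFold

    X = np.arange(20).reshape(10, 2)
    y = np.arange(10)

    # Removed API: kf = KFold(10, n_folds=2, ...); for train_idx, test_idx in kf: ...
    # Current API: the splitter is data-independent; indices come from split().
    kf = KFold(n_splits=2, shuffle=True, random_state=0)
    for train_index, test_index in kf.split(X, y):  # plain KFold accepts y but ignores it
        print(train_index, test_index)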
diff --git a/tests/travis/run_test.sh b/tests/travis/run_test.sh
index 01278dcab..58c7b1fc8 100755
--- a/tests/travis/run_test.sh
+++ b/tests/travis/run_test.sh
@@ -56,7 +56,7 @@ if [ ${TASK} == "python_test" ]; then
 
     python -m pip install datatable --no-binary datatable
     python -m pip install graphviz pytest pytest-cov codecov
-    python -m nose tests/python || exit -1
+    python -m nose -v tests/python || exit -1
     py.test tests/python --cov=python-package/xgboost
     codecov
     source activate python2
@@ -64,7 +64,7 @@ if [ ${TASK} == "python_test" ]; then
     python --version
     conda install numpy scipy pandas matplotlib nose scikit-learn
     python -m pip install graphviz
-    python -m nose tests/python || exit -1
+    python -m nose -v tests/python || exit -1
     exit 0
 fi
 
@@ -75,7 +75,7 @@ if [ ${TASK} == "python_lightweight_test" ]; then
     python --version
     conda install numpy scipy nose
     python -m pip install graphviz pytest pytest-cov codecov
-    python -m nose tests/python || exit -1
+    python -m nose -v tests/python || exit -1
     py.test tests/python --cov=python-package/xgboost
     codecov
     source activate python2
@@ -83,7 +83,7 @@ if [ ${TASK} == "python_lightweight_test" ]; then
     python --version
     conda install numpy scipy nose
     python -m pip install graphviz
-    python -m nose tests/python || exit -1
+    python -m nose -v tests/python || exit -1
     python -m pip install flake8==3.4.1
     flake8 --ignore E501 python-package || exit -1
     flake8 --ignore E501 tests/python || exit -1
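Note: the GridSearchCV and train_test_split updates in test_with_sklearn.py follow the same migration: GridSearchCV moved from sklearn.grid_search to sklearn.model_selection, and the patch pins cv=3 and iid=True rather than relying on version-dependent defaults (iid was later deprecated and removed from scikit-learn entirely). A minimal standalone sketch of the migrated calls (illustrative only, not part of the patch; iid is omitted so it also runs on current scikit-learn):

    import xgboost as xgb
    from sklearn.datasets import load_iris
    from sklearn.model_selection import GridSearchCV, train_test_split

    iris = load_iris()
    # Pass test_size explicitly alongside train_size instead of relying on a default.
    tr_d, te_d, tr_l, te_l = train_test_split(iris.data, iris.target,
                                              train_size=120, test_size=0.2)

    # cv is pinned to a fixed number of folds, as the patch does.
    clf = GridSearchCV(xgb.XGBClassifier(n_estimators=10),
                       {'max_depth': [2, 4, 6]}, cv=3)
    clf.fit(tr_d, tr_l)
    print(clf.best_params_)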