Fix #3730: scikit-learn 0.20 compatibility fix (#3731)

* Fix #3730: scikit-learn 0.20 compatibility fix

The sklearn.cross_validation module has been removed in scikit-learn 0.20,
so replace it with sklearn.model_selection. A before/after sketch of the API change follows these notes.

* Display test names for Python tests for clarity
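
For reference, a minimal sketch of the KFold API difference fixed here, using illustrative synthetic data:

import numpy as np
from sklearn.model_selection import KFold

X = np.arange(20).reshape(10, 2)
y = np.arange(10)

# Old API (sklearn.cross_validation, removed in 0.20): the data size and
# fold count went to the constructor, and the object itself was iterable:
#     kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=1994)
#     for train_index, test_index in kf: ...

# New API (sklearn.model_selection, available since 0.18): the splitter is
# constructed without data; fold indices come from .split():
kf = KFold(n_splits=2, shuffle=True, random_state=1994)
for train_index, test_index in kf.split(X, y):
    print(train_index, test_index)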
Philip Hyunsu Cho 2018-09-27 15:03:05 -07:00 committed by Philip Cho
parent b1233ef2ae
commit bc35b8e97b
4 changed files with 28 additions and 37 deletions

View File

@@ -3,6 +3,6 @@
 cd python-package
 python setup.py install --user
 cd ..
-python -m nose --attr='!slow' tests/python-gpu/
+python -m nose -v --attr='!slow' tests/python-gpu/
 ./testxgboost

View File

@@ -49,7 +49,7 @@ class TestGPUPredict(unittest.TestCase):
     # Test case for a bug where multiple batch predictions made on a test set produce incorrect results
     def test_multi_predict(self):
         from sklearn.datasets import make_regression
-        from sklearn.cross_validation import train_test_split
+        from sklearn.model_selection import train_test_split
         n = 1000
         X, y = make_regression(n, random_state=rng)

View File

@@ -9,21 +9,13 @@ rng = np.random.RandomState(1994)
 def test_binary_classification():
     tm._skip_if_no_sklearn()
     from sklearn.datasets import load_digits
-    try:
-        from sklearn.model_selection import KFold
-    except:
-        from sklearn.cross_validation import KFold
+    from sklearn.model_selection import KFold
 
     digits = load_digits(2)
     y = digits['target']
     X = digits['data']
-    try:
-        kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
-    except TypeError:  # sklearn.model_selection.KFold uses n_split
-        kf = KFold(
-            n_splits=2, shuffle=True, random_state=rng
-        ).split(np.arange(y.shape[0]))
-    for train_index, test_index in kf:
+    kf = KFold(n_splits=2, shuffle=True, random_state=rng)
+    for train_index, test_index in kf.split(X, y):
         xgb_model = xgb.XGBClassifier().fit(X[train_index], y[train_index])
         preds = xgb_model.predict(X[test_index])
         labels = y[test_index]
@@ -35,10 +27,7 @@ def test_binary_classification():
 def test_multiclass_classification():
     tm._skip_if_no_sklearn()
     from sklearn.datasets import load_iris
-    try:
-        from sklearn.cross_validation import KFold
-    except:
-        from sklearn.model_selection import KFold
+    from sklearn.model_selection import KFold
 
     def check_pred(preds, labels):
         err = sum(1 for i in range(len(preds))
@@ -48,8 +37,8 @@ def test_multiclass_classification():
     iris = load_iris()
     y = iris['target']
     X = iris['data']
-    kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
-    for train_index, test_index in kf:
+    kf = KFold(n_splits=2, shuffle=True, random_state=rng)
+    for train_index, test_index in kf.split(X, y):
         xgb_model = xgb.XGBClassifier().fit(X[train_index], y[train_index])
         preds = xgb_model.predict(X[test_index])
         # test other params in XGBClassifier().fit
@@ -98,13 +87,13 @@ def test_boston_housing_regression():
     tm._skip_if_no_sklearn()
     from sklearn.metrics import mean_squared_error
     from sklearn.datasets import load_boston
-    from sklearn.cross_validation import KFold
+    from sklearn.model_selection import KFold
 
     boston = load_boston()
     y = boston['target']
     X = boston['data']
-    kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
-    for train_index, test_index in kf:
+    kf = KFold(n_splits=2, shuffle=True, random_state=rng)
+    for train_index, test_index in kf.split(X, y):
         xgb_model = xgb.XGBRegressor().fit(X[train_index], y[train_index])
         preds = xgb_model.predict(X[test_index])
@@ -122,7 +111,7 @@ def test_boston_housing_regression():
 def test_parameter_tuning():
     tm._skip_if_no_sklearn()
-    from sklearn.grid_search import GridSearchCV
+    from sklearn.model_selection import GridSearchCV
     from sklearn.datasets import load_boston
 
     boston = load_boston()
@@ -130,7 +119,8 @@ def test_parameter_tuning():
     X = boston['data']
     xgb_model = xgb.XGBRegressor()
     clf = GridSearchCV(xgb_model, {'max_depth': [2, 4, 6],
-                                   'n_estimators': [50, 100, 200]}, verbose=1)
+                                   'n_estimators': [50, 100, 200]},
+                       cv=3, verbose=1, iid=True)
     clf.fit(X, y)
     assert clf.best_score_ < 0.7
     assert clf.best_params_ == {'n_estimators': 100, 'max_depth': 4}
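
Aside on the hunk above: pinning cv=3 and iid=True makes the pre-0.20 behavior explicit, since scikit-learn 0.20 began warning that these defaults would change. A minimal stand-alone sketch of the same pattern, with synthetic data and illustrative parameter values (iid is omitted here because it was later removed from scikit-learn entirely):

import xgboost as xgb
from sklearn.datasets import make_regression
from sklearn.model_selection import GridSearchCV

# Grid search over a small illustrative grid; cv is pinned explicitly
# rather than relying on the library default.
X, y = make_regression(n_samples=200, random_state=1994)
clf = GridSearchCV(xgb.XGBRegressor(),
                   {'max_depth': [2, 4], 'n_estimators': [50, 100]},
                   cv=3, verbose=1)
clf.fit(X, y)
print(clf.best_params_, clf.best_score_)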
@@ -140,7 +130,7 @@ def test_regression_with_custom_objective():
     tm._skip_if_no_sklearn()
     from sklearn.metrics import mean_squared_error
     from sklearn.datasets import load_boston
-    from sklearn.cross_validation import KFold
+    from sklearn.model_selection import KFold
 
     def objective_ls(y_true, y_pred):
         grad = (y_pred - y_true)
@@ -150,8 +140,8 @@ def test_regression_with_custom_objective():
     boston = load_boston()
     y = boston['target']
     X = boston['data']
-    kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
-    for train_index, test_index in kf:
+    kf = KFold(n_splits=2, shuffle=True, random_state=rng)
+    for train_index, test_index in kf.split(X, y):
         xgb_model = xgb.XGBRegressor(objective=objective_ls).fit(
             X[train_index], y[train_index]
         )
@@ -173,7 +163,7 @@ def test_regression_with_custom_objective():
 def test_classification_with_custom_objective():
     tm._skip_if_no_sklearn()
     from sklearn.datasets import load_digits
-    from sklearn.cross_validation import KFold
+    from sklearn.model_selection import KFold
 
     def logregobj(y_true, y_pred):
         y_pred = 1.0 / (1.0 + np.exp(-y_pred))
@@ -184,8 +174,8 @@ def test_classification_with_custom_objective():
     digits = load_digits(2)
     y = digits['target']
     X = digits['data']
-    kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
-    for train_index, test_index in kf:
+    kf = KFold(n_splits=2, shuffle=True, random_state=rng)
+    for train_index, test_index in kf.split(X, y):
         xgb_model = xgb.XGBClassifier(objective=logregobj)
         xgb_model.fit(X[train_index], y[train_index])
         preds = xgb_model.predict(X[test_index])
@@ -212,10 +202,11 @@ def test_classification_with_custom_objective():
 def test_sklearn_api():
     tm._skip_if_no_sklearn()
     from sklearn.datasets import load_iris
-    from sklearn.cross_validation import train_test_split
+    from sklearn.model_selection import train_test_split
 
     iris = load_iris()
-    tr_d, te_d, tr_l, te_l = train_test_split(iris.data, iris.target, train_size=120)
+    tr_d, te_d, tr_l, te_l = train_test_split(iris.data, iris.target,
+                                              train_size=120, test_size=0.2)
     classifier = xgb.XGBClassifier(booster='gbtree', n_estimators=10)
     classifier.fit(tr_d, tr_l)
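
On the train_test_split change above: iris has 150 samples, so train_size=120 leaves exactly 30 for testing, and passing test_size=0.2 spells that out explicitly (presumably also avoiding scikit-learn 0.20's warning about the future default of test_size). A minimal sketch:

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

iris = load_iris()
tr_d, te_d, tr_l, te_l = train_test_split(iris.data, iris.target,
                                          train_size=120, test_size=0.2,
                                          random_state=1994)
print(tr_d.shape, te_d.shape)  # (120, 4) (30, 4)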
@@ -229,7 +220,7 @@ def test_sklearn_api():
 def test_sklearn_api_gblinear():
     tm._skip_if_no_sklearn()
     from sklearn.datasets import load_iris
-    from sklearn.cross_validation import train_test_split
+    from sklearn.model_selection import train_test_split
 
     iris = load_iris()
     tr_d, te_d, tr_l, te_l = train_test_split(iris.data, iris.target, train_size=120)

View File

@@ -48,7 +48,7 @@ if [ ${TASK} == "python_test" ]; then
     python --version
     conda install numpy scipy pandas matplotlib nose scikit-learn
     python -m pip install graphviz pytest pytest-cov codecov
-    python -m nose tests/python || exit -1
+    python -m nose -v tests/python || exit -1
     py.test tests/python --cov=python-package/xgboost
     codecov
     source activate python2
@@ -56,7 +56,7 @@ if [ ${TASK} == "python_test" ]; then
     python --version
     conda install numpy scipy pandas matplotlib nose scikit-learn
     python -m pip install graphviz
-    python -m nose tests/python || exit -1
+    python -m nose -v tests/python || exit -1
     exit 0
 fi
@@ -67,7 +67,7 @@ if [ ${TASK} == "python_lightweight_test" ]; then
    python --version
    conda install numpy scipy nose
    python -m pip install graphviz pytest pytest-cov codecov
-    python -m nose tests/python || exit -1
+    python -m nose -v tests/python || exit -1
    py.test tests/python --cov=python-package/xgboost
    codecov
    source activate python2
@@ -75,7 +75,7 @@ if [ ${TASK} == "python_lightweight_test" ]; then
    python --version
    conda install numpy scipy nose
    python -m pip install graphviz
-    python -m nose tests/python || exit -1
+    python -m nose -v tests/python || exit -1
    python -m pip install flake8==3.4.1
    flake8 --ignore E501 python-package || exit -1
    flake8 --ignore E501 tests/python || exit -1