Fix #3730: scikit-learn 0.20 compatibility fix (#3731)

* Fix #3730: scikit-learn 0.20 compatibility fix

sklearn.cross_validation was removed in scikit-learn 0.20,
so replace it with sklearn.model_selection

* Display test names for Python tests for clarity
This commit is contained in:
Philip Hyunsu Cho
2018-09-27 15:03:05 -07:00
committed by GitHub
parent fbe9d41dd0
commit 51478a39c9
4 changed files with 31 additions and 48 deletions

View File

@@ -22,21 +22,13 @@ class TemporaryDirectory(object):
def test_binary_classification():
tm._skip_if_no_sklearn()
from sklearn.datasets import load_digits
try:
from sklearn.model_selection import KFold
except:
from sklearn.cross_validation import KFold
from sklearn.model_selection import KFold
digits = load_digits(2)
y = digits['target']
X = digits['data']
try:
kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
except TypeError: # sklearn.model_selection.KFold uses n_split
kf = KFold(
n_splits=2, shuffle=True, random_state=rng
).split(np.arange(y.shape[0]))
for train_index, test_index in kf:
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X, y):
xgb_model = xgb.XGBClassifier().fit(X[train_index], y[train_index])
preds = xgb_model.predict(X[test_index])
labels = y[test_index]
@@ -48,10 +40,7 @@ def test_binary_classification():
def test_multiclass_classification():
tm._skip_if_no_sklearn()
from sklearn.datasets import load_iris
try:
from sklearn.cross_validation import KFold
except:
from sklearn.model_selection import KFold
from sklearn.model_selection import KFold
def check_pred(preds, labels, output_margin):
if output_margin:
@@ -65,8 +54,8 @@ def test_multiclass_classification():
iris = load_iris()
y = iris['target']
X = iris['data']
kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
for train_index, test_index in kf:
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X, y):
xgb_model = xgb.XGBClassifier().fit(X[train_index], y[train_index])
preds = xgb_model.predict(X[test_index])
# test other params in XGBClassifier().fit
@@ -149,13 +138,13 @@ def test_boston_housing_regression():
tm._skip_if_no_sklearn()
from sklearn.metrics import mean_squared_error
from sklearn.datasets import load_boston
from sklearn.cross_validation import KFold
from sklearn.model_selection import KFold
boston = load_boston()
y = boston['target']
X = boston['data']
kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
for train_index, test_index in kf:
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X, y):
xgb_model = xgb.XGBRegressor().fit(X[train_index], y[train_index])
preds = xgb_model.predict(X[test_index])
@@ -173,7 +162,7 @@ def test_boston_housing_regression():
def test_parameter_tuning():
tm._skip_if_no_sklearn()
from sklearn.grid_search import GridSearchCV
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_boston
boston = load_boston()
@@ -181,7 +170,8 @@ def test_parameter_tuning():
X = boston['data']
xgb_model = xgb.XGBRegressor()
clf = GridSearchCV(xgb_model, {'max_depth': [2, 4, 6],
'n_estimators': [50, 100, 200]}, verbose=1)
'n_estimators': [50, 100, 200]},
cv=3, verbose=1, iid=True)
clf.fit(X, y)
assert clf.best_score_ < 0.7
assert clf.best_params_ == {'n_estimators': 100, 'max_depth': 4}
@@ -191,7 +181,7 @@ def test_regression_with_custom_objective():
tm._skip_if_no_sklearn()
from sklearn.metrics import mean_squared_error
from sklearn.datasets import load_boston
from sklearn.cross_validation import KFold
from sklearn.model_selection import KFold
def objective_ls(y_true, y_pred):
grad = (y_pred - y_true)
@@ -201,8 +191,8 @@ def test_regression_with_custom_objective():
boston = load_boston()
y = boston['target']
X = boston['data']
kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
for train_index, test_index in kf:
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X, y):
xgb_model = xgb.XGBRegressor(objective=objective_ls).fit(
X[train_index], y[train_index]
)
@@ -224,7 +214,7 @@ def test_regression_with_custom_objective():
def test_classification_with_custom_objective():
tm._skip_if_no_sklearn()
from sklearn.datasets import load_digits
from sklearn.cross_validation import KFold
from sklearn.model_selection import KFold
def logregobj(y_true, y_pred):
y_pred = 1.0 / (1.0 + np.exp(-y_pred))
@@ -235,8 +225,8 @@ def test_classification_with_custom_objective():
digits = load_digits(2)
y = digits['target']
X = digits['data']
kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
for train_index, test_index in kf:
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X, y):
xgb_model = xgb.XGBClassifier(objective=logregobj)
xgb_model.fit(X[train_index], y[train_index])
preds = xgb_model.predict(X[test_index])
@@ -263,10 +253,11 @@ def test_classification_with_custom_objective():
def test_sklearn_api():
tm._skip_if_no_sklearn()
from sklearn.datasets import load_iris
from sklearn.cross_validation import train_test_split
from sklearn.model_selection import train_test_split
iris = load_iris()
tr_d, te_d, tr_l, te_l = train_test_split(iris.data, iris.target, train_size=120)
tr_d, te_d, tr_l, te_l = train_test_split(iris.data, iris.target,
train_size=120, test_size=0.2)
classifier = xgb.XGBClassifier(booster='gbtree', n_estimators=10)
classifier.fit(tr_d, tr_l)
@@ -280,7 +271,7 @@ def test_sklearn_api():
def test_sklearn_api_gblinear():
tm._skip_if_no_sklearn()
from sklearn.datasets import load_iris
from sklearn.cross_validation import train_test_split
from sklearn.model_selection import train_test_split
iris = load_iris()
tr_d, te_d, tr_l, te_l = train_test_split(iris.data, iris.target, train_size=120)
@@ -514,23 +505,15 @@ def test_validation_weights_xgbclassifier():
def test_save_load_model():
tm._skip_if_no_sklearn()
from sklearn.datasets import load_digits
try:
from sklearn.model_selection import KFold
except:
from sklearn.cross_validation import KFold
from sklearn.model_selection import KFold
digits = load_digits(2)
y = digits['target']
X = digits['data']
try:
kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
except TypeError: # sklearn.model_selection.KFold uses n_split
kf = KFold(
n_splits=2, shuffle=True, random_state=rng
).split(np.arange(y.shape[0]))
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
with TemporaryDirectory() as tempdir:
model_path = os.path.join(tempdir, 'digits.model')
for train_index, test_index in kf:
for train_index, test_index in kf.split(X, y):
xgb_model = xgb.XGBClassifier().fit(X[train_index], y[train_index])
xgb_model.save_model(model_path)
xgb_model = xgb.XGBModel()