From 166e87883099a451cd654c4a0aae7d938a89dc19 Mon Sep 17 00:00:00 2001
From: terrytangyuan
Date: Mon, 2 Nov 2015 19:42:21 -0600
Subject: [PATCH] Added tests for additional params in sklearn wrapper (+1
 squashed commit)

Squashed commits:
[43892b9] Added tests for additional params in sklearn wrapper
---
 CHANGES.md                        |  1 +
 tests/python/test_with_sklearn.py | 97 +++++++++++++++++--------------
 2 files changed, 55 insertions(+), 43 deletions(-)

diff --git a/CHANGES.md b/CHANGES.md
index 1d31271be..8c06b38fd 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -42,6 +42,7 @@ on going at master
 * Python module now throw exception instead of crash terminal when a parameter error happens.
 * Python module now has importance plot and tree plot functions.
 * Python module now accepts different learning rates for each boosting round.
+* Additional parameters added for sklearn wrapper
 * Java api is ready for use
 * Added more test cases and continuous integration to make each build more robust
 * Improvements in sklearn compatible module
diff --git a/tests/python/test_with_sklearn.py b/tests/python/test_with_sklearn.py
index f32374d56..cc62f1c27 100644
--- a/tests/python/test_with_sklearn.py
+++ b/tests/python/test_with_sklearn.py
@@ -4,54 +4,65 @@ from sklearn.cross_validation import KFold, train_test_split
 from sklearn.metrics import mean_squared_error
 from sklearn.grid_search import GridSearchCV
 from sklearn.datasets import load_iris, load_digits, load_boston
+import unittest
 
 rng = np.random.RandomState(1994)
 
-def test_binary_classification():
-    digits = load_digits(2)
-    y = digits['target']
-    X = digits['data']
-    kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
-    for train_index, test_index in kf:
-        xgb_model = xgb.XGBClassifier().fit(X[train_index],y[train_index])
-        preds = xgb_model.predict(X[test_index])
-        labels = y[test_index]
-        err = sum(1 for i in range(len(preds)) if int(preds[i]>0.5)!=labels[i]) / float(len(preds))
-        assert err < 0.1
+class TestSklearn(unittest.TestCase):
 
-def test_multiclass_classification():
-    iris = load_iris()
-    y = iris['target']
-    X = iris['data']
-    kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
-    for train_index, test_index in kf:
-        xgb_model = xgb.XGBClassifier().fit(X[train_index],y[train_index])
-        preds = xgb_model.predict(X[test_index])
-        labels = y[test_index]
-        err = sum(1 for i in range(len(preds)) if int(preds[i]>0.5)!=labels[i]) / float(len(preds))
-        assert err < 0.4
+    def test_binary_classification():
+        digits = load_digits(2)
+        y = digits['target']
+        X = digits['data']
+        kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
+        for train_index, test_index in kf:
+            xgb_model = xgb.XGBClassifier().fit(X[train_index],y[train_index])
+            preds = xgb_model.predict(X[test_index])
+            labels = y[test_index]
+            err = sum(1 for i in range(len(preds)) if int(preds[i]>0.5)!=labels[i]) / float(len(preds))
+            assert err < 0.1
 
-def test_boston_housing_regression():
-    boston = load_boston()
-    y = boston['target']
-    X = boston['data']
-    kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
-    for train_index, test_index in kf:
-        xgb_model = xgb.XGBRegressor().fit(X[train_index],y[train_index])
-        preds = xgb_model.predict(X[test_index])
-        labels = y[test_index]
-        assert mean_squared_error(preds, labels) < 15
+    def test_multiclass_classification():
+        iris = load_iris()
+        y = iris['target']
+        X = iris['data']
+        kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
+        for train_index, test_index in kf:
+            xgb_model = xgb.XGBClassifier().fit(X[train_index],y[train_index])
+            preds = xgb_model.predict(X[test_index])
+            # test other params in XGBClassifier().fit
+            preds2 = xgb_model.predict(X[test_index], output_margin=True, ntree_limit=3)
+            preds3 = xgb_model.predict(X[test_index], output_margin=True, ntree_limit=0)
+            preds4 = xgb_model.predict(X[test_index], output_margin=False, ntree_limit=3)
+            labels = y[test_index]
+            err = sum(1 for i in range(len(preds)) if int(preds[i]>0.5)!=labels[i]) / float(len(preds))
+            assert err < 0.4
 
-def test_parameter_tuning():
-    boston = load_boston()
-    y = boston['target']
-    X = boston['data']
-    xgb_model = xgb.XGBRegressor()
-    clf = GridSearchCV(xgb_model,
-                       {'max_depth': [2,4,6],
-                        'n_estimators': [50,100,200]}, verbose=1)
-    clf.fit(X,y)
-    assert clf.best_score_ < 0.7
-    assert clf.best_params_ == {'n_estimators': 100, 'max_depth': 4}
+    def test_boston_housing_regression():
+        boston = load_boston()
+        y = boston['target']
+        X = boston['data']
+        kf = KFold(y.shape[0], n_folds=2, shuffle=True, random_state=rng)
+        for train_index, test_index in kf:
+            xgb_model = xgb.XGBRegressor().fit(X[train_index],y[train_index])
+            preds = xgb_model.predict(X[test_index])
+            # test other params in XGBRegressor().fit
+            preds2 = xgb_model.predict(X[test_index], output_margin=True, ntree_limit=3)
+            preds3 = xgb_model.predict(X[test_index], output_margin=True, ntree_limit=0)
+            preds4 = xgb_model.predict(X[test_index], output_margin=False, ntree_limit=3)
+            labels = y[test_index]
+            assert mean_squared_error(preds, labels) < 15
+
+    def test_parameter_tuning():
+        boston = load_boston()
+        y = boston['target']
+        X = boston['data']
+        xgb_model = xgb.XGBRegressor()
+        clf = GridSearchCV(xgb_model,
+                           {'max_depth': [2,4,6],
+                            'n_estimators': [50,100,200]}, verbose=1)
+        clf.fit(X,y)
+        assert clf.best_score_ < 0.7
+        assert clf.best_params_ == {'n_estimators': 100, 'max_depth': 4}
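
Note: the output_margin and ntree_limit arguments exercised by the new tests are
forwarded by the sklearn wrapper's predict() to the underlying booster;
ntree_limit=0 means "use every boosting round", and output_margin=True asks for
the raw, untransformed scores. Below is a minimal standalone sketch of the same
calls, not part of the patch itself; it assumes the wrapper API shown in the
tests, and the model name and n_estimators=10 are illustrative choices only.

import xgboost as xgb
from sklearn.datasets import load_boston

# Fit a small regressor so that ntree_limit has some rounds to truncate.
boston = load_boston()
X, y = boston['data'], boston['target']
model = xgb.XGBRegressor(n_estimators=10).fit(X, y)

preds_all = model.predict(X, ntree_limit=0)      # 0 = use all boosting rounds
preds_few = model.predict(X, ntree_limit=3)      # use only the first 3 rounds
margins = model.predict(X, output_margin=True)   # raw, untransformed booster output

# Every call scores the same rows, so the result shapes agree even though the
# values differ.
assert preds_all.shape == preds_few.shape == margins.shape == (X.shape[0],)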