Merge pull request #591 from terrytangyuan/test

More test coverage for Python package
This commit is contained in:
Yuan (Terry) Tang 2015-11-02 21:00:52 -06:00
commit a71ccd8372
3 changed files with 32 additions and 11 deletions

View File

@ -42,6 +42,7 @@ on going at master
* Python module now throws an exception instead of crashing the terminal when a parameter error happens.
* Python module now has importance plot and tree plot functions.
* Python module now accepts different learning rates for each boosting round.
* Additional parameters added for sklearn wrapper
* Java API is ready for use
* Added more test cases and continuous integration to make each build more robust
* Improvements in sklearn compatible module

View File

@ -2,18 +2,31 @@ import xgboost as xgb
import numpy as np
from sklearn.datasets import load_digits
from sklearn.cross_validation import KFold, train_test_split
import unittest
# Module-level NumPy RNG with a fixed seed (1994) so any randomized test
# data drawn from it is reproducible across runs.
rng = np.random.RandomState(1994)
def test_early_stopping_nonparallel():
    """Placeholder for the disabled non-parallel early-stopping test.

    The original body (an XGBClassifier fit with ``early_stopping_rounds``)
    was disabled; this stub only prints a notice so the test run records
    that the check is pending. Returns ``None``.
    """
    # NOTE(review): the dead commented-out implementation was removed here;
    # restore real assertions when the early-stopping test is revisited.
    print("This test will be re-visited later. ")
class TestEarlyStopping(unittest.TestCase):
    """Early-stopping behaviour of the sklearn-compatible XGBClassifier."""

    def test_early_stopping_nonparallel(self):
        # Two-class subset of the digits data, split deterministically.
        digits = load_digits(2)
        features = digits['data']
        targets = digits['target']
        train_X, test_X, train_y, test_y = train_test_split(
            features, targets, random_state=0)

        def fit_with_patience(rounds):
            # Fit a fresh classifier that stops once the eval-set AUC has
            # not improved for `rounds` consecutive boosting rounds.
            model = xgb.XGBClassifier()
            model.fit(train_X, train_y, early_stopping_rounds=rounds,
                      eval_metric="auc", eval_set=[(test_X, test_y)])
            return model

        first = fit_with_patience(5)
        second = fit_with_patience(4)
        # Both patience settings should land on the same best score,
        # and that score should not already be perfect.
        assert first.best_score == second.best_score
        assert first.best_score != 1
        # A longer patience lets boosting continue until the evaluation
        # set is fit perfectly (overfit check).
        third = fit_with_patience(10)
        assert third.best_score == 1

    # TODO: parallel test for early stopping
    # TODO: comment out for now. Will re-visit later

View File

@ -27,6 +27,10 @@ def test_multiclass_classification():
for train_index, test_index in kf:
xgb_model = xgb.XGBClassifier().fit(X[train_index],y[train_index])
preds = xgb_model.predict(X[test_index])
# test other params in XGBClassifier().fit
preds2 = xgb_model.predict(X[test_index], output_margin=True, ntree_limit=3)
preds3 = xgb_model.predict(X[test_index], output_margin=True, ntree_limit=0)
preds4 = xgb_model.predict(X[test_index], output_margin=False, ntree_limit=3)
labels = y[test_index]
err = sum(1 for i in range(len(preds)) if int(preds[i]>0.5)!=labels[i]) / float(len(preds))
assert err < 0.4
@ -39,8 +43,12 @@ def test_boston_housing_regression():
for train_index, test_index in kf:
xgb_model = xgb.XGBRegressor().fit(X[train_index],y[train_index])
preds = xgb_model.predict(X[test_index])
# test other params in XGBRegressor().fit
preds2 = xgb_model.predict(X[test_index], output_margin=True, ntree_limit=3)
preds3 = xgb_model.predict(X[test_index], output_margin=True, ntree_limit=0)
preds4 = xgb_model.predict(X[test_index], output_margin=False, ntree_limit=3)
labels = y[test_index]
assert mean_squared_error(preds, labels) < 15
assert mean_squared_error(preds, labels) < 25
def test_parameter_tuning():
boston = load_boston()
@ -54,4 +62,3 @@ def test_parameter_tuning():
assert clf.best_score_ < 0.7
assert clf.best_params_ == {'n_estimators': 100, 'max_depth': 4}