Brought back the silent parameter for the SKLearn-like API and marked it deprecated. (#4255)

* Brought back the silent parameter for the SKLearn-like API and marked it deprecated.
  - Added a deprecation notice and warning.
  - Removed silent from the tests for the SKLearn-like API.
parent: b833b642ec
commit: 4352fcdb15
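For orientation, a minimal usage sketch of what this change restores (illustrative, not part of the diff; it assumes the warning fires when the wrapper assembles its parameters, e.g. via get_xgb_params or during fit, as the hunk at line 240 below suggests):

    import warnings
    import xgboost as xgb

    # Deprecated spelling: accepted again after this commit, but warns.
    model = xgb.XGBClassifier(silent=True)
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        model.get_xgb_params()  # parameter assembly emits the warning
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)

    # Supported spelling going forward: verbosity 0 (silent) - 3 (debug).
    model = xgb.XGBClassifier(verbosity=0)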
Changes to the scikit-learn-like wrapper:

@@ -64,6 +64,8 @@ class XGBModel(XGBModelBase):
         Number of trees to fit.
     verbosity : int
         The degree of verbosity. Valid values are 0 (silent) - 3 (debug).
+    silent : boolean
+        Whether to print messages while running boosting. Deprecated. Use verbosity instead.
     objective : string or callable
         Specify the learning task and the corresponding learning objective or
         a custom objective function to be used (see note below).
@@ -134,7 +136,7 @@ class XGBModel(XGBModelBase):
     """

     def __init__(self, max_depth=3, learning_rate=0.1, n_estimators=100,
-                 verbosity=1, objective="reg:linear", booster='gbtree',
+                 verbosity=1, silent=None, objective="reg:linear", booster='gbtree',
                  n_jobs=1, nthread=None, gamma=0, min_child_weight=1,
                  max_delta_step=0, subsample=1, colsample_bytree=1, colsample_bylevel=1,
                  colsample_bynode=1, reg_alpha=0, reg_lambda=1, scale_pos_weight=1,
@@ -146,6 +148,7 @@ class XGBModel(XGBModelBase):
         self.learning_rate = learning_rate
         self.n_estimators = n_estimators
         self.verbosity = verbosity
+        self.silent = silent
         self.objective = objective
         self.booster = booster
         self.gamma = gamma
@@ -240,6 +243,16 @@ class XGBModel(XGBModelBase):
         else:
             xgb_params['nthread'] = n_jobs

+        if 'silent' in xgb_params and xgb_params['silent'] is not None:
+            warnings.warn('The silent parameter is deprecated. '
+                          'Please use verbosity instead.',
+                          DeprecationWarning)
+            # TODO(canonizer): set verbosity explicitly if silent is removed from xgboost,
+            # but remains in this API
+        else:
+            # silent=None shouldn't be passed to xgboost
+            xgb_params.pop('silent', None)
+
         if xgb_params['nthread'] <= 0:
             xgb_params.pop('nthread', None)
         return xgb_params
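Taken on its own, the block added above is a standard deprecation shim: warn when the deprecated key carries a real value, and strip it when it is None so it never reaches core xgboost. A minimal standalone sketch of the same pattern (the function name here is illustrative, not from the diff):

    import warnings

    def _warn_or_drop_silent(xgb_params):
        if xgb_params.get('silent') is not None:
            # A real value was supplied: keep it, but warn the caller.
            warnings.warn('The silent parameter is deprecated. '
                          'Please use verbosity instead.', DeprecationWarning)
        else:
            # silent=None is only a placeholder; do not forward it.
            xgb_params.pop('silent', None)
        return xgb_params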
@@ -588,7 +601,8 @@ class XGBClassifier(XGBModel, XGBClassifierBase):
     __doc__ = "Implementation of the scikit-learn API for XGBoost classification.\n\n" \
         + '\n'.join(XGBModel.__doc__.split('\n')[2:])

-    def __init__(self, max_depth=3, learning_rate=0.1, n_estimators=100, verbosity=1,
+    def __init__(self, max_depth=3, learning_rate=0.1, n_estimators=100,
+                 verbosity=1, silent=None,
                  objective="binary:logistic", booster='gbtree',
                  n_jobs=1, nthread=None, gamma=0, min_child_weight=1, max_delta_step=0,
                  subsample=1, colsample_bytree=1, colsample_bylevel=1,
@@ -596,7 +610,7 @@ class XGBClassifier(XGBModel, XGBClassifierBase):
                  base_score=0.5, random_state=0, seed=None, missing=None, **kwargs):
         super(XGBClassifier, self).__init__(
             max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators,
-            verbosity=verbosity, objective=objective, booster=booster,
+            verbosity=verbosity, silent=silent, objective=objective, booster=booster,
             n_jobs=n_jobs, nthread=nthread, gamma=gamma,
             min_child_weight=min_child_weight, max_delta_step=max_delta_step,
             subsample=subsample, colsample_bytree=colsample_bytree,
@@ -874,7 +888,8 @@ class XGBRFClassifier(XGBClassifier):
         + "for XGBoost random forest classification.\n\n"\
         + '\n'.join(XGBModel.__doc__.split('\n')[2:])

-    def __init__(self, max_depth=3, learning_rate=1, n_estimators=100, verbosity=1,
+    def __init__(self, max_depth=3, learning_rate=1, n_estimators=100,
+                 verbosity=1, silent=None,
                  objective="binary:logistic", n_jobs=1, nthread=None, gamma=0,
                  min_child_weight=1, max_delta_step=0, subsample=0.8, colsample_bytree=1,
                  colsample_bylevel=1, colsample_bynode=0.8, reg_alpha=0, reg_lambda=1,
@@ -882,7 +897,7 @@ class XGBRFClassifier(XGBClassifier):
                  missing=None, **kwargs):
         super(XGBRFClassifier, self).__init__(
             max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators,
-            verbosity=verbosity, objective=objective, booster='gbtree',
+            verbosity=verbosity, silent=silent, objective=objective, booster='gbtree',
             n_jobs=n_jobs, nthread=nthread, gamma=gamma,
             min_child_weight=min_child_weight, max_delta_step=max_delta_step,
             subsample=subsample, colsample_bytree=colsample_bytree,
@@ -912,7 +927,8 @@ class XGBRFRegressor(XGBRegressor):
         + "for XGBoost random forest regression.\n\n"\
         + '\n'.join(XGBModel.__doc__.split('\n')[2:])

-    def __init__(self, max_depth=3, learning_rate=1, n_estimators=100, verbosity=1,
+    def __init__(self, max_depth=3, learning_rate=1, n_estimators=100,
+                 verbosity=1, silent=None,
                  objective="reg:linear", n_jobs=1, nthread=None, gamma=0,
                  min_child_weight=1, max_delta_step=0, subsample=0.8, colsample_bytree=1,
                  colsample_bylevel=1, colsample_bynode=0.8, reg_alpha=0, reg_lambda=1,
@@ -920,7 +936,7 @@ class XGBRFRegressor(XGBRegressor):
                  missing=None, **kwargs):
         super(XGBRFRegressor, self).__init__(
             max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators,
-            verbosity=verbosity, objective=objective, booster='gbtree',
+            verbosity=verbosity, silent=silent, objective=objective, booster='gbtree',
             n_jobs=n_jobs, nthread=nthread, gamma=gamma,
             min_child_weight=min_child_weight, max_delta_step=max_delta_step,
             subsample=subsample, colsample_bytree=colsample_bytree,
@@ -952,6 +968,8 @@ class XGBRanker(XGBModel):
         Number of boosted trees to fit.
     verbosity : int
         The degree of verbosity. Valid values are 0 (silent) - 3 (debug).
+    silent : boolean
+        Whether to print messages while running boosting. Deprecated. Use verbosity instead.
     objective : string
         Specify the learning task and the corresponding learning objective.
         The objective name must start with "rank:".
@@ -1037,7 +1055,7 @@ class XGBRanker(XGBModel):
     """

     def __init__(self, max_depth=3, learning_rate=0.1, n_estimators=100,
-                 verbosity=1, objective="rank:pairwise", booster='gbtree',
+                 verbosity=1, silent=None, objective="rank:pairwise", booster='gbtree',
                  n_jobs=-1, nthread=None, gamma=0, min_child_weight=1, max_delta_step=0,
                  subsample=1, colsample_bytree=1, colsample_bylevel=1, colsample_bynode=1,
                  reg_alpha=0, reg_lambda=1, scale_pos_weight=1,
@@ -1045,7 +1063,7 @@ class XGBRanker(XGBModel):

         super(XGBRanker, self).__init__(
             max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators,
-            verbosity=verbosity, objective=objective, booster=booster,
+            verbosity=verbosity, silent=silent, objective=objective, booster=booster,
             n_jobs=n_jobs, nthread=nthread, gamma=gamma,
             min_child_weight=min_child_weight, max_delta_step=max_delta_step,
             subsample=subsample, colsample_bytree=colsample_bytree,
Changes to the tests for the scikit-learn-like API:

@@ -380,7 +380,7 @@ def test_sklearn_nfolds_cv():
     params = {
         'max_depth': 2,
         'eta': 1,
-        'silent': 1,
+        'verbosity': 0,
         'objective':
         'multi:softprob',
         'num_class': 3
@@ -408,7 +408,7 @@ def test_split_value_histograms():
     y = digits_2class['target']

     dm = xgb.DMatrix(X, label=y)
-    params = {'max_depth': 6, 'eta': 0.01, 'silent': 1,
+    params = {'max_depth': 6, 'eta': 0.01, 'verbosity': 0,
               'objective': 'binary:logistic'}

     gbdt = xgb.train(params, dm, num_boost_round=10)
@@ -604,7 +604,7 @@ def test_RFECV():
     X, y = load_boston(return_X_y=True)
     bst = xgb.XGBClassifier(booster='gblinear', learning_rate=0.1,
                             n_estimators=10, n_jobs=1, objective='reg:linear',
-                            random_state=0, silent=True)
+                            random_state=0, verbosity=0)
     rfecv = RFECV(
         estimator=bst, step=1, cv=3, scoring='neg_mean_squared_error')
     rfecv.fit(X, y)
@@ -614,7 +614,7 @@ def test_RFECV():
     bst = xgb.XGBClassifier(booster='gblinear', learning_rate=0.1,
                             n_estimators=10, n_jobs=1,
                             objective='binary:logistic',
-                            random_state=0, silent=True)
+                            random_state=0, verbosity=0)
     rfecv = RFECV(estimator=bst, step=1, cv=3, scoring='roc_auc')
     rfecv.fit(X, y)

@@ -625,7 +625,7 @@ def test_RFECV():
                             n_estimators=10, n_jobs=1,
                             objective='multi:softprob',
                             random_state=0, reg_alpha=0.001, reg_lambda=0.01,
-                            scale_pos_weight=0.5, silent=True)
+                            scale_pos_weight=0.5, verbosity=0)
     rfecv = RFECV(estimator=bst, step=1, cv=3, scoring='neg_log_loss')
     rfecv.fit(X, y)

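For downstream users migrating the same way the tests above were migrated, the rough equivalence is silent=True (or 'silent': 1) → verbosity=0. If code must keep passing silent during a transition period, the new warning can be filtered narrowly; a usage sketch under that assumption, not part of this commit:

    import warnings
    import xgboost as xgb

    with warnings.catch_warnings():
        warnings.filterwarnings('ignore',
                                message='The silent parameter is deprecated',
                                category=DeprecationWarning)
        legacy_model = xgb.XGBClassifier(silent=True)
        legacy_model.get_xgb_params()  # would otherwise emit the warning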