Deprecate positional arguments. (#6365)

Deprecate positional arguments in the following functions:

- `__init__` for all classes in the sklearn module.
- `fit` method for all classes in the sklearn module.
- the dask interface.
- `set_info` for the `DMatrix` class.

Refactor the handling of evaluation matrices.
Jiaming Yuan 2020-11-13 11:10:30 +08:00 committed by GitHub
parent e5193c21a1
commit fcfeb4959c
4 changed files with 186 additions and 86 deletions
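
Editorial note: after this change, estimator arguments beyond the documented
positional ones must be passed by keyword; positional use emits a
FutureWarning. A minimal sketch of the new calling convention (not part of
the commit; data mirrors the new test below):

    import xgboost as xgb
    from sklearn.datasets import load_digits

    X, y = load_digits(return_X_y=True, n_class=2)
    w = y  # toy sample weights

    model = xgb.XGBClassifier(n_estimators=2, use_label_encoder=False)
    model.fit(X, y, w)                # warns: pass `sample_weight` as keyword
    model.fit(X, y, sample_weight=w)  # no warning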


@@ -12,6 +12,8 @@ import re
import sys
import json
import warnings
from functools import wraps
from inspect import signature, Parameter
import numpy as np
import scipy.sparse
@@ -369,6 +371,58 @@ class DataIter:
        raise NotImplementedError()

# Notice for `_deprecate_positional_args`
# Authors: Olivier Grisel
#          Gael Varoquaux
#          Andreas Mueller
#          Lars Buitinck
#          Alexandre Gramfort
#          Nicolas Tresegnie
#          Sylvain Marie
# License: BSD 3 clause
def _deprecate_positional_args(f):
    """Decorator for methods that issue warnings for positional arguments.

    Using the keyword-only argument syntax of PEP 3102, arguments after the
    * will issue a warning when passed as positional arguments.

    Modified from sklearn utils.validation.

    Parameters
    ----------
    f : function
        function to check arguments on
    """
    sig = signature(f)
    kwonly_args = []
    all_args = []
    for name, param in sig.parameters.items():
        if param.kind == Parameter.POSITIONAL_OR_KEYWORD:
            all_args.append(name)
        elif param.kind == Parameter.KEYWORD_ONLY:
            kwonly_args.append(name)

    @wraps(f)
    def inner_f(*args, **kwargs):
        extra_args = len(args) - len(all_args)
        if extra_args > 0:
            # ignore first 'self' argument for instance methods
            args_msg = [
                '{}'.format(name) for name, _ in zip(
                    kwonly_args[:extra_args], args[-extra_args:])
            ]
            warnings.warn(
                "Pass `{}` as keyword args. Passing these as positional "
                "arguments will be considered an error in future releases."
                .format(", ".join(args_msg)), FutureWarning)
        for k, arg in zip(sig.parameters, args):
            kwargs[k] = arg
        return f(**kwargs)
    return inner_f
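
To see how the decorator behaves, here is an editorial sketch (not part of
the diff; `configure` is a made-up function wrapped with the private helper
above):

    from xgboost.core import _deprecate_positional_args

    @_deprecate_positional_args
    def configure(name, *, depth=6, eta=0.3):
        return name, depth, eta

    configure("tree", depth=8)  # fine: keyword-only argument passed by keyword
    configure("tree", 8)        # FutureWarning: "Pass `depth` as keyword args. ..."
                                # the call still succeeds: positional values are
                                # re-dispatched to f as keyword arguments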
class DMatrix:  # pylint: disable=too-many-instance-attributes
    """Data Matrix used in XGBoost.
@@ -461,7 +515,8 @@ class DMatrix:  # pylint: disable=too-many-instance-attributes
            _check_call(_LIB.XGDMatrixFree(self.handle))
            self.handle = None

    def set_info(self,
    @_deprecate_positional_args
    def set_info(self, *,
                 label=None, weight=None, base_margin=None,
                 group=None,
                 label_lower_bound=None,


@@ -31,6 +31,7 @@ from .compat import CUDF_concat
from .compat import lazy_isinstance
from .core import DMatrix, DeviceQuantileDMatrix, Booster, _expect, DataIter
from .core import _deprecate_positional_args
from .training import train as worker_train
from .tracker import RabitTracker
from .sklearn import XGBModel, XGBRegressorBase, XGBClassifierBase
@@ -1026,7 +1027,8 @@ class DaskScikitLearnBase(XGBModel):
    _client = None

    # pylint: disable=arguments-differ
    def fit(self, X, y,
    @_deprecate_positional_args
    def fit(self, X, y, *,
            sample_weight=None,
            base_margin=None,
            eval_set=None,
@@ -1050,6 +1052,8 @@ class DaskScikitLearnBase(XGBModel):
        sample_weight_eval_set : list, optional
            A list of the form [L_1, L_2, ..., L_n], where each L_i is a list
            of group weights on the i-th validation set.
        early_stopping_rounds : int
            Activates early stopping.
        verbose : bool
            If `verbose` and an evaluation set is used, writes the evaluation
            metric measured on the validation set to stderr.'''
@@ -1112,9 +1116,11 @@ class DaskXGBRegressor(DaskScikitLearnBase, XGBRegressorBase):
        return self

    # pylint: disable=missing-docstring
    @_deprecate_positional_args
    def fit(self,
            X,
            y,
            *,
            sample_weight=None,
            base_margin=None,
            eval_set=None,
@@ -1195,9 +1201,11 @@ class DaskXGBClassifier(DaskScikitLearnBase, XGBClassifierBase):
        self.evals_result_ = results['history']
        return self

    @_deprecate_positional_args
    def fit(self,
            X,
            y,
            *,
            sample_weight=None,
            base_margin=None,
            eval_set=None,
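
An editorial sketch of the now keyword-only dask interface (assumes a running
dask cluster; the estimators fall back to the current client when none is
assigned, and the data here is illustrative):

    from dask import array as da
    from dask.distributed import Client
    from xgboost.dask import DaskXGBClassifier

    client = Client()  # local cluster
    X = da.random.random((1000, 10), chunks=(100, 10))
    y = da.random.randint(0, 2, size=1000, chunks=(100,))

    clf = DaskXGBClassifier(n_estimators=4)
    clf.fit(X, y, eval_set=[(X, y)])  # sample_weight etc. must now be keywords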


@@ -5,7 +5,7 @@ import copy
import warnings
import json
import numpy as np
from .core import Booster, DMatrix, XGBoostError
from .core import Booster, DMatrix, XGBoostError, _deprecate_positional_args
from .training import train
from .data import _is_cudf_df, _is_cudf_ser, _is_cupy_array
@@ -248,6 +248,51 @@ class XGBModel(XGBModelBase):
        self.gpu_id = gpu_id
        self.validate_parameters = validate_parameters

    def _wrap_evaluation_matrices(self, X, y, group,
                                  sample_weight, base_margin, feature_weights,
                                  eval_set, sample_weight_eval_set, eval_group,
                                  label_transform=lambda x: x):
        '''Convert array_like evaluation matrices into DMatrix'''
        if sample_weight_eval_set is not None:
            assert eval_set is not None
            assert len(sample_weight_eval_set) == len(eval_set)
        if eval_group is not None:
            assert eval_set is not None
            assert len(eval_group) == len(eval_set)

        y = label_transform(y)
        train_dmatrix = DMatrix(data=X, label=y, weight=sample_weight,
                                base_margin=base_margin,
                                missing=self.missing, nthread=self.n_jobs)
        train_dmatrix.set_info(feature_weights=feature_weights, group=group)

        if eval_set is not None:
            if sample_weight_eval_set is None:
                sample_weight_eval_set = [None] * len(eval_set)
            if eval_group is None:
                eval_group = [None] * len(eval_set)
            evals = []
            for i, (valid_X, valid_y) in enumerate(eval_set):
                # Skip the duplicated entry.
                if valid_X is X and valid_y is y and \
                   sample_weight_eval_set[i] is sample_weight and \
                   eval_group[i] is group:
                    evals.append(train_dmatrix)
                else:
                    m = DMatrix(valid_X,
                                label=label_transform(valid_y),
                                missing=self.missing,
                                weight=sample_weight_eval_set[i],
                                nthread=self.n_jobs)
                    m.set_info(group=eval_group[i])
                    evals.append(m)
            nevals = len(evals)
            eval_names = ["validation_{}".format(i) for i in range(nevals)]
            evals = list(zip(evals, eval_names))
        else:
            evals = ()
        return train_dmatrix, evals
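
The helper centralizes the eval-set conversion that each estimator's fit
previously duplicated. An editorial sketch of the behavior from the caller's
side (data is illustrative):

    import numpy as np
    import xgboost as xgb

    rng = np.random.RandomState(0)
    X, y = rng.randn(100, 10), rng.randint(0, 2, 100)

    model = xgb.XGBClassifier(n_estimators=5, use_label_encoder=False)
    # Passing the training pair as an eval set hits the "duplicated entry"
    # branch above, so the training DMatrix is reused rather than rebuilt.
    model.fit(X, y, eval_set=[(X, y)], eval_metric="logloss", verbose=False)
    print(model.evals_result()["validation_0"]["logloss"][-1])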
    def _more_tags(self):
        '''Tags used for scikit-learn data validation.'''
        return {'allow_nan': True, 'no_validation': True}
@@ -445,7 +490,8 @@ class XGBModel(XGBModelBase):
        # Delete the attribute after load
        self.get_booster().set_attr(scikit_learn=None)

    def fit(self, X, y, sample_weight=None, base_margin=None,
    @_deprecate_positional_args
    def fit(self, X, y, *, sample_weight=None, base_margin=None,
            eval_set=None, eval_metric=None, early_stopping_rounds=None,
            verbose=True, xgb_model=None, sample_weight_eval_set=None,
            feature_weights=None,
@@ -524,22 +570,10 @@ class XGBModel(XGBModelBase):
        evals_result = {}

        if eval_set is not None:
            if not isinstance(eval_set[0], (list, tuple)):
                raise TypeError('Unexpected input type for `eval_set`')
            if sample_weight_eval_set is None:
                sample_weight_eval_set = [None] * len(eval_set)
            else:
                assert len(eval_set) == len(sample_weight_eval_set)
            evals = list(
                DMatrix(eval_set[i][0], label=eval_set[i][1], missing=self.missing,
                        weight=sample_weight_eval_set[i], nthread=self.n_jobs)
                for i in range(len(eval_set)))
            evals = list(zip(evals, ["validation_{}".format(i) for i in
                                     range(len(evals))]))
        else:
            evals = ()
        train_dmatrix, evals = self._wrap_evaluation_matrices(
            X, y, group=None, sample_weight=sample_weight, base_margin=base_margin,
            feature_weights=feature_weights, eval_set=eval_set,
            sample_weight_eval_set=sample_weight_eval_set, eval_group=None)

        params = self.get_xgb_params()

        if callable(self.objective):
@@ -775,11 +809,13 @@ class XGBModel(XGBModelBase):
''')
class XGBClassifier(XGBModel, XGBClassifierBase):
    # pylint: disable=missing-docstring,invalid-name,too-many-instance-attributes
    def __init__(self, objective="binary:logistic", use_label_encoder=True, **kwargs):
    @_deprecate_positional_args
    def __init__(self, *, objective="binary:logistic", use_label_encoder=True, **kwargs):
        self.use_label_encoder = use_label_encoder
        super().__init__(objective=objective, **kwargs)

    def fit(self, X, y, sample_weight=None, base_margin=None,
    @_deprecate_positional_args
    def fit(self, X, y, *, sample_weight=None, base_margin=None,
            eval_set=None, eval_metric=None,
            early_stopping_rounds=None, verbose=True, xgb_model=None,
            sample_weight_eval_set=None, feature_weights=None, callbacks=None):
@@ -850,25 +886,6 @@ class XGBClassifier(XGBModel, XGBClassifierBase):
            label_transform = self._le.transform
        else:
            label_transform = (lambda x: x)
        training_labels = label_transform(y)

        if eval_set is not None:
            if sample_weight_eval_set is None:
                sample_weight_eval_set = [None] * len(eval_set)
            else:
                assert len(sample_weight_eval_set) == len(eval_set)
            evals = list(
                DMatrix(eval_set[i][0],
                        label=label_transform(eval_set[i][1]),
                        missing=self.missing, weight=sample_weight_eval_set[i],
                        nthread=self.n_jobs)
                for i in range(len(eval_set))
            )
            nevals = len(evals)
            eval_names = ["validation_{}".format(i) for i in range(nevals)]
            evals = list(zip(evals, eval_names))
        else:
            evals = ()

        if len(X.shape) != 2:
            # Simply raise an error here since there might be many
@@ -879,10 +896,11 @@ class XGBClassifier(XGBModel, XGBClassifierBase):
        self._features_count = X.shape[1]
        self.n_features_in_ = self._features_count

        train_dmatrix = DMatrix(X, label=training_labels, weight=sample_weight,
                                base_margin=base_margin,
                                missing=self.missing, nthread=self.n_jobs)
        train_dmatrix.set_info(feature_weights=feature_weights)
        train_dmatrix, evals = self._wrap_evaluation_matrices(
            X, y, group=None, sample_weight=sample_weight, base_margin=base_margin,
            feature_weights=feature_weights,
            eval_set=eval_set, sample_weight_eval_set=sample_weight_eval_set,
            eval_group=None, label_transform=label_transform)

        self._Booster = train(xgb_options, train_dmatrix,
                              self.get_num_boosting_rounds(),
@@ -1064,7 +1082,8 @@ class XGBClassifier(XGBModel, XGBClassifierBase):
''')
class XGBRFClassifier(XGBClassifier):
    # pylint: disable=missing-docstring
    def __init__(self,
    @_deprecate_positional_args
    def __init__(self, *,
                 learning_rate=1,
                 subsample=0.8,
                 colsample_bynode=0.8,
@@ -1092,7 +1111,8 @@ class XGBRFClassifier(XGBClassifier):
    ['estimators', 'model', 'objective'])
class XGBRegressor(XGBModel, XGBRegressorBase):
    # pylint: disable=missing-docstring
    def __init__(self, objective="reg:squarederror", **kwargs):
    @_deprecate_positional_args
    def __init__(self, *, objective="reg:squarederror", **kwargs):
        super().__init__(objective=objective, **kwargs)
@@ -1104,7 +1124,8 @@ class XGBRegressor(XGBModel, XGBRegressorBase):
''')
class XGBRFRegressor(XGBRegressor):
    # pylint: disable=missing-docstring
    def __init__(self, learning_rate=1, subsample=0.8, colsample_bynode=0.8,
    @_deprecate_positional_args
    def __init__(self, *, learning_rate=1, subsample=0.8, colsample_bynode=0.8,
                 reg_lambda=1e-5, **kwargs):
        super().__init__(learning_rate=learning_rate, subsample=subsample,
                         colsample_bynode=colsample_bynode,
@@ -1160,7 +1181,8 @@ class XGBRFRegressor(XGBRegressor):
''')
class XGBRanker(XGBModel):
    # pylint: disable=missing-docstring,too-many-arguments,invalid-name
    def __init__(self, objective='rank:pairwise', **kwargs):
    @_deprecate_positional_args
    def __init__(self, *, objective='rank:pairwise', **kwargs):
        super().__init__(objective=objective, **kwargs)
        if callable(self.objective):
            raise ValueError(
@@ -1168,7 +1190,8 @@ class XGBRanker(XGBModel):
        if "rank:" not in self.objective:
            raise ValueError("please use XGBRanker for ranking task")

    def fit(self, X, y, group, sample_weight=None, base_margin=None,
    @_deprecate_positional_args
    def fit(self, X, y, *, group, sample_weight=None, base_margin=None,
            eval_set=None, sample_weight_eval_set=None,
            eval_group=None, eval_metric=None,
            early_stopping_rounds=None, verbose=False, xgb_model=None,
@@ -1269,37 +1292,15 @@ class XGBRanker(XGBModel):
                raise ValueError(
                    "group is required for all eval datasets for ranking task")

        def _dmat_init(group, **params):
            ret = DMatrix(**params)
            ret.set_group(group)
            return ret

        self.n_features_in_ = X.shape[1]

        train_dmatrix = DMatrix(data=X, label=y, weight=sample_weight,
                                base_margin=base_margin,
                                missing=self.missing, nthread=self.n_jobs)
        train_dmatrix.set_info(feature_weights=feature_weights)
        train_dmatrix.set_group(group)
        train_dmatrix, evals = self._wrap_evaluation_matrices(
            X, y, group=group, sample_weight=sample_weight, base_margin=base_margin,
            feature_weights=feature_weights, eval_set=eval_set,
            sample_weight_eval_set=sample_weight_eval_set,
            eval_group=eval_group)

        evals_result = {}
        if eval_set is not None:
            if sample_weight_eval_set is None:
                sample_weight_eval_set = [None] * len(eval_set)
            evals = [_dmat_init(eval_group[i],
                                data=eval_set[i][0],
                                label=eval_set[i][1],
                                missing=self.missing,
                                weight=sample_weight_eval_set[i],
                                nthread=self.n_jobs)
                     for i in range(len(eval_set))]
            nevals = len(evals)
            eval_names = ["eval_{}".format(i) for i in range(nevals)]
            evals = list(zip(evals, eval_names))
        else:
            evals = ()

        params = self.get_xgb_params()
        feval = eval_metric if callable(eval_metric) else None


@@ -29,7 +29,7 @@ def test_binary_classification():
    from sklearn.datasets import load_digits
    from sklearn.model_selection import KFold

    digits = load_digits(2)
    digits = load_digits(n_class=2)
    y = digits['target']
    X = digits['data']
    kf = KFold(n_splits=2, shuffle=True, random_state=rng)
@@ -93,7 +93,7 @@ def test_ranking():
              'learning_rate': 0.1, 'gamma': 1.0, 'min_child_weight': 0.1,
              'max_depth': 6, 'n_estimators': 4}
    model = xgb.sklearn.XGBRanker(**params)
    model.fit(x_train, y_train, train_group,
    model.fit(x_train, y_train, group=train_group,
              eval_set=[(x_valid, y_valid)], eval_group=[valid_group])
    pred = model.predict(x_test)
@@ -163,7 +163,7 @@ def test_stacking_classification():
def test_feature_importances_weight():
    from sklearn.datasets import load_digits

    digits = load_digits(2)
    digits = load_digits(n_class=2)
    y = digits['target']
    X = digits['data']
    xgb_model = xgb.XGBClassifier(random_state=0,
@@ -201,7 +201,7 @@ def test_feature_importances_weight():
def test_feature_importances_gain():
    from sklearn.datasets import load_digits

    digits = load_digits(2)
    digits = load_digits(n_class=2)
    y = digits['target']
    X = digits['data']
    xgb_model = xgb.XGBClassifier(
@@ -240,7 +240,7 @@ def test_feature_importances_gain():
def test_select_feature():
    from sklearn.datasets import load_digits
    from sklearn.feature_selection import SelectFromModel

    digits = load_digits(2)
    digits = load_digits(n_class=2)
    y = digits['target']
    X = digits['data']
    cls = xgb.XGBClassifier()
@@ -373,7 +373,7 @@ def test_classification_with_custom_objective():
        hess = y_pred * (1.0 - y_pred)
        return grad, hess

    digits = load_digits(2)
    digits = load_digits(n_class=2)
    y = digits['target']
    X = digits['data']
    kf = KFold(n_splits=2, shuffle=True, random_state=rng)
@@ -470,7 +470,7 @@ def test_sklearn_nfolds_cv():
    from sklearn.datasets import load_digits
    from sklearn.model_selection import StratifiedKFold

    digits = load_digits(3)
    digits = load_digits(n_class=3)
    X = digits['data']
    y = digits['target']
    dm = xgb.DMatrix(X, label=y)
@@ -502,7 +502,7 @@ def test_sklearn_nfolds_cv():
def test_split_value_histograms():
    from sklearn.datasets import load_digits

    digits_2class = load_digits(2)
    digits_2class = load_digits(n_class=2)
    X = digits_2class['data']
    y = digits_2class['target']
@@ -588,7 +588,7 @@ def test_sklearn_clone():
def test_sklearn_get_default_params():
    from sklearn.datasets import load_digits
    digits_2class = load_digits(2)
    digits_2class = load_digits(n_class=2)
    X = digits_2class['data']
    y = digits_2class['target']
    cls = xgb.XGBClassifier()
@@ -886,6 +886,42 @@ def test_parameter_validation():
    assert len(output) == 0


def test_deprecate_position_arg():
    from sklearn.datasets import load_digits
    X, y = load_digits(return_X_y=True, n_class=2)
    w = y

    with pytest.warns(FutureWarning):
        xgb.XGBRegressor(3, learning_rate=0.1)
    model = xgb.XGBRegressor(n_estimators=1)
    with pytest.warns(FutureWarning):
        model.fit(X, y, w)

    with pytest.warns(FutureWarning):
        xgb.XGBClassifier(1, use_label_encoder=False)
    model = xgb.XGBClassifier(n_estimators=1, use_label_encoder=False)
    with pytest.warns(FutureWarning):
        model.fit(X, y, w)

    with pytest.warns(FutureWarning):
        xgb.XGBRanker('rank:ndcg', learning_rate=0.1)
    model = xgb.XGBRanker(n_estimators=1)
    group = np.repeat(1, X.shape[0])
    with pytest.warns(FutureWarning):
        model.fit(X, y, group)

    with pytest.warns(FutureWarning):
        xgb.XGBRFRegressor(1, learning_rate=0.1)
    model = xgb.XGBRFRegressor(n_estimators=1)
    with pytest.warns(FutureWarning):
        model.fit(X, y, w)

    with pytest.warns(FutureWarning):
        xgb.XGBRFClassifier(1, use_label_encoder=True)
    model = xgb.XGBRFClassifier(n_estimators=1)
    with pytest.warns(FutureWarning):
        model.fit(X, y, w)


@pytest.mark.skipif(**tm.no_pandas())
def test_pandas_input():
    import pandas as pd