diff --git a/demo/guide-python/README.md b/demo/guide-python/README.md
index 6fd6a090c..de6e8e024 100644
--- a/demo/guide-python/README.md
+++ b/demo/guide-python/README.md
@@ -1,7 +1,6 @@
 XGBoost Python Feature Walkthrough
 ==================================
 * [Basic walkthrough of wrappers](basic_walkthrough.py)
-* [Customize loss function, and evaluation metric](custom_objective.py)
 * [Re-implement RMSLE as customized metric and objective](custom_rmsle.py)
 * [Re-Implement `multi:softmax` objective as customized objective](custom_softmax.py)
 * [Boosting from existing prediction](boost_from_prediction.py)
diff --git a/demo/guide-python/custom_objective.py b/demo/guide-python/custom_objective.py
deleted file mode 100644
index 6fa8bab52..000000000
--- a/demo/guide-python/custom_objective.py
+++ /dev/null
@@ -1,61 +0,0 @@
-###
-# advanced: customized loss function
-#
-import os
-import numpy as np
-import xgboost as xgb
-
-print('start running example to used customized objective function')
-
-CURRENT_DIR = os.path.dirname(__file__)
-dtrain = xgb.DMatrix(os.path.join(CURRENT_DIR, '../data/agaricus.txt.train'))
-dtest = xgb.DMatrix(os.path.join(CURRENT_DIR, '../data/agaricus.txt.test'))
-
-# note: what we are getting is margin value in prediction you must know what
-# you are doing
-param = {'max_depth': 2, 'eta': 1, 'objective': 'reg:logistic'}
-watchlist = [(dtest, 'eval'), (dtrain, 'train')]
-num_round = 10
-
-
-# user define objective function, given prediction, return gradient and second
-# order gradient this is log likelihood loss
-def logregobj(preds, dtrain):
-    labels = dtrain.get_label()
-    preds = 1.0 / (1.0 + np.exp(-preds))  # transform raw leaf weight
-    grad = preds - labels
-    hess = preds * (1.0 - preds)
-    return grad, hess
-
-
-# user defined evaluation function, return a pair metric_name, result
-
-# NOTE: when you do customized loss function, the default prediction value is
-# margin, which means the prediction is score before logistic transformation.
-def evalerror(preds, dtrain):
-    labels = dtrain.get_label()
-    preds = 1.0 / (1.0 + np.exp(-preds))  # transform raw leaf weight
-    # return a pair metric_name, result. The metric name must not contain a
-    # colon (:) or a space
-    return 'my-error', float(sum(labels != (preds > 0.5))) / len(labels)
-
-
-py_evals_result = {}
-
-# training with customized objective, we can also do step by step training
-# simply look at training.py's implementation of train
-py_params = param.copy()
-py_params.update({'disable_default_eval_metric': True})
-py_logreg = xgb.train(py_params, dtrain, num_round, watchlist, obj=logregobj,
-                      feval=evalerror, evals_result=py_evals_result)
-
-evals_result = {}
-params = param.copy()
-params.update({'eval_metric': 'error'})
-logreg = xgb.train(params, dtrain, num_boost_round=num_round, evals=watchlist,
-                   evals_result=evals_result)
-
-
-for i in range(len(py_evals_result['train']['my-error'])):
-    np.testing.assert_almost_equal(py_evals_result['train']['my-error'],
-                                   evals_result['train']['error'])
diff --git a/tests/python/test_demos.py b/tests/python/test_demos.py
index 2df773d22..e4d1b804c 100644
--- a/tests/python/test_demos.py
+++ b/tests/python/test_demos.py
@@ -87,12 +87,6 @@ def test_generalized_linear_model_demo():
     subprocess.check_call(cmd)
 
 
-def test_custom_objective_demo():
-    script = os.path.join(PYTHON_DEMO_DIR, 'custom_objective.py')
-    cmd = ['python', script]
-    subprocess.check_call(cmd)
-
-
 def test_cross_validation_demo():
     script = os.path.join(PYTHON_DEMO_DIR, 'cross_validation.py')
     cmd = ['python', script]
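For anyone landing here after this removal: the two demos that stay linked in the README, custom_rmsle.py and custom_softmax.py, cover the same `obj`/`feval` hooks in more depth. As a quick reference, here is a condensed sketch of the pattern the deleted script demonstrated, a custom logistic objective plus a matching error metric wired into `xgb.train`. The callbacks and parameters mirror the deleted file; the synthetic data is an assumption made so the snippet runs without the agaricus files shipped in `demo/data`.

```python
# Condensed sketch of the deleted custom_objective.py demo.
# Assumption: synthetic data replaces ../data/agaricus.txt.train so the
# snippet is self-contained.
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(1994)
X = rng.randn(100, 10)
y = (X[:, 0] > 0).astype(np.float64)
dtrain = xgb.DMatrix(X, label=y)


def logregobj(preds, dtrain):
    """Log-likelihood loss: return per-sample gradient and hessian."""
    labels = dtrain.get_label()
    # preds arrive as raw margins, so apply the sigmoid first.
    preds = 1.0 / (1.0 + np.exp(-preds))
    return preds - labels, preds * (1.0 - preds)


def evalerror(preds, dtrain):
    """Classification error; the metric name must not contain ':' or ' '."""
    labels = dtrain.get_label()
    preds = 1.0 / (1.0 + np.exp(-preds))
    return 'my-error', float(np.sum(labels != (preds > 0.5))) / len(labels)


evals_result = {}
booster = xgb.train(
    {'max_depth': 2, 'eta': 1, 'disable_default_eval_metric': True},
    dtrain, num_boost_round=10, evals=[(dtrain, 'train')],
    obj=logregobj, feval=evalerror, evals_result=evals_result)
```

Because a custom objective receives raw margins, both callbacks apply the logistic transformation themselves before computing the gradient `p - y` and hessian `p * (1 - p)` of the log loss, exactly as the deleted demo's comments noted.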