From e77df138157916f716845996f07d855e4f5c221c Mon Sep 17 00:00:00 2001
From: tqchen
Date: Sun, 17 Aug 2014 18:49:54 -0700
Subject: [PATCH] ok

---
 python/example/demo.py | 7 +++++++
 python/xgboost.py      | 3 ---
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/python/example/demo.py b/python/example/demo.py
index e14c806aa..a099f56bf 100755
--- a/python/example/demo.py
+++ b/python/example/demo.py
@@ -76,8 +76,15 @@ def logregobj(preds, dtrain):
     return grad, hess
 
 # user defined evaluation function, return a pair metric_name, result
+# NOTE: when you use a customized loss function, the default prediction value is the margin
+# this may make built-in evaluation metrics not function properly
+# for example, with logistic loss the prediction is the score before the logistic transformation
+# while the built-in evaluation error assumes the input is after the logistic transformation
+# keep this in mind when you use the customization; you may need to write a customized evaluation function
 def evalerror(preds, dtrain):
     labels = dtrain.get_label()
+    # return a pair metric_name, result
+    # since preds are margins (before the logistic transformation), cut off at 0
     return 'error', float(sum(labels != (preds > 0.0))) / len(labels)
 
 # training with customized objective, we can also do step by step training
diff --git a/python/xgboost.py b/python/xgboost.py
index c7a04d4c3..f47642898 100644
--- a/python/xgboost.py
+++ b/python/xgboost.py
@@ -226,9 +226,6 @@ def train(params, dtrain, num_boost_round = 10, evals = [], obj=None, feval=None
             if len(evals) != 0:
                 sys.stderr.write(evaluate(bst, evals, i, feval)+'\n')
     else:
-        if len(evals) != 0 and feval == None:
-            print 'you need to provide your own evaluation function'
-        # try customized objective function
         for i in range(num_boost_round):
            pred = bst.predict( dtrain )
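
For reference, a minimal usage sketch of the customized objective and evaluation function this patch documents, assuming the train() signature shown in the hunk above (train(params, dtrain, num_boost_round=10, evals=[], obj=None, feval=None)); the data file paths and parameter values are placeholders, not taken from the patch:

import numpy as np
import xgboost as xgb

# customized objective: logistic loss, computed from raw margin scores
def logregobj(preds, dtrain):
    labels = dtrain.get_label()
    preds = 1.0 / (1.0 + np.exp(-preds))
    grad = preds - labels
    hess = preds * (1.0 - preds)
    return grad, hess

# customized evaluation: error rate; preds are margins here, so cut off at 0.0
def evalerror(preds, dtrain):
    labels = dtrain.get_label()
    return 'error', float(sum(labels != (preds > 0.0))) / len(labels)

# placeholder data paths and parameters
dtrain = xgb.DMatrix('agaricus.txt.train')
dtest = xgb.DMatrix('agaricus.txt.test')
param = {'max_depth': 2, 'eta': 1}
watchlist = [(dtest, 'eval'), (dtrain, 'train')]

# obj and feval are passed together: with a customized objective the
# predictions handed to evalerror are margins, not probabilities
bst = xgb.train(param, dtrain, num_boost_round=2, evals=watchlist,
                obj=logregobj, feval=evalerror)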