tqchen 2014-08-17 18:49:54 -07:00
parent 301685e0a4
commit e77df13815
2 changed files with 7 additions and 3 deletions


@@ -76,8 +76,15 @@ def logregobj(preds, dtrain):
    return grad, hess
# user defined evaluation function, return a pair metric_name, result
# NOTE: when you use a customized loss function, the default prediction value is the margin,
# which may make the built-in evaluation metrics not function properly
# for example, with logistic loss the prediction is the score before the logistic transformation,
# while the built-in evaluation error assumes the input is after the logistic transformation
# keep this in mind when you use the customization; you may need to write a customized evaluation function
def evalerror(preds, dtrain):
    labels = dtrain.get_label()
    # return a pair metric_name, result
    # since preds are margin (before logistic transformation, cutoff at 0)
    return 'error', float(sum(labels != (preds > 0.0))) / len(labels)
# training with customized objective, we can also do step by step training
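For reference, the customized objective that this evaluation function is meant to pair with only shows its tail (`return grad, hess`) at the top of this hunk. A minimal sketch of such an objective for logistic loss, reconstructed from the surrounding comments rather than copied verbatim from the file, looks like this:

import numpy as np

def logregobj(preds, dtrain):
    # preds are raw margin scores, so apply the sigmoid before computing the loss derivatives
    labels = dtrain.get_label()
    preds = 1.0 / (1.0 + np.exp(-preds))
    grad = preds - labels          # first order gradient of logistic loss
    hess = preds * (1.0 - preds)   # second order gradient (hessian)
    return grad, hess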


@@ -226,9 +226,6 @@ def train(params, dtrain, num_boost_round = 10, evals = [], obj=None, feval=None
            if len(evals) != 0:
                sys.stderr.write(evaluate(bst, evals, i, feval)+'\n')
    else:
        if len(evals) != 0 and feval == None:
            print 'you need to provide your own evaluation function'
        # try customized objective function
        for i in range(num_boost_round):
            pred = bst.predict( dtrain )
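Putting the two pieces together, here is a minimal usage sketch that matches the train signature shown in the hunk header above; the parameter values and data files are illustrative assumptions, not part of this commit:

import xgboost as xgb

# illustrative data and parameters -- assumptions for the sketch only
dtrain = xgb.DMatrix('agaricus.txt.train')
dtest = xgb.DMatrix('agaricus.txt.test')
param = {'max_depth': 2, 'eta': 1, 'silent': 1}
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
num_round = 2

# pass the customized objective and evaluation function through obj and feval,
# as in train(params, dtrain, num_boost_round, evals, obj, feval)
bst = xgb.train(param, dtrain, num_round, watchlist, obj=logregobj, feval=evalerror)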