Add option to disable default metric (#3606)
commit 983cb0b374
parent 993e62b9e7
@@ -33,9 +33,9 @@ def logregobj(preds, dtrain):
 # Take this in mind when you use the customization, and maybe you need write customized evaluation function
 def evalerror(preds, dtrain):
     labels = dtrain.get_label()
-    # return a pair metric_name, result. The metric name must not contain a colon (:)
+    # return a pair metric_name, result. The metric name must not contain a colon (:) or a space
     # since preds are margin(before logistic transformation, cutoff at 0)
-    return 'error', float(sum(labels != (preds > 0.0))) / len(labels)
+    return 'my-error', float(sum(labels != (preds > 0.0))) / len(labels)

 # training with customized objective, we can also do step by step training
 # simply look at xgboost.py's implementation of train
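The renamed 'my-error' metric is still wired into training the same way the demo does it, through the obj and feval arguments of xgb.train. A minimal sketch of that usage, assuming the logregobj and evalerror definitions above; the data paths and parameter values are placeholders, not part of this commit:

    import xgboost as xgb

    # placeholder paths; the demo itself reads the agaricus example data
    dtrain = xgb.DMatrix('agaricus.txt.train')
    dtest = xgb.DMatrix('agaricus.txt.test')

    param = {'max_depth': 2, 'eta': 1, 'silent': 1}
    watchlist = [(dtest, 'eval'), (dtrain, 'train')]

    # obj plugs in the customized objective, feval the customized evaluation function
    bst = xgb.train(param, dtrain, num_boost_round=2, evals=watchlist,
                    obj=logregobj, feval=evalerror)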
@@ -31,6 +31,10 @@ General Parameters

   - Number of parallel threads used to run XGBoost

+* ``disable_default_eval_metric`` [default=0]
+
+  - Flag to disable default metric. Set to >0 to disable.
+
 * ``num_pbuffer`` [set automatically by XGBoost, no need to be set by user]

   - Size of prediction buffer, normally set to number of training instances. The buffers are used to save the prediction results of last boosting step.
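Since disable_default_eval_metric is documented as an ordinary learner parameter, it can be passed in the usual params dict. A small sketch, assuming a DMatrix named dtrain already exists; everything except the new flag is a placeholder:

    import xgboost as xgb

    params = {
        'objective': 'binary:logistic',
        # new flag from this commit: any value > 0 suppresses the
        # objective's default evaluation metric
        'disable_default_eval_metric': 1,
    }
    bst = xgb.train(params, dtrain, num_boost_round=10,
                    evals=[(dtrain, 'train')])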
@@ -92,6 +92,8 @@ struct LearnerTrainParam : public dmlc::Parameter<LearnerTrainParam> {
   int nthread;
   // flag to print out detailed breakdown of runtime
   int debug_verbose;
+  // flag to disable default metric
+  int disable_default_eval_metric;
   // declare parameters
   DMLC_DECLARE_PARAMETER(LearnerTrainParam) {
     DMLC_DECLARE_FIELD(seed).set_default(0).describe(
@@ -128,6 +130,9 @@ struct LearnerTrainParam : public dmlc::Parameter<LearnerTrainParam> {
         .set_lower_bound(0)
         .set_default(0)
         .describe("flag to print out detailed breakdown of runtime");
+    DMLC_DECLARE_FIELD(disable_default_eval_metric)
+        .set_default(0)
+        .describe("flag to disable default metric. Set to >0 to disable");
   }
 };

@@ -403,7 +408,7 @@ class LearnerImpl : public Learner {
     monitor_.Start("EvalOneIter");
     std::ostringstream os;
     os << '[' << iter << ']' << std::setiosflags(std::ios::fixed);
-    if (metrics_.size() == 0) {
+    if (metrics_.size() == 0 && tparam_.disable_default_eval_metric <= 0) {
      metrics_.emplace_back(Metric::Create(obj_->DefaultEvalMetric()));
     }
     for (size_t i = 0; i < data_sets.size(); ++i) {
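The guarded branch above is what the new flag controls: when no eval_metric has been configured, EvalOneIter appends the objective's default metric only while disable_default_eval_metric is still <= 0. A hedged end-to-end sketch of the visible effect, assuming a DMatrix named dtrain and the evalerror function from the demo; the exact log text may differ by version:

    import xgboost as xgb

    params = {
        'objective': 'binary:logitraw',
        'disable_default_eval_metric': 1,  # keep EvalOneIter from adding the default metric
    }
    # with the flag set, only the custom 'my-error' metric should appear
    # in the evaluation log, e.g. "[0]  train-my-error:..."
    bst = xgb.train(params, dtrain, num_boost_round=2,
                    evals=[(dtrain, 'train')], feval=evalerror)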