##
# This script demonstrates how to access the XGBoost evaluation metrics
# through the sklearn interface.
##

import xgboost as xgb
import numpy as np
from sklearn.datasets import make_hastie_10_2

X, y = make_hastie_10_2(n_samples=2000, random_state=42)
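
# make_hastie_10_2 draws ten standard-normal features per sample and assigns
# labels in {-1, +1}, which the binary:logistic objective cannot consume
# directly.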
# Map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)

X_train, X_test = X[:1600], X[1600:]
y_train, y_test = y[:1600], y[1600:]

param_dist = {'objective': 'binary:logistic', 'n_estimators': 2}
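
# With n_estimators=2 the model trains for only two boosting rounds, so each
# metric list recorded below holds two values, one per round.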
clf = xgb.XGBModel(**param_dist)
# Or you can use: clf = xgb.XGBClassifier(**param_dist)
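# XGBModel is the generic sklearn wrapper; XGBClassifier adds classifier
# conveniences such as predict_proba(). Both expose evals_result().
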
clf.fit(X_train, y_train,
        eval_set=[(X_train, y_train), (X_test, y_test)],
        eval_metric='logloss',
        verbose=True)
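
# Note: XGBoost >= 1.6 deprecates passing eval_metric to fit() in favour of
# the constructor, and recent releases reject it here; if that happens, move
# eval_metric='logloss' into param_dist above.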

# Retrieve the metrics recorded during training via evals_result()
evals_result = clf.evals_result()
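
# evals_result maps each eval_set entry to its metrics, e.g.
# {'validation_0': {'logloss': [...]}, 'validation_1': {'logloss': [...]}}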
print('Access logloss metric directly from validation_0:')
print(evals_result['validation_0']['logloss'])

print('')
print('Access metrics through a loop:')
for e_name, e_mtrs in evals_result.items():
    print('- {}'.format(e_name))
    for e_mtr_name, e_mtr_vals in e_mtrs.items():
        print('  - {}'.format(e_mtr_name))
        print('    - {}'.format(e_mtr_vals))

print('')
print('Access complete dict:')
print(evals_result)
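
# A follow-up sketch, not part of the original demo: plot the train/test
# logloss recorded above, one point per boosting round. Assumes matplotlib
# is available; skipped gracefully if it is not. With only two rounds the
# curves are short, so raise n_estimators above for a fuller learning curve.
try:
    import matplotlib.pyplot as plt

    rounds = range(len(evals_result['validation_0']['logloss']))
    plt.plot(rounds, evals_result['validation_0']['logloss'], label='train')
    plt.plot(rounds, evals_result['validation_1']['logloss'], label='test')
    plt.xlabel('boosting round')
    plt.ylabel('logloss')
    plt.legend()
    plt.show()
except ImportError:
    pass  # matplotlib not installed; skip the plot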