[BP] Fix num_boosted_rounds for linear model. (#7538) (#7559)

* Add hypothesis note() calls to the linear tests.

* Fix num_boosted_rounds serialization for the linear model.
Jiaming Yuan 2022-01-14 00:20:57 +08:00 committed by GitHub
parent 328d1e18db
commit 1311a20f49
3 changed files with 21 additions and 4 deletions
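Before this change, gblinear models did not serialize the number of completed boosting rounds, so the counter was lost on a save/load round trip. A minimal sketch of the behavior the fix restores (the data, file name, and parameters below are illustrative, not taken from the commit):

import numpy as np
import xgboost as xgb

# Train a small linear booster; the in-memory round counter is correct.
X = np.random.randn(128, 4)
y = np.random.randn(128)
dtrain = xgb.DMatrix(X, label=y)
booster = xgb.train({"booster": "gblinear"}, dtrain, num_boost_round=8)
assert booster.num_boosted_rounds() == 8

# Round-trip through the JSON serializer. Before this fix, the reloaded
# gblinear model forgot how many rounds it had been trained for.
booster.save_model("linear.json")
reloaded = xgb.Booster(model_file="linear.json")
assert reloaded.num_boosted_rounds() == 8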

src/gbm/gblinear_model.cc

@@ -18,6 +18,7 @@ void GBLinearModel::SaveModel(Json* p_out) const {
     j_weights[i] = weight[i];
   }
   out["weights"] = std::move(j_weights);
+  out["boosted_rounds"] = Json{this->num_boosted_rounds};
 }
 
 void GBLinearModel::LoadModel(Json const& in) {
@@ -27,6 +28,13 @@ void GBLinearModel::LoadModel(Json const& in) {
   for (size_t i = 0; i < n_weights; ++i) {
     weight[i] = get<Number const>(j_weights[i]);
   }
+  auto const& obj = get<Object const>(in);
+  auto boosted_rounds = obj.find("boosted_rounds");
+  if (boosted_rounds != obj.cend()) {
+    this->num_boosted_rounds = get<Integer const>(boosted_rounds->second);
+  } else {
+    this->num_boosted_rounds = 0;
+  }
 }
 
 DMLC_REGISTER_PARAMETER(DeprecatedGBLinearModelParam);
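The loader is written defensively: if the "boosted_rounds" key is absent (a model saved by an older version), the counter falls back to 0 instead of failing, so existing JSON models remain loadable. A quick way to see where the new field lands in the saved document; the nesting below follows xgboost's JSON model schema as I understand it and should be treated as an assumption:

import json

with open("linear.json") as fd:
    model = json.load(fd)

# For gblinear, the model object carries "weights" and, after this fix,
# "boosted_rounds"; .get() returns None for models saved before the fix.
linear_model = model["learner"]["gradient_booster"]["model"]
print(linear_model.get("boosted_rounds"))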

tests/python-gpu/test_gpu_linear.py

@@ -1,7 +1,6 @@
 import sys
-from hypothesis import strategies, given, settings, assume
+from hypothesis import strategies, given, settings, assume, note
 import pytest
-import numpy
 import xgboost as xgb
 sys.path.append("tests/python")
 import testing as tm
@@ -17,10 +16,14 @@ parameter_strategy = strategies.fixed_dictionaries({
     'top_k': strategies.integers(1, 10),
 })
 
 
 def train_result(param, dmat, num_rounds):
     result = {}
-    xgb.train(param, dmat, num_rounds, [(dmat, 'train')], verbose_eval=False,
-              evals_result=result)
+    booster = xgb.train(
+        param, dmat, num_rounds, [(dmat, 'train')], verbose_eval=False,
+        evals_result=result
+    )
+    assert booster.num_boosted_rounds() == num_rounds
     return result
@@ -33,6 +36,7 @@ class TestGPULinear:
         param['updater'] = 'gpu_coord_descent'
         param = dataset.set_params(param)
         result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
+        note(result)
         assert tm.non_increasing(result)
 
     # Loss is not guaranteed to always decrease because of regularisation parameters
@@ -49,6 +53,7 @@ class TestGPULinear:
         param['lambda'] = lambd
         param = dataset.set_params(param)
         result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
+        note(result)
         assert tm.non_increasing([result[0], result[-1]])
 
     @pytest.mark.skipif(**tm.no_cupy())

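The strengthened train_result helper above asserts that the booster reports exactly the requested number of rounds after every hypothesis-generated run. One place the counter visibly matters is training continuation; a hedged sketch (file name and parameters are illustrative, not from the commit):

import numpy as np
import xgboost as xgb

X = np.random.randn(128, 4)
y = np.random.randn(128)
dtrain = xgb.DMatrix(X, label=y)

# Train 4 rounds, save, then continue for 4 more from the saved model.
booster = xgb.train({"booster": "gblinear"}, dtrain, num_boost_round=4)
booster.save_model("linear.json")
continued = xgb.train({"booster": "gblinear"}, dtrain, num_boost_round=4,
                      xgb_model="linear.json")

# With boosted_rounds serialized, the counter survives the reload and
# keeps counting: 4 saved + 4 new rounds.
assert continued.num_boosted_rounds() == 8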
tests/python/test_linear.py

@@ -32,6 +32,7 @@ class TestLinear:
         param.update(coord_param)
         param = dataset.set_params(param)
         result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
+        note(result)
         assert tm.non_increasing(result, 5e-4)
 
     # Loss is not guaranteed to always decrease because of regularisation parameters
@@ -48,6 +49,7 @@ class TestLinear:
         param.update(coord_param)
         param = dataset.set_params(param)
         result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
+        note(result)
         assert tm.non_increasing([result[0], result[-1]])
 
     @given(parameter_strategy, strategies.integers(10, 50),
@@ -57,6 +59,7 @@ class TestLinear:
         param['updater'] = 'shotgun'
         param = dataset.set_params(param)
         result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
+        note(result)
         # shotgun is non-deterministic, so we relax the test by only using first and last
         # iteration.
         if len(result) > 2:
@@ -75,4 +78,5 @@ class TestLinear:
         param['lambda'] = lambd
         param = dataset.set_params(param)
         result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
+        note(result)
         assert tm.non_increasing([result[0], result[-1]])
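The note() calls sprinkled through these tests use hypothesis's mechanism for attaching extra output to a failing example: notes are replayed with the final falsifying example, so when one of the non_increasing assertions trips, the offending loss history is printed alongside it. A toy property, expected to fail, that shows the effect:

from hypothesis import given, note, strategies as st

@given(st.lists(st.floats(0, 1), min_size=2, max_size=5))
def test_toy_non_increasing(history):
    note(history)  # printed with the falsifying example when this fails
    assert all(a >= b for a, b in zip(history, history[1:]))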