[BP] Fix num_boosted_rounds for linear model. (#7538) (#7559)

* Add note.

* Fix num_boosted_rounds.
This commit is contained in:
Jiaming Yuan
2022-01-14 00:20:57 +08:00
committed by GitHub
parent 328d1e18db
commit 1311a20f49
3 changed files with 21 additions and 4 deletions

View File

@@ -32,6 +32,7 @@ class TestLinear:
param.update(coord_param)
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing(result, 5e-4)
# Loss is not guaranteed to always decrease because of regularisation parameters
@@ -48,6 +49,7 @@ class TestLinear:
param.update(coord_param)
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing([result[0], result[-1]])
@given(parameter_strategy, strategies.integers(10, 50),
@@ -57,6 +59,7 @@ class TestLinear:
param['updater'] = 'shotgun'
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
# shotgun is non-deterministic, so we relax the test by only using first and last
# iteration.
if len(result) > 2:
@@ -75,4 +78,5 @@ class TestLinear:
param['lambda'] = lambd
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing([result[0], result[-1]])