* Add note.
* Fix num boosted rounds.
This commit is contained in:
parent 328d1e18db
commit 1311a20f49
@@ -18,6 +18,7 @@ void GBLinearModel::SaveModel(Json* p_out) const {
     j_weights[i] = weight[i];
   }
   out["weights"] = std::move(j_weights);
+  out["boosted_rounds"] = Json{this->num_boosted_rounds};
 }
 
 void GBLinearModel::LoadModel(Json const& in) {
@@ -27,6 +28,13 @@ void GBLinearModel::LoadModel(Json const& in) {
   for (size_t i = 0; i < n_weights; ++i) {
     weight[i] = get<Number const>(j_weights[i]);
   }
+  auto const& obj = get<Object const>(in);
+  auto boosted_rounds = obj.find("boosted_rounds");
+  if (boosted_rounds != obj.cend()) {
+    this->num_boosted_rounds = get<Integer const>(boosted_rounds->second);
+  } else {
+    this->num_boosted_rounds = 0;
+  }
 }
 
 DMLC_REGISTER_PARAMETER(DeprecatedGBLinearModelParam);
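The loader above falls back to zero when "boosted_rounds" is absent, so JSON models written before this change still load. A minimal round-trip sketch from the Python side (the file name and toy data are illustrative assumptions, not part of this commit):

import numpy as np
import xgboost as xgb

# Illustrative toy regression data.
X = np.random.rand(64, 4)
y = np.random.rand(64)
dtrain = xgb.DMatrix(X, label=y)

num_rounds = 7
booster = xgb.train({"booster": "gblinear"}, dtrain, num_boost_round=num_rounds)

# Round-trip through the JSON model format (selected by the .json extension).
booster.save_model("gblinear.json")  # hypothetical path
reloaded = xgb.Booster(model_file="gblinear.json")

# With this commit, the round count survives serialization.
assert reloaded.num_boosted_rounds() == num_rounds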
@@ -1,7 +1,6 @@
 import sys
-from hypothesis import strategies, given, settings, assume
+from hypothesis import strategies, given, settings, assume, note
 import pytest
-import numpy
 import xgboost as xgb
 sys.path.append("tests/python")
 import testing as tm
@@ -17,10 +16,14 @@ parameter_strategy = strategies.fixed_dictionaries({
     'top_k': strategies.integers(1, 10),
 })
 
 
 def train_result(param, dmat, num_rounds):
     result = {}
-    xgb.train(param, dmat, num_rounds, [(dmat, 'train')], verbose_eval=False,
-              evals_result=result)
+    booster = xgb.train(
+        param, dmat, num_rounds, [(dmat, 'train')], verbose_eval=False,
+        evals_result=result
+    )
+    assert booster.num_boosted_rounds() == num_rounds
     return result
 
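hypothesis.note records a value that is printed only alongside the final falsifying example, which makes it a better fit than print() inside a shrinking test body. A self-contained sketch of the mechanism, independent of xgboost:

from hypothesis import given, note, strategies as st

@given(st.lists(st.floats(allow_nan=False, allow_infinity=False), min_size=2))
def test_sorted_is_ordered(xs):
    ordered = sorted(xs)
    # Shown in the failure report for the minimal counterexample only.
    note(f"ordered: {ordered}")
    assert all(a <= b for a, b in zip(ordered, ordered[1:]))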
@@ -33,6 +36,7 @@ class TestGPULinear:
         param['updater'] = 'gpu_coord_descent'
         param = dataset.set_params(param)
         result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
+        note(result)
         assert tm.non_increasing(result)
 
     # Loss is not guaranteed to always decrease because of regularisation parameters
@@ -49,6 +53,7 @@ class TestGPULinear:
         param['lambda'] = lambd
         param = dataset.set_params(param)
         result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
+        note(result)
         assert tm.non_increasing([result[0], result[-1]])
 
     @pytest.mark.skipif(**tm.no_cupy())
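tm here is the repo-local testing helper module imported via sys.path.append("tests/python"). Its non_increasing helper is not shown in this diff; a plausible reading, stated as an assumption rather than the actual implementation, is:

def non_increasing(values, tolerance=1e-4):
    # Tolerate tiny upward noise: each step may rise by at most `tolerance`.
    return all(later - earlier < tolerance
               for earlier, later in zip(values, values[1:]))

The 5e-4 passed in the coordinate-descent test below loosens that tolerance, and the [result[0], result[-1]] form keeps only the weakest property: the final loss must not exceed the initial one.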
@@ -32,6 +32,7 @@ class TestLinear:
         param.update(coord_param)
         param = dataset.set_params(param)
         result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
+        note(result)
         assert tm.non_increasing(result, 5e-4)
 
     # Loss is not guaranteed to always decrease because of regularisation parameters
@@ -48,6 +49,7 @@ class TestLinear:
         param.update(coord_param)
         param = dataset.set_params(param)
         result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
+        note(result)
         assert tm.non_increasing([result[0], result[-1]])
 
     @given(parameter_strategy, strategies.integers(10, 50),
@@ -57,6 +59,7 @@ class TestLinear:
         param['updater'] = 'shotgun'
         param = dataset.set_params(param)
         result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
+        note(result)
         # shotgun is non-deterministic, so we relax the test by only using first and last
         # iteration.
         if len(result) > 2:
@@ -75,4 +78,5 @@ class TestLinear:
         param['lambda'] = lambd
         param = dataset.set_params(param)
         result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
+        note(result)
         assert tm.non_increasing([result[0], result[-1]])
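Taken together, the tests follow one property-based pattern: draw linear-updater parameters, train, record the per-iteration metric with note, and check that the loss trend is (approximately) non-increasing. A condensed, self-contained sketch of that pattern, with toy data and thresholds that are illustrative rather than the suite's actual fixtures:

import numpy as np
import xgboost as xgb
from hypothesis import given, note, settings, strategies as st

param_strategy = st.fixed_dictionaries({
    'eta': st.floats(0.01, 0.25),
    'top_k': st.integers(1, 10),  # mirrors the strategy in the diff
})

@settings(deadline=None)
@given(param_strategy, st.integers(10, 50))
def test_gblinear_loss_trend(param, num_rounds):
    rng = np.random.RandomState(0)
    X, y = rng.rand(128, 8), rng.rand(128)
    dtrain = xgb.DMatrix(X, label=y)
    param = {**param, 'booster': 'gblinear', 'updater': 'coord_descent'}
    result = {}
    xgb.train(param, dtrain, num_rounds, [(dtrain, 'train')],
              verbose_eval=False, evals_result=result)
    rmse = result['train']['rmse']  # default metric for the default objective
    note(rmse)  # surfaced only on a falsifying example
    # Relaxed check, as in the diff: compare first vs last iteration only.
    assert rmse[-1] <= rmse[0]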