Remove experimental_json_serialization from tests. (#6640)
parent 8968ca7c0a
commit bc08e0c9d1
@@ -632,8 +632,7 @@ class LearnerIO : public LearnerConfiguration {
  private:
   std::set<std::string> saved_configs_ = {"num_round"};
   // Used to identify the offset of JSON string when
-  // `enable_experimental_json_serialization' is set to false. Will be removed once JSON
-  // takes over.
+  // Will be removed once JSON takes over. Right now we still loads some RDS files from R.
   std::string const serialisation_header_ { u8"CONFIG-offset:" };

  public:
@@ -816,10 +815,7 @@ class LearnerIO : public LearnerConfiguration {
   }

   // Save model into binary format. The code is about to be deprecated by more robust
-  // JSON serialization format. This function is uneffected by
-  // `enable_experimental_json_serialization` as user might enable this flag for pickle
-  // while still want a binary output. As we are progressing at replacing the binary
-  // format, there's no need to put too much effort on it.
+  // JSON serialization format.
   void SaveModel(dmlc::Stream* fo) const override {
     LearnerModelParamLegacy mparam = mparam_;  // make a copy to potentially modify
     std::vector<std::pair<std::string, std::string> > extra_attr;
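For context, the binary-versus-JSON distinction above is also visible from the Python API, where the output format is chosen by the file extension rather than by any training parameter. A minimal sketch, assuming an XGBoost build where save_model picks the format from the extension (file names are illustrative):

import numpy as np
import xgboost as xgb

X = np.random.randn(64, 8)
y = np.random.randn(64)
dtrain = xgb.DMatrix(X, y)
bst = xgb.train({'tree_method': 'hist'}, dtrain, num_boost_round=4)

# The extension selects the serialization format; no flag is required.
bst.save_model('model.json')   # JSON serialization
bst.save_model('model.bin')    # legacy binary format, slated for deprecation

reloaded = xgb.Booster()
reloaded.load_model('model.json')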
@@ -339,7 +339,6 @@ TEST_F(SerializationTest, ConfigurationCount) {
   auto learner = std::unique_ptr<Learner>(Learner::Create(mat));

   learner->SetParam("tree_method", "gpu_hist");
-  learner->SetParam("enable_experimental_json_serialization", "1");

   for (size_t i = 0; i < 10; ++i) {
     learner->UpdateOneIter(i, p_dmat);
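The Python-level counterpart of what these serialization tests exercise is a pickle round trip, which no longer needs the removed parameter. A short sketch of that pattern:

import pickle

import numpy as np
import xgboost as xgb

X = np.random.randn(64, 8)
y = np.random.randn(64)
dtrain = xgb.DMatrix(X, y)
bst = xgb.train({'tree_method': 'hist'}, dtrain, num_boost_round=4)

# Round-trip the booster through pickle without setting any serialization flag.
restored = pickle.loads(pickle.dumps(bst))
assert restored.get_dump() == bst.get_dump()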
@@ -283,9 +283,10 @@ class TestGPUPredict:
         y = (x0 * 10 - 20) + (x1 - 2)
         dtrain = xgb.DMatrix(df, label=y, enable_categorical=True)

-        params = {'tree_method': 'gpu_hist', 'predictor': 'gpu_predictor',
-                  'enable_experimental_json_serialization': True,
-                  'max_depth': 3, 'learning_rate': 1.0, 'base_score': 0.0, 'eval_metric': 'rmse'}
+        params = {
+            'tree_method': 'gpu_hist', 'predictor': 'gpu_predictor',
+            'max_depth': 3, 'learning_rate': 1.0, 'base_score': 0.0, 'eval_metric': 'rmse'
+        }

         eval_history = {}
         bst = xgb.train(params, dtrain, num_boost_round=5, evals=[(dtrain, 'train')],
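In the hunk above, eval_history is filled through the evals_result argument of xgb.train. A CPU-only sketch of the same pattern, with illustrative data and parameter values:

import numpy as np
import xgboost as xgb

X = np.random.randn(128, 4)
y = X[:, 0] * 10 - 20 + X[:, 1]
dtrain = xgb.DMatrix(X, label=y)

params = {'tree_method': 'hist', 'max_depth': 3,
          'learning_rate': 1.0, 'base_score': 0.0, 'eval_metric': 'rmse'}

eval_history = {}
xgb.train(params, dtrain, num_boost_round=5,
          evals=[(dtrain, 'train')], evals_result=eval_history)

# evals_result is populated in place, e.g. {'train': {'rmse': [...]}}
print(eval_history['train']['rmse'])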
@@ -6,15 +6,14 @@ rng = np.random.RandomState(1994)


 class TestGPUTrainingContinuation:
-    def run_training_continuation(self, use_json):
+    def test_training_continuation(self):
         kRows = 64
         kCols = 32
         X = np.random.randn(kRows, kCols)
         y = np.random.randn(kRows)
         dtrain = xgb.DMatrix(X, y)
         params = {'tree_method': 'gpu_hist', 'max_depth': '2',
-                  'gamma': '0.1', 'alpha': '0.01',
-                  'enable_experimental_json_serialization': use_json}
+                  'gamma': '0.1', 'alpha': '0.01'}
         bst_0 = xgb.train(params, dtrain, num_boost_round=64)
         dump_0 = bst_0.get_dump(dump_format='json')

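The continuation behaviour exercised by this test corresponds to passing an existing booster via the xgb_model argument. A small sketch of that usage, with hyperparameters chosen only for illustration:

import numpy as np
import xgboost as xgb

X = np.random.randn(64, 32)
y = np.random.randn(64)
dtrain = xgb.DMatrix(X, y)
params = {'tree_method': 'hist', 'max_depth': 2, 'gamma': 0.1, 'alpha': 0.01}

# Train in one shot, then in two halves, continuing from the first booster.
bst_full = xgb.train(params, dtrain, num_boost_round=64)
bst_half = xgb.train(params, dtrain, num_boost_round=32)
bst_cont = xgb.train(params, dtrain, num_boost_round=32, xgb_model=bst_half)

# Both boosters should end up with the same number of trees.
assert len(bst_cont.get_dump()) == len(bst_full.get_dump())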
@@ -48,9 +47,3 @@ class TestGPUTrainingContinuation:
             obj_0 = json.loads(dump_0[i])
             obj_1 = json.loads(dump_1[i])
             recursive_compare(obj_0, obj_1)
-
-    def test_gpu_training_continuation_binary(self):
-        self.run_training_continuation(False)
-
-    def test_gpu_training_continuation_json(self):
-        self.run_training_continuation(True)
@@ -63,9 +63,7 @@ class TestGPUUpdaters:
         by_etl_results = {}
         by_builtin_results = {}

-        parameters = {'tree_method': 'gpu_hist',
-                      'predictor': 'gpu_predictor',
-                      'enable_experimental_json_serialization': True}
+        parameters = {'tree_method': 'gpu_hist', 'predictor': 'gpu_predictor'}

         m = xgb.DMatrix(onehot, label, enable_categorical=True)
         xgb.train(parameters, m,
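The enable_categorical=True flag in the hunk above refers to the experimental categorical-data support. A hedged sketch of how such a DMatrix is typically built from a pandas DataFrame with category dtype; column names are illustrative and a GPU is assumed, since at the time of this commit categorical splits required a GPU-based tree method:

import numpy as np
import pandas as pd
import xgboost as xgb

rng = np.random.RandomState(1994)
df = pd.DataFrame({
    'cat': pd.Series(rng.randint(0, 4, size=256), dtype='category'),
    'num': rng.randn(256),
})
label = rng.randn(256)

# Categorical columns require enable_categorical=True on the DMatrix.
m = xgb.DMatrix(df, label, enable_categorical=True)
xgb.train({'tree_method': 'gpu_hist', 'predictor': 'gpu_predictor'}, m,
          num_boost_round=8)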