Add test for eta and mitigate float error. (#7446)

* Add eta test.
* Don't skip test.
This commit is contained in:
Jiaming Yuan 2021-11-18 20:42:48 +08:00 committed by GitHub
parent 7cfb310eb4
commit 9fb4338964
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 65 additions and 2 deletions

View File

@@ -149,6 +149,7 @@ void TestLearnerSerialization(Args args, FeatureMap const& fmap, std::shared_ptr
Json m_0 = Json::Load(StringView{continued_model.c_str(), continued_model.size()});
Json m_1 = Json::Load(StringView{model_at_2kiter.c_str(), model_at_2kiter.size()});
CompareJSON(m_0, m_1);
}
@@ -610,7 +611,6 @@ TEST_F(MultiClassesSerializationTest, CPUCoordDescent) {
#if defined(XGBOOST_USE_CUDA)
TEST_F(MultiClassesSerializationTest, GpuHist) {
GTEST_SKIP() << "This test is broken for CUDA 11.0 + Windows combination, skipping";
TestLearnerSerialization({{"booster", "gbtree"},
{"num_class", std::to_string(kClasses)},
{"seed", "0"},
@@ -620,6 +620,9 @@ TEST_F(MultiClassesSerializationTest, GpuHist) {
// different result (1e-7) with CPU predictor for some
// entries.
{"predictor", "gpu_predictor"},
// Mitigate the difference caused by hardware fused multiply
// add to tree weight during update prediction cache.
{"learning_rate", "1.0"},
{"tree_method", "gpu_hist"}},
fmap_, p_dmat_);
@@ -630,7 +633,8 @@ TEST_F(MultiClassesSerializationTest, GpuHist) {
{"max_depth", std::to_string(kClasses)},
// GPU_Hist has higher floating point error. 1e-6 doesn't work
// after num_parallel_tree goes to 4
{"num_parallel_tree", "3"},
{"num_parallel_tree", "4"},
{"learning_rate", "1.0"},
{"tree_method", "gpu_hist"}},
fmap_, p_dmat_);
@@ -638,6 +642,7 @@ TEST_F(MultiClassesSerializationTest, GpuHist) {
{"num_class", std::to_string(kClasses)},
{"seed", "0"},
{"nthread", "1"},
{"learning_rate", "1.0"},
{"max_depth", std::to_string(kClasses)},
{"tree_method", "gpu_hist"}},
fmap_, p_dmat_);

View File

@@ -56,4 +56,62 @@ TEST_F(UpdaterTreeStatTest, Exact) {
TEST_F(UpdaterTreeStatTest, Approx) {
this->RunTest("grow_histmaker");
}
// Fixture verifying that the learning rate (eta) scales leaf weights
// linearly: a tree grown with eta must match the eta == 1.0 tree with
// every leaf value multiplied by eta.
class UpdaterEtaTest : public ::testing::Test {
protected:
// Synthetic DMatrix shared by every updater under test.
std::shared_ptr<DMatrix> p_dmat_;
// Random gradient pairs fed to each Update() call.
HostDeviceVector<GradientPair> gpairs_;
size_t constexpr static kRows = 10;
size_t constexpr static kCols = 10;
size_t constexpr static kClasses = 10;
void SetUp() override {
// 0.5 sparsity random data; kClasses is forwarded to the generator
// (presumably the number of label classes — matches the test name).
p_dmat_ = RandomDataGenerator(kRows, kCols, .5f).GenerateDMatrix(true, false, kClasses);
auto g = GenerateRandomGradients(kRows);
gpairs_.Resize(kRows);
gpairs_.Copy(g);
}
// Grow trees with two updaters of the same kind — one configured with
// eta = 0.4, one with eta = 1.0 — and require each leaf of the first
// tree to equal the corresponding leaf of the second scaled by eta.
void RunTest(std::string updater) {
auto tparam = CreateEmptyGenericParam(0);
float eta = 0.4;
auto up_0 = std::unique_ptr<TreeUpdater>{
TreeUpdater::Create(updater, &tparam, ObjInfo{ObjInfo::kClassification})};
up_0->Configure(Args{{"eta", std::to_string(eta)}});
auto up_1 = std::unique_ptr<TreeUpdater>{
TreeUpdater::Create(updater, &tparam, ObjInfo{ObjInfo::kClassification})};
up_1->Configure(Args{{"eta", "1.0"}});
// Same gradients every iteration, so both updaters see identical
// inputs on each round.
for (size_t iter = 0; iter < 4; ++iter) {
RegTree tree_0;
{
tree_0.param.num_feature = kCols;
up_0->Update(&gpairs_, p_dmat_.get(), {&tree_0});
}
RegTree tree_1;
{
tree_1.param.num_feature = kCols;
up_1->Update(&gpairs_, p_dmat_.get(), {&tree_1});
}
// Walk tree_0; the same nidx is assumed to address the matching
// node in tree_1 (identical inputs should give identical splits —
// TODO(review): confirm structure equality is guaranteed here).
tree_0.WalkTree([&](bst_node_t nidx) {
if (tree_0[nidx].IsLeaf()) {
// kRtEps tolerance rather than exact equality: the commit
// message cites hardware fused-multiply-add differences when
// eta is folded into the leaf weight.
EXPECT_NEAR(tree_1[nidx].LeafValue() * eta, tree_0[nidx].LeafValue(), kRtEps);
}
return true;
});
}
}
};
// Run the eta-scaling check against every tree updater implementation.
TEST_F(UpdaterEtaTest, Hist) {
  this->RunTest("grow_quantile_histmaker");
}

TEST_F(UpdaterEtaTest, Exact) {
  this->RunTest("grow_colmaker");
}

TEST_F(UpdaterEtaTest, Approx) {
  this->RunTest("grow_histmaker");
}

#if defined(XGBOOST_USE_CUDA)
// GPU variant is only compiled when CUDA support is enabled.
TEST_F(UpdaterEtaTest, GpuHist) {
  this->RunTest("grow_gpu_hist");
}
#endif  // defined(XGBOOST_USE_CUDA)
} // namespace xgboost