xgboost/tests/cpp/gbm/test_gbtree.cc
Jiaming Yuan f0064c07ab
Refactor configuration [Part II]. (#4577)
* Refactor configuration [Part II].

* General changes:
** Remove `Init` methods to avoid ambiguity.
** Remove `Configure(std::map<>)` to avoid redundant copying and to prepare for
   parameter validation (a `std::vector` is returned from `InitAllowUnknown`);
   see the sketch after this list.
** Add name to tree updaters for easier debugging.
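The vector-of-pairs convention is visible in the test below
(`InitAllowUnknown(std::vector<Arg>{...})`, `Configure({args.cbegin(), args.cend()})`).
A minimal standalone sketch of the pattern follows; `DemoParam` and its members
are illustrative stand-ins, not XGBoost's parameter classes:

    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    // Illustrative only: a stand-in parameter object configured from a
    // std::vector of key/value pairs, mirroring the convention exercised
    // by the test below.  None of these names come from the XGBoost sources.
    struct DemoParam {
      int n_gpus{0};
      std::string tree_method{"auto"};

      // Apply known keys and hand unknown ones back to the caller, which is
      // what makes later parameter validation possible.
      std::vector<std::pair<std::string, std::string>> InitAllowUnknown(
          std::vector<std::pair<std::string, std::string>> const& args) {
        std::vector<std::pair<std::string, std::string>> unknown;
        for (auto const& kv : args) {
          if (kv.first == "n_gpus") {
            n_gpus = std::stoi(kv.second);
          } else if (kv.first == "tree_method") {
            tree_method = kv.second;
          } else {
            unknown.push_back(kv);  // returned to the caller for validation
          }
        }
        return unknown;
      }
    };

    int main() {
      DemoParam param;
      auto unknown = param.InitAllowUnknown(
          {{"n_gpus", "0"}, {"tree_method", "hist"}, {"typo", "1"}});
      std::cout << param.tree_method << ", unknown args: " << unknown.size() << "\n";
      return 0;
    }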

* Learner changes:
** Make `LearnerImpl` the only source of configuration.

    All configuration is stored by `LearnerImpl` and applied in
    `LearnerImpl::Configure()`.

** Remove booster in C API.

    It was originally kept for a "compatibility reason", but the reason was
    never stated, so it is simply removed here.

** Add a `metric_names_` field in `LearnerImpl`.
** Remove `LazyInit`.  Configuration will always be lazy.
** Run `Configure` before every iteration (see the sketch after this list).
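A minimal sketch of the lazy-configuration pattern described above; `LazyLearner`,
`cfg_dirty_`, and `UpdateOneIter` are illustrative stand-ins rather than the actual
`LearnerImpl` code.  The point is that running the configure step before every
iteration stays cheap because it returns immediately unless a parameter changed:

    #include <iostream>
    #include <map>
    #include <string>

    // Illustrative pattern only: configuration is applied lazily, and the
    // configure step runs before every iteration but is a no-op unless
    // parameters changed since the last call.
    class LazyLearner {
     public:
      void SetParam(std::string const& key, std::string const& value) {
        params_[key] = value;
        cfg_dirty_ = true;  // any change invalidates the current configuration
      }

      void UpdateOneIter(int iter) {
        Configure();  // always called; cheap when nothing changed
        std::cout << "iteration " << iter << " with tree_method="
                  << params_["tree_method"] << "\n";
      }

     private:
      void Configure() {
        if (!cfg_dirty_) { return; }
        // ... rebuild gradient booster, objective, and metrics from params_ ...
        cfg_dirty_ = false;
      }

      std::map<std::string, std::string> params_;
      bool cfg_dirty_{true};
    };

    int main() {
      LazyLearner learner;
      learner.SetParam("tree_method", "hist");
      for (int iter = 0; iter < 2; ++iter) {
        learner.UpdateOneIter(iter);
      }
      return 0;
    }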

* Predictor changes:
** Allocate both cpu and gpu predictor.
** Remove cpu_predictor from gpu_predictor.

    `GBTree` is now used to dispatch the predictor (see the sketch after this
    list).

** Remove some GPU Predictor tests.
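A sketch of dispatching on the `predictor` parameter: only the values
"cpu_predictor" and "gpu_predictor" appear in the test below, while
`PickPredictor` and the two Demo* classes are hypothetical.  Both predictors
are allocated up front, and the string parameter decides which one serves a
request, so neither implementation needs to know about the other:

    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Hypothetical stand-ins for the two predictor implementations.
    struct DemoPredictor {
      virtual ~DemoPredictor() = default;
      virtual void PredictBatch() const = 0;
    };
    struct DemoCPUPredictor : DemoPredictor {
      void PredictBatch() const override { std::cout << "predicting on CPU\n"; }
    };
    struct DemoGPUPredictor : DemoPredictor {
      void PredictBatch() const override { std::cout << "predicting on GPU\n"; }
    };

    // Pick a predictor by the name carried in the configuration.
    DemoPredictor const& PickPredictor(std::string const& name,
                                       DemoCPUPredictor const& cpu,
                                       DemoGPUPredictor const& gpu) {
      if (name == "cpu_predictor") { return cpu; }
      if (name == "gpu_predictor") { return gpu; }
      throw std::invalid_argument("unknown predictor: " + name);
    }

    int main() {
      DemoCPUPredictor cpu;
      DemoGPUPredictor gpu;
      PickPredictor("cpu_predictor", cpu, gpu).PredictBatch();
      PickPredictor("gpu_predictor", cpu, gpu).PredictBatch();
      return 0;
    }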

* IO

No IO changes.  Binary model format stability is verified by comparing the hash
values of models saved by the two commits; a rough sketch of such a check
follows.
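This sketch assumes the two binary model files were saved beforehand by builds
of the two commits; `FileHash` and the file names are illustrative, and
`std::hash` is only good for a local equality check, not a portable fingerprint:

    #include <fstream>
    #include <iostream>
    #include <iterator>
    #include <string>

    // Hash the raw bytes of a saved model file.
    std::size_t FileHash(std::string const& path) {
      std::ifstream fin(path, std::ios::binary);
      std::string bytes{std::istreambuf_iterator<char>(fin),
                        std::istreambuf_iterator<char>()};
      return std::hash<std::string>{}(bytes);
    }

    int main() {
      // Illustrative file names: models saved before and after the refactor.
      auto before = FileHash("model_before.bin");
      auto after  = FileHash("model_after.bin");
      std::cout << (before == after ? "binary format unchanged\n"
                                    : "binary format differs\n");
      return 0;
    }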
2019-07-20 08:34:56 -04:00


#include <gtest/gtest.h>
#include <xgboost/generic_parameters.h>

#include "../helpers.h"
#include "../../../src/gbm/gbtree.h"

namespace xgboost {

TEST(GBTree, SelectTreeMethod) {
  using Arg = std::pair<std::string, std::string>;
  size_t constexpr kRows = 10;
  size_t constexpr kCols = 10;
  auto p_shared_ptr_dmat = CreateDMatrix(kRows, kCols, 0);
  auto p_dmat {(*p_shared_ptr_dmat).get()};

  GenericParameter generic_param;
  generic_param.InitAllowUnknown(std::vector<Arg>{Arg("n_gpus", "0")});
  std::unique_ptr<GradientBooster> p_gbm{
      GradientBooster::Create("gbtree", &generic_param, {}, 0)};
  auto& gbtree = dynamic_cast<gbm::GBTree&>(*p_gbm);

  // Test if `tree_method` can be set
  std::string n_feat = std::to_string(kCols);
  std::map<std::string, std::string> args{
      Arg{"tree_method", "approx"}, Arg{"num_feature", n_feat}};
  gbtree.Configure({args.cbegin(), args.cend()});
  gbtree.ConfigureWithKnownData(args, p_dmat);
  auto const& tparam = gbtree.GetTrainParam();

  gbtree.ConfigureWithKnownData({Arg{"tree_method", "approx"},
                                 Arg{"num_feature", n_feat}}, p_dmat);
  ASSERT_EQ(tparam.updater_seq, "grow_histmaker,prune");
  gbtree.ConfigureWithKnownData({Arg("tree_method", "exact"),
                                 Arg("num_feature", n_feat)}, p_dmat);
  ASSERT_EQ(tparam.updater_seq, "grow_colmaker,prune");
  gbtree.ConfigureWithKnownData({Arg("tree_method", "hist"),
                                 Arg("num_feature", n_feat)}, p_dmat);
  ASSERT_EQ(tparam.updater_seq, "grow_quantile_histmaker");
  ASSERT_EQ(tparam.predictor, "cpu_predictor");
  gbtree.ConfigureWithKnownData({Arg{"booster", "dart"}, Arg{"tree_method", "hist"},
                                 Arg{"num_feature", n_feat}}, p_dmat);
  ASSERT_EQ(tparam.updater_seq, "grow_quantile_histmaker");

#ifdef XGBOOST_USE_CUDA
  generic_param.InitAllowUnknown(std::vector<Arg>{Arg{"n_gpus", "1"}});
  gbtree.ConfigureWithKnownData({Arg("tree_method", "gpu_exact"),
                                 Arg("num_feature", n_feat)}, p_dmat);
  ASSERT_EQ(tparam.updater_seq, "grow_gpu,prune");
  ASSERT_EQ(tparam.predictor, "gpu_predictor");
  gbtree.ConfigureWithKnownData({Arg("tree_method", "gpu_hist"),
                                 Arg("num_feature", n_feat)}, p_dmat);
  ASSERT_EQ(tparam.updater_seq, "grow_gpu_hist");
  ASSERT_EQ(tparam.predictor, "gpu_predictor");
  gbtree.ConfigureWithKnownData({Arg{"booster", "dart"}, Arg{"tree_method", "gpu_hist"},
                                 Arg{"num_feature", n_feat}}, p_dmat);
  ASSERT_EQ(tparam.updater_seq, "grow_gpu_hist");
#endif  // XGBOOST_USE_CUDA

  delete p_shared_ptr_dmat;
}
}  // namespace xgboost