De-duplicate GPU parameters. (#4454)
* Only define `gpu_id` and `n_gpus` in `LearnerTrainParam`.
* Pass `LearnerTrainParam` through XGBoost via factory method.
* Disable all GPU usage when GPU-related parameters are not specified (fixes XGBoost choosing the GPU over-aggressively).
* Test learner train param IO.
* Fix GPU pickling.
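The core of the change is that `gpu_id` and `n_gpus` now live only in `LearnerTrainParam`, which the rest of XGBoost receives via a factory method instead of re-declaring the fields per component. A rough sketch of what such a consolidated parameter struct could look like follows; the field names come from the tests in the diff below, while the dmlc::Parameter boilerplate, defaults, and descriptions are assumptions for illustration, not the exact code in this commit.

// Hypothetical sketch only -- not the exact definition from this commit.
struct LearnerTrainParam : public dmlc::Parameter<LearnerTrainParam> {
  int gpu_id;   // primary device ordinal
  int n_gpus;   // 0 = CPU only, -1 = all visible devices
  DMLC_DECLARE_PARAMETER(LearnerTrainParam) {
    DMLC_DECLARE_FIELD(gpu_id).set_default(0)
        .describe("The primary GPU device ordinal.");
    DMLC_DECLARE_FIELD(n_gpus).set_default(0).set_lower_bound(-1)
        .describe("Number of GPUs to use; 0 disables GPU usage, -1 uses all GPUs.");
  }
};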
@@ -2,6 +2,7 @@
#include <gtest/gtest.h>
#include <vector>
#include "helpers.h"

#include "xgboost/learner.h"
#include "dmlc/filesystem.h"

@@ -115,4 +116,119 @@ TEST(Learner, SLOW_CheckMultiBatch) {
  learner->UpdateOneIter(0, dmat.get());
}

#if defined(XGBOOST_USE_CUDA)

TEST(Learner, IO) {
  using Arg = std::pair<std::string, std::string>;
  size_t constexpr kRows = 10;
  auto pp_dmat = CreateDMatrix(kRows, 10, 0);
  auto p_dmat = *pp_dmat;

  std::vector<bst_float> labels(kRows);
  for (size_t i = 0; i < labels.size(); ++i) {
    labels[i] = i;
  }
  p_dmat->Info().labels_.HostVector() = labels;
  std::vector<std::shared_ptr<DMatrix>> mat {p_dmat};

  std::unique_ptr<Learner> learner {Learner::Create(mat)};
  learner->Configure({Arg{"tree_method", "auto"},
                      Arg{"predictor", "gpu_predictor"},
                      Arg{"n_gpus", "-1"}});
  learner->InitModel();
  learner->UpdateOneIter(0, p_dmat.get());
  ASSERT_EQ(learner->GetLearnerTrainParameter().gpu_id, 0);
  ASSERT_EQ(learner->GetLearnerTrainParameter().n_gpus, -1);

  dmlc::TemporaryDirectory tempdir;
  const std::string fname = tempdir.path + "/model.bst";

  {
    // Create a scope to close the stream before next read.
    std::unique_ptr<dmlc::Stream> fo(dmlc::Stream::Create(fname.c_str(), "w"));
    learner->Save(fo.get());
  }

  std::unique_ptr<dmlc::Stream> fi(dmlc::Stream::Create(fname.c_str(), "r"));
  learner->Load(fi.get());
  ASSERT_EQ(learner->GetLearnerTrainParameter().gpu_id, 0);
  ASSERT_EQ(learner->GetLearnerTrainParameter().n_gpus, 0);

  delete pp_dmat;
}

// Tests for automatic GPU configuration.
TEST(Learner, GPUConfiguration) {
  using Arg = std::pair<std::string, std::string>;
  size_t constexpr kRows = 10;
  auto pp_dmat = CreateDMatrix(kRows, 10, 0);
  auto p_dmat = *pp_dmat;
  std::vector<std::shared_ptr<DMatrix>> mat {p_dmat};
  std::vector<bst_float> labels(kRows);
  for (size_t i = 0; i < labels.size(); ++i) {
    labels[i] = i;
  }
  p_dmat->Info().labels_.HostVector() = labels;
  {
    std::unique_ptr<Learner> learner {Learner::Create(mat)};
    learner->Configure({Arg{"booster", "gblinear"},
                        Arg{"updater", "gpu_coord_descent"}});
    learner->InitModel();
    learner->UpdateOneIter(0, p_dmat.get());
    ASSERT_EQ(learner->GetLearnerTrainParameter().gpu_id, 0);
    ASSERT_EQ(learner->GetLearnerTrainParameter().n_gpus, 1);
  }
  {
    std::unique_ptr<Learner> learner {Learner::Create(mat)};
    learner->Configure({Arg{"tree_method", "gpu_exact"}});
    learner->InitModel();
    learner->UpdateOneIter(0, p_dmat.get());
    ASSERT_EQ(learner->GetLearnerTrainParameter().gpu_id, 0);
    ASSERT_EQ(learner->GetLearnerTrainParameter().n_gpus, 1);
  }
  {
    std::unique_ptr<Learner> learner {Learner::Create(mat)};
    learner->Configure({Arg{"tree_method", "gpu_hist"}});
    learner->InitModel();
    learner->UpdateOneIter(0, p_dmat.get());
    ASSERT_EQ(learner->GetLearnerTrainParameter().gpu_id, 0);
    ASSERT_EQ(learner->GetLearnerTrainParameter().n_gpus, 1);
  }
  {
    // with CPU algorithm
    std::unique_ptr<Learner> learner {Learner::Create(mat)};
    learner->Configure({Arg{"tree_method", "hist"}});
    learner->InitModel();
    learner->UpdateOneIter(0, p_dmat.get());
    ASSERT_EQ(learner->GetLearnerTrainParameter().gpu_id, 0);
    ASSERT_EQ(learner->GetLearnerTrainParameter().n_gpus, 0);
  }
  {
    // with CPU algorithm, but `n_gpus` takes priority
    std::unique_ptr<Learner> learner {Learner::Create(mat)};
    learner->Configure({Arg{"tree_method", "hist"},
                        Arg{"n_gpus", "1"}});
    learner->InitModel();
    learner->UpdateOneIter(0, p_dmat.get());
    ASSERT_EQ(learner->GetLearnerTrainParameter().gpu_id, 0);
    ASSERT_EQ(learner->GetLearnerTrainParameter().n_gpus, 1);
  }
  {
    // With CPU algorithm but GPU Predictor, this is to simulate when
    // XGBoost is only used for prediction, so tree method is not
    // specified.
    std::unique_ptr<Learner> learner {Learner::Create(mat)};
    learner->Configure({Arg{"tree_method", "hist"},
                        Arg{"predictor", "gpu_predictor"}});
    learner->InitModel();
    learner->UpdateOneIter(0, p_dmat.get());
    ASSERT_EQ(learner->GetLearnerTrainParameter().gpu_id, 0);
    ASSERT_EQ(learner->GetLearnerTrainParameter().n_gpus, 1);
  }

  delete pp_dmat;
}

#endif // XGBOOST_USE_CUDA

}  // namespace xgboost
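For context, the behaviour the GPUConfiguration test pins down amounts to a small fallback rule during configuration: if the user never asked for anything GPU-related, `n_gpus` stays 0 and everything runs on the CPU; if a GPU algorithm, updater, or predictor is requested without an explicit `n_gpus`, a single device is assumed; an explicit `n_gpus` always wins. A hedged sketch of that rule, reusing the hypothetical `LearnerTrainParam` above (the function name and signature are illustrative, not the actual code in this commit):

// Illustrative sketch of the fallback implied by the tests; not the actual
// implementation in this commit.
void ConfigureGpuFallback(bool gpu_requested,  // e.g. gpu_hist, gpu_exact,
                                               // gpu_coord_descent, gpu_predictor
                          LearnerTrainParam* tparam) {
  if (gpu_requested && tparam->n_gpus == 0) {
    // A GPU component was requested but n_gpus was left unspecified:
    // fall back to a single device instead of silently staying on the CPU.
    tparam->n_gpus = 1;
  }
  // Otherwise the user's explicit n_gpus (including 0 and -1) is respected,
  // so purely CPU configurations never touch the GPU.
}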