Run training with empty DMatrix. (#4990)

This makes GPU Hist robust in distributed environments, where some workers
might not be associated with any data during either training or evaluation.

* Disable rabit mock test for now: see #5012.

* Disable the dask-cudf prediction test for now: see #5003.

* Launch the dask job on all workers, even those that might not have any data.
* Check for 0 rows in elementwise evaluation metrics (a sketch follows below).

   Using AUC and AUC-PR still throws an error; see #4663 for a robust fix.
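   A minimal sketch of the zero-row guard, assuming the usual elementwise-metric
   pattern of allreducing a (sum, weight) pair over rabit; the function below is
   illustrative, not the exact XGBoost code:

   ```cpp
   #include <rabit/rabit.h>

   #include <cmath>
   #include <cstddef>
   #include <vector>

   // Worker-local reduction: a worker holding an empty DMatrix contributes
   // (0, 0), so the global result stays valid as long as any worker has data.
   double EvalRMSESketch(std::vector<float> const& preds,
                         std::vector<float> const& labels) {
     double dat[2] {0.0, 0.0};  // {sum of squared errors, sum of weights}
     for (std::size_t i = 0; i < preds.size(); ++i) {  // never runs on 0 rows
       double diff = preds[i] - labels[i];
       dat[0] += diff * diff;
       dat[1] += 1.0;
     }
     rabit::Allreduce<rabit::op::Sum>(dat, 2);  // sums across all workers
     return std::sqrt(dat[0] / dat[1]);
   }
   ```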

* Add tests for edge cases.
* Add `LaunchKernel` wrapper that handles zero-sized grids (see the sketch after this list).
* Move some parts of the allreducer into a `.cu` file.
* Don't validate feature names when the booster is empty.
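
As a rough sketch (assuming only the call-site semantics visible in the diffs
below; the real `dh::LaunchKernel` may differ in details), the wrapper simply
skips the launch when the grid or block is empty:

```cuda
#include <cuda_runtime.h>

#include <cstddef>
#include <cstdint>

namespace dh {
struct LaunchKernel {
  dim3 grids_, blocks_;
  std::size_t shmem_;
  cudaStream_t stream_;

  LaunchKernel(uint32_t grids, uint32_t blocks, std::size_t shmem = 0,
               cudaStream_t stream = nullptr)
      : grids_{grids, 1, 1}, blocks_{blocks, 1, 1}, shmem_{shmem}, stream_{stream} {}
  LaunchKernel(dim3 grids, dim3 blocks, std::size_t shmem = 0,
               cudaStream_t stream = nullptr)
      : grids_{grids}, blocks_{blocks}, shmem_{shmem}, stream_{stream} {}

  template <typename Kernel, typename... Args>
  void operator()(Kernel kernel, Args... args) {
    // A worker without data ends up with a zero-sized grid; launching
    // <<<0, ...>>> is a CUDA error, so skip the kernel entirely.
    if (grids_.x * grids_.y * grids_.z == 0 ||
        blocks_.x * blocks_.y * blocks_.z == 0) {
      return;
    }
    kernel<<<grids_, blocks_, shmem_, stream_>>>(args...);  // NOLINT
  }
};
}  // namespace dh
```

Call sites then pass the kernel as the first argument instead of using the
`<<<grids, blocks>>>` syntax directly, as the diffs below show.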

* Sync the number of columns in DMatrix.

  `num_feature` is required to be the same across all workers in data split
  mode, so the column count is synced explicitly (a sketch follows).
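
  A minimal sketch of that sync, assuming a max-allreduce over
  `MetaInfo::num_col_` (`SyncFeatureNum` is a hypothetical name):

  ```cpp
  #include <rabit/rabit.h>
  #include <xgboost/data.h>

  #include <cstdint>

  // Hypothetical helper: make every worker agree on the widest column count,
  // so a worker holding an empty DMatrix (0 columns) still reports the same
  // num_feature as its peers.
  void SyncFeatureNum(xgboost::MetaInfo* info) {
    uint64_t num_col = info->num_col_;
    rabit::Allreduce<rabit::op::Max>(&num_col, 1);
    info->num_col_ = num_col;
  }
  ```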

* Filtering in the dask interface now by default syncs any booster that is not
empty, instead of always using the one from rank 0.

* Fix Jenkins' GPU tests.

* Install dask-cuda from source in Jenkins' tests.

  Now all tests are actually running.

* Restore GPU Hist tree synchronization test.

* Check UUID of running devices.

  The check is only performed on CUDA >= 10.0, as 9.x doesn't have the UUID
  field (a sketch follows).
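
  A minimal sketch of reading the UUID under that version guard (`DeviceUUID`
  is a hypothetical helper):

  ```cuda
  #include <cuda_runtime.h>

  #include <string>

  std::string DeviceUUID(int device) {
  #if defined(CUDART_VERSION) && CUDART_VERSION >= 10000
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, device);
    // cudaDeviceProp::uuid is a cudaUUID_t holding 16 raw bytes; the field
    // was added in CUDA 10.0.
    return std::string(prop.uuid.bytes, sizeof(prop.uuid.bytes));
  #else
    return std::string{};  // CUDA 9.x has no UUID field
  #endif
  }
  ```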

* Fix CMake policy and project variables.

  Use xgboost_SOURCE_DIR uniformly and add a policy setting for CMake >= 3.13.

* Fix copying data to CPU.

* Fix a race condition in the CPU predictor.

* Fix duplicated DMatrix construction.

* Don't download an extra NCCL in the CI script.
Author:    Jiaming Yuan
Committer: GitHub
Date:      2019-11-06 16:13:13 +08:00
Commit:    7663de956c (parent: 807a244517)

44 changed files with 603 additions and 272 deletions

diff --git a/src/tree/constraints.cu b/src/tree/constraints.cu

@@ -165,10 +165,11 @@ __global__ void ClearBuffersKernel(
 void FeatureInteractionConstraint::ClearBuffers() {
   CHECK_EQ(output_buffer_bits_.Size(), input_buffer_bits_.Size());
   CHECK_LE(feature_buffer_.Size(), output_buffer_bits_.Size());
-  int constexpr kBlockThreads = 256;
-  const int n_grids = static_cast<int>(
+  uint32_t constexpr kBlockThreads = 256;
+  auto const n_grids = static_cast<uint32_t>(
       common::DivRoundUp(input_buffer_bits_.Size(), kBlockThreads));
-  ClearBuffersKernel<<<n_grids, kBlockThreads>>>(
+  dh::LaunchKernel {n_grids, kBlockThreads} (
+      ClearBuffersKernel,
       output_buffer_bits_, input_buffer_bits_);
 }
@@ -222,12 +223,14 @@ common::Span<int32_t> FeatureInteractionConstraint::Query(
   LBitField64 node_constraints = s_node_constraints_[nid];

   CHECK_EQ(input_buffer_bits_.Size(), output_buffer_bits_.Size());
-  int constexpr kBlockThreads = 256;
-  const int n_grids = static_cast<int>(
+  uint32_t constexpr kBlockThreads = 256;
+  auto n_grids = static_cast<uint32_t>(
       common::DivRoundUp(output_buffer_bits_.Size(), kBlockThreads));
-  SetInputBufferKernel<<<n_grids, kBlockThreads>>>(feature_list, input_buffer_bits_);
-  QueryFeatureListKernel<<<n_grids, kBlockThreads>>>(
+  dh::LaunchKernel {n_grids, kBlockThreads} (
+      SetInputBufferKernel,
+      feature_list, input_buffer_bits_);
+  dh::LaunchKernel {n_grids, kBlockThreads} (
+      QueryFeatureListKernel,
       node_constraints, input_buffer_bits_, output_buffer_bits_);

   thrust::counting_iterator<int32_t> begin(0);
@@ -327,20 +330,20 @@ void FeatureInteractionConstraint::Split(
   dim3 const block3(16, 64, 1);
   dim3 const grid3(common::DivRoundUp(n_sets_, 16),
                    common::DivRoundUp(s_fconstraints_.size(), 64));
-  RestoreFeatureListFromSetsKernel<<<grid3, block3>>>
-      (feature_buffer_,
-       feature_id,
-       s_fconstraints_,
-       s_fconstraints_ptr_,
-       s_sets_,
-       s_sets_ptr_);
+  dh::LaunchKernel {grid3, block3} (
+      RestoreFeatureListFromSetsKernel,
+      feature_buffer_, feature_id,
+      s_fconstraints_, s_fconstraints_ptr_,
+      s_sets_, s_sets_ptr_);

-  int constexpr kBlockThreads = 256;
-  const int n_grids = static_cast<int>(common::DivRoundUp(node.Size(), kBlockThreads));
-  InteractionConstraintSplitKernel<<<n_grids, kBlockThreads>>>
-      (feature_buffer_,
-       feature_id,
-       node, left, right);
+  uint32_t constexpr kBlockThreads = 256;
+  auto n_grids = static_cast<uint32_t>(common::DivRoundUp(node.Size(), kBlockThreads));
+  dh::LaunchKernel {n_grids, kBlockThreads} (
+      InteractionConstraintSplitKernel,
+      feature_buffer_,
+      feature_id,
+      node, left, right);
 }
 }  // namespace xgboost

diff --git a/src/tree/updater_gpu_hist.cu b/src/tree/updater_gpu_hist.cu

@@ -603,12 +603,12 @@ struct GPUHistMakerDevice {
     }
     // One block for each feature
-    int constexpr kBlockThreads = 256;
-    EvaluateSplitKernel<kBlockThreads, GradientSumT>
-        <<<uint32_t(d_feature_set.size()), kBlockThreads, 0, streams[i]>>>(
-            hist.GetNodeHistogram(nidx), d_feature_set, node, page->matrix,
-            gpu_param, d_split_candidates, node_value_constraints[nidx],
-            monotone_constraints);
+    uint32_t constexpr kBlockThreads = 256;
+    dh::LaunchKernel {uint32_t(d_feature_set.size()), kBlockThreads, 0, streams[i]} (
+        EvaluateSplitKernel<kBlockThreads, GradientSumT>,
+        hist.GetNodeHistogram(nidx), d_feature_set, node, page->matrix,
+        gpu_param, d_split_candidates, node_value_constraints[nidx],
+        monotone_constraints);

     // Reduce over features to find best feature
     auto d_cub_memory =
@@ -638,14 +638,12 @@ struct GPUHistMakerDevice {
         use_shared_memory_histograms
             ? sizeof(GradientSumT) * page->matrix.BinCount()
             : 0;
-    const int items_per_thread = 8;
-    const int block_threads = 256;
-    const int grid_size = static_cast<int>(
+    uint32_t items_per_thread = 8;
+    uint32_t block_threads = 256;
+    auto grid_size = static_cast<uint32_t>(
        common::DivRoundUp(n_elements, items_per_thread * block_threads));
-    if (grid_size <= 0) {
-      return;
-    }
-    SharedMemHistKernel<<<grid_size, block_threads, smem_size>>>(
+    dh::LaunchKernel {grid_size, block_threads, smem_size} (
+        SharedMemHistKernel<GradientSumT>,
        page->matrix, d_ridx, d_node_hist.data(), d_gpair, n_elements,
        use_shared_memory_histograms);
   }
@@ -886,6 +884,7 @@ struct GPUHistMakerDevice {
     monitor.StartCuda("InitRoot");
     this->InitRoot(p_tree, gpair_all, reducer, p_fmat->Info().num_col_);
     monitor.StopCuda("InitRoot");
+
     auto timestamp = qexpand->size();
     auto num_leaves = 1;
@@ -895,7 +894,6 @@ struct GPUHistMakerDevice {
       if (!candidate.IsValid(param, num_leaves)) {
         continue;
       }
-
       this->ApplySplit(candidate, p_tree);
       num_leaves++;
@@ -996,18 +994,22 @@ class GPUHistMakerSpecialised {
     try {
       for (xgboost::RegTree* tree : trees) {
         this->UpdateTree(gpair, dmat, tree);
+
+        if (hist_maker_param_.debug_synchronize) {
+          this->CheckTreesSynchronized(tree);
+        }
       }
       dh::safe_cuda(cudaGetLastError());
     } catch (const std::exception& e) {
       LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
     }

     param_.learning_rate = lr;
     monitor_.StopCuda("Update");
   }

   void InitDataOnce(DMatrix* dmat) {
     info_ = &dmat->Info();
     reducer_.Init({device_});
     // Synchronise the column sampling seed
@@ -1048,20 +1050,18 @@ class GPUHistMakerSpecialised {
   }

   // Only call this method for testing
-  void CheckTreesSynchronized(const std::vector<RegTree>& local_trees) const {
+  void CheckTreesSynchronized(RegTree* local_tree) const {
     std::string s_model;
     common::MemoryBufferStream fs(&s_model);
     int rank = rabit::GetRank();
     if (rank == 0) {
-      local_trees.front().SaveModel(&fs);
+      local_tree->SaveModel(&fs);
     }
     fs.Seek(0);
     rabit::Broadcast(&s_model, 0);
-    RegTree reference_tree{};
+    RegTree reference_tree {};  // rank 0 tree
     reference_tree.LoadModel(&fs);
-    for (const auto& tree : local_trees) {
-      CHECK(tree == reference_tree);
-    }
+    CHECK(*local_tree == reference_tree);
   }

   void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,