Run training with empty DMatrix. (#4990)

This makes GPU Hist robust in distributed environments, as some workers might not
be associated with any data during either training or evaluation.
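
The requirement underlying this is that a worker holding no rows must still take part in every collective operation, otherwise the workers that do hold data block forever waiting for it. Here is a minimal sketch of that idea using rabit's `Allreduce` (the collective layer xgboost builds on); `SumGradients` and its argument are hypothetical, not code from this change:

```cpp
#include <rabit/rabit.h>
#include <vector>

// Even a worker with zero rows computes a (zero) partial sum and enters the
// allreduce, so workers that do hold data are never left waiting.
double SumGradients(const std::vector<double>& local_grad) {
  double partial = 0.0;
  for (double g : local_grad) {
    partial += g;  // loop body never runs on an empty worker
  }
  rabit::Allreduce<rabit::op::Sum>(&partial, 1);  // all ranks must call this
  return partial;
}
```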

* Disable the rabit mock test for now; see #5012.

* Disable the dask-cudf prediction test for now; see #5003.

* Launch dask jobs on all workers, even though some might not have any data.
* Check for 0 rows in elementwise evaluation metrics; a sketch of the zero-row
guard follows this list.

   Using AUC and AUC-PR still throws an error.  See #4663 for a robust fix.

* Add tests for edge cases.
* Add a `LaunchKernel` wrapper that handles zero-sized grids (sketched after the
GPU predictor diff at the end).
* Move some parts of the allreducer into a .cu file.
* Don't validate feature names when the booster is empty.

* Sync the number of columns in DMatrix; a sketch follows this list.

  num_feature is required to be the same across all workers in data-split
  mode.

* Booster filtering in the dask interface now by default syncs any non-empty
booster, instead of always taking the one from rank 0.

* Fix Jenkins' GPU tests.

* Install dask-cuda from source in Jenkins' tests.

  Now all tests are actually running.

* Restore GPU Hist tree synchronization test.

* Check the UUID of running devices; a sketch follows this list.

  The check is only performed on CUDA 10 and later, as 9.x doesn't have the UUID field.

* Fix CMake policy and project variables.

  Use xgboost_SOURCE_DIR uniformly and add a policy setting for CMake >= 3.13.

* Fix copying data to the CPU.

* Fix a race condition in the CPU predictor (see the first diff below).

* Fix duplicated DMatrix construction.

* Don't download an extra copy of NCCL in the CI script.
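
For the zero-row check in elementwise metrics mentioned above, a hedged sketch of the shape of the guard; `EvalRMSE` is a simplified stand-in, not the actual metric code:

```cpp
#include <rabit/rabit.h>
#include <cmath>
#include <cstddef>
#include <stdexcept>
#include <vector>

// Sketch: reduce both the error sum and the row count across workers, and
// only divide once the *global* row count is known to be non-zero.  A worker
// with an empty DMatrix simply contributes {0, 0}.
double EvalRMSE(const std::vector<float>& preds, const std::vector<float>& labels) {
  double dat[2] = {0.0, 0.0};  // {sum of squared errors, number of rows}
  for (std::size_t i = 0; i < preds.size(); ++i) {
    double diff = preds[i] - labels[i];
    dat[0] += diff * diff;
    dat[1] += 1.0;
  }
  rabit::Allreduce<rabit::op::Sum>(dat, 2);
  if (dat[1] <= 0.0) {
    throw std::runtime_error("Dataset is empty on every worker.");
  }
  return std::sqrt(dat[0] / dat[1]);
}
```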
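The num_feature sync referenced above can be as small as a single max-allreduce; a sketch, with `SyncNumFeatures` being a hypothetical helper:

```cpp
#include <rabit/rabit.h>

// In data-split mode every worker must agree on the number of features, and
// a worker holding no rows would otherwise report 0 columns.  Taking the max
// across ranks handles both concerns.
unsigned SyncNumFeatures(unsigned local_num_feature) {
  rabit::Allreduce<rabit::op::Max>(&local_num_feature, 1);
  return local_num_feature;
}
```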
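The device UUID check can only be expressed on CUDA 10+, where `cudaDeviceProp` gained the `uuid` field. A sketch of the version-gated comparison; `SameDevice` is illustrative, not the code from this commit:

```cpp
#include <cuda_runtime.h>
#include <cstring>

// cudaDeviceProp::uuid only exists from CUDA 10 onwards, so the comparison
// is compiled out entirely when building against CUDA 9.x.
bool SameDevice(int device_a, int device_b) {
#if CUDART_VERSION >= 10000
  cudaDeviceProp prop_a, prop_b;
  cudaGetDeviceProperties(&prop_a, device_a);
  cudaGetDeviceProperties(&prop_b, device_b);
  return std::memcmp(prop_a.uuid.bytes, prop_b.uuid.bytes,
                     sizeof(prop_a.uuid.bytes)) == 0;
#else
  return device_a == device_b;  // CUDA 9.x: fall back to ordinal comparison
#endif
}
```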
Commit 7663de956c by Jiaming Yuan (2019-11-06 16:13:13 +08:00), committed via GitHub, parent 807a244517.
44 changed files with 603 additions and 272 deletions.


@@ -60,6 +60,9 @@ class CPUPredictor : public Predictor {
       constexpr int kUnroll = 8;
       const auto nsize = static_cast<bst_omp_uint>(batch.Size());
       const bst_omp_uint rest = nsize % kUnroll;
+      // Pull to host before entering omp block, as this is not thread safe.
+      batch.data.HostVector();
+      batch.offset.HostVector();
 #pragma omp parallel for schedule(static)
       for (bst_omp_uint i = 0; i < nsize - rest; i += kUnroll) {
         const int tid = omp_get_thread_num();


@@ -225,12 +225,12 @@ class GPUPredictor : public xgboost::Predictor {
                       HostDeviceVector<bst_float>* predictions,
                       size_t batch_offset) {
     dh::safe_cuda(cudaSetDevice(device_));
-    const int BLOCK_THREADS = 128;
+    const uint32_t BLOCK_THREADS = 128;
     size_t num_rows = batch.Size();
-    const int GRID_SIZE = static_cast<int>(common::DivRoundUp(num_rows, BLOCK_THREADS));
+    auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
-    int shared_memory_bytes = static_cast<int>
-      (sizeof(float) * num_features * BLOCK_THREADS);
+    auto shared_memory_bytes =
+        static_cast<size_t>(sizeof(float) * num_features * BLOCK_THREADS);
     bool use_shared = true;
     if (shared_memory_bytes > max_shared_memory_bytes_) {
       shared_memory_bytes = 0;
@@ -238,11 +238,12 @@ class GPUPredictor : public xgboost::Predictor {
     }
     size_t entry_start = 0;
-    PredictKernel<BLOCK_THREADS><<<GRID_SIZE, BLOCK_THREADS, shared_memory_bytes>>>
-        (dh::ToSpan(nodes_), predictions->DeviceSpan().subspan(batch_offset),
-         dh::ToSpan(tree_segments_), dh::ToSpan(tree_group_), batch.offset.DeviceSpan(),
-         batch.data.DeviceSpan(), this->tree_begin_, this->tree_end_, num_features, num_rows,
-         entry_start, use_shared, this->num_group_);
+    dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
+        PredictKernel<BLOCK_THREADS>,
+        dh::ToSpan(nodes_), predictions->DeviceSpan().subspan(batch_offset),
+        dh::ToSpan(tree_segments_), dh::ToSpan(tree_group_), batch.offset.DeviceSpan(),
+        batch.data.DeviceSpan(), this->tree_begin_, this->tree_end_, num_features, num_rows,
+        entry_start, use_shared, this->num_group_);
   }
   void InitModel(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) {
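
The `dh::LaunchKernel` wrapper used in the new code above is what makes the zero-row path safe: launching a kernel with an empty grid is a CUDA error, so the launch has to be skipped outright. A sketch of what such a wrapper can look like (not xgboost's exact implementation):

```cuda
#include <cstddef>
#include <cstdint>

// Sketch of a brace-initialized launcher matching the call site above:
// LaunchKernel {grid, block, shmem} (kernel, args...).  It silently skips
// the launch when the grid is empty, e.g. when a worker has no rows.
struct LaunchKernel {
  uint32_t grid_;
  uint32_t block_;
  size_t shmem_;

  template <typename Kernel, typename... Args>
  void operator()(Kernel kernel, Args... args) {
    if (grid_ == 0 || block_ == 0) {
      return;  // zero-sized launch: nothing to compute on this worker
    }
    kernel<<<grid_, block_, shmem_>>>(args...);
  }
};
```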