Initial support for external memory in gpu_predictor (#4284)

Authored by Rong Ou on 2019-05-02 18:01:27 -07:00
Committed by Rory Mitchell
parent 54980b8959
commit feb6ae3e18
11 changed files with 73 additions and 15 deletions


@@ -246,6 +246,7 @@ class GPUPredictor : public xgboost::Predictor {
     void PredictInternal
     (const SparsePage& batch, const MetaInfo& info,
      HostDeviceVector<bst_float>* predictions,
+     const size_t batch_offset,
      const gbm::GBTreeModel& model,
      const thrust::host_vector<size_t>& h_tree_segments,
      const thrust::host_vector<DevicePredictionNode>& h_nodes,
@@ -284,8 +285,8 @@ class GPUPredictor : public xgboost::Predictor {
           data_distr.Devices().Index(device_));
       PredictKernel<BLOCK_THREADS><<<GRID_SIZE, BLOCK_THREADS, shared_memory_bytes>>>
-          (dh::ToSpan(nodes_), predictions->DeviceSpan(device_), dh::ToSpan(tree_segments_),
-           dh::ToSpan(tree_group_), batch.offset.DeviceSpan(device_),
+          (dh::ToSpan(nodes_), predictions->DeviceSpan(device_).subspan(batch_offset),
+           dh::ToSpan(tree_segments_), dh::ToSpan(tree_group_), batch.offset.DeviceSpan(device_),
            batch.data.DeviceSpan(device_), tree_begin, tree_end, info.num_col_,
            num_rows, entry_start, use_shared, model.param.num_output_group);
     }
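
The key change in this hunk is that the kernel no longer writes from the start of the device prediction vector but through predictions->DeviceSpan(device_).subspan(batch_offset). A minimal host-side sketch of that idea follows; it is not xgboost code: std::span stands in for the device span, a plain loop stands in for the PredictKernel launch, and the batch sizes are made up.

// Sketch only (requires C++20 for <span>): the "kernel" always writes from
// index 0 of whatever view it receives, and the caller shifts that view per batch.
#include <cstddef>
#include <iostream>
#include <span>
#include <vector>

void PredictBatch(std::span<float> out, std::size_t num_rows, std::size_t num_group) {
  for (std::size_t i = 0; i < num_rows * num_group; ++i) {
    out[i] = 0.5f;  // placeholder for the real tree-walk prediction
  }
}

int main() {
  const std::size_t num_group = 2;                        // e.g. two output groups
  const std::vector<std::size_t> batch_rows = {3, 4, 2};  // rows per hypothetical SparsePage

  std::size_t total_rows = 0;
  for (std::size_t r : batch_rows) total_rows += r;
  std::vector<float> out_preds(total_rows * num_group, 0.0f);

  std::size_t batch_offset = 0;
  for (std::size_t rows : batch_rows) {
    // subspan(batch_offset) shifts the view, so this batch cannot overwrite
    // predictions produced by earlier batches.
    PredictBatch(std::span<float>(out_preds).subspan(batch_offset), rows, num_group);
    batch_offset += rows * num_group;
  }
  std::cout << "wrote " << batch_offset << " of " << out_preds.size() << " entries\n";
  return 0;
}

Because each batch receives a view already shifted to its own offset, the kernel itself never needs to know which batch it is processing.
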
@@ -324,18 +325,19 @@ class GPUPredictor : public xgboost::Predictor {
     }
     size_t i_batch = 0;
-    for (const auto &batch : dmat->GetRowBatches()) {
-      CHECK_EQ(i_batch, 0) << "External memory not supported";
+    size_t batch_offset = 0;
+    for (auto &batch : dmat->GetRowBatches()) {
+      CHECK(i_batch == 0 || devices_.Size() == 1) << "External memory not supported for multi-GPU";
       // out_preds have been sharded and resized in InitOutPredictions()
       batch.offset.Shard(GPUDistribution::Overlap(devices_, 1));
       std::vector<size_t> device_offsets;
       DeviceOffsets(batch.offset, &device_offsets);
-      batch.data.Shard(GPUDistribution::Explicit(devices_, device_offsets));
+      batch.data.Reshard(GPUDistribution::Explicit(devices_, device_offsets));
       dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
-        shard.PredictInternal(batch, dmat->Info(), out_preds, model,
+        shard.PredictInternal(batch, dmat->Info(), out_preds, batch_offset, model,
                               h_tree_segments, h_nodes, tree_begin, tree_end);
       });
+      batch_offset += batch.Size() * model.param.num_output_group;
       i_batch++;
     }
     monitor_.StopCuda("DevicePredictInternal");
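
For context on the bookkeeping above: the offset advances by batch.Size() * model.param.num_output_group because offsets count prediction entries rather than rows, and with multi-class output every row contributes one slot per output group. The relaxed CHECK only permits multiple batches on a single device, so multi-GPU external memory remains unsupported. A small sketch of the arithmetic with made-up batch sizes:

// Sketch of the offset bookkeeping in the loop above; the batch sizes and
// group count are hypothetical, not taken from the commit.
#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  const std::size_t num_output_group = 3;                          // e.g. 3-class softmax
  const std::vector<std::size_t> batch_sizes = {1000, 1000, 500};  // rows per SparsePage

  std::size_t batch_offset = 0;
  for (std::size_t rows : batch_sizes) {
    std::cout << "batch starts at entry " << batch_offset << "\n";  // 0, 3000, 6000
    batch_offset += rows * num_output_group;
  }
  // After the last batch the offset equals the total number of prediction
  // entries: the batches tile the output buffer with no gaps or overlap.
  assert(batch_offset == (1000 + 1000 + 500) * num_output_group);
  return 0;
}
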