Make HostDeviceVector single gpu only (#4773)

* Make HostDeviceVector single gpu only
Author: Rong Ou
Date: 2019-08-25 14:51:13 -07:00
Committed by: Rory Mitchell
Parent: 41227d1933
Commit: 38ab79f889
54 changed files with 641 additions and 1621 deletions
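
The patch removes HostDeviceVector's multi-GPU sharding interface (Shard, Reshard, GPUDistribution, per-device DeviceSpan/DeviceSize) in favour of a single-device one (SetDevice plus argument-free DeviceSpan/DeviceSize), and collapses the predictor's vector of DeviceShards into a single shard_. A minimal sketch of the new calling convention, using only methods that appear in this diff; FillOnDevice is an illustrative helper, not XGBoost code:

#include <cstddef>
#include "xgboost/host_device_vector.h"

// After this change a HostDeviceVector is resident on at most one GPU.
void FillOnDevice(xgboost::HostDeviceVector<float>* preds, int device, std::size_t n) {
  preds->SetDevice(device);  // associate the vector with a single device id
  preds->Resize(n);
  preds->Fill(0.5f);         // previously callers would Shard() across a GPUSet first
}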

src/predictor/gpu_predictor.cu

@@ -20,12 +20,6 @@ namespace predictor {
 DMLC_REGISTRY_FILE_TAG(gpu_predictor);
-template <typename IterT>
-void IncrementOffset(IterT begin_itr, IterT end_itr, size_t amount) {
-  thrust::transform(begin_itr, end_itr, begin_itr,
-                    [=] __device__(size_t elem) { return elem + amount; });
-}
 /**
  * \struct DevicePredictionNode
  *
@@ -44,7 +38,7 @@ struct DevicePredictionNode {
   int fidx;
   int left_child_idx;
   int right_child_idx;
-  NodeValue val;
+  NodeValue val{};
   DevicePredictionNode(const RegTree::Node& n) {  // NOLINT
     static_assert(sizeof(DevicePredictionNode) == 16, "Size is not 16 bytes");
@@ -200,58 +194,14 @@ __global__ void PredictKernel(common::Span<const DevicePredictionNode> d_nodes,
 }
 class GPUPredictor : public xgboost::Predictor {
  protected:
   struct DevicePredictionCacheEntry {
     std::shared_ptr<DMatrix> data;
     HostDeviceVector<bst_float> predictions;
   };
  private:
-  void DeviceOffsets(const HostDeviceVector<size_t>& data,
-                     size_t total_size,
-                     std::vector<size_t>* out_offsets) {
-    auto& offsets = *out_offsets;
-    offsets.resize(devices_.Size() + 1);
-    offsets[0] = 0;
-#pragma omp parallel for schedule(static, 1) if (devices_.Size() > 1)
-    for (int shard = 0; shard < devices_.Size(); ++shard) {
-      int device = devices_.DeviceId(shard);
-      auto data_span = data.DeviceSpan(device);
-      dh::safe_cuda(cudaSetDevice(device));
-      if (data_span.size() == 0) {
-        offsets[shard + 1] = total_size;
-      } else {
-        // copy the last element from every shard
-        dh::safe_cuda(cudaMemcpy(&offsets.at(shard + 1),
-                                 &data_span[data_span.size()-1],
-                                 sizeof(size_t), cudaMemcpyDeviceToHost));
-      }
-    }
-  }
-  // This function populates the explicit offsets that can be used to create a window into the
-  // underlying host vector. The window starts from the `batch_offset` and has a size of
-  // `batch_size`, and is sharded across all the devices. Each shard is granular depending on
-  // the number of output classes `n_classes`.
-  void PredictionDeviceOffsets(size_t total_size, size_t batch_offset, size_t batch_size,
-                               int n_classes, std::vector<size_t>* out_offsets) {
-    auto& offsets = *out_offsets;
-    size_t n_shards = devices_.Size();
-    offsets.resize(n_shards + 2);
-    size_t rows_per_shard = common::DivRoundUp(batch_size, n_shards);
-    for (size_t shard = 0; shard < devices_.Size(); ++shard) {
-      size_t n_rows = std::min(batch_size, shard * rows_per_shard);
-      offsets[shard] = batch_offset + n_rows * n_classes;
-    }
-    offsets[n_shards] = batch_offset + batch_size * n_classes;
-    offsets[n_shards + 1] = total_size;
-  }
   struct DeviceShard {
     DeviceShard() : device_{-1} {}
     ~DeviceShard() {
-      dh::safe_cuda(cudaSetDevice(device_));
+      if (device_ >= 0) {
+        dh::safe_cuda(cudaSetDevice(device_));
+      }
     }
     void Init(int device) {
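
The deleted PredictionDeviceOffsets above computed, for each shard, where its window into the flat prediction vector begins: batch_offset plus its share of rows times n_classes, followed by the end of the batch window and the total size. A standalone restatement of that arithmetic with plain std::vector, for illustration only (PredictionOffsets is not an XGBoost function):

#include <algorithm>
#include <cstddef>
#include <vector>

std::vector<std::size_t> PredictionOffsets(std::size_t total_size, std::size_t batch_offset,
                                           std::size_t batch_size, std::size_t n_classes,
                                           std::size_t n_shards) {
  std::vector<std::size_t> offsets(n_shards + 2);
  std::size_t rows_per_shard = (batch_size + n_shards - 1) / n_shards;  // DivRoundUp
  for (std::size_t shard = 0; shard < n_shards; ++shard) {
    std::size_t n_rows = std::min(batch_size, shard * rows_per_shard);
    offsets[shard] = batch_offset + n_rows * n_classes;       // start of this shard's window
  }
  offsets[n_shards] = batch_offset + batch_size * n_classes;  // end of the batch window
  offsets[n_shards + 1] = total_size;
  return offsets;
}

For example, batch_size = 10, n_classes = 2, batch_offset = 0 and two shards yield {0, 10, 20, total_size}.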
@@ -284,10 +234,9 @@ class GPUPredictor : public xgboost::Predictor {
     void PredictInternal
     (const SparsePage& batch, size_t num_features,
      HostDeviceVector<bst_float>* predictions) {
-      if (predictions->DeviceSize(device_) == 0) { return; }
       dh::safe_cuda(cudaSetDevice(device_));
       const int BLOCK_THREADS = 128;
-      size_t num_rows = batch.offset.DeviceSize(device_) - 1;
+      size_t num_rows = batch.offset.DeviceSize() - 1;
       const int GRID_SIZE = static_cast<int>(common::DivRoundUp(num_rows, BLOCK_THREADS));
       int shared_memory_bytes = static_cast<int>
@@ -297,14 +246,12 @@ class GPUPredictor : public xgboost::Predictor {
         shared_memory_bytes = 0;
         use_shared = false;
       }
-      const auto& data_distr = batch.data.Distribution();
-      size_t entry_start = data_distr.ShardStart(batch.data.Size(),
-                                                 data_distr.Devices().Index(device_));
+      size_t entry_start = 0;
       PredictKernel<BLOCK_THREADS><<<GRID_SIZE, BLOCK_THREADS, shared_memory_bytes>>>
-          (dh::ToSpan(nodes_), predictions->DeviceSpan(device_), dh::ToSpan(tree_segments_),
-           dh::ToSpan(tree_group_), batch.offset.DeviceSpan(device_),
-           batch.data.DeviceSpan(device_), this->tree_begin_, this->tree_end_, num_features,
+          (dh::ToSpan(nodes_), predictions->DeviceSpan(), dh::ToSpan(tree_segments_),
+           dh::ToSpan(tree_group_), batch.offset.DeviceSpan(),
+           batch.data.DeviceSpan(), this->tree_begin_, this->tree_end_, num_features,
            num_rows, entry_start, use_shared, this->num_group_);
     }
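
PredictInternal derives the row count from the CSR row-pointer array, which always has one more entry than there are rows, and rounds the grid size up so every row gets a thread. A small host-only illustration of both facts:

#include <cstddef>
#include <vector>

int main() {
  // CSR row pointers for 3 rows holding 2, 0 and 4 entries: size() - 1 == number of rows.
  std::vector<std::size_t> offset = {0, 2, 2, 6};
  std::size_t num_rows = offset.size() - 1;  // 3

  const int kBlockThreads = 128;             // matches BLOCK_THREADS above
  int grid_size = static_cast<int>((num_rows + kBlockThreads - 1) / kBlockThreads);  // DivRoundUp -> 1
  return (num_rows == 3 && grid_size == 1) ? 0 : 1;
}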
@@ -322,7 +269,7 @@ class GPUPredictor : public xgboost::Predictor {
   void InitModel(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) {
     CHECK_EQ(model.param.size_leaf_vector, 0);
     // Copy decision trees to device
-    thrust::host_vector<size_t> h_tree_segments;
+    thrust::host_vector<size_t> h_tree_segments{};
     h_tree_segments.reserve((tree_end - tree_begin) + 1);
     size_t sum = 0;
     h_tree_segments.push_back(sum);
@@ -337,9 +284,7 @@ class GPUPredictor : public xgboost::Predictor {
       std::copy(src_nodes.begin(), src_nodes.end(),
                 h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]);
     }
-    dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard &shard) {
-      shard.InitModel(model, h_tree_segments, h_nodes, tree_begin, tree_end);
-    });
+    shard_.InitModel(model, h_tree_segments, h_nodes, tree_begin, tree_end);
   }
   void DevicePredictInternal(DMatrix* dmat,
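
InitModel flattens every tree in [tree_begin, tree_end) into one contiguous node array, with h_tree_segments holding a running prefix sum of tree sizes so each tree's nodes can be found by segment start. A plain-vector sketch of that packing; Node and PackTrees are stand-ins, not the real DevicePredictionNode machinery:

#include <algorithm>
#include <cstddef>
#include <vector>

struct Node { int fidx; float value; };

// segments[i] is where tree i starts inside the flat node array.
void PackTrees(const std::vector<std::vector<Node>>& trees,
               std::vector<Node>* nodes, std::vector<std::size_t>* segments) {
  segments->clear();
  std::size_t sum = 0;
  segments->push_back(sum);  // running prefix sum of tree sizes
  for (const auto& tree : trees) {
    sum += tree.size();
    segments->push_back(sum);
  }
  nodes->resize(sum);
  for (std::size_t i = 0; i < trees.size(); ++i) {
    std::copy(trees[i].begin(), trees[i].end(), nodes->begin() + (*segments)[i]);
  }
}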
@@ -352,40 +297,43 @@ class GPUPredictor : public xgboost::Predictor {
     InitModel(model, tree_begin, tree_end);
     size_t batch_offset = 0;
+    auto* preds = out_preds;
+    std::unique_ptr<HostDeviceVector<bst_float>> batch_preds{nullptr};
     for (auto &batch : dmat->GetBatches<SparsePage>()) {
       bool is_external_memory = batch.Size() < dmat->Info().num_row_;
       if (is_external_memory) {
-        std::vector<size_t> out_preds_offsets;
-        PredictionDeviceOffsets(out_preds->Size(), batch_offset, batch.Size(),
-                                model.param.num_output_group, &out_preds_offsets);
-        out_preds->Reshard(GPUDistribution::Explicit(devices_, out_preds_offsets));
+        batch_preds.reset(new HostDeviceVector<bst_float>);
+        batch_preds->Resize(batch.Size() * model.param.num_output_group);
+        std::copy(out_preds->ConstHostVector().begin() + batch_offset,
+                  out_preds->ConstHostVector().begin() + batch_offset + batch_preds->Size(),
+                  batch_preds->HostVector().begin());
+        preds = batch_preds.get();
       }
-      batch.offset.Shard(GPUDistribution::Overlap(devices_, 1));
-      std::vector<size_t> device_offsets;
-      DeviceOffsets(batch.offset, batch.data.Size(), &device_offsets);
-      batch.data.Reshard(GPUDistribution::Explicit(devices_, device_offsets));
+      batch.offset.SetDevice(device_);
+      batch.data.SetDevice(device_);
+      preds->SetDevice(device_);
+      shard_.PredictInternal(batch, model.param.num_feature, preds);
-      dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
-        shard.PredictInternal(batch, model.param.num_feature, out_preds);
-      });
+      if (is_external_memory) {
+        auto h_preds = preds->ConstHostVector();
+        std::copy(h_preds.begin(), h_preds.end(), out_preds->HostVector().begin() + batch_offset);
+      }
       batch_offset += batch.Size() * model.param.num_output_group;
     }
-    out_preds->Reshard(GPUDistribution::Granular(devices_, model.param.num_output_group));
     monitor_.StopCuda("DevicePredictInternal");
   }
  public:
-  GPUPredictor() = default;
+  GPUPredictor() : device_{-1} {};
   void PredictBatch(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
                     const gbm::GBTreeModel& model, int tree_begin,
                     unsigned ntree_limit = 0) override {
-    GPUSet devices = GPUSet::All(learner_param_->gpu_id, learner_param_->n_gpus,
-                                 dmat->Info().num_row_);
-    CHECK_NE(devices.Size(), 0);
-    ConfigureShards(devices);
+    int device = learner_param_->gpu_id;
+    CHECK_GE(device, 0);
+    ConfigureShard(device);
     if (this->PredictFromCache(dmat, out_preds, model, ntree_limit)) {
       return;
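
In the external-memory case above, each SparsePage batch covers only part of the rows, so the single-GPU path copies the batch's slice of out_preds into a temporary batch_preds, runs prediction into it, and copies the result back at batch_offset. A host-only sketch of that slice/predict/copy-back pattern; PredictSlice and PredictInBatches are hypothetical stand-ins for the device call and the surrounding loop:

#include <algorithm>
#include <cstddef>
#include <vector>

void PredictSlice(std::vector<float>* slice) {  // stands in for shard_.PredictInternal
  for (auto& v : *slice) v += 1.0f;
}

void PredictInBatches(std::vector<float>* out_preds, std::size_t batch_size, std::size_t n_groups) {
  const std::size_t stride = batch_size * n_groups;
  for (std::size_t batch_offset = 0; batch_offset < out_preds->size(); batch_offset += stride) {
    std::size_t len = std::min(stride, out_preds->size() - batch_offset);
    std::vector<float> batch_preds(out_preds->begin() + batch_offset,
                                   out_preds->begin() + batch_offset + len);  // slice out
    PredictSlice(&batch_preds);                                               // predict on the slice
    std::copy(batch_preds.begin(), batch_preds.end(),
              out_preds->begin() + batch_offset);                             // copy back
  }
}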
@@ -408,10 +356,9 @@ class GPUPredictor : public xgboost::Predictor {
     size_t n_classes = model.param.num_output_group;
     size_t n = n_classes * info.num_row_;
     const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
-    out_preds->Shard(GPUDistribution::Granular(devices_, n_classes));
     out_preds->Resize(n);
     if (base_margin.Size() != 0) {
-      CHECK_EQ(out_preds->Size(), n);
+      CHECK_EQ(base_margin.Size(), n);
       out_preds->Copy(base_margin);
     } else {
       out_preds->Fill(model.base_margin);
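
InitOutPredictions sizes the output to num_row_ * num_output_group and seeds it either from a user-supplied base_margin, which must already have exactly that length, or with the model's scalar base_margin. The same decision restated with plain vectors, as a sketch rather than the real method:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

void InitOutPredictions(std::vector<float>* out_preds, const std::vector<float>& base_margin,
                        std::size_t num_rows, std::size_t n_classes, float model_base_margin) {
  const std::size_t n = n_classes * num_rows;
  out_preds->resize(n);
  if (!base_margin.empty()) {
    assert(base_margin.size() == n);  // mirrors the CHECK_EQ in the hunk above
    std::copy(base_margin.begin(), base_margin.end(), out_preds->begin());
  } else {
    std::fill(out_preds->begin(), out_preds->end(), model_base_margin);
  }
}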
@@ -427,7 +374,7 @@ class GPUPredictor : public xgboost::Predictor {
     const HostDeviceVector<bst_float>& y = it->second.predictions;
     if (y.Size() != 0) {
       monitor_.StartCuda("PredictFromCache");
-      out_preds->Shard(y.Distribution());
+      out_preds->SetDevice(y.DeviceIdx());
       out_preds->Resize(y.Size());
       out_preds->Copy(y);
       monitor_.StopCuda("PredictFromCache");
@@ -500,25 +447,23 @@ class GPUPredictor : public xgboost::Predictor {
                  const std::vector<std::shared_ptr<DMatrix>>& cache) override {
     Predictor::Configure(cfg, cache);
-    GPUSet devices = GPUSet::All(learner_param_->gpu_id, learner_param_->n_gpus);
-    ConfigureShards(devices);
+    int device = learner_param_->gpu_id;
+    if (device >= 0) {
+      ConfigureShard(device);
+    }
   }
  private:
   /*! \brief Re configure shards when GPUSet is changed. */
-  void ConfigureShards(GPUSet devices) {
-    if (devices_ == devices) return;
+  void ConfigureShard(int device) {
+    if (device_ == device) return;
-    devices_ = devices;
-    shards_.clear();
-    shards_.resize(devices_.Size());
-    dh::ExecuteIndexShards(&shards_, [=](size_t i, DeviceShard& shard){
-      shard.Init(devices_.DeviceId(i));
-    });
+    device_ = device;
+    shard_.Init(device_);
   }
-  std::vector<DeviceShard> shards_;
-  GPUSet devices_;
+  DeviceShard shard_;
+  int device_;
   common::Monitor monitor_;
 };