GPU memory usage fixes + column sampling refactor (#3635)

* Remove thrust copy calls

* Fix histogram memory usage

* Cap extreme histogram memory usage

* More efficient column sampling

* Use column sampler across updaters

* More efficient split evaluation on GPU with column sampling
Author: Rory Mitchell
Date:   2018-08-27 16:26:46 +12:00
Committed by: GitHub
Parent: 60787ecebc
Commit: 686e990ffc

9 changed files with 198 additions and 182 deletions

@@ -124,7 +124,7 @@ __device__ void EvaluateFeature(int fidx, const GradientPairSumT* hist,
 template <int BLOCK_THREADS>
 __global__ void evaluate_split_kernel(
     const GradientPairSumT* d_hist, int nidx, uint64_t n_features,
-    DeviceNodeStats nodes, const int* d_feature_segments,
+    int* feature_set, DeviceNodeStats nodes, const int* d_feature_segments,
     const float* d_fidx_min_map, const float* d_gidx_fvalue_map,
     GPUTrainingParam gpu_param, DeviceSplitCandidate* d_split,
     ValueConstraint value_constraint, int* d_monotonic_constraints) {
@@ -151,7 +151,7 @@ __global__ void evaluate_split_kernel(
   __syncthreads();
-  auto fidx = blockIdx.x;
+  auto fidx = feature_set[blockIdx.x];
   auto constraint = d_monotonic_constraints[fidx];
   EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>(
       fidx, d_hist, d_feature_segments, d_fidx_min_map[fidx], d_gidx_fvalue_map,
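
With column sampling, the kernel is launched with one thread block per sampled feature instead of one per column, and each block recovers its true column index through feature_set. A minimal sketch of this indirection pattern, with illustrative names that are not the updater's actual signature:

// Sketch: one block per *sampled* feature; d_feature_set holds the
// compacted list of column indices chosen by the sampler.
__global__ void process_sampled_features(const int* d_feature_set,
                                         const float* d_column_stats,
                                         float* d_out) {
  int fidx = d_feature_set[blockIdx.x];  // real column index
  if (threadIdx.x == 0) {
    // All per-feature work keys off fidx; unsampled columns cost nothing.
    d_out[fidx] = d_column_stats[fidx];
  }
}

// Host side: launch over the sampled set, not over all columns, e.g.
// process_sampled_features<<<n_sampled, 256>>>(d_feature_set, stats, out);
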
@@ -204,7 +204,8 @@ __device__ int BinarySearchRow(bst_uint begin, bst_uint end, GidxIterT data,
 struct DeviceHistogram {
   std::map<int, size_t>
       nidx_map;  // Map nidx to starting index of its histogram
-  thrust::device_vector<GradientPairSumT> data;
+  thrust::device_vector<GradientPairSumT::ValueT> data;
+  const size_t kStopGrowingSize = 1 << 26;  // Do not grow beyond this size
   int n_bins;
   int device_idx;
   void Init(int device_idx, int n_bins) {
@@ -214,29 +215,42 @@ struct DeviceHistogram {
   void Reset() {
     dh::safe_cuda(cudaSetDevice(device_idx));
-    thrust::fill(data.begin(), data.end(), GradientPairSumT());
+    data.resize(0);
+    nidx_map.clear();
   }
+  bool HistogramExists(int nidx) {
+    return nidx_map.find(nidx) != nidx_map.end();
+  }
+  void AllocateHistogram(int nidx) {
+    if (HistogramExists(nidx)) return;
+    if (data.size() > kStopGrowingSize) {
+      // Recycle histogram memory
+      auto old_entry = *nidx_map.begin();
+      nidx_map.erase(old_entry.first);
+      dh::safe_cuda(cudaMemset(data.data().get() + old_entry.second, 0,
+                               n_bins * sizeof(GradientPairSumT)));
+      nidx_map[nidx] = old_entry.second;
+    } else {
+      // Append new node histogram
+      nidx_map[nidx] = data.size();
+      dh::safe_cuda(cudaSetDevice(device_idx));
+      data.resize(data.size() + (n_bins * 2));
+    }
+  }
   /**
-   * \summary Return pointer to histogram memory for a given node. Be aware that this function
-   * may reallocate the underlying memory, invalidating previous pointers.
-   *
-   * \author Rory
-   * \date 28/07/2018
-   *
+   * \summary Return pointer to histogram memory for a given node.
    * \param nidx Tree node index.
    *
    * \return hist pointer.
    */
   GradientPairSumT* GetHistPtr(int nidx) {
-    if (nidx_map.find(nidx) == nidx_map.end()) {
-      // Append new node histogram
-      nidx_map[nidx] = data.size();
-      dh::safe_cuda(cudaSetDevice(device_idx));
-      data.resize(data.size() + n_bins, GradientPairSumT());
-    }
-    return data.data().get() + nidx_map[nidx];
+    CHECK(this->HistogramExists(nidx));
+    auto ptr = data.data().get() + nidx_map[nidx];
+    return reinterpret_cast<GradientPairSumT*>(ptr);
   }
 };
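
Two details of the new layout are easy to miss: data now stores raw GradientPairSumT::ValueT scalars, so each node's histogram occupies n_bins * 2 values (one gradient sum and one hessian sum per bin), and once the buffer passes kStopGrowingSize it stops growing and instead recycles the slot of the lowest-numbered (i.e. oldest) node. A minimal CPU-only sketch of this recycle-or-grow policy, assuming simplified types (float for ValueT, std::memset in place of cudaMemset) and a made-up cap:

#include <cstring>
#include <map>
#include <vector>

struct HistogramPool {
  std::map<int, size_t> nidx_map;      // node id -> offset into data
  std::vector<float> data;             // 2 scalars per bin: grad sum, hess sum
  size_t stop_growing_size = 1 << 10;  // illustrative cap, not 1 << 26
  int n_bins = 4;

  void AllocateHistogram(int nidx) {
    if (nidx_map.count(nidx)) return;
    if (data.size() > stop_growing_size) {
      // Recycle the slot of the oldest tracked node: zero it and remap it.
      auto old_entry = *nidx_map.begin();
      nidx_map.erase(old_entry.first);
      std::memset(data.data() + old_entry.second, 0,
                  n_bins * 2 * sizeof(float));
      nidx_map[nidx] = old_entry.second;
    } else {
      // Append: resize() zero-initialises the new (grad, hess) pairs.
      nidx_map[nidx] = data.size();
      data.resize(data.size() + n_bins * 2);
    }
  }
};
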
@@ -576,6 +590,7 @@ struct DeviceShard {
   }
   void BuildHist(int nidx) {
+    hist.AllocateHistogram(nidx);
     if (can_use_smem_atomics) {
       BuildHistUsingSharedMem(nidx);
     } else {
@@ -585,10 +600,6 @@ struct DeviceShard {
   void SubtractionTrick(int nidx_parent, int nidx_histogram,
                         int nidx_subtraction) {
-    // Make sure histograms are already allocated
-    hist.GetHistPtr(nidx_parent);
-    hist.GetHistPtr(nidx_histogram);
-    hist.GetHistPtr(nidx_subtraction);
     auto d_node_hist_parent = hist.GetHistPtr(nidx_parent);
     auto d_node_hist_histogram = hist.GetHistPtr(nidx_histogram);
     auto d_node_hist_subtraction = hist.GetHistPtr(nidx_subtraction);
@@ -599,6 +610,14 @@ struct DeviceShard {
     });
   }
+  bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
+                             int nidx_subtraction) {
+    // Make sure histograms are already allocated
+    hist.AllocateHistogram(nidx_subtraction);
+    return hist.HistogramExists(nidx_histogram) &&
+           hist.HistogramExists(nidx_parent);
+  }
   __device__ void CountLeft(int64_t* d_count, int val, int left_nidx) {
     unsigned ballot = __ballot(val == left_nidx);
     if (threadIdx.x % 32 == 0) {
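
The subtraction trick rests on the identity that, bin by bin, a child's gradient statistics equal the parent's minus the sibling's, so only one child ever needs a fresh histogram build. A minimal host-side sketch of the element-wise operation, assuming a simplified (grad, hess) pair type in place of GradientPairSumT:

#include <vector>

struct GradPair { double grad = 0.0, hess = 0.0; };

// For every bin: sibling = parent - built_child.
void SubtractionTrick(const std::vector<GradPair>& parent,
                      const std::vector<GradPair>& built_child,
                      std::vector<GradPair>* sibling) {
  sibling->resize(parent.size());
  for (size_t bin = 0; bin < parent.size(); ++bin) {
    (*sibling)[bin].grad = parent[bin].grad - built_child[bin].grad;
    (*sibling)[bin].hess = parent[bin].hess - built_child[bin].hess;
  }
}
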
@@ -817,7 +836,7 @@ class GPUHistMaker : public TreeUpdater {
     }
     monitor_.Stop("InitDataOnce", devices_);
-    column_sampler_.Init(info_->num_col_, param_);
+    column_sampler_.Init(info_->num_col_, param_.colsample_bylevel,
+                         param_.colsample_bytree);
     // Copy gpair & reset memory
     monitor_.Start("InitDataReset", devices_);
@@ -860,16 +879,34 @@ class GPUHistMaker : public TreeUpdater {
       subtraction_trick_nidx = nidx_left;
     }
+    // Build histogram for node with the smallest number of training examples
     dh::ExecuteShards(&shards_, [&](std::unique_ptr<DeviceShard>& shard) {
       shard->BuildHist(build_hist_nidx);
     });
     this->AllReduceHist(build_hist_nidx);
-    dh::ExecuteShards(&shards_, [&](std::unique_ptr<DeviceShard>& shard) {
-      shard->SubtractionTrick(nidx_parent, build_hist_nidx,
-                              subtraction_trick_nidx);
-    });
+    // Check whether we can use the subtraction trick to calculate the other
+    bool do_subtraction_trick = true;
+    for (auto& shard : shards_) {
+      do_subtraction_trick &= shard->CanDoSubtractionTrick(
+          nidx_parent, build_hist_nidx, subtraction_trick_nidx);
+    }
+    if (do_subtraction_trick) {
+      // Calculate other histogram using subtraction trick
+      dh::ExecuteShards(&shards_, [&](std::unique_ptr<DeviceShard>& shard) {
+        shard->SubtractionTrick(nidx_parent, build_hist_nidx,
+                                subtraction_trick_nidx);
+      });
+    } else {
+      // Calculate other histogram manually
+      dh::ExecuteShards(&shards_, [&](std::unique_ptr<DeviceShard>& shard) {
+        shard->BuildHist(subtraction_trick_nidx);
+      });
+      this->AllReduceHist(subtraction_trick_nidx);
+    }
   }
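
Operationally, the updater builds a real histogram only for the child with fewer training rows, then derives the sibling by subtraction whenever every shard still holds the parent's histogram (which the capped, recycling DeviceHistogram no longer guarantees); otherwise it falls back to a full build plus all-reduce. A sketch of that control flow, using hypothetical free-function stand-ins for the shard methods of the same names:

#include <cstddef>

// Hypothetical stand-ins for the shard/updater methods of the same names.
void BuildHist(int nidx);
bool CanDoSubtractionTrick(int parent, int built, int derived);
void SubtractionTrick(int parent, int built, int derived);

// Build the cheaper child directly; derive the other in O(n_bins) when the
// parent histogram survived, else rebuild it from its own rows.
void BuildChildHistograms(int parent, int left, int right,
                          size_t rows_left, size_t rows_right) {
  int build = (rows_left <= rows_right) ? left : right;
  int derive = (build == left) ? right : left;
  BuildHist(build);
  if (CanDoSubtractionTrick(parent, build, derive)) {
    SubtractionTrick(parent, build, derive);  // derive = parent - build
  } else {
    BuildHist(derive);  // parent slot was recycled; rebuild directly
  }
}
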
@@ -877,8 +914,9 @@ class GPUHistMaker : public TreeUpdater {
   // Returns best loss
   std::vector<DeviceSplitCandidate> EvaluateSplits(
       const std::vector<int>& nidx_set, RegTree* p_tree) {
     auto columns = info_->num_col_;
     std::vector<DeviceSplitCandidate> best_splits(nidx_set.size());
-    std::vector<DeviceSplitCandidate> candidate_splits(nidx_set.size() *
-                                                       columns);
+    DeviceSplitCandidate* candidate_splits;
+    dh::safe_cuda(cudaMallocHost(&candidate_splits, nidx_set.size() *
+                                 columns * sizeof(DeviceSplitCandidate)));
     // Use first device
     auto& shard = shards_.front();
     dh::safe_cuda(cudaSetDevice(shard->device_idx));
@@ -892,34 +930,37 @@ class GPUHistMaker : public TreeUpdater {
     for (auto i = 0; i < nidx_set.size(); i++) {
       auto nidx = nidx_set[i];
       DeviceNodeStats node(shard->node_sum_gradients[nidx], nidx, param_);
+      auto depth = p_tree->GetDepth(nidx);
+      auto& feature_set = column_sampler_.GetFeatureSet(depth);
+      feature_set.Reshard(GPUSet(shard->device_idx, 1));
       const int BLOCK_THREADS = 256;
       evaluate_split_kernel<BLOCK_THREADS>
-          <<<uint32_t(columns), BLOCK_THREADS, 0, streams[i]>>>(
-              shard->hist.GetHistPtr(nidx), nidx, info_->num_col_, node,
+          <<<uint32_t(feature_set.Size()), BLOCK_THREADS, 0, streams[i]>>>(
+              shard->hist.GetHistPtr(nidx), nidx, info_->num_col_,
+              feature_set.DevicePointer(shard->device_idx), node,
               shard->feature_segments.Data(), shard->min_fvalue.Data(),
               shard->gidx_fvalue_map.Data(), GPUTrainingParam(param_),
               d_split + i * columns, node_value_constraints_[nidx],
               shard->monotone_constraints.Data());
     }
     dh::safe_cuda(cudaDeviceSynchronize());
     dh::safe_cuda(
-        cudaMemcpy(candidate_splits.data(), shard->temp_memory.d_temp_storage,
+        cudaMemcpy(candidate_splits, shard->temp_memory.d_temp_storage,
                    sizeof(DeviceSplitCandidate) * columns * nidx_set.size(),
                    cudaMemcpyDeviceToHost));
     for (auto i = 0; i < nidx_set.size(); i++) {
       auto nidx = nidx_set[i];
+      auto depth = p_tree->GetDepth(nidx_set[i]);
       DeviceSplitCandidate nidx_best;
-      for (auto fidx = 0; fidx < columns; fidx++) {
-        auto& candidate = candidate_splits[i * columns + fidx];
-        if (column_sampler_.ColumnUsed(candidate.findex,
-                                       p_tree->GetDepth(nidx))) {
-          nidx_best.Update(candidate_splits[i * columns + fidx], param_);
-        }
+      for (auto fidx : column_sampler_.GetFeatureSet(depth).HostVector()) {
+        auto& candidate = candidate_splits[i * columns + fidx];
+        nidx_best.Update(candidate, param_);
       }
       best_splits[i] = nidx_best;
     }
+    dh::safe_cuda(cudaFreeHost(candidate_splits));
     return std::move(best_splits);
   }
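
candidate_splits also changes from a std::vector to page-locked ("pinned") host memory allocated with cudaMallocHost: pinned buffers make device-to-host copies faster and are a prerequisite for genuinely asynchronous transfers, at the cost of requiring an explicit cudaFreeHost. A self-contained sketch of the pattern, with an illustrative buffer and size:

#include <cstdio>
#include <cuda_runtime.h>

int main() {
  const size_t n = 1024;
  float* h_pinned = nullptr;
  float* d_buf = nullptr;
  cudaMallocHost(&h_pinned, n * sizeof(float));  // page-locked host buffer
  cudaMalloc(&d_buf, n * sizeof(float));
  cudaMemset(d_buf, 0, n * sizeof(float));
  // DtoH copies into pinned memory avoid an internal staging copy.
  cudaMemcpy(h_pinned, d_buf, n * sizeof(float), cudaMemcpyDeviceToHost);
  std::printf("first element: %f\n", h_pinned[0]);
  cudaFree(d_buf);
  cudaFreeHost(h_pinned);  // pinned memory must be released with cudaFreeHost
  return 0;
}
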
@@ -1113,8 +1154,8 @@ class GPUHistMaker : public TreeUpdater {
   static bool ChildIsValid(const TrainParam& param, int depth,
                            int num_leaves) {
-    if (param.max_depth > 0 && depth == param.max_depth) return false;
-    if (param.max_leaves > 0 && num_leaves == param.max_leaves) return false;
+    if (param.max_depth > 0 && depth >= param.max_depth) return false;
+    if (param.max_leaves > 0 && num_leaves >= param.max_leaves) return false;
     return true;
   }
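
Switching the limit checks from == to >= makes them defensive: an equality test silently accepts a node whose depth or leaf count has already stepped past the limit, while >= still rejects it. A tiny illustration with made-up values:

#include <cassert>

int main() {
  const int max_depth = 3;
  const int depth = 4;  // somehow already past the limit
  bool valid_eq = !(max_depth > 0 && depth == max_depth);  // true: wrongly accepted
  bool valid_ge = !(max_depth > 0 && depth >= max_depth);  // false: correctly rejected
  assert(valid_eq && !valid_ge);
  return 0;
}
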
@@ -1152,7 +1193,7 @@ class GPUHistMaker : public TreeUpdater {
   int n_bins_;
   std::vector<std::unique_ptr<DeviceShard>> shards_;
-  ColumnSampler column_sampler_;
+  common::ColumnSampler column_sampler_;
   typedef std::priority_queue<ExpandEntry, std::vector<ExpandEntry>,
                               std::function<bool(ExpandEntry, ExpandEntry)>>
       ExpandQueue;
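
The updater-local ColumnSampler is replaced by the shared common::ColumnSampler, initialised with colsample_bylevel and colsample_bytree directly. The idea behind the two-stage sampling: one subset of columns is drawn per tree, each level then samples from that per-tree subset, and the per-depth result is cached so repeated queries agree. A simplified, CPU-only sketch of that behaviour, with made-up names and omitting the real class's device sharding:

#include <algorithm>
#include <cstddef>
#include <map>
#include <numeric>
#include <random>
#include <vector>

class SimpleColumnSampler {
 public:
  void Init(int num_col, float colsample_bylevel, float colsample_bytree,
            unsigned seed = 0) {
    bylevel_ = colsample_bylevel;
    rng_.seed(seed);
    tree_set_.resize(num_col);
    std::iota(tree_set_.begin(), tree_set_.end(), 0);
    Subsample(&tree_set_, colsample_bytree);  // per-tree subset, drawn once
    level_sets_.clear();
  }
  // Cached per depth: the same level always sees the same feature set.
  const std::vector<int>& GetFeatureSet(int depth) {
    auto it = level_sets_.find(depth);
    if (it == level_sets_.end()) {
      std::vector<int> s = tree_set_;
      Subsample(&s, bylevel_);  // per-level subset of the per-tree subset
      it = level_sets_.emplace(depth, std::move(s)).first;
    }
    return it->second;
  }

 private:
  void Subsample(std::vector<int>* cols, float ratio) {
    if (ratio >= 1.0f) return;
    std::shuffle(cols->begin(), cols->end(), rng_);
    cols->resize(std::max<std::size_t>(
        1, static_cast<std::size_t>(cols->size() * ratio)));
    std::sort(cols->begin(), cols->end());
  }
  float bylevel_ = 1.0f;
  std::mt19937 rng_;
  std::vector<int> tree_set_;
  std::map<int, std::vector<int>> level_sets_;
};
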