Purge device_helpers.cuh (#5534)

* Simplifications with caching_device_vector

* Purge device helpers
Rory Mitchell
2020-04-15 21:51:56 +12:00
committed by GitHub
parent a2f54963b6
commit ca4e05660e
9 changed files with 182 additions and 733 deletions


@@ -85,19 +85,6 @@ inline int32_t CudaGetPointerDevice(void* ptr) {
return device;
}
inline void CudaCheckPointerDevice(void* ptr) {
auto ptr_device = CudaGetPointerDevice(ptr);
int cur_device = -1;
dh::safe_cuda(cudaGetDevice(&cur_device));
CHECK_EQ(ptr_device, cur_device) << "pointer device: " << ptr_device
<< "current device: " << cur_device;
}
template <typename T>
const T *Raw(const thrust::device_vector<T> &v) { // NOLINT
return raw_pointer_cast(v.data());
}
inline size_t AvailableMemory(int device_idx) {
size_t device_free = 0;
size_t device_total = 0;
@@ -552,161 +539,6 @@ void CopyDeviceSpanToVector(std::vector<T> *dst, xgboost::common::Span<const T>
cudaMemcpyDeviceToHost));
}
/**
* \brief Copies std::vector to device span.
*
* \tparam T Generic type parameter.
* \param dst Copy destination. Must be device memory.
* \param src Copy source.
*/
template <typename T>
void CopyVectorToDeviceSpan(xgboost::common::Span<T> dst, const std::vector<T> &src)
{
CHECK_EQ(dst.size(), src.size());
dh::safe_cuda(cudaMemcpyAsync(dst.data(), src.data(), dst.size() * sizeof(T),
cudaMemcpyHostToDevice));
}
/**
* \brief Device to device memory copy from src to dst. Spans must be the same size. Use subspan to
* copy from a smaller array to a larger array.
*
* \tparam T Generic type parameter.
* \param dst Copy destination. Must be device memory.
* \param src Copy source. Must be device memory.
*/
template <typename T>
void CopyDeviceSpan(xgboost::common::Span<T> dst,
xgboost::common::Span<T> src) {
CHECK_EQ(dst.size(), src.size());
dh::safe_cuda(cudaMemcpyAsync(dst.data(), src.data(), dst.size() * sizeof(T),
cudaMemcpyDeviceToDevice));
}
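A minimal usage sketch for the two copy helpers above, assuming the header's thrust and Span includes are available; the buffer sizes are illustrative:
std::vector<float> h_src(100, 1.0f);
thrust::device_vector<float> d_a(h_src.size());
thrust::device_vector<float> d_b(h_src.size());
auto s_a = xgboost::common::Span<float>(d_a.data().get(), d_a.size());
auto s_b = xgboost::common::Span<float>(d_b.data().get(), d_b.size());
dh::CopyVectorToDeviceSpan(s_a, h_src);  // host -> device
dh::CopyDeviceSpan(s_b, s_a);            // device -> device, equal sizes
dh::safe_cuda(cudaDeviceSynchronize());  // both copies are asynchronous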
/*! \brief Helper for allocating large block of memory. */
class BulkAllocator {
std::vector<char *> d_ptr_;
std::vector<size_t> size_;
int device_idx_{-1};
static const int kAlign = 256;
size_t AlignRoundUp(size_t n) const {
n = (n + kAlign - 1) / kAlign;
return n * kAlign;
}
template <typename T>
size_t GetSizeBytes(xgboost::common::Span<T> *first_vec, size_t first_size) {
return AlignRoundUp(first_size * sizeof(T));
}
template <typename T, typename... Args>
size_t GetSizeBytes(xgboost::common::Span<T> *first_vec, size_t first_size, Args... args) {
return GetSizeBytes<T>(first_vec, first_size) + GetSizeBytes(args...);
}
template <typename T>
void AllocateSpan(int device_idx, char *ptr, xgboost::common::Span<T> *first_vec,
size_t first_size) {
*first_vec = xgboost::common::Span<T>(reinterpret_cast<T *>(ptr), first_size);
}
template <typename T, typename... Args>
void AllocateSpan(int device_idx, char *ptr, xgboost::common::Span<T> *first_vec,
size_t first_size, Args... args) {
AllocateSpan<T>(device_idx, ptr, first_vec, first_size);
ptr += AlignRoundUp(first_size * sizeof(T));
AllocateSpan(device_idx, ptr, args...);
}
char *AllocateDevice(int device_idx, size_t bytes) {
safe_cuda(cudaSetDevice(device_idx));
XGBDeviceAllocator<char> allocator;
return allocator.allocate(bytes).get();
}
template <typename T>
size_t GetSizeBytes(DoubleBuffer<T> *first_vec, size_t first_size) {
return 2 * AlignRoundUp(first_size * sizeof(T));
}
template <typename T, typename... Args>
size_t GetSizeBytes(DoubleBuffer<T> *first_vec, size_t first_size, Args... args) {
return GetSizeBytes<T>(first_vec, first_size) + GetSizeBytes(args...);
}
template <typename T>
void AllocateSpan(int device_idx, char *ptr, DoubleBuffer<T> *first_vec,
size_t first_size) {
auto ptr1 = reinterpret_cast<T *>(ptr);
auto ptr2 = ptr1 + first_size;
first_vec->a = xgboost::common::Span<T>(ptr1, first_size);
first_vec->b = xgboost::common::Span<T>(ptr2, first_size);
first_vec->buff.d_buffers[0] = ptr1;
first_vec->buff.d_buffers[1] = ptr2;
first_vec->buff.selector = 0;
}
template <typename T, typename... Args>
void AllocateSpan(int device_idx, char *ptr, DoubleBuffer<T> *first_vec,
size_t first_size, Args... args) {
AllocateSpan<T>(device_idx, ptr, first_vec, first_size);
ptr += (AlignRoundUp(first_size * sizeof(T)) * 2);
AllocateSpan(device_idx, ptr, args...);
}
public:
BulkAllocator() = default;
// prevent accidental copying, moving or assignment of this object
BulkAllocator(const BulkAllocator&) = delete;
BulkAllocator(BulkAllocator&&) = delete;
void operator=(const BulkAllocator&) = delete;
void operator=(BulkAllocator&&) = delete;
/*!
* \brief Clear the bulk allocator.
*
* This frees the GPU memory managed by this allocator.
*/
void Clear() {
if (d_ptr_.empty()) return;
safe_cuda(cudaSetDevice(device_idx_));
size_t idx = 0;
std::for_each(d_ptr_.begin(), d_ptr_.end(), [&](char *dptr) {
XGBDeviceAllocator<char>().deallocate(thrust::device_ptr<char>(dptr), size_[idx++]);
});
d_ptr_.clear();
size_.clear();
}
~BulkAllocator() {
Clear();
}
// returns sum of bytes for all allocations
size_t Size() {
return std::accumulate(size_.begin(), size_.end(), static_cast<size_t>(0));
}
template <typename... Args>
void Allocate(int device_idx, Args... args) {
if (device_idx_ == -1) {
device_idx_ = device_idx;
} else {
CHECK(device_idx_ == device_idx);
}
size_t size = GetSizeBytes(args...);
char *ptr = AllocateDevice(device_idx, size);
AllocateSpan(device_idx, ptr, args...);
d_ptr_.push_back(ptr);
size_.push_back(size);
}
};
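A sketch of the typical BulkAllocator pattern; the span names and element count are assumptions, not taken from the diff:
dh::BulkAllocator ba;
xgboost::common::Span<float> gradients;
xgboost::common::Span<int> positions;
size_t n = 1 << 20;
// A single aligned device allocation on device 0 backs both spans.
ba.Allocate(0, &gradients, n, &positions, n);
// Memory is released by ba.Clear() or when ba goes out of scope.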
// Keep track of pinned memory allocation
struct PinnedMemory {
void *temp_storage{nullptr};
@@ -787,196 +619,6 @@ struct CubMemory {
* Utility functions
*/
// Load balancing search
template <typename CoordinateT, typename SegmentT, typename OffsetT>
void FindMergePartitions(int device_idx, CoordinateT *d_tile_coordinates,
size_t num_tiles, int tile_size, SegmentT segments,
OffsetT num_rows, OffsetT num_elements) {
dh::LaunchN(device_idx, num_tiles + 1, [=] __device__(int idx) {
OffsetT diagonal = idx * tile_size;
CoordinateT tile_coordinate;
cub::CountingInputIterator<OffsetT> nonzero_indices(0);
// Search the merge path
// Cast to signed integer as this function can have negatives
cub::MergePathSearch(static_cast<int64_t>(diagonal), segments + 1,
nonzero_indices, static_cast<int64_t>(num_rows),
static_cast<int64_t>(num_elements), tile_coordinate);
// Output starting offset
d_tile_coordinates[idx] = tile_coordinate;
});
}
template <int TILE_SIZE, int ITEMS_PER_THREAD, int BLOCK_THREADS,
typename OffsetT, typename CoordinateT, typename FunctionT,
typename SegmentIterT>
__global__ void LbsKernel(CoordinateT *d_coordinates,
SegmentIterT segment_end_offsets, FunctionT f,
OffsetT num_segments) {
int tile = blockIdx.x;
CoordinateT tile_start_coord = d_coordinates[tile];
CoordinateT tile_end_coord = d_coordinates[tile + 1];
int64_t tile_num_rows = tile_end_coord.x - tile_start_coord.x;
int64_t tile_num_elements = tile_end_coord.y - tile_start_coord.y;
cub::CountingInputIterator<OffsetT> tile_element_indices(tile_start_coord.y);
CoordinateT thread_start_coord;
using SegmentT = typename std::iterator_traits<SegmentIterT>::value_type;
__shared__ struct {
SegmentT tile_segment_end_offsets[TILE_SIZE + 1];
SegmentT output_segment[TILE_SIZE];
} temp_storage;
for (auto item : dh::BlockStrideRange(int(0), int(tile_num_rows + 1))) {
temp_storage.tile_segment_end_offsets[item] =
segment_end_offsets[min(static_cast<size_t>(tile_start_coord.x + item),
static_cast<size_t>(num_segments - 1))];
}
__syncthreads();
int64_t diag = threadIdx.x * ITEMS_PER_THREAD;
// Cast to signed integer as this function can have negatives
cub::MergePathSearch(diag, // Diagonal
temp_storage.tile_segment_end_offsets, // List A
tile_element_indices, // List B
tile_num_rows, tile_num_elements, thread_start_coord);
CoordinateT thread_current_coord = thread_start_coord;
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) {
if (tile_element_indices[thread_current_coord.y] <
temp_storage.tile_segment_end_offsets[thread_current_coord.x]) {
temp_storage.output_segment[thread_current_coord.y] =
thread_current_coord.x + tile_start_coord.x;
++thread_current_coord.y;
} else {
++thread_current_coord.x;
}
}
__syncthreads();
for (auto item : dh::BlockStrideRange(int(0), int(tile_num_elements))) {
f(tile_start_coord.y + item, temp_storage.output_segment[item]);
}
}
template <typename FunctionT, typename SegmentIterT, typename OffsetT>
void SparseTransformLbs(int device_idx, dh::CubMemory *temp_memory,
OffsetT count, SegmentIterT segments,
OffsetT num_segments, FunctionT f) {
using CoordinateT = typename cub::CubVector<OffsetT, 2>::Type;
dh::safe_cuda(cudaSetDevice(device_idx));
const int BLOCK_THREADS = 256;
const int ITEMS_PER_THREAD = 1;
const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;
auto num_tiles = xgboost::common::DivRoundUp(count + num_segments, BLOCK_THREADS);
CHECK(num_tiles < std::numeric_limits<unsigned int>::max());
temp_memory->LazyAllocate(sizeof(CoordinateT) * (num_tiles + 1));
CoordinateT *tmp_tile_coordinates =
reinterpret_cast<CoordinateT *>(temp_memory->d_temp_storage);
FindMergePartitions(device_idx, tmp_tile_coordinates, num_tiles,
BLOCK_THREADS, segments, num_segments, count);
LbsKernel<TILE_SIZE, ITEMS_PER_THREAD, BLOCK_THREADS, OffsetT>
<<<uint32_t(num_tiles), BLOCK_THREADS>>>(tmp_tile_coordinates, // NOLINT
segments + 1, f, num_segments);
}
template <typename FunctionT, typename OffsetT>
void DenseTransformLbs(int device_idx, OffsetT count, OffsetT num_segments,
FunctionT f) {
CHECK(count % num_segments == 0) << "Data is not dense.";
LaunchN(device_idx, count, [=] __device__(OffsetT idx) {
OffsetT segment = idx / (count / num_segments);
f(idx, segment);
});
}
/**
* \fn template <typename FunctionT, typename SegmentIterT, typename OffsetT>
* void TransformLbs(int device_idx, dh::CubMemory *temp_memory, OffsetT count,
* SegmentIterT segments, OffsetT num_segments, bool is_dense, FunctionT f)
*
* \brief Load balancing search function. Reads a CSR type matrix description
* and allows a function to be executed on each element. Search 'modern GPU load
* balancing search' for more information.
*
* \author Rory
* \date 7/9/2017
*
* \tparam FunctionT Type of the function t.
* \tparam SegmentIterT Type of the segments iterator.
* \tparam OffsetT Type of the offset.
* \param device_idx Zero-based index of the device.
* \param [in,out] temp_memory Temporary memory allocator.
* \param count Number of elements.
* \param segments Device pointer to segments.
* \param num_segments Number of segments.
 * \param is_dense True if the matrix is dense (every row has the same number of elements).
* \param f Lambda to be executed on matrix elements.
*/
template <typename FunctionT, typename SegmentIterT, typename OffsetT>
void TransformLbs(int device_idx, dh::CubMemory *temp_memory, OffsetT count,
SegmentIterT segments, OffsetT num_segments, bool is_dense,
FunctionT f) {
if (is_dense) {
DenseTransformLbs(device_idx, count, num_segments, f);
} else {
SparseTransformLbs(device_idx, temp_memory, count, segments, num_segments,
f);
}
}
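A sketch of driving TransformLbs over a CSR matrix; d_row_ptr, d_values, nnz and num_rows are assumed device-side descriptors of the matrix, and the device lambda requires extended-lambda compilation:
dh::CubMemory temp_memory;
// d_row_ptr holds num_rows + 1 CSR row offsets on the device.
dh::TransformLbs(0, &temp_memory, nnz, d_row_ptr, num_rows, /*is_dense=*/false,
                 [=] __device__(size_t element_idx, size_t row_idx) {
                   // element_idx indexes the CSR value array, row_idx is its row.
                   float value = d_values[element_idx];
                   (void)value;
                 });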
/**
* @brief Helper function to sort the pairs using cub's segmented RadixSortPairs
* @param tmp_mem cub temporary memory info
* @param keys keys double-buffer array
* @param vals the values double-buffer array
* @param nVals number of elements in the array
* @param nSegs number of segments
* @param offsets the segments
*/
template <typename T1, typename T2>
void SegmentedSort(dh::CubMemory *tmp_mem, dh::DoubleBuffer<T1> *keys,
dh::DoubleBuffer<T2> *vals, int nVals, int nSegs,
xgboost::common::Span<int> offsets, int start = 0,
int end = sizeof(T1) * 8) {
size_t tmpSize;
dh::safe_cuda(cub::DeviceSegmentedRadixSort::SortPairs(
NULL, tmpSize, keys->CubBuffer(), vals->CubBuffer(), nVals, nSegs,
offsets.data(), offsets.data() + 1, start, end));
tmp_mem->LazyAllocate(tmpSize);
dh::safe_cuda(cub::DeviceSegmentedRadixSort::SortPairs(
tmp_mem->d_temp_storage, tmpSize, keys->CubBuffer(), vals->CubBuffer(),
nVals, nSegs, offsets.data(), offsets.data() + 1, start, end));
}
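A sketch of the segmented sort; keys, vals and offsets are assumed to have been allocated elsewhere (for example through the BulkAllocator above):
dh::CubMemory tmp_mem;
// keys: dh::DoubleBuffer<float>, vals: dh::DoubleBuffer<int>, both with nVals
// elements; offsets: Span<int> holding nSegs + 1 segment boundaries.
dh::SegmentedSort(&tmp_mem, &keys, &vals, nVals, nSegs, offsets);
// The buffer selected by keys->CubBuffer().Current() now holds the sorted keys.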
/**
* @brief Helper function to perform device-wide sum-reduction
* @param tmp_mem cub temporary memory info
* @param in the input array to be reduced
* @param out the output reduced value
* @param nVals number of elements in the input array
*/
template <typename T>
void SumReduction(dh::CubMemory* tmp_mem, xgboost::common::Span<T> in, xgboost::common::Span<T> out,
int nVals) {
size_t tmpSize;
dh::safe_cuda(
cub::DeviceReduce::Sum(NULL, tmpSize, in.data(), out.data(), nVals));
tmp_mem->LazyAllocate(tmpSize);
dh::safe_cuda(cub::DeviceReduce::Sum(tmp_mem->d_temp_storage, tmpSize,
in.data(), out.data(), nVals));
}
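A sketch of the span-based reduction; the thrust vectors and spans are illustrative:
dh::CubMemory tmp_mem;
thrust::device_vector<float> values(1000, 1.0f);
thrust::device_vector<float> result(1);
auto in = xgboost::common::Span<float>(values.data().get(), values.size());
auto out = xgboost::common::Span<float>(result.data().get(), result.size());
dh::SumReduction(&tmp_mem, in, out, static_cast<int>(values.size()));
// result[0] now reads back 1000.0f from the device.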
/**
* @brief Helper function to perform device-wide sum-reduction, returns to the
* host
@@ -1004,79 +646,6 @@ typename std::iterator_traits<T>::value_type SumReduction(
return sum;
}
/**
* @brief Fill a given constant value across all elements in the buffer
* @param out the buffer to be filled
 * @param len number of elements in the buffer
* @param def default value to be filled
*/
template <typename T, int BlkDim = 256, int ItemsPerThread = 4>
void FillConst(int device_idx, T *out, int len, T def) {
dh::LaunchN<ItemsPerThread, BlkDim>(device_idx, len,
[=] __device__(int i) { out[i] = def; });
}
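For completeness, a one-line sketch; d_out is an assumed float* device allocation holding n elements:
dh::FillConst(0, d_out, n, -1.0f);  // on device 0, set every element to -1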
/**
* @brief gather elements
* @param out1 output gathered array for the first buffer
* @param in1 first input buffer
* @param out2 output gathered array for the second buffer
* @param in2 second input buffer
* @param instId gather indices
* @param nVals length of the buffers
*/
template <typename T1, typename T2, int BlkDim = 256, int ItemsPerThread = 4>
void Gather(int device_idx, T1 *out1, const T1 *in1, T2 *out2, const T2 *in2,
const int *instId, int nVals) {
dh::LaunchN<ItemsPerThread, BlkDim>(device_idx, nVals,
[=] __device__(int i) {
int iid = instId[i];
T1 v1 = in1[iid];
T2 v2 = in2[iid];
out1[i] = v1;
out2[i] = v2;
});
}
/**
* @brief gather elements
* @param out output gathered array
* @param in input buffer
* @param instId gather indices
* @param nVals length of the buffers
*/
template <typename T, int BlkDim = 256, int ItemsPerThread = 4>
void Gather(int device_idx, T *out, const T *in, const int *instId, int nVals) {
dh::LaunchN<ItemsPerThread, BlkDim>(device_idx, nVals,
[=] __device__(int i) {
int iid = instId[i];
out[i] = in[iid];
});
}
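A sketch of both Gather overloads; all pointers are assumed device buffers, with n indices and input arrays large enough to cover every index:
// out[i] = in[inst_id[i]] for every i in [0, n)
dh::Gather(0, d_out, d_in, d_inst_id, n);
// The pair overload permutes two arrays with a single index read per element.
dh::Gather(0, d_out1, d_in1, d_out2, d_in2, d_inst_id, n);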
class SaveCudaContext {
private:
int saved_device_;
public:
template <typename Functor>
explicit SaveCudaContext (Functor func) : saved_device_{-1} {
// When compiled with CUDA but running on CPU only device,
// cudaGetDevice will fail.
try {
safe_cuda(cudaGetDevice(&saved_device_));
} catch (const dmlc::Error &except) {
saved_device_ = -1;
}
func();
}
~SaveCudaContext() {
if (saved_device_ != -1) {
safe_cuda(cudaSetDevice(saved_device_));
}
}
};
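A sketch of the guard pattern; device index 1 is an assumption:
dh::SaveCudaContext{[&]() {
  dh::safe_cuda(cudaSetDevice(1));
  // ... launch work on device 1 ...
}};
// The destructor has already restored whichever device was current before.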
/**
* \class AllReducer
*
@@ -1200,50 +769,12 @@ class AllReducer {
return id;
}
#endif
/** \brief Perform max all reduce operation on the host. This function first
* reduces over omp threads then over nodes using rabit (which is not thread
* safe) using the master thread. Uses naive reduce algorithm for local
* threads, don't expect this to scale.*/
void HostMaxAllReduce(std::vector<size_t> *p_data) {
#ifdef XGBOOST_USE_NCCL
auto &data = *p_data;
// Wait in case some other thread is accessing host_data_
#pragma omp barrier
// Reset shared buffer
#pragma omp single
{
host_data_.resize(data.size());
std::fill(host_data_.begin(), host_data_.end(), size_t(0));
}
// Threads update shared array
for (auto i = 0ull; i < data.size(); i++) {
#pragma omp critical
{ host_data_[i] = std::max(host_data_[i], data[i]); }
}
// Wait until all threads are finished
#pragma omp barrier
// One thread performs all reduce across distributed nodes
#pragma omp master
{
rabit::Allreduce<rabit::op::Max, size_t>(host_data_.data(),
host_data_.size());
}
#pragma omp barrier
// Threads can now read back all reduced values
for (auto i = 0ull; i < data.size(); i++) {
data[i] = host_data_[i];
}
#endif
}
};
-template <typename T,
+template <typename VectorT, typename T = typename VectorT::value_type,
typename IndexT = typename xgboost::common::Span<T>::index_type>
xgboost::common::Span<T> ToSpan(
-    device_vector<T>& vec,
+    VectorT &vec,
IndexT offset = 0,
IndexT size = std::numeric_limits<size_t>::max()) {
size = size == std::numeric_limits<size_t>::max() ? vec.size() : size;
@@ -1467,6 +998,26 @@ class SegmentSorter {
}
};
// Atomic add function for gradients
template <typename OutputGradientT, typename InputGradientT>
DEV_INLINE void AtomicAddGpair(OutputGradientT* dest,
const InputGradientT& gpair) {
auto dst_ptr = reinterpret_cast<typename OutputGradientT::ValueT*>(dest);
atomicAdd(dst_ptr,
static_cast<typename OutputGradientT::ValueT>(gpair.GetGrad()));
atomicAdd(dst_ptr + 1,
static_cast<typename OutputGradientT::ValueT>(gpair.GetHess()));
}
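A sketch of accumulating gradient pairs into histogram bins; the kernel and buffer names are illustrative, and double-precision atomicAdd assumes compute capability 6.0 or newer:
__global__ void AccumulateGpairs(xgboost::GradientPairPrecise* d_hist,
                                 const xgboost::GradientPair* d_gpair,
                                 const int* d_bin, size_t n) {
  size_t i = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
  if (i < n) {
    // Adds the grad and hess of d_gpair[i] to the owning bin's running totals.
    dh::AtomicAddGpair(&d_hist[d_bin[i]], d_gpair[i]);
  }
}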
// Thrust version of this function causes error on Windows
template <typename ReturnT, typename IterT, typename FuncT>
thrust::transform_iterator<FuncT, IterT, ReturnT> MakeTransformIterator(
IterT iter, FuncT func) {
return thrust::transform_iterator<FuncT, IterT, ReturnT>(iter, func);
}
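A sketch pairing the helper with a thrust reduction; the device lambda needs --expt-extended-lambda:
thrust::device_vector<float> x(256, 2.0f);
auto square = [] __device__(float v) { return v * v; };
auto it = dh::MakeTransformIterator<float>(x.begin(), square);
float sum_of_squares = thrust::reduce(it, it + x.size());  // 256 * 4 = 1024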
template <typename FunctionT>
class LauncherItr {
public:
@@ -1481,35 +1032,35 @@ public:
};
/**
* \brief Thrust compatible iterator type - discards algorithm output and launches device lambda
* with the index of the output and the algorithm output as arguments.
*
* \author Rory
* \date 7/9/2017
*
* \tparam FunctionT Type of the function t.
*/
template <typename FunctionT>
class DiscardLambdaItr {
public:
// Required iterator traits
using self_type = DiscardLambdaItr; // NOLINT
using difference_type = ptrdiff_t; // NOLINT
using value_type = void; // NOLINT
using pointer = value_type *; // NOLINT
using reference = LauncherItr<FunctionT>; // NOLINT
using iterator_category = typename thrust::detail::iterator_facade_category< // NOLINT
thrust::any_system_tag, thrust::random_access_traversal_tag, value_type,
reference>::type; // NOLINT
private:
difference_type offset_;
FunctionT f_;
public:
XGBOOST_DEVICE explicit DiscardLambdaItr(FunctionT f) : offset_(0), f_(f) {}
XGBOOST_DEVICE DiscardLambdaItr(difference_type offset, FunctionT f)
: offset_(offset), f_(f) {}
XGBOOST_DEVICE self_type operator+(const int &b) const {
return DiscardLambdaItr(offset_ + b, f_);
}
XGBOOST_DEVICE self_type operator++() {
offset_++;
@@ -1533,24 +1084,4 @@ public:
}
};
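A sketch of the intended use: pass the iterator as an algorithm's output so each result is handed to a device lambda instead of being stored; the buffers are assumptions, and this relies on the LauncherItr dereference machinery elided from the hunk above:
auto emit = [=] __device__(size_t idx, int value) {
  d_offsets[idx] = value;  // consume the idx-th scan result directly
};
dh::DiscardLambdaItr<decltype(emit)> out_itr(emit);
thrust::exclusive_scan(thrust::device, d_counts, d_counts + n, out_itr);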
// Atomic add function for gradients
template <typename OutputGradientT, typename InputGradientT>
DEV_INLINE void AtomicAddGpair(OutputGradientT* dest,
const InputGradientT& gpair) {
auto dst_ptr = reinterpret_cast<typename OutputGradientT::ValueT*>(dest);
atomicAdd(dst_ptr,
static_cast<typename OutputGradientT::ValueT>(gpair.GetGrad()));
atomicAdd(dst_ptr + 1,
static_cast<typename OutputGradientT::ValueT>(gpair.GetHess()));
}
// Thrust version of this function causes error on Windows
template <typename ReturnT, typename IterT, typename FuncT>
thrust::transform_iterator<FuncT, IterT, ReturnT> MakeTransformIterator(
IterT iter, FuncT func) {
return thrust::transform_iterator<FuncT, IterT, ReturnT>(iter, func);
}
} // namespace dh