[SYC]. Implementation of HostDeviceVector (#10842)
parent bc69a3e877
commit 2179baa50c
@@ -19,15 +19,15 @@ namespace common {
  * \brief Fill histogram with zeroes
  */
 template<typename GradientSumT>
-void InitHist(::sycl::queue qu, GHistRow<GradientSumT, MemoryType::on_device>* hist,
+void InitHist(::sycl::queue* qu, GHistRow<GradientSumT, MemoryType::on_device>* hist,
               size_t size, ::sycl::event* event) {
-  *event = qu.fill(hist->Begin(),
+  *event = qu->fill(hist->Begin(),
                    xgboost::detail::GradientPairInternal<GradientSumT>(), size, *event);
 }
-template void InitHist(::sycl::queue qu,
+template void InitHist(::sycl::queue* qu,
                        GHistRow<float, MemoryType::on_device>* hist,
                        size_t size, ::sycl::event* event);
-template void InitHist(::sycl::queue qu,
+template void InitHist(::sycl::queue* qu,
                        GHistRow<double, MemoryType::on_device>* hist,
                        size_t size, ::sycl::event* event);

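Note: the change above is the pattern for the whole commit — helpers that took a ::sycl::queue by value (copying the reference-counted handle on every call) now borrow a ::sycl::queue* owned by the DeviceManager. A minimal sketch of the calling convention, with a hypothetical Accumulate helper standing in for the real histogram kernels:

#include <sycl/sycl.hpp>

// Hypothetical helper: the queue is owned elsewhere (a device manager) and is
// only borrowed here, so the handle is not copied per call and every caller
// is guaranteed to hit the same underlying queue object.
::sycl::event Accumulate(::sycl::queue* qu, float* data, size_t size) {
  return qu->submit([&](::sycl::handler& cgh) {
    cgh.parallel_for(::sycl::range<1>(size), [=](::sycl::id<1> pid) {
      data[pid[0]] += 1.0f;
    });
  });
}

int main() {
  ::sycl::queue qu{::sycl::default_selector_v};  // owned in one place
  float* data = ::sycl::malloc_shared<float>(16, qu);
  Accumulate(&qu, data, 16).wait();              // borrowed everywhere else
  ::sycl::free(data, qu);
}

Returning the ::sycl::event instead of waiting keeps such helpers composable with the depends_on chains used throughout the kernels below.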
@@ -35,25 +35,25 @@ template void InitHist(::sycl::queue qu,
  * \brief Copy histogram from src to dst
  */
 template<typename GradientSumT>
-void CopyHist(::sycl::queue qu,
+void CopyHist(::sycl::queue* qu,
               GHistRow<GradientSumT, MemoryType::on_device>* dst,
               const GHistRow<GradientSumT, MemoryType::on_device>& src,
               size_t size) {
   GradientSumT* pdst = reinterpret_cast<GradientSumT*>(dst->Data());
   const GradientSumT* psrc = reinterpret_cast<const GradientSumT*>(src.DataConst());

-  qu.submit([&](::sycl::handler& cgh) {
+  qu->submit([&](::sycl::handler& cgh) {
     cgh.parallel_for<>(::sycl::range<1>(2 * size), [=](::sycl::item<1> pid) {
       const size_t i = pid.get_id(0);
       pdst[i] = psrc[i];
     });
   }).wait();
 }
-template void CopyHist(::sycl::queue qu,
+template void CopyHist(::sycl::queue* qu,
                        GHistRow<float, MemoryType::on_device>* dst,
                        const GHistRow<float, MemoryType::on_device>& src,
                        size_t size);
-template void CopyHist(::sycl::queue qu,
+template void CopyHist(::sycl::queue* qu,
                        GHistRow<double, MemoryType::on_device>* dst,
                        const GHistRow<double, MemoryType::on_device>& src,
                        size_t size);
@@ -62,7 +62,7 @@ template void CopyHist(::sycl::queue qu,
  * \brief Compute Subtraction: dst = src1 - src2
  */
 template<typename GradientSumT>
-::sycl::event SubtractionHist(::sycl::queue qu,
+::sycl::event SubtractionHist(::sycl::queue* qu,
                               GHistRow<GradientSumT, MemoryType::on_device>* dst,
                               const GHistRow<GradientSumT, MemoryType::on_device>& src1,
                               const GHistRow<GradientSumT, MemoryType::on_device>& src2,
@@ -71,7 +71,7 @@ template<typename GradientSumT>
   const GradientSumT* psrc1 = reinterpret_cast<const GradientSumT*>(src1.DataConst());
   const GradientSumT* psrc2 = reinterpret_cast<const GradientSumT*>(src2.DataConst());

-  auto event_final = qu.submit([&](::sycl::handler& cgh) {
+  auto event_final = qu->submit([&](::sycl::handler& cgh) {
     cgh.depends_on(event_priv);
     cgh.parallel_for<>(::sycl::range<1>(2 * size), [pdst, psrc1, psrc2](::sycl::item<1> pid) {
       const size_t i = pid.get_id(0);
@@ -80,25 +80,25 @@ template<typename GradientSumT>
   });
   return event_final;
 }
-template ::sycl::event SubtractionHist(::sycl::queue qu,
+template ::sycl::event SubtractionHist(::sycl::queue* qu,
                                        GHistRow<float, MemoryType::on_device>* dst,
                                        const GHistRow<float, MemoryType::on_device>& src1,
                                        const GHistRow<float, MemoryType::on_device>& src2,
                                        size_t size, ::sycl::event event_priv);
-template ::sycl::event SubtractionHist(::sycl::queue qu,
+template ::sycl::event SubtractionHist(::sycl::queue* qu,
                                        GHistRow<double, MemoryType::on_device>* dst,
                                        const GHistRow<double, MemoryType::on_device>& src1,
                                        const GHistRow<double, MemoryType::on_device>& src2,
                                        size_t size, ::sycl::event event_priv);

-inline auto GetBlocksParameters(const ::sycl::queue& qu, size_t size, size_t max_nblocks) {
+inline auto GetBlocksParameters(::sycl::queue* qu, size_t size, size_t max_nblocks) {
   struct _ {
     size_t block_size, nblocks;
   };

   const size_t min_block_size = 32;
   const size_t max_compute_units =
-    qu.get_device().get_info<::sycl::info::device::max_compute_units>();
+    qu->get_device().get_info<::sycl::info::device::max_compute_units>();

   size_t nblocks = max_compute_units;

@@ -117,7 +117,7 @@ inline auto GetBlocksParameters(const ::sycl::queue& qu, size_t size, size_t max

 // Kernel with buffer using
 template<typename FPType, typename BinIdxType, bool isDense>
-::sycl::event BuildHistKernel(::sycl::queue qu,
+::sycl::event BuildHistKernel(::sycl::queue* qu,
                               const USMVector<GradientPair, MemoryType::on_device>& gpair_device,
                               const RowSetCollection::Elem& row_indices,
                               const GHistIndexMatrix& gmat,
@@ -134,7 +134,7 @@ template<typename FPType, typename BinIdxType, bool isDense>
   const size_t nbins = gmat.nbins;

   const size_t max_work_group_size =
-    qu.get_device().get_info<::sycl::info::device::max_work_group_size>();
+    qu->get_device().get_info<::sycl::info::device::max_work_group_size>();
   const size_t work_group_size = n_columns < max_work_group_size ? n_columns : max_work_group_size;

   // Captured structured bindings are a C++20 extension
@@ -143,8 +143,9 @@ template<typename FPType, typename BinIdxType, bool isDense>
   const size_t nblocks = block_params.nblocks;

   GradientPairT* hist_buffer_data = hist_buffer->Data();
-  auto event_fill = qu.fill(hist_buffer_data, GradientPairT(0, 0), nblocks * nbins * 2, event_priv);
-  auto event_main = qu.submit([&](::sycl::handler& cgh) {
+  auto event_fill = qu->fill(hist_buffer_data, GradientPairT(0, 0),
+                             nblocks * nbins * 2, event_priv);
+  auto event_main = qu->submit([&](::sycl::handler& cgh) {
     cgh.depends_on(event_fill);
     cgh.parallel_for<>(::sycl::nd_range<2>(::sycl::range<2>(nblocks, work_group_size),
                                            ::sycl::range<2>(1, work_group_size)),
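The hunk above shows the buffer-based kernel's asynchronous structure: a fill, then the main pass, each submission declaring its dependency explicitly so the host never blocks until the final wait. A reduced sketch of that fill → main → save chain (hypothetical names, not the actual kernel bodies):

// Minimal sketch of event chaining, assuming USM pointers buf/out of size n.
::sycl::event BuildChained(::sycl::queue* qu, float* buf, float* out, size_t n) {
  auto event_fill = qu->fill(buf, 0.0f, n);                  // zero the buffer
  auto event_main = qu->submit([&](::sycl::handler& cgh) {
    cgh.depends_on(event_fill);                              // runs after the fill
    cgh.parallel_for(::sycl::range<1>(n),
                     [=](::sycl::id<1> i) { buf[i[0]] += 1.0f; });
  });
  return qu->submit([&](::sycl::handler& cgh) {
    cgh.depends_on(event_main);                              // runs after the main pass
    cgh.parallel_for(::sycl::range<1>(n),
                     [=](::sycl::id<1> i) { out[i[0]] = buf[i[0]]; });
  });
}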
@@ -178,7 +179,7 @@ template<typename FPType, typename BinIdxType, bool isDense>
   });

   GradientPairT* hist_data = hist->Data();
-  auto event_save = qu.submit([&](::sycl::handler& cgh) {
+  auto event_save = qu->submit([&](::sycl::handler& cgh) {
     cgh.depends_on(event_main);
     cgh.parallel_for<>(::sycl::range<1>(nbins), [=](::sycl::item<1> pid) {
       size_t idx_bin = pid.get_id(0);
@@ -197,7 +198,7 @@ template<typename FPType, typename BinIdxType, bool isDense>

 // Kernel with atomic using
 template<typename FPType, typename BinIdxType, bool isDense>
-::sycl::event BuildHistKernel(::sycl::queue qu,
+::sycl::event BuildHistKernel(::sycl::queue* qu,
                               const USMVector<GradientPair, MemoryType::on_device>& gpair_device,
                               const RowSetCollection::Elem& row_indices,
                               const GHistIndexMatrix& gmat,
@@ -216,8 +217,8 @@ template<typename FPType, typename BinIdxType, bool isDense>
   constexpr size_t work_group_size = 32;
   const size_t n_work_groups = n_columns / work_group_size + (n_columns % work_group_size > 0);

-  auto event_fill = qu.fill(hist_data, FPType(0), nbins * 2, event_priv);
-  auto event_main = qu.submit([&](::sycl::handler& cgh) {
+  auto event_fill = qu->fill(hist_data, FPType(0), nbins * 2, event_priv);
+  auto event_main = qu->submit([&](::sycl::handler& cgh) {
     cgh.depends_on(event_fill);
     cgh.parallel_for<>(::sycl::nd_range<2>(::sycl::range<2>(size, n_work_groups * work_group_size),
                                            ::sycl::range<2>(1, work_group_size)),
@@ -252,7 +253,7 @@ template<typename FPType, typename BinIdxType, bool isDense>

 template<typename FPType, typename BinIdxType>
 ::sycl::event BuildHistDispatchKernel(
-    ::sycl::queue qu,
+    ::sycl::queue* qu,
     const USMVector<GradientPair, MemoryType::on_device>& gpair_device,
     const RowSetCollection::Elem& row_indices,
     const GHistIndexMatrix& gmat,
@@ -292,7 +293,7 @@ template<typename FPType, typename BinIdxType>
 }

 template<typename FPType>
-::sycl::event BuildHistKernel(::sycl::queue qu,
+::sycl::event BuildHistKernel(::sycl::queue* qu,
                               const USMVector<GradientPair, MemoryType::on_device>& gpair_device,
                               const RowSetCollection::Elem& row_indices,
                               const GHistIndexMatrix& gmat, const bool isDense,

@@ -32,7 +32,7 @@ class ColumnMatrix;
  * \brief Fill histogram with zeroes
  */
 template<typename GradientSumT>
-void InitHist(::sycl::queue qu,
+void InitHist(::sycl::queue* qu,
               GHistRow<GradientSumT, MemoryType::on_device>* hist,
               size_t size, ::sycl::event* event);

@@ -40,7 +40,7 @@ void InitHist(::sycl::queue qu,
  * \brief Copy histogram from src to dst
  */
 template<typename GradientSumT>
-void CopyHist(::sycl::queue qu,
+void CopyHist(::sycl::queue* qu,
               GHistRow<GradientSumT, MemoryType::on_device>* dst,
               const GHistRow<GradientSumT, MemoryType::on_device>& src,
               size_t size);
@@ -49,7 +49,7 @@ void CopyHist(::sycl::queue qu,
  * \brief Compute subtraction: dst = src1 - src2
  */
 template<typename GradientSumT>
-::sycl::event SubtractionHist(::sycl::queue qu,
+::sycl::event SubtractionHist(::sycl::queue* qu,
                               GHistRow<GradientSumT, MemoryType::on_device>* dst,
                               const GHistRow<GradientSumT, MemoryType::on_device>& src1,
                               const GHistRow<GradientSumT, MemoryType::on_device>& src2,
@@ -73,7 +73,7 @@ class HistCollection {
   }

   // Initialize histogram collection
-  void Init(::sycl::queue qu, uint32_t nbins) {
+  void Init(::sycl::queue* qu, uint32_t nbins) {
     qu_ = qu;
     if (nbins_ != nbins) {
       nbins_ = nbins;
@@ -86,11 +86,11 @@ class HistCollection {
     ::sycl::event event;
     if (data_.count(nid) == 0) {
       data_[nid] =
-        std::make_shared<GHistRowT>(&qu_, nbins_,
+        std::make_shared<GHistRowT>(qu_, nbins_,
                                     xgboost::detail::GradientPairInternal<GradientSumT>(0, 0),
                                     &event);
     } else {
-      data_[nid]->Resize(&qu_, nbins_,
+      data_[nid]->Resize(qu_, nbins_,
                          xgboost::detail::GradientPairInternal<GradientSumT>(0, 0),
                          &event);
     }
@@ -103,7 +103,7 @@ class HistCollection {

   std::unordered_map<uint32_t, std::shared_ptr<GHistRowT>> data_;

-  ::sycl::queue qu_;
+  ::sycl::queue* qu_;
 };

 /*!
@@ -114,7 +114,7 @@ class ParallelGHistBuilder {
  public:
   using GHistRowT = GHistRow<GradientSumT, MemoryType::on_device>;

-  void Init(::sycl::queue qu, size_t nbins) {
+  void Init(::sycl::queue* qu, size_t nbins) {
     qu_ = qu;
     if (nbins != nbins_) {
       hist_buffer_.Init(qu_, nbins);
@@ -123,7 +123,7 @@ class ParallelGHistBuilder {
   }

   void Reset(size_t nblocks) {
-    hist_device_buffer_.Resize(&qu_, nblocks * nbins_ * 2);
+    hist_device_buffer_.Resize(qu_, nblocks * nbins_ * 2);
   }

   GHistRowT& GetDeviceBuffer() {
@@ -139,7 +139,7 @@ class ParallelGHistBuilder {
   /*! \brief Buffer for additional histograms for Parallel processing */
   GHistRowT hist_device_buffer_;

-  ::sycl::queue qu_;
+  ::sycl::queue* qu_;
 };

 /*!
@@ -152,7 +152,7 @@ class GHistBuilder {
   using GHistRowT = GHistRow<GradientSumT, memory_type>;

   GHistBuilder() = default;
-  GHistBuilder(::sycl::queue qu, uint32_t nbins) : qu_{qu}, nbins_{nbins} {}
+  GHistBuilder(::sycl::queue* qu, uint32_t nbins) : qu_{qu}, nbins_{nbins} {}

   // Construct a histogram via histogram aggregation
   ::sycl::event BuildHist(const USMVector<GradientPair, MemoryType::on_device>& gpair_device,
@@ -177,7 +177,7 @@ class GHistBuilder {
   /*! \brief Number of all bins over all features */
   uint32_t nbins_ { 0 };

-  ::sycl::queue qu_;
+  ::sycl::queue* qu_;
 };
 }  // namespace common
 }  // namespace sycl

plugin/sycl/common/host_device_vector.cc (new file, 410 lines)
@@ -0,0 +1,410 @@
+/**
+ * Copyright 2017-2024 by XGBoost contributors
+ */
+
+#ifdef XGBOOST_USE_SYCL
+
+// implementation of HostDeviceVector with sycl support
+
+#include <memory>
+#include <utility>
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-W#pragma-messages"
+#include "xgboost/host_device_vector.h"
+#pragma GCC diagnostic pop
+
+#include "../device_manager.h"
+#include "../data.h"
+
+namespace xgboost {
+template <typename T>
+class HostDeviceVectorImpl {
+  using DeviceStorage = sycl::USMVector<T, sycl::MemoryType::on_device>;
+
+ public:
+  explicit HostDeviceVectorImpl(size_t size, T v, DeviceOrd device) : device_(device) {
+    if (device.IsSycl()) {
+      device_access_ = GPUAccess::kWrite;
+      SetDevice();
+      data_d_->Resize(qu_, size, v);
+    } else {
+      data_h_.resize(size, v);
+    }
+  }
+
+  template <class Initializer>
+  HostDeviceVectorImpl(const Initializer& init, DeviceOrd device) : device_(device) {
+    if (device.IsSycl()) {
+      device_access_ = GPUAccess::kWrite;
+
+      ResizeDevice(init.size());
+      Copy(init);
+    } else {
+      data_h_ = init;
+    }
+  }
+
+  HostDeviceVectorImpl(HostDeviceVectorImpl<T>&& that) : device_{that.device_},
+                                                         data_h_{std::move(that.data_h_)},
+                                                         data_d_{std::move(that.data_d_)},
+                                                         device_access_{that.device_access_} {}
+
+  std::vector<T>& HostVector() {
+    SyncHost(GPUAccess::kNone);
+    return data_h_;
+  }
+
+  const std::vector<T>& ConstHostVector() {
+    SyncHost(GPUAccess::kRead);
+    return data_h_;
+  }
+
+  void SetDevice(DeviceOrd device) {
+    if (device_ == device) { return; }
+    if (device_.IsSycl()) {
+      SyncHost(GPUAccess::kNone);
+    }
+
+    if (device_.IsSycl() && device.IsSycl()) {
+      CHECK_EQ(device_, device)
+        << "New device is different from previous one.";
+    }
+    device_ = device;
+    if (device_.IsSycl()) {
+      ResizeDevice(data_h_.size());
+    }
+  }
+
+  template <typename... U>
+  void Resize(size_t new_size, U&&... args) {
+    if (new_size == Size()) {
+      return;
+    }
+    if ((Size() == 0 && device_.IsSycl()) || (DeviceCanWrite() && device_.IsSycl())) {
+      // fast on-device resize
+      device_access_ = GPUAccess::kWrite;
+      SetDevice();
+      auto old_size = data_d_->Size();
+      data_d_->Resize(qu_, new_size, std::forward<U>(args)...);
+    } else {
+      // resize on host
+      SyncHost(GPUAccess::kNone);
+      auto old_size = data_h_.size();
+      data_h_.resize(new_size, std::forward<U>(args)...);
+    }
+  }
+
+  void SyncHost(GPUAccess access) {
+    if (HostCanAccess(access)) { return; }
+    if (HostCanRead()) {
+      // data is present, just need to deny access to the device
+      device_access_ = access;
+      return;
+    }
+    device_access_ = access;
+    if (data_h_.size() != data_d_->Size()) { data_h_.resize(data_d_->Size()); }
+    SetDevice();
+    qu_->memcpy(data_h_.data(), data_d_->Data(), data_d_->Size() * sizeof(T)).wait();
+  }
+
+  void SyncDevice(GPUAccess access) {
+    if (DeviceCanAccess(access)) { return; }
+    if (DeviceCanRead()) {
+      device_access_ = access;
+      return;
+    }
+    // data is on the host
+    ResizeDevice(data_h_.size());
+    SetDevice();
+    qu_->memcpy(data_d_->Data(), data_h_.data(), data_d_->Size() * sizeof(T)).wait();
+    device_access_ = access;
+  }
+
+  bool HostCanAccess(GPUAccess access) const { return device_access_ <= access; }
+  bool HostCanRead() const { return HostCanAccess(GPUAccess::kRead); }
+  bool HostCanWrite() const { return HostCanAccess(GPUAccess::kNone); }
+  bool DeviceCanAccess(GPUAccess access) const { return device_access_ >= access; }
+  bool DeviceCanRead() const { return DeviceCanAccess(GPUAccess::kRead); }
+  bool DeviceCanWrite() const { return DeviceCanAccess(GPUAccess::kWrite); }
+  GPUAccess Access() const { return device_access_; }
+
+  size_t Size() const {
+    return HostCanRead() ? data_h_.size() : data_d_ ? data_d_->Size() : 0;
+  }
+
+  DeviceOrd Device() const { return device_; }
+
+  T* DevicePointer() {
+    SyncDevice(GPUAccess::kWrite);
+    return data_d_->Data();
+  }
+
+  const T* ConstDevicePointer() {
+    SyncDevice(GPUAccess::kRead);
+    return data_d_->DataConst();
+  }
+
+  common::Span<T> DeviceSpan() {
+    SyncDevice(GPUAccess::kWrite);
+    return {this->DevicePointer(), Size()};
+  }
+
+  common::Span<const T> ConstDeviceSpan() {
+    SyncDevice(GPUAccess::kRead);
+    return {this->ConstDevicePointer(), Size()};
+  }
+
+  void Fill(T v) {
+    if (HostCanWrite()) {
+      std::fill(data_h_.begin(), data_h_.end(), v);
+    } else {
+      device_access_ = GPUAccess::kWrite;
+      SetDevice();
+      qu_->fill(data_d_->Data(), v, data_d_->Size()).wait();
+    }
+  }
+
+  void Copy(HostDeviceVectorImpl<T>* other) {
+    CHECK_EQ(Size(), other->Size());
+    SetDevice(other->device_);
+    // Data is on host.
+    if (HostCanWrite() && other->HostCanWrite()) {
+      std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin());
+      return;
+    }
+    SetDevice();
+    CopyToDevice(other);
+  }
+
+  void Copy(const std::vector<T>& other) {
+    CHECK_EQ(Size(), other.size());
+    if (HostCanWrite()) {
+      std::copy(other.begin(), other.end(), data_h_.begin());
+    } else {
+      CopyToDevice(other.data());
+    }
+  }
+
+  void Copy(std::initializer_list<T> other) {
+    CHECK_EQ(Size(), other.size());
+    if (HostCanWrite()) {
+      std::copy(other.begin(), other.end(), data_h_.begin());
+    } else {
+      CopyToDevice(other.begin());
+    }
+  }
+
+  void Extend(HostDeviceVectorImpl* other) {
+    auto ori_size = this->Size();
+    this->Resize(ori_size + other->Size(), T{});
+    if (HostCanWrite() && other->HostCanRead()) {
+      auto& h_vec = this->HostVector();
+      auto& other_vec = other->HostVector();
+      CHECK_EQ(h_vec.size(), ori_size + other->Size());
+      std::copy(other_vec.cbegin(), other_vec.cend(), h_vec.begin() + ori_size);
+    } else {
+      auto ptr = other->ConstDevicePointer();
+      SetDevice();
+      CHECK_EQ(this->Device(), other->Device());
+      qu_->memcpy(this->DevicePointer() + ori_size, ptr, other->Size() * sizeof(T)).wait();
+    }
+  }
+
+ private:
+  void ResizeDevice(size_t new_size) {
+    if (data_d_ && new_size == data_d_->Size()) { return; }
+    SetDevice();
+    data_d_->Resize(qu_, new_size);
+  }
+
+  void SetDevice() {
+    if (!qu_) {
+      qu_ = device_manager_.GetQueue(device_);
+    }
+    if (!data_d_) {
+      data_d_.reset(new DeviceStorage());
+    }
+  }
+
+  void CopyToDevice(HostDeviceVectorImpl* other) {
+    if (other->HostCanWrite()) {
+      CopyToDevice(other->data_h_.data());
+    } else {
+      ResizeDevice(Size());
+      device_access_ = GPUAccess::kWrite;
+      SetDevice();
+      qu_->memcpy(data_d_->Data(), other->data_d_->Data(), data_d_->Size() * sizeof(T)).wait();
+    }
+  }
+
+  void CopyToDevice(const T* begin) {
+    data_d_->ResizeNoCopy(qu_, Size());
+    qu_->memcpy(data_d_->Data(), begin, data_d_->Size() * sizeof(T)).wait();
+    device_access_ = GPUAccess::kWrite;
+  }
+
+  sycl::DeviceManager device_manager_;
+  ::sycl::queue* qu_ = nullptr;
+  DeviceOrd device_{DeviceOrd::CPU()};
+  std::vector<T> data_h_{};
+  std::unique_ptr<DeviceStorage> data_d_{};
+  GPUAccess device_access_{GPUAccess::kNone};
+};
+
+template <typename T>
+HostDeviceVector<T>::HostDeviceVector(size_t size, T v, DeviceOrd device)
+  : impl_(nullptr) {
+  impl_ = new HostDeviceVectorImpl<T>(size, v, device);
+}
+
+template <typename T>
+HostDeviceVector<T>::HostDeviceVector(std::initializer_list<T> init, DeviceOrd device)
+  : impl_(nullptr) {
+  impl_ = new HostDeviceVectorImpl<T>(init, device);
+}
+
+template <typename T>
+HostDeviceVector<T>::HostDeviceVector(const std::vector<T>& init, DeviceOrd device)
+  : impl_(nullptr) {
+  impl_ = new HostDeviceVectorImpl<T>(init, device);
+}
+
+template <typename T>
+HostDeviceVector<T>::HostDeviceVector(HostDeviceVector<T>&& that) {
+  impl_ = new HostDeviceVectorImpl<T>(std::move(*that.impl_));
+}
+
+template <typename T>
+HostDeviceVector<T>& HostDeviceVector<T>::operator=(HostDeviceVector<T>&& that) {
+  if (this == &that) { return *this; }
+
+  std::unique_ptr<HostDeviceVectorImpl<T>> new_impl(
+      new HostDeviceVectorImpl<T>(std::move(*that.impl_)));
+  delete impl_;
+  impl_ = new_impl.release();
+  return *this;
+}
+
+template <typename T>
+HostDeviceVector<T>::~HostDeviceVector() {
+  delete impl_;
+  impl_ = nullptr;
+}
+
+template <typename T>
+size_t HostDeviceVector<T>::Size() const { return impl_->Size(); }
+
+template <typename T>
+DeviceOrd HostDeviceVector<T>::Device() const {
+  return impl_->Device();
+}
+
+template <typename T>
+T* HostDeviceVector<T>::DevicePointer() {
+  return impl_->DevicePointer();
+}
+
+template <typename T>
+const T* HostDeviceVector<T>::ConstDevicePointer() const {
+  return impl_->ConstDevicePointer();
+}
+
+template <typename T>
+common::Span<T> HostDeviceVector<T>::DeviceSpan() {
+  return impl_->DeviceSpan();
+}
+
+template <typename T>
+common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan() const {
+  return impl_->ConstDeviceSpan();
+}
+
+template <typename T>
+std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); }
+
+template <typename T>
+const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const {
+  return impl_->ConstHostVector();
+}
+
+template <typename T>
+void HostDeviceVector<T>::Resize(size_t new_size, T v) {
+  impl_->Resize(new_size, v);
+}
+
+template <typename T>
+void HostDeviceVector<T>::Resize(size_t new_size) {
+  impl_->Resize(new_size);
+}
+
+template <typename T>
+void HostDeviceVector<T>::Fill(T v) {
+  impl_->Fill(v);
+}
+
+template <typename T>
+void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) {
+  impl_->Copy(other.impl_);
+}
+
+template <typename T>
+void HostDeviceVector<T>::Copy(const std::vector<T>& other) {
+  impl_->Copy(other);
+}
+
+template <typename T>
+void HostDeviceVector<T>::Copy(std::initializer_list<T> other) {
+  impl_->Copy(other);
+}
+
+template <typename T>
+void HostDeviceVector<T>::Extend(HostDeviceVector const& other) {
+  impl_->Extend(other.impl_);
+}
+
+template <typename T>
+bool HostDeviceVector<T>::HostCanRead() const {
+  return impl_->HostCanRead();
+}
+
+template <typename T>
+bool HostDeviceVector<T>::HostCanWrite() const {
+  return impl_->HostCanWrite();
+}
+
+template <typename T>
+bool HostDeviceVector<T>::DeviceCanRead() const {
+  return impl_->DeviceCanRead();
+}
+
+template <typename T>
+bool HostDeviceVector<T>::DeviceCanWrite() const {
+  return impl_->DeviceCanWrite();
+}
+
+template <typename T>
+GPUAccess HostDeviceVector<T>::DeviceAccess() const {
+  return impl_->Access();
+}
+
+template <typename T>
+void HostDeviceVector<T>::SetDevice(DeviceOrd device) const {
+  impl_->SetDevice(device);
+}
+
+// explicit instantiations are required, as HostDeviceVector isn't header-only
+template class HostDeviceVector<bst_float>;
+template class HostDeviceVector<double>;
+template class HostDeviceVector<GradientPair>;
+template class HostDeviceVector<GradientPairPrecise>;
+template class HostDeviceVector<int32_t>;   // bst_node_t
+template class HostDeviceVector<uint8_t>;
+template class HostDeviceVector<int8_t>;
+template class HostDeviceVector<FeatureType>;
+template class HostDeviceVector<Entry>;
+template class HostDeviceVector<bst_idx_t>;
+template class HostDeviceVector<uint32_t>;  // bst_feature_t
+
+}  // namespace xgboost
+
+#endif  // XGBOOST_USE_SYCL
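For orientation, device_access_ implements the same access lattice as the CUDA HostDeviceVector: GPUAccess::kNone < kRead < kWrite, the host may use the data when device_access_ <= the requested access and the device when device_access_ >= it. So kNone means the host owns the data, kWrite means the device does, and kRead lets both sides read. A hedged usage sketch (assuming an XGBOOST_USE_SYCL build; DeviceOrd::SyclDefault() is inferred from the IsSyclDefault() checks elsewhere in this commit):

#include "xgboost/host_device_vector.h"

void Sketch() {
  using xgboost::HostDeviceVector;
  using xgboost::DeviceOrd;

  HostDeviceVector<float> vec(128, 0.0f, DeviceOrd::SyclDefault());
  vec.Fill(1.0f);                      // device-side fill via the cached queue
  float* d_ptr = vec.DevicePointer();  // grants kWrite: host copy is now stale
  (void)d_ptr;
  auto& h_vec = vec.HostVector();      // SyncHost: memcpy back, device loses access
  h_vec[0] = 42.0f;                    // safe: the host owns the data again
}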
@@ -37,14 +37,14 @@ enum class MemoryType { shared, on_device};
 template <typename T>
 class USMDeleter {
  public:
-  explicit USMDeleter(::sycl::queue qu) : qu_(qu) {}
+  explicit USMDeleter(::sycl::queue* qu) : qu_(qu) {}

   void operator()(T* data) const {
-    ::sycl::free(data, qu_);
+    ::sycl::free(data, *qu_);
   }

  private:
-  ::sycl::queue qu_;
+  ::sycl::queue* qu_;
 };

 template <typename T, MemoryType memory_type = MemoryType::shared>
@@ -53,9 +53,9 @@ class USMVector {

   std::shared_ptr<T> allocate_memory_(::sycl::queue* qu, size_t size) {
     if constexpr (memory_type == MemoryType::shared) {
-      return std::shared_ptr<T>(::sycl::malloc_shared<T>(size_, *qu), USMDeleter<T>(*qu));
+      return std::shared_ptr<T>(::sycl::malloc_shared<T>(size_, *qu), USMDeleter<T>(qu));
     } else {
-      return std::shared_ptr<T>(::sycl::malloc_device<T>(size_, *qu), USMDeleter<T>(*qu));
+      return std::shared_ptr<T>(::sycl::malloc_device<T>(size_, *qu), USMDeleter<T>(qu));
     }
   }

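As context for the hunk above: malloc_shared memory migrates between host and device, while malloc_device memory is device-only and must be reached via queue memcpy; in both cases the deleter now stores the borrowed queue pointer instead of a queue copy. A standalone sketch of that pattern (hypothetical MakeDeviceBuffer name):

#include <memory>
#include <sycl/sycl.hpp>

// The deleter captures only the queue pointer, mirroring USMDeleter above:
// freeing needs the queue's context, but the deleter stays cheap to copy.
template <typename T>
std::shared_ptr<T> MakeDeviceBuffer(::sycl::queue* qu, size_t n) {
  T* raw = ::sycl::malloc_device<T>(n, *qu);  // device-only allocation
  return std::shared_ptr<T>(raw, [qu](T* p) { ::sycl::free(p, *qu); });
}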
@@ -227,14 +227,14 @@ class USMVector {
 /* Wrapper for DMatrix which stores all batches in a single USM buffer */
 struct DeviceMatrix {
   DMatrix* p_mat;  // Pointer to the original matrix on the host
-  ::sycl::queue qu_;
+  ::sycl::queue* qu_;
   USMVector<size_t, MemoryType::on_device> row_ptr;
   USMVector<Entry, MemoryType::on_device> data;
   size_t total_offset;

   DeviceMatrix() = default;

-  void Init(::sycl::queue qu, DMatrix* dmat) {
+  void Init(::sycl::queue* qu, DMatrix* dmat) {
     qu_ = qu;
     p_mat = dmat;
@@ -247,9 +247,9 @@ struct DeviceMatrix {
       num_row += batch.Size();
     }

-    row_ptr.Resize(&qu_, num_row + 1);
+    row_ptr.Resize(qu_, num_row + 1);
     size_t* rows = row_ptr.Data();
-    data.Resize(&qu_, num_nonzero);
+    data.Resize(qu_, num_nonzero);

     size_t data_offset = 0;
     ::sycl::event event;
@@ -259,10 +259,10 @@ struct DeviceMatrix {
       size_t batch_size = batch.Size();
       if (batch_size > 0) {
         const auto base_rowid = batch.base_rowid;
-        event = qu.memcpy(row_ptr.Data() + base_rowid, offset_vec.data(),
+        event = qu->memcpy(row_ptr.Data() + base_rowid, offset_vec.data(),
                           sizeof(size_t) * batch_size, event);
         if (base_rowid > 0) {
-          qu.submit([&](::sycl::handler& cgh) {
+          qu->submit([&](::sycl::handler& cgh) {
             cgh.depends_on(event);
             cgh.parallel_for<>(::sycl::range<1>(batch_size), [=](::sycl::id<1> pid) {
               int row_id = pid[0];
@@ -270,19 +270,19 @@ struct DeviceMatrix {
             });
           });
         }
-        event = qu.memcpy(data.Data() + data_offset, data_vec.data(),
+        event = qu->memcpy(data.Data() + data_offset, data_vec.data(),
                           sizeof(Entry) * offset_vec[batch_size], event);
         data_offset += offset_vec[batch_size];
-        qu.wait();
+        qu->wait();
       }
     }
-    qu.submit([&](::sycl::handler& cgh) {
+    qu_->submit([&](::sycl::handler& cgh) {
       cgh.depends_on(event);
       cgh.single_task<>([=] {
         rows[num_row] = data_offset;
       });
     });
-    qu.wait();
+    qu_->wait();
     total_offset = data_offset;
   }

@@ -49,7 +49,7 @@ void mergeSort(BinIdxType* begin, BinIdxType* end, BinIdxType* buf) {
 }

 template <typename BinIdxType>
-void GHistIndexMatrix::SetIndexData(::sycl::queue qu,
+void GHistIndexMatrix::SetIndexData(::sycl::queue* qu,
                                     BinIdxType* index_data,
                                     const DeviceMatrix &dmat,
                                     size_t nbins,
@@ -66,11 +66,11 @@ void GHistIndexMatrix::SetIndexData(::sycl::queue qu,
   // Sparse case only
   if (!offsets) {
     // sort_buff has type uint8_t
-    sort_buff.Resize(&qu, num_rows * row_stride * sizeof(BinIdxType));
+    sort_buff.Resize(qu, num_rows * row_stride * sizeof(BinIdxType));
   }
   BinIdxType* sort_data = reinterpret_cast<BinIdxType*>(sort_buff.Data());

-  auto event = qu.submit([&](::sycl::handler& cgh) {
+  auto event = qu->submit([&](::sycl::handler& cgh) {
     cgh.parallel_for<>(::sycl::range<1>(num_rows), [=](::sycl::item<1> pid) {
       const size_t i = pid.get_id(0);
       const size_t ibegin = offset_vec[i];
@@ -92,8 +92,8 @@ void GHistIndexMatrix::SetIndexData(::sycl::queue qu,
       }
     });
   });
-  qu.memcpy(hit_count.data(), hit_count_ptr, nbins * sizeof(size_t), event);
-  qu.wait();
+  qu->memcpy(hit_count.data(), hit_count_ptr, nbins * sizeof(size_t), event);
+  qu->wait();
 }

 void GHistIndexMatrix::ResizeIndex(size_t n_index, bool isDense) {
@@ -110,7 +110,7 @@ void GHistIndexMatrix::ResizeIndex(size_t n_index, bool isDense) {
   }
 }

-void GHistIndexMatrix::Init(::sycl::queue qu,
+void GHistIndexMatrix::Init(::sycl::queue* qu,
                             Context const * ctx,
                             const DeviceMatrix& p_fmat_device,
                             int max_bins) {
@@ -123,7 +123,7 @@ void GHistIndexMatrix::Init(::sycl::queue qu,
   const uint32_t nbins = cut.Ptrs().back();
   this->nbins = nbins;
   hit_count.resize(nbins, 0);
-  hit_count_buff.Resize(&qu, nbins, 0);
+  hit_count_buff.Resize(qu, nbins, 0);

   this->p_fmat = p_fmat_device.p_mat;
   const bool isDense = p_fmat_device.p_mat->IsDense();
@@ -150,7 +150,7 @@ void GHistIndexMatrix::Init(::sycl::queue qu,
   if (isDense) {
     index.ResizeOffset(n_offsets);
     offsets = index.Offset();
-    qu.memcpy(offsets, cut_device.Ptrs().DataConst(),
+    qu->memcpy(offsets, cut_device.Ptrs().DataConst(),
               sizeof(uint32_t) * n_offsets).wait_and_throw();
   }

@@ -26,16 +26,16 @@ class HistogramCuts {
  public:
   HistogramCuts() {}

-  explicit HistogramCuts(::sycl::queue qu) {}
+  explicit HistogramCuts(::sycl::queue* qu) {}

   ~HistogramCuts() {
   }

-  void Init(::sycl::queue qu, xgboost::common::HistogramCuts const& cuts) {
+  void Init(::sycl::queue* qu, xgboost::common::HistogramCuts const& cuts) {
     qu_ = qu;
-    cut_values_.Init(&qu_, cuts.cut_values_.HostVector());
-    cut_ptrs_.Init(&qu_, cuts.cut_ptrs_.HostVector());
-    min_vals_.Init(&qu_, cuts.min_vals_.HostVector());
+    cut_values_.Init(qu_, cuts.cut_values_.HostVector());
+    cut_ptrs_.Init(qu_, cuts.cut_ptrs_.HostVector());
+    min_vals_.Init(qu_, cuts.min_vals_.HostVector());
   }

   // Getters for USM buffers to pass pointers into device kernels
@@ -47,7 +47,7 @@ class HistogramCuts {
   USMVector<bst_float> cut_values_;
   USMVector<uint32_t> cut_ptrs_;
   USMVector<float> min_vals_;
-  ::sycl::queue qu_;
+  ::sycl::queue* qu_;
 };

 using BinTypeSize = ::xgboost::common::BinTypeSize;
@@ -115,11 +115,11 @@ struct Index {
   }

   void Resize(const size_t nBytesData) {
-    data_.Resize(&qu_, nBytesData);
+    data_.Resize(qu_, nBytesData);
   }

   void ResizeOffset(const size_t nDisps) {
-    offset_.Resize(&qu_, nDisps);
+    offset_.Resize(qu_, nDisps);
     p_ = nDisps;
   }
@@ -131,7 +131,7 @@ struct Index {
     return data_.End();
   }

-  void setQueue(::sycl::queue qu) {
+  void setQueue(::sycl::queue* qu) {
     qu_ = qu;
   }
@@ -155,7 +155,7 @@ struct Index {
   size_t p_ {1};
   Func func_;

-  ::sycl::queue qu_;
+  ::sycl::queue* qu_;
 };

 /*!
@@ -182,11 +182,11 @@ struct GHistIndexMatrix {
   size_t row_stride;

   // Create a global histogram matrix based on a given DMatrix device wrapper
-  void Init(::sycl::queue qu, Context const * ctx,
+  void Init(::sycl::queue* qu, Context const * ctx,
             const sycl::DeviceMatrix& p_fmat_device, int max_num_bins);

   template <typename BinIdxType>
-  void SetIndexData(::sycl::queue qu, BinIdxType* index_data,
+  void SetIndexData(::sycl::queue* qu, BinIdxType* index_data,
                     const sycl::DeviceMatrix &dmat_device,
                     size_t nbins, size_t row_stride, uint32_t* offsets);

@@ -9,85 +9,50 @@
 namespace xgboost {
 namespace sycl {

-::sycl::device DeviceManager::GetDevice(const DeviceOrd& device_spec) const {
+::sycl::queue* DeviceManager::GetQueue(const DeviceOrd& device_spec) const {
   if (!device_spec.IsSycl()) {
     LOG(WARNING) << "Sycl kernel is executed with non-sycl context: "
                  << device_spec.Name() << ". "
                  << "Default sycl device_selector will be used.";
   }

+  size_t queue_idx;
   bool not_use_default_selector = (device_spec.ordinal != kDefaultOrdinal) ||
                                   (collective::IsDistributed());
+  DeviceRegister& device_register = GetDevicesRegister();
   if (not_use_default_selector) {
-    DeviceRegister& device_register = GetDevicesRegister();
     const int device_idx =
         collective::IsDistributed() ? collective::GetRank() : device_spec.ordinal;
     if (device_spec.IsSyclDefault()) {
       auto& devices = device_register.devices;
       CHECK_LT(device_idx, devices.size());
-      return devices[device_idx];
-    } else if (device_spec.IsSyclCPU()) {
-      auto& cpu_devices = device_register.cpu_devices;
-      CHECK_LT(device_idx, cpu_devices.size());
-      return cpu_devices[device_idx];
-    } else {
-      auto& gpu_devices = device_register.gpu_devices;
-      CHECK_LT(device_idx, gpu_devices.size());
-      return gpu_devices[device_idx];
-    }
-  } else {
-    if (device_spec.IsSyclCPU()) {
-      return ::sycl::device(::sycl::cpu_selector_v);
-    } else if (device_spec.IsSyclGPU()) {
-      return ::sycl::device(::sycl::gpu_selector_v);
-    } else {
-      return ::sycl::device(::sycl::default_selector_v);
-    }
-  }
-}
-
-::sycl::queue DeviceManager::GetQueue(const DeviceOrd& device_spec) const {
-  if (!device_spec.IsSycl()) {
-    LOG(WARNING) << "Sycl kernel is executed with non-sycl context: "
-                 << device_spec.Name() << ". "
-                 << "Default sycl device_selector will be used.";
-  }
-
-  QueueRegister_t& queue_register = GetQueueRegister();
-  if (queue_register.count(device_spec.Name()) > 0) {
-    return queue_register.at(device_spec.Name());
-  }
-
-  bool not_use_default_selector = (device_spec.ordinal != kDefaultOrdinal) ||
-                                  (collective::IsDistributed());
-  std::lock_guard<std::mutex> guard(queue_registering_mutex);
-  if (not_use_default_selector) {
-    DeviceRegister& device_register = GetDevicesRegister();
-    const int device_idx =
-        collective::IsDistributed() ? collective::GetRank() : device_spec.ordinal;
-    if (device_spec.IsSyclDefault()) {
-      auto& devices = device_register.devices;
-      CHECK_LT(device_idx, devices.size());
-      queue_register[device_spec.Name()] = ::sycl::queue(devices[device_idx]);
+      queue_idx = device_idx;
     } else if (device_spec.IsSyclCPU()) {
-      auto& cpu_devices = device_register.cpu_devices;
-      CHECK_LT(device_idx, cpu_devices.size());
-      queue_register[device_spec.Name()] = ::sycl::queue(cpu_devices[device_idx]);
+      auto& cpu_devices_idxes = device_register.cpu_devices_idxes;
+      CHECK_LT(device_idx, cpu_devices_idxes.size());
+      queue_idx = cpu_devices_idxes[device_idx];
     } else if (device_spec.IsSyclGPU()) {
-      auto& gpu_devices = device_register.gpu_devices;
-      CHECK_LT(device_idx, gpu_devices.size());
-      queue_register[device_spec.Name()] = ::sycl::queue(gpu_devices[device_idx]);
+      auto& gpu_devices_idxes = device_register.gpu_devices_idxes;
+      CHECK_LT(device_idx, gpu_devices_idxes.size());
+      queue_idx = gpu_devices_idxes[device_idx];
     } else {
+      LOG(WARNING) << device_spec << " is not sycl, sycl:cpu or sycl:gpu";
+      auto device = ::sycl::queue(::sycl::default_selector_v).get_device();
+      queue_idx = device_register.devices.at(device);
     }
   } else {
     if (device_spec.IsSyclCPU()) {
-      queue_register[device_spec.Name()] = ::sycl::queue(::sycl::cpu_selector_v);
+      auto device = ::sycl::queue(::sycl::cpu_selector_v).get_device();
+      queue_idx = device_register.devices.at(device);
     } else if (device_spec.IsSyclGPU()) {
-      queue_register[device_spec.Name()] = ::sycl::queue(::sycl::gpu_selector_v);
+      auto device = ::sycl::queue(::sycl::gpu_selector_v).get_device();
+      queue_idx = device_register.devices.at(device);
     } else {
-      queue_register[device_spec.Name()] = ::sycl::queue(::sycl::default_selector_v);
+      auto device = ::sycl::queue(::sycl::default_selector_v).get_device();
+      queue_idx = device_register.devices.at(device);
     }
   }
-  return queue_register.at(device_spec.Name());
+  return &(device_register.queues[queue_idx]);
 }

 DeviceManager::DeviceRegister& DeviceManager::GetDevicesRegister() const {
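The net effect of this rewrite is that queues are constructed exactly once, inside GetDevicesRegister() below, and GetQueue() merely maps a device spec to an index into the queues vector, so callers receive a pointer that stays valid for the register's lifetime. A reduced sketch of that ownership model (hypothetical QueuePool name):

#include <sycl/sycl.hpp>
#include <vector>

// Hypothetical reduction of the register: queues are built once during
// construction and never reallocated afterwards, so handing out raw
// pointers to them is safe for the pool's lifetime.
struct QueuePool {
  std::vector<::sycl::queue> queues;

  QueuePool() {
    for (auto& device : ::sycl::device::get_devices()) {
      queues.emplace_back(device);
    }
  }

  ::sycl::queue* Get(size_t idx) { return &queues[idx]; }  // stable address
};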
@@ -102,21 +67,17 @@ DeviceManager::DeviceRegister& DeviceManager::GetDevicesRegister() const {
     }

     for (size_t i = 0; i < devices.size(); i++) {
-      device_register.devices.push_back(devices[i]);
+      device_register.devices[devices[i]] = i;
+      device_register.queues.push_back(::sycl::queue(devices[i]));
       if (devices[i].is_cpu()) {
-        device_register.cpu_devices.push_back(devices[i]);
+        device_register.cpu_devices_idxes.push_back(i);
       } else if (devices[i].is_gpu()) {
-        device_register.gpu_devices.push_back(devices[i]);
+        device_register.gpu_devices_idxes.push_back(i);
       }
     }
   }
   return device_register;
 }

-DeviceManager::QueueRegister_t& DeviceManager::GetQueueRegister() const {
-  static QueueRegister_t queue_register;
-  return queue_register;
-}
-
 }  // namespace sycl
 }  // namespace xgboost

@@ -23,25 +23,20 @@ namespace sycl {

 class DeviceManager {
  public:
-  ::sycl::queue GetQueue(const DeviceOrd& device_spec) const;
-
-  ::sycl::device GetDevice(const DeviceOrd& device_spec) const;
+  ::sycl::queue* GetQueue(const DeviceOrd& device_spec) const;

  private:
-  using QueueRegister_t = std::unordered_map<std::string, ::sycl::queue>;
   constexpr static int kDefaultOrdinal = -1;

   struct DeviceRegister {
-    std::vector<::sycl::device> devices;
-    std::vector<::sycl::device> cpu_devices;
-    std::vector<::sycl::device> gpu_devices;
+    std::vector<::sycl::queue> queues;
+    std::unordered_map<::sycl::device, size_t> devices;
+    std::vector<size_t> cpu_devices_idxes;
+    std::vector<size_t> gpu_devices_idxes;
   };

-  QueueRegister_t& GetQueueRegister() const;
-
   DeviceRegister& GetDevicesRegister() const;

-  mutable std::mutex queue_registering_mutex;
   mutable std::mutex device_registering_mutex;
 };

@@ -39,7 +39,7 @@ class SoftmaxMultiClassObj : public ObjFunction {

   void InitBuffers(const std::vector<int>& sample_rate) const {
     if (!are_buffs_init) {
-      batch_processor_.InitBuffers(&qu_, sample_rate);
+      batch_processor_.InitBuffers(qu_, sample_rate);
       are_buffs_init = true;
     }
   }
@@ -88,7 +88,7 @@ class SoftmaxMultiClassObj : public ObjFunction {
                   const bst_float* weights) {
     const size_t wg_size = 32;
     const size_t nwgs = ndata / wg_size + (ndata % wg_size > 0);
-    return linalg::GroupWiseKernel(&qu_, &flag, events, {nwgs, wg_size},
+    return linalg::GroupWiseKernel(qu_, &flag, events, {nwgs, wg_size},
       [=] (size_t idx, auto flag) {
         const bst_float* pred = preds + idx * nclass;
@@ -133,7 +133,7 @@ class SoftmaxMultiClassObj : public ObjFunction {
                  *(info.labels.Data()),
                  info.weights_);
     }
-    qu_.wait_and_throw();
+    qu_->wait_and_throw();

     if (flag == 0) {
       LOG(FATAL) << "SYCL::SoftmaxMultiClassObj: label must be in [0, num_class).";
@@ -160,7 +160,7 @@ class SoftmaxMultiClassObj : public ObjFunction {
     ::sycl::buffer<bst_float, 1> io_preds_buf(io_preds->HostPointer(), io_preds->Size());

     if (prob) {
-      qu_.submit([&](::sycl::handler& cgh) {
+      qu_->submit([&](::sycl::handler& cgh) {
         auto io_preds_acc = io_preds_buf.get_access<::sycl::access::mode::read_write>(cgh);
         cgh.parallel_for<>(::sycl::range<1>(ndata), [=](::sycl::id<1> pid) {
           int idx = pid[0];
@@ -171,7 +171,7 @@ class SoftmaxMultiClassObj : public ObjFunction {
     } else {
       ::sycl::buffer<bst_float, 1> max_preds_buf(max_preds_.HostPointer(), max_preds_.Size());

-      qu_.submit([&](::sycl::handler& cgh) {
+      qu_->submit([&](::sycl::handler& cgh) {
         auto io_preds_acc = io_preds_buf.get_access<::sycl::access::mode::read>(cgh);
         auto max_preds_acc = max_preds_buf.get_access<::sycl::access::mode::read_write>(cgh);
         cgh.parallel_for<>(::sycl::range<1>(ndata), [=](::sycl::id<1> pid) {
@@ -215,7 +215,7 @@ class SoftmaxMultiClassObj : public ObjFunction {

   sycl::DeviceManager device_manager;

-  mutable ::sycl::queue qu_;
+  mutable ::sycl::queue* qu_;
   static constexpr size_t kBatchSize = 1u << 22;
   mutable linalg::BatchProcessingHelper<GradientPair, bst_float, kBatchSize, 3> batch_processor_;
 };

@@ -48,7 +48,7 @@ class RegLossObj : public ObjFunction {

   void InitBuffers() const {
     if (!are_buffs_init) {
-      batch_processor_.InitBuffers(&qu_, {1, 1, 1, 1});
+      batch_processor_.InitBuffers(qu_, {1, 1, 1, 1});
       are_buffs_init = true;
     }
   }
@@ -58,13 +58,16 @@ class RegLossObj : public ObjFunction {

   void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
     param_.UpdateAllowUnknown(args);
     qu_ = device_manager.GetQueue(ctx_->Device());
   }

   void GetGradient(const HostDeviceVector<bst_float>& preds,
                    const MetaInfo &info,
                    int iter,
                    xgboost::linalg::Matrix<GradientPair>* out_gpair) override {
+    if (qu_ == nullptr) {
+      LOG(WARNING) << ctx_->Device();
+      qu_ = device_manager.GetQueue(ctx_->Device());
+    }
     if (info.labels.Size() == 0) return;
     CHECK_EQ(preds.Size(), info.labels.Size())
       << " " << "labels are not correctly provided"
@@ -97,7 +100,7 @@ class RegLossObj : public ObjFunction {
                 const bst_float* weights) {
     const size_t wg_size = 32;
     const size_t nwgs = ndata / wg_size + (ndata % wg_size > 0);
-    return linalg::GroupWiseKernel(&qu_, &flag, events, {nwgs, wg_size},
+    return linalg::GroupWiseKernel(qu_, &flag, events, {nwgs, wg_size},
       [=] (size_t idx, auto flag) {
        const bst_float pred = Loss::PredTransform(preds[idx]);
        bst_float weight = is_null_weight ? 1.0f : weights[idx/n_targets];
@@ -129,7 +132,7 @@ class RegLossObj : public ObjFunction {
                  *(info.labels.Data()),
                  info.weights_);
     }
-    qu_.wait_and_throw();
+    qu_->wait_and_throw();

     if (flag == 0) {
       LOG(FATAL) << Loss::LabelErrorMsg();
@@ -142,6 +145,10 @@ class RegLossObj : public ObjFunction {
   }

   void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
+    if (qu_ == nullptr) {
+      LOG(WARNING) << ctx_->Device();
+      qu_ = device_manager.GetQueue(ctx_->Device());
+    }
     size_t const ndata = io_preds->Size();
     if (ndata == 0) return;
     InitBuffers();
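The null check added above makes queue acquisition lazy: PredTransform (like GetGradient earlier in this file) re-resolves the queue on first use rather than assuming Configure already ran, and the warning makes an unexpectedly late initialization visible in the logs. A minimal self-contained sketch of the guard (hypothetical names):

#include <sycl/sycl.hpp>

// Hypothetical holder showing the lazy-acquisition pattern: the raw pointer
// stays null until first use, then is cached for all later calls.
class LazyQueueUser {
 public:
  void Run() {
    if (qu_ == nullptr) {  // first call: resolve and cache
      qu_ = AcquireQueue();
    }
    qu_->wait();           // later calls reuse the cached pointer
  }

 private:
  static ::sycl::queue* AcquireQueue() {
    static ::sycl::queue qu{::sycl::default_selector_v};
    return &qu;
  }
  ::sycl::queue* qu_ = nullptr;
};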
@@ -149,7 +156,7 @@ class RegLossObj : public ObjFunction {
     batch_processor_.Calculate([=] (const std::vector<::sycl::event>& events,
                                     size_t ndata,
                                     bst_float* io_preds) {
-      return qu_.submit([&](::sycl::handler& cgh) {
+      return qu_->submit([&](::sycl::handler& cgh) {
         cgh.depends_on(events);
         cgh.parallel_for<>(::sycl::range<1>(ndata), [=](::sycl::id<1> pid) {
           int idx = pid[0];
@@ -157,7 +164,7 @@ class RegLossObj : public ObjFunction {
         });
       });
     }, io_preds);
-    qu_.wait_and_throw();
+    qu_->wait_and_throw();
   }

   float ProbToMargin(float base_score) const override {
@@ -187,7 +194,7 @@ class RegLossObj : public ObjFunction {
   xgboost::obj::RegLossParam param_;
   sycl::DeviceManager device_manager;

-  mutable ::sycl::queue qu_;
+  mutable ::sycl::queue* qu_ = nullptr;
   static constexpr size_t kBatchSize = 1u << 22;
   mutable linalg::BatchProcessingHelper<GradientPair, bst_float, kBatchSize, 3> batch_processor_;
 };

@@ -277,7 +277,7 @@ class Predictor : public xgboost::Predictor {
   void PredictBatch(DMatrix *dmat, PredictionCacheEntry *predts,
                     const gbm::GBTreeModel &model, uint32_t tree_begin,
                     uint32_t tree_end = 0) const override {
-    ::sycl::queue qu = device_manager.GetQueue(ctx_->Device());
+    ::sycl::queue* qu = device_manager.GetQueue(ctx_->Device());
     // TODO(razdoburdin): remove temporary workaround after cache fix
     sycl::DeviceMatrix device_matrix;
     device_matrix.Init(qu, dmat);
@@ -290,9 +290,9 @@ class Predictor : public xgboost::Predictor {
     if (tree_begin < tree_end) {
       const bool any_missing = !(dmat->IsDense());
       if (any_missing) {
-        DevicePredictInternal<true>(&qu, device_matrix, out_preds, model, tree_begin, tree_end);
+        DevicePredictInternal<true>(qu, device_matrix, out_preds, model, tree_begin, tree_end);
       } else {
-        DevicePredictInternal<false>(&qu, device_matrix, out_preds, model, tree_begin, tree_end);
+        DevicePredictInternal<false>(qu, device_matrix, out_preds, model, tree_begin, tree_end);
       }
     }
   }

@@ -48,7 +48,7 @@ class BatchHistSynchronizer: public HistSynchronizer<GradientSumT> {
                               this_hist, nbins, ::sycl::event());
       }
     }
-    builder->qu_.wait_and_throw();
+    builder->qu_->wait_and_throw();

     builder->builder_monitor_.Stop("SyncHistograms");
   }
@@ -84,7 +84,7 @@ class DistributedHistSynchronizer: public HistSynchronizer<GradientSumT> {
         auto& sibling_hist = builder->hist_[sibling_nid];
         common::SubtractionHist(builder->qu_, &sibling_hist, parent_hist,
                                 this_hist, nbins, ::sycl::event());
-        builder->qu_.wait_and_throw();
+        builder->qu_->wait_and_throw();
         // Store posible parent node
         auto& sibling_local = builder->hist_local_worker_[sibling_nid];
         common::CopyHist(builder->qu_, &sibling_local, sibling_hist, nbins);
@@ -113,7 +113,7 @@ class DistributedHistSynchronizer: public HistSynchronizer<GradientSumT> {
         auto& sibling_hist = builder->hist_[entry.GetSiblingId(p_tree, parent_id)];
         common::SubtractionHist(builder->qu_, &this_hist, parent_hist,
                                 sibling_hist, nbins, ::sycl::event());
-        builder->qu_.wait_and_throw();
+        builder->qu_->wait_and_throw();
       }
     }
   }

@@ -31,7 +31,7 @@ void HistUpdater<GradientSumT>::ReduceHists(const std::vector<int>& sync_ids,
   for (size_t i = 0; i < sync_ids.size(); i++) {
     auto& this_hist = hist_[sync_ids[i]];
     const GradientPairT* psrc = reinterpret_cast<const GradientPairT*>(this_hist.DataConst());
-    qu_.memcpy(reduce_buffer_.data() + i * nbins, psrc, nbins*sizeof(GradientPairT)).wait();
+    qu_->memcpy(reduce_buffer_.data() + i * nbins, psrc, nbins*sizeof(GradientPairT)).wait();
   }

   auto buffer_vec = linalg::MakeVec(reinterpret_cast<GradientSumT*>(reduce_buffer_.data()),
@@ -42,7 +42,7 @@ void HistUpdater<GradientSumT>::ReduceHists(const std::vector<int>& sync_ids,
   for (size_t i = 0; i < sync_ids.size(); i++) {
     auto& this_hist = hist_[sync_ids[i]];
     GradientPairT* psrc = reinterpret_cast<GradientPairT*>(this_hist.Data());
-    qu_.memcpy(psrc, reduce_buffer_.data() + i * nbins, nbins*sizeof(GradientPairT)).wait();
+    qu_->memcpy(psrc, reduce_buffer_.data() + i * nbins, nbins*sizeof(GradientPairT)).wait();
   }
 }

@@ -75,7 +75,7 @@ void HistUpdater<GradientSumT>::BuildHistogramsLossGuide(

   std::vector<int> sync_ids;
   hist_rows_adder_->AddHistRows(this, &sync_ids, p_tree);
-  qu_.wait_and_throw();
+  qu_->wait_and_throw();
   BuildLocalHistograms(gmat, p_tree, gpair_device);
   hist_synchronizer_->SyncHistograms(this, sync_ids, p_tree);
 }
@@ -99,7 +99,7 @@ void HistUpdater<GradientSumT>::BuildLocalHistograms(
       common::InitHist(qu_, &(hist_[nid]), hist_[nid].Size(), &event);
     }
   }
-  qu_.wait_and_throw();
+  qu_->wait_and_throw();
   builder_monitor_.Stop("BuildLocalHistograms");
 }

@@ -382,9 +382,10 @@ bool HistUpdater<GradientSumT>::UpdatePredictionCache(

   ::sycl::event event;
   if (is_first_group) {
-    out_preds_buf_.ResizeNoCopy(&qu_, buffer_size);
+    out_preds_buf_.ResizeNoCopy(qu_, buffer_size);
     out_pred_ptr = &out_preds(0);
-    event = qu_.memcpy(out_preds_buf_.Data(), out_pred_ptr, buffer_size * sizeof(bst_float), event);
+    event = qu_->memcpy(out_preds_buf_.Data(), out_pred_ptr,
+                        buffer_size * sizeof(bst_float), event);
   }
   auto* out_preds_buf_ptr = out_preds_buf_.Data();

@@ -406,7 +407,7 @@ bool HistUpdater<GradientSumT>::UpdatePredictionCache(
       const size_t* rid = rowset.begin;
       const size_t num_rows = rowset.Size();

-      events[node] = qu_.submit([&](::sycl::handler& cgh) {
+      events[node] = qu_->submit([&](::sycl::handler& cgh) {
         cgh.depends_on(event);
         cgh.parallel_for<>(::sycl::range<1>(num_rows), [=](::sycl::item<1> pid) {
           out_preds_buf_ptr[rid[pid.get_id(0)]*stride + gid] += leaf_value;
@@ -415,10 +416,10 @@ bool HistUpdater<GradientSumT>::UpdatePredictionCache(
     }
   }
   if (is_last_group) {
-    qu_.memcpy(out_pred_ptr, out_preds_buf_ptr, buffer_size * sizeof(bst_float), events);
+    qu_->memcpy(out_pred_ptr, out_preds_buf_ptr, buffer_size * sizeof(bst_float), events);
     out_pred_ptr = nullptr;
   }
-  qu_.wait();
+  qu_->wait();

   builder_monitor_.Stop("UpdatePredictionCache");
   return true;
@@ -447,7 +448,7 @@ void HistUpdater<GradientSumT>::InitSampling(
    */
   if (has_fp64_support_) {
     // Use oneDPL bernoulli_distribution for better perf
-    event = qu_.submit([&](::sycl::handler& cgh) {
+    event = qu_->submit([&](::sycl::handler& cgh) {
       auto flag_buf_acc = flag_buf.get_access<::sycl::access::mode::read_write>(cgh);
       cgh.parallel_for<>(::sycl::range<1>(::sycl::range<1>(num_rows)),
                          [=](::sycl::item<1> pid) {
@@ -465,7 +466,7 @@ void HistUpdater<GradientSumT>::InitSampling(
     });
   } else {
     // Use oneDPL uniform, as far as bernoulli_distribution uses fp64
-    event = qu_.submit([&](::sycl::handler& cgh) {
+    event = qu_->submit([&](::sycl::handler& cgh) {
       auto flag_buf_acc = flag_buf.get_access<::sycl::access::mode::read_write>(cgh);
       cgh.parallel_for<>(::sycl::range<1>(::sycl::range<1>(num_rows)),
                          [=](::sycl::item<1> pid) {
@@ -485,8 +486,8 @@ void HistUpdater<GradientSumT>::InitSampling(
     /* After calling a destructor for flag_buf, content will be copyed to num_samples */
   }

-  row_indices->Resize(&qu_, num_samples, 0, &event);
-  qu_.wait();
+  row_indices->Resize(qu_, num_samples, 0, &event);
+  qu_->wait();
 }

 template<typename GradientSumT>
@@ -526,7 +527,7 @@ void HistUpdater<GradientSumT>::InitData(
   hist_builder_ = common::GHistBuilder<GradientSumT>(qu_, nbins);

   USMVector<size_t, MemoryType::on_device>* row_indices = &(row_set_collection_.Data());
-  row_indices->Resize(&qu_, info.num_row_);
+  row_indices->Resize(qu_, info.num_row_);
   size_t* p_row_indices = row_indices->Data();
   // mark subsample and build list of member rows
   if (param_.subsample < 1.0f) {
@@ -540,7 +541,7 @@ void HistUpdater<GradientSumT>::InitData(
       ::sycl::event event;
       {
         ::sycl::buffer<int, 1> flag_buf(&has_neg_hess, 1);
-        event = qu_.submit([&](::sycl::handler& cgh) {
+        event = qu_->submit([&](::sycl::handler& cgh) {
           auto flag_buf_acc = flag_buf.get_access<::sycl::access::mode::read_write>(cgh);
           cgh.parallel_for<>(::sycl::range<1>(::sycl::range<1>(info.num_row_)),
                              [=](::sycl::item<1> pid) {
@@ -558,7 +559,7 @@ void HistUpdater<GradientSumT>::InitData(
         size_t max_idx = 0;
         {
           ::sycl::buffer<size_t, 1> flag_buf(&max_idx, 1);
-          event = qu_.submit([&](::sycl::handler& cgh) {
+          event = qu_->submit([&](::sycl::handler& cgh) {
             cgh.depends_on(event);
             auto flag_buf_acc = flag_buf.get_access<::sycl::access::mode::read_write>(cgh);
             cgh.parallel_for<>(::sycl::range<1>(::sycl::range<1>(info.num_row_)),
@@ -571,9 +572,9 @@ void HistUpdater<GradientSumT>::InitData(
             });
           });
         }
-        row_indices->Resize(&qu_, max_idx, 0, &event);
+        row_indices->Resize(qu_, max_idx, 0, &event);
       }
-      qu_.wait_and_throw();
+      qu_->wait_and_throw();
     }
   }
   row_set_collection_.Init();
@@ -661,7 +662,7 @@ void HistUpdater<GradientSumT>::ApplySplit(
   std::vector<int32_t> split_conditions(n_nodes);
   CommonRowPartitioner::FindSplitConditions(nodes, *p_tree, gmat, &split_conditions);

-  partition_builder_.Init(&qu_, n_nodes, [&](size_t node_in_set) {
+  partition_builder_.Init(qu_, n_nodes, [&](size_t node_in_set) {
     const int32_t nid = nodes[node_in_set].nid;
     return row_set_collection_[nid].Size();
   });
@@ -669,14 +670,14 @@ void HistUpdater<GradientSumT>::ApplySplit(
|
||||
::sycl::event event;
|
||||
partition_builder_.Partition(gmat, nodes, row_set_collection_,
|
||||
split_conditions, p_tree, &event);
|
||||
qu_.wait_and_throw();
|
||||
qu_->wait_and_throw();
|
||||
|
||||
for (size_t node_in_set = 0; node_in_set < n_nodes; node_in_set++) {
|
||||
const int32_t nid = nodes[node_in_set].nid;
|
||||
size_t* data_result = const_cast<size_t*>(row_set_collection_[nid].begin);
|
||||
partition_builder_.MergeToArray(node_in_set, data_result, &event);
|
||||
}
|
||||
qu_.wait_and_throw();
|
||||
qu_->wait_and_throw();
|
||||
|
||||
AddSplitsToRowSet(nodes, p_tree);
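
ApplySplit synchronizes with wait_and_throw rather than plain wait: besides blocking until the queue drains, it invokes the queue's asynchronous error handler, so kernel failures surface as exceptions instead of being silently dropped. A minimal sketch, assuming a queue constructed with an async handler (illustrative, not from this patch):

#include <sycl/sycl.hpp>
#include <iostream>

int main() {
  // Asynchronous errors from submitted work are delivered to this handler.
  ::sycl::queue qu{[](::sycl::exception_list errors) {
    for (const std::exception_ptr& e : errors) std::rethrow_exception(e);
  }};

  qu.submit([&](::sycl::handler& cgh) {
    cgh.single_task<>([=]() { /* device work */ });
  });

  try {
    // Blocks until all submitted work finishes, then runs the async
    // handler for any collected errors; the handler above rethrows them.
    qu.wait_and_throw();
  } catch (const ::sycl::exception& e) {
    std::cerr << "SYCL async error: " << e.what() << '\n';
  }
  return 0;
}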

@@ -702,7 +703,7 @@ void HistUpdater<GradientSumT>::InitNewNode(int nid,
const auto* hist = reinterpret_cast<GradStats<GradientSumT>*>(hist_[nid].Data());

std::vector<GradStats<GradientSumT>> ets(iend - ibegin);
qu_.memcpy(ets.data(), hist + ibegin,
qu_->memcpy(ets.data(), hist + ibegin,
(iend - ibegin) * sizeof(GradStats<GradientSumT>)).wait_and_throw();
for (const auto& et : ets) {
grad_stat += et;
@@ -714,7 +715,7 @@ void HistUpdater<GradientSumT>::InitNewNode(int nid,
const GradientPair* gpair_ptr = gpair.DataConst();

::sycl::buffer<GradStats<GradientSumT>> buff(&grad_stat, 1);
qu_.submit([&](::sycl::handler& cgh) {
qu_->submit([&](::sycl::handler& cgh) {
auto reduction = ::sycl::reduction(buff, cgh, ::sycl::plus<>());
cgh.parallel_for<>(::sycl::range<1>(size), reduction,
[=](::sycl::item<1> pid, auto& sum) {
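
InitNewNode accumulates gradient statistics with a SYCL 2020 reduction object: the one-element buffer holds the running total, ::sycl::reduction names the combine operation, and each work-item contributes through the extra reducer argument of the kernel lambda. A self-contained sketch of the same mechanism with illustrative values:

#include <sycl/sycl.hpp>
#include <iostream>

int main() {
  ::sycl::queue qu;
  float grad_sum = 0.0f;
  {
    ::sycl::buffer<float> buff(&grad_sum, 1);
    qu.submit([&](::sycl::handler& cgh) {
      auto reduction = ::sycl::reduction(buff, cgh, ::sycl::plus<>());
      cgh.parallel_for<>(::sycl::range<1>(1000), reduction,
                         [=](::sycl::item<1> pid, auto& sum) {
        sum += 0.5f;  // each work-item adds its contribution
      });
    });
  }  // buffer destructor waits and copies the reduced value back
  std::cout << grad_sum << '\n';  // prints 500
  return 0;
}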
@@ -786,8 +787,8 @@ void HistUpdater<GradientSumT>::EvaluateSplits(
}
const size_t total_features = pos;

split_queries_device_.Resize(&qu_, total_features);
auto event = qu_.memcpy(split_queries_device_.Data(), split_queries_host_.data(),
split_queries_device_.Resize(qu_, total_features);
auto event = qu_->memcpy(split_queries_device_.Data(), split_queries_host_.data(),
total_features * sizeof(SplitQuery));

auto evaluator = tree_evaluator_.GetEvaluator();
@@ -796,18 +797,18 @@ void HistUpdater<GradientSumT>::EvaluateSplits(
const bst_float* cut_val = gmat.cut_device.Values().DataConst();
const bst_float* cut_minval = gmat.cut_device.MinValues().DataConst();

snode_device_.ResizeNoCopy(&qu_, snode_host_.size());
event = qu_.memcpy(snode_device_.Data(), snode_host_.data(),
snode_device_.ResizeNoCopy(qu_, snode_host_.size());
event = qu_->memcpy(snode_device_.Data(), snode_host_.data(),
snode_host_.size() * sizeof(NodeEntry<GradientSumT>), event);
const NodeEntry<GradientSumT>* snode = snode_device_.Data();

const float min_child_weight = param_.min_child_weight;

best_splits_device_.ResizeNoCopy(&qu_, total_features);
best_splits_device_.ResizeNoCopy(qu_, total_features);
if (best_splits_host_.size() < total_features) best_splits_host_.resize(total_features);
SplitEntry<GradientSumT>* best_splits = best_splits_device_.Data();

event = qu_.submit([&](::sycl::handler& cgh) {
event = qu_->submit([&](::sycl::handler& cgh) {
cgh.depends_on(event);
cgh.parallel_for<>(::sycl::nd_range<2>(::sycl::range<2>(total_features, sub_group_size_),
::sycl::range<2>(1, sub_group_size_)),
@@ -823,10 +824,10 @@ void HistUpdater<GradientSumT>::EvaluateSplits(
&(best_splits[i]), fid, nid, evaluator, min_child_weight);
});
});
event = qu_.memcpy(best_splits_host_.data(), best_splits,
event = qu_->memcpy(best_splits_host_.data(), best_splits,
total_features * sizeof(SplitEntry<GradientSumT>), event);

qu_.wait();
qu_->wait();
for (size_t i = 0; i < total_features; i++) {
int nid = split_queries_host_[i].nid;
snode_host_[nid].best.Update(best_splits_host_[i]);
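
The split-evaluation kernel maps one work-group of sub_group_size_ items to each queried feature: the global range is total_features x sub_group_size_ and the local range is 1 x sub_group_size_, so the items handling one feature cooperate inside a single sub-group. A minimal sketch of that launch geometry with a sub-group reduction (sizes are illustrative, not from this patch):

#include <sycl/sycl.hpp>

int main() {
  ::sycl::queue qu;
  const size_t total_features = 64;
  const size_t sub_group_size = 16;
  float* out = ::sycl::malloc_device<float>(total_features, qu);

  qu.submit([&](::sycl::handler& cgh) {
    // One work-group per feature; each group is a single sub-group wide.
    cgh.parallel_for<>(
        ::sycl::nd_range<2>(::sycl::range<2>(total_features, sub_group_size),
                            ::sycl::range<2>(1, sub_group_size)),
        [=](::sycl::nd_item<2> pid) {
      const size_t fid = pid.get_global_id(0);   // feature index
      const size_t lane = pid.get_local_id(1);   // lane within the sub-group
      // Each lane computes a partial value; the sub-group combines them.
      float partial = static_cast<float>(fid + lane);
      float total = ::sycl::reduce_over_group(pid.get_sub_group(), partial,
                                              ::sycl::plus<>());
      if (lane == 0) out[fid] = total;
    });
  }).wait();

  ::sycl::free(out, qu);
  return 0;
}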

@@ -52,7 +52,7 @@ class HistUpdater {
using GradientPairT = xgboost::detail::GradientPairInternal<GradientSumT>;

explicit HistUpdater(const Context* ctx,
::sycl::queue qu,
::sycl::queue* qu,
const xgboost::tree::TrainParam& param,
FeatureInteractionConstraintHost int_constraints_,
DMatrix const* fmat)
@@ -63,11 +63,11 @@ class HistUpdater {
builder_monitor_.Init("SYCL::Quantile::HistUpdater");
kernel_monitor_.Init("SYCL::Quantile::HistUpdater");
if (param.max_depth > 0) {
snode_device_.Resize(&qu, 1u << (param.max_depth + 1));
snode_device_.Resize(qu, 1u << (param.max_depth + 1));
}
has_fp64_support_ = qu_.get_device().has(::sycl::aspect::fp64);
has_fp64_support_ = qu_->get_device().has(::sycl::aspect::fp64);
const auto sub_group_sizes =
qu_.get_device().get_info<::sycl::info::device::sub_group_sizes>();
qu_->get_device().get_info<::sycl::info::device::sub_group_sizes>();
sub_group_size_ = sub_group_sizes.back();
}
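
The constructor probes the device once and caches the results: has(::sycl::aspect::fp64) reports whether double precision is available, and info::device::sub_group_sizes lists the sub-group widths the device can schedule. A standalone sketch of both queries:

#include <sycl/sycl.hpp>
#include <iostream>

int main() {
  ::sycl::queue qu;
  auto device = qu.get_device();

  // fp64 is an optional feature; guard double-precision kernels behind it.
  const bool has_fp64 = device.has(::sycl::aspect::fp64);

  // Supported sub-group sizes, typically something like {8, 16, 32}.
  const auto sub_group_sizes =
      device.get_info<::sycl::info::device::sub_group_sizes>();

  std::cout << device.get_info<::sycl::info::device::name>() << '\n'
            << "fp64 support: " << has_fp64 << '\n'
            << "widest sub-group: " << sub_group_sizes.back() << '\n';
  return 0;
}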

@@ -266,8 +266,7 @@ class HistUpdater {
bst_float* out_pred_ptr = nullptr;

std::vector<GradientPairT> reduce_buffer_;

::sycl::queue qu_;
::sycl::queue* qu_;
};

} // namespace tree

@@ -42,11 +42,11 @@ class TreeEvaluator {
USMVector<GradType> upper_bounds_;
USMVector<int> monotone_;
TrainParam param_;
::sycl::queue qu_;
::sycl::queue* qu_;
bool has_constraint_;

public:
void Reset(::sycl::queue qu, xgboost::tree::TrainParam const& p, bst_feature_t n_features) {
void Reset(::sycl::queue* qu, xgboost::tree::TrainParam const& p, bst_feature_t n_features) {
qu_ = qu;

has_constraint_ = false;
@@ -58,13 +58,13 @@ class TreeEvaluator {
}

if (has_constraint_) {
monotone_.Resize(&qu_, n_features, 0);
qu_.memcpy(monotone_.Data(), p.monotone_constraints.data(),
monotone_.Resize(qu_, n_features, 0);
qu_->memcpy(monotone_.Data(), p.monotone_constraints.data(),
sizeof(int) * p.monotone_constraints.size());
qu_.wait();
qu_->wait();

lower_bounds_.Resize(&qu_, p.MaxNodes(), std::numeric_limits<GradType>::lowest());
upper_bounds_.Resize(&qu_, p.MaxNodes(), std::numeric_limits<GradType>::max());
lower_bounds_.Resize(qu_, p.MaxNodes(), std::numeric_limits<GradType>::lowest());
upper_bounds_.Resize(qu_, p.MaxNodes(), std::numeric_limits<GradType>::max());
}
param_ = TrainParam(p);
}
@@ -73,7 +73,7 @@ class TreeEvaluator {
return has_constraint_;
}

TreeEvaluator(::sycl::queue qu, xgboost::tree::TrainParam const& p, bst_feature_t n_features) {
TreeEvaluator(::sycl::queue* qu, xgboost::tree::TrainParam const& p, bst_feature_t n_features) {
Reset(qu, p, n_features);
}
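
Across HistUpdater, TreeEvaluator, and QuantileHistMaker the change is uniform: members and parameters switch from ::sycl::queue held by value to ::sycl::queue*. One plausible reading is that every component should alias a single queue owned elsewhere rather than carrying its own copy. A minimal sketch of that ownership shape (class names are hypothetical, not from this patch):

#include <sycl/sycl.hpp>

// Hypothetical owner: constructs the queue once and hands out pointers.
class DeviceManagerSketch {
 public:
  ::sycl::queue* GetQueue() { return &qu_; }
 private:
  ::sycl::queue qu_;
};

// Hypothetical consumer: stores a non-owning pointer, mirroring the
// qu_ members in this patch.
class UpdaterSketch {
 public:
  explicit UpdaterSketch(::sycl::queue* qu) : qu_(qu) {}
  void Run() {
    qu_->submit([&](::sycl::handler& cgh) {
      cgh.single_task<>([=]() { /* device work */ });
    }).wait();
  }
 private:
  ::sycl::queue* qu_;  // not owned; lifetime managed by DeviceManagerSketch
};

int main() {
  DeviceManagerSketch manager;
  UpdaterSketch updater(manager.GetQueue());
  updater.Run();
  return 0;
}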

@@ -31,7 +31,7 @@ void QuantileHistMaker::Configure(const Args& args) {
param_.UpdateAllowUnknown(args);
hist_maker_param_.UpdateAllowUnknown(args);

bool has_fp64_support = qu_.get_device().has(::sycl::aspect::fp64);
bool has_fp64_support = qu_->get_device().has(::sycl::aspect::fp64);
if (hist_maker_param_.single_precision_histogram || !has_fp64_support) {
if (!hist_maker_param_.single_precision_histogram) {
LOG(WARNING) << "Target device doesn't support fp64, using single_precision_histogram=True";
@@ -68,9 +68,9 @@ void QuantileHistMaker::CallUpdate(
xgboost::common::Span<HostDeviceVector<bst_node_t>> out_position,
const std::vector<RegTree *> &trees) {
const auto* gpair_h = gpair->Data();
gpair_device_.Resize(&qu_, gpair_h->Size());
qu_.memcpy(gpair_device_.Data(), gpair_h->HostPointer(), gpair_h->Size() * sizeof(GradientPair));
qu_.wait();
gpair_device_.Resize(qu_, gpair_h->Size());
qu_->memcpy(gpair_device_.Data(), gpair_h->HostPointer(), gpair_h->Size() * sizeof(GradientPair));
qu_->wait();

for (auto tree : trees) {
pimpl->Update(param, gmat_, gpair_device_, dmat, out_position, tree);
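
CallUpdate stages the gradients on the device with a plain queue memcpy from the host buffer, then waits before the update loop reads them. The same staging step in isolation, using a USM device allocation (the struct is an illustrative stand-in, not xgboost's type):

#include <sycl/sycl.hpp>
#include <vector>

struct GradientPairSketch {  // stand-in for xgboost's GradientPair
  float grad, hess;
};

int main() {
  ::sycl::queue qu;
  std::vector<GradientPairSketch> gpair_host(1000, {0.1f, 1.0f});

  // Device-side storage sized to the host buffer.
  auto* gpair_device =
      ::sycl::malloc_device<GradientPairSketch>(gpair_host.size(), qu);

  // Stage host -> device, then block until the copy lands so that
  // later kernels can safely read gpair_device.
  qu.memcpy(gpair_device, gpair_host.data(),
            gpair_host.size() * sizeof(GradientPairSketch));
  qu.wait();

  ::sycl::free(gpair_device, qu);
  return 0;
}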

@@ -105,7 +105,7 @@ class QuantileHistMaker: public TreeUpdater {

FeatureInteractionConstraintHost int_constraint_;

::sycl::queue qu_;
::sycl::queue* qu_;
DeviceManager device_manager;
ObjInfo const *task_{nullptr};