[EM] Support mmap backed ellpack. (#10602)

- Support resource view in ellpack.
- Define the CUDA version of MMAP resource.
- Define the CUDA version of malloc resource.
- Refactor cuda runtime API wrappers, and add memory access related wrappers.
- Gather Windows macros into a single header.
This commit is contained in:
Jiaming Yuan
2024-07-18 08:20:21 +08:00
committed by GitHub
parent e9fbce9791
commit 292bb677e5
59 changed files with 889 additions and 646 deletions

View File

@@ -27,15 +27,15 @@ TEST(EllpackPage, EmptyDMatrix) {
auto impl = page.Impl();
ASSERT_EQ(impl->row_stride, 0);
ASSERT_EQ(impl->Cuts().TotalBins(), 0);
ASSERT_EQ(impl->gidx_buffer.Size(), 4);
ASSERT_EQ(impl->gidx_buffer.size(), 4);
}
TEST(EllpackPage, BuildGidxDense) {
int constexpr kNRows = 16, kNCols = 8;
auto page = BuildEllpackPage(kNRows, kNCols);
std::vector<common::CompressedByteT> h_gidx_buffer(page->gidx_buffer.HostVector());
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), page->NumSymbols());
auto ctx = MakeCUDACtx(0);
auto page = BuildEllpackPage(&ctx, kNRows, kNCols);
std::vector<common::CompressedByteT> h_gidx_buffer;
auto h_accessor = page->GetHostAccessor(&ctx, &h_gidx_buffer);
ASSERT_EQ(page->row_stride, kNCols);
@@ -58,16 +58,17 @@ TEST(EllpackPage, BuildGidxDense) {
1, 4, 7, 10, 14, 16, 19, 21,
};
for (size_t i = 0; i < kNRows * kNCols; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
ASSERT_EQ(solution[i], h_accessor.gidx_iter[i]);
}
}
TEST(EllpackPage, BuildGidxSparse) {
int constexpr kNRows = 16, kNCols = 8;
auto page = BuildEllpackPage(kNRows, kNCols, 0.9f);
auto ctx = MakeCUDACtx(0);
auto page = BuildEllpackPage(&ctx, kNRows, kNCols, 0.9f);
std::vector<common::CompressedByteT> h_gidx_buffer(page->gidx_buffer.HostVector());
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
std::vector<common::CompressedByteT> h_gidx_buffer;
auto h_accessor = page->GetHostAccessor(&ctx, &h_gidx_buffer);
ASSERT_LE(page->row_stride, 3);
@@ -78,7 +79,7 @@ TEST(EllpackPage, BuildGidxSparse) {
24, 7, 14, 16, 4, 24, 24, 24, 24, 24, 9, 24, 24, 1, 24, 24
};
for (size_t i = 0; i < kNRows * page->row_stride; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
ASSERT_EQ(solution[i], h_accessor.gidx_iter[i]);
}
}
@@ -94,7 +95,7 @@ TEST(EllpackPage, FromCategoricalBasic) {
Context ctx{MakeCUDACtx(0)};
auto p = BatchParam{max_bins, tree::TrainParam::DftSparseThreshold()};
auto ellpack = EllpackPage(&ctx, m.get(), p);
auto accessor = ellpack.Impl()->GetDeviceAccessor(FstCU());
auto accessor = ellpack.Impl()->GetDeviceAccessor(ctx.Device());
ASSERT_EQ(kCats, accessor.NumBins());
auto x_copy = x;
@@ -110,13 +111,11 @@ TEST(EllpackPage, FromCategoricalBasic) {
ASSERT_EQ(h_cuts_ptr.size(), 2);
ASSERT_EQ(h_cuts_values.size(), kCats);
std::vector<common::CompressedByteT> const &h_gidx_buffer =
ellpack.Impl()->gidx_buffer.HostVector();
auto h_gidx_iter = common::CompressedIterator<uint32_t>(
h_gidx_buffer.data(), accessor.NumSymbols());
std::vector<common::CompressedByteT> h_gidx_buffer;
auto h_accessor = ellpack.Impl()->GetHostAccessor(&ctx, &h_gidx_buffer);
for (size_t i = 0; i < x.size(); ++i) {
auto bin = h_gidx_iter[i];
auto bin = h_accessor.gidx_iter[i];
auto bin_value = h_cuts_values.at(bin);
ASSERT_EQ(AsCat(x[i]), AsCat(bin_value));
}
@@ -152,12 +151,12 @@ TEST(EllpackPage, Copy) {
auto page = (*dmat->GetBatches<EllpackPage>(&ctx, param).begin()).Impl();
// Create an empty result page.
EllpackPageImpl result(FstCU(), page->CutsShared(), page->is_dense, page->row_stride, kRows);
EllpackPageImpl result(&ctx, page->CutsShared(), page->is_dense, page->row_stride, kRows);
// Copy batch pages into the result page.
size_t offset = 0;
for (auto& batch : dmat->GetBatches<EllpackPage>(&ctx, param)) {
size_t num_elements = result.Copy(FstCU(), batch.Impl(), offset);
size_t num_elements = result.Copy(&ctx, batch.Impl(), offset);
offset += num_elements;
}
@@ -171,11 +170,11 @@ TEST(EllpackPage, Copy) {
EXPECT_EQ(impl->base_rowid, current_row);
for (size_t i = 0; i < impl->Size(); i++) {
dh::LaunchN(kCols, ReadRowFunction(impl->GetDeviceAccessor(FstCU()), current_row,
dh::LaunchN(kCols, ReadRowFunction(impl->GetDeviceAccessor(ctx.Device()), current_row,
row_d.data().get()));
thrust::copy(row_d.begin(), row_d.end(), row.begin());
dh::LaunchN(kCols, ReadRowFunction(result.GetDeviceAccessor(FstCU()), current_row,
dh::LaunchN(kCols, ReadRowFunction(result.GetDeviceAccessor(ctx.Device()), current_row,
row_result_d.data().get()));
thrust::copy(row_result_d.begin(), row_result_d.end(), row_result.begin());
@@ -200,7 +199,7 @@ TEST(EllpackPage, Compact) {
auto page = (*dmat->GetBatches<EllpackPage>(&ctx, param).begin()).Impl();
// Create an empty result page.
EllpackPageImpl result(ctx.Device(), page->CutsShared(), page->is_dense, page->row_stride,
EllpackPageImpl result(&ctx, page->CutsShared(), page->is_dense, page->row_stride,
kCompactedRows);
// Compact batch pages into the result page.
@@ -229,14 +228,13 @@ TEST(EllpackPage, Compact) {
continue;
}
dh::LaunchN(kCols, ReadRowFunction(impl->GetDeviceAccessor(FstCU()),
current_row, row_d.data().get()));
dh::LaunchN(kCols, ReadRowFunction(impl->GetDeviceAccessor(ctx.Device()), current_row,
row_d.data().get()));
dh::safe_cuda(cudaDeviceSynchronize());
thrust::copy(row_d.begin(), row_d.end(), row.begin());
dh::LaunchN(kCols,
ReadRowFunction(result.GetDeviceAccessor(FstCU()), compacted_row,
row_result_d.data().get()));
dh::LaunchN(kCols, ReadRowFunction(result.GetDeviceAccessor(ctx.Device()), compacted_row,
row_result_d.data().get()));
thrust::copy(row_result_d.begin(), row_result_d.end(), row_result.begin());
EXPECT_EQ(row, row_result);
@@ -269,16 +267,13 @@ class EllpackPageTest : public testing::TestWithParam<float> {
ASSERT_EQ(from_sparse_page->base_rowid, 0);
ASSERT_EQ(from_sparse_page->base_rowid, from_ghist->base_rowid);
ASSERT_EQ(from_sparse_page->n_rows, from_ghist->n_rows);
ASSERT_EQ(from_sparse_page->gidx_buffer.Size(), from_ghist->gidx_buffer.Size());
auto const& h_gidx_from_sparse = from_sparse_page->gidx_buffer.HostVector();
auto const& h_gidx_from_ghist = from_ghist->gidx_buffer.HostVector();
ASSERT_EQ(from_sparse_page->gidx_buffer.size(), from_ghist->gidx_buffer.size());
std::vector<common::CompressedByteT> h_gidx_from_sparse, h_gidx_from_ghist;
auto from_ghist_acc = from_ghist->GetHostAccessor(&gpu_ctx, &h_gidx_from_ghist);
auto from_sparse_acc = from_sparse_page->GetHostAccessor(&gpu_ctx, &h_gidx_from_sparse);
ASSERT_EQ(from_sparse_page->NumSymbols(), from_ghist->NumSymbols());
common::CompressedIterator<uint32_t> from_ghist_it(h_gidx_from_ghist.data(),
from_ghist->NumSymbols());
common::CompressedIterator<uint32_t> from_sparse_it(h_gidx_from_sparse.data(),
from_sparse_page->NumSymbols());
for (size_t i = 0; i < from_ghist->n_rows * from_ghist->row_stride; ++i) {
EXPECT_EQ(from_ghist_it[i], from_sparse_it[i]);
EXPECT_EQ(from_ghist_acc.gidx_iter[i], from_sparse_acc.gidx_iter[i]);
}
}
}

View File

@@ -14,9 +14,8 @@
namespace xgboost::data {
namespace {
template <typename FormatStreamPolicy>
void TestEllpackPageRawFormat() {
FormatStreamPolicy policy;
void TestEllpackPageRawFormat(FormatStreamPolicy *p_policy) {
auto &policy = *p_policy;
Context ctx{MakeCUDACtx(0)};
auto param = BatchParam{256, tree::TrainParam::DftSparseThreshold()};
@@ -55,16 +54,30 @@ void TestEllpackPageRawFormat() {
ASSERT_EQ(loaded->Cuts().Values(), orig->Cuts().Values());
ASSERT_EQ(loaded->base_rowid, orig->base_rowid);
ASSERT_EQ(loaded->row_stride, orig->row_stride);
ASSERT_EQ(loaded->gidx_buffer.HostVector(), orig->gidx_buffer.HostVector());
std::vector<common::CompressedByteT> h_loaded, h_orig;
[[maybe_unused]] auto h_loaded_acc = loaded->GetHostAccessor(&ctx, &h_loaded);
[[maybe_unused]] auto h_orig_acc = orig->GetHostAccessor(&ctx, &h_orig);
ASSERT_EQ(h_loaded, h_orig);
}
}
} // anonymous namespace
TEST(EllpackPageRawFormat, DiskIO) {
TestEllpackPageRawFormat<DefaultFormatStreamPolicy<EllpackPage, EllpackFormatPolicy>>();
EllpackMmapStreamPolicy<EllpackPage, EllpackFormatPolicy> policy{false};
TestEllpackPageRawFormat(&policy);
}
TEST(EllpackPageRawFormat, DiskIOHmm) {
if (common::SupportsPageableMem()) {
EllpackMmapStreamPolicy<EllpackPage, EllpackFormatPolicy> policy{true};
TestEllpackPageRawFormat(&policy);
} else {
GTEST_SKIP_("HMM is not supported.");
}
}
TEST(EllpackPageRawFormat, HostIO) {
TestEllpackPageRawFormat<EllpackFormatStreamPolicy<EllpackPage, EllpackFormatPolicy>>();
EllpackCacheStreamPolicy<EllpackPage, EllpackFormatPolicy> policy;
TestEllpackPageRawFormat(&policy);
}
} // namespace xgboost::data

View File

@@ -1,5 +1,5 @@
/**
* Copyright 2020-2023, XGBoost contributors
* Copyright 2020-2024, XGBoost contributors
*/
#include <gtest/gtest.h>
@@ -21,10 +21,10 @@ void TestEquivalent(float sparsity) {
std::size_t offset = 0;
auto first = (*m.GetEllpackBatches(&ctx, {}).begin()).Impl();
std::unique_ptr<EllpackPageImpl> page_concatenated{new EllpackPageImpl(
ctx.Device(), first->CutsShared(), first->is_dense, first->row_stride, 1000 * 100)};
&ctx, first->CutsShared(), first->is_dense, first->row_stride, 1000 * 100)};
for (auto& batch : m.GetBatches<EllpackPage>(&ctx, {})) {
auto page = batch.Impl();
size_t num_elements = page_concatenated->Copy(ctx.Device(), page, offset);
size_t num_elements = page_concatenated->Copy(&ctx, page, offset);
offset += num_elements;
}
auto from_iter = page_concatenated->GetDeviceAccessor(ctx.Device());
@@ -66,18 +66,15 @@ void TestEquivalent(float sparsity) {
ASSERT_EQ(cut_ptrs_iter[i], cut_ptrs_data[i]);
}
auto const& buffer_from_iter = page_concatenated->gidx_buffer;
auto const& buffer_from_data = ellpack.Impl()->gidx_buffer;
ASSERT_NE(buffer_from_data.Size(), 0);
common::CompressedIterator<uint32_t> data_buf{
buffer_from_data.ConstHostPointer(), from_data.NumSymbols()};
common::CompressedIterator<uint32_t> data_iter{
buffer_from_iter.ConstHostPointer(), from_iter.NumSymbols()};
std::vector<common::CompressedByteT> buffer_from_iter, buffer_from_data;
auto data_iter = page_concatenated->GetHostAccessor(&ctx, &buffer_from_iter);
auto data_buf = ellpack.Impl()->GetHostAccessor(&ctx, &buffer_from_data);
ASSERT_NE(buffer_from_data.size(), 0);
ASSERT_NE(buffer_from_iter.size(), 0);
CHECK_EQ(from_data.NumSymbols(), from_iter.NumSymbols());
CHECK_EQ(from_data.n_rows * from_data.row_stride, from_data.n_rows * from_iter.row_stride);
for (size_t i = 0; i < from_data.n_rows * from_data.row_stride; ++i) {
CHECK_EQ(data_buf[i], data_iter[i]);
CHECK_EQ(data_buf.gidx_iter[i], data_iter.gidx_iter[i]);
}
}
}
@@ -97,8 +94,8 @@ TEST(IterativeDeviceDMatrix, RowMajor) {
for (auto& ellpack : m.GetBatches<EllpackPage>(&ctx, {})) {
n_batches ++;
auto impl = ellpack.Impl();
common::CompressedIterator<uint32_t> iterator(
impl->gidx_buffer.HostVector().data(), impl->NumSymbols());
std::vector<common::CompressedByteT> h_gidx;
auto h_accessor = impl->GetHostAccessor(&ctx, &h_gidx);
auto cols = CudaArrayIterForTest::Cols();
auto rows = CudaArrayIterForTest::Rows();
@@ -111,7 +108,7 @@ TEST(IterativeDeviceDMatrix, RowMajor) {
for(auto i = 0ull; i < rows * cols; i++) {
int column_idx = i % cols;
EXPECT_EQ(impl->Cuts().SearchBin(h_data[i], column_idx), iterator[i]);
EXPECT_EQ(impl->Cuts().SearchBin(h_data[i], column_idx), h_accessor.gidx_iter[i]);
}
EXPECT_EQ(m.Info().num_col_, cols);
EXPECT_EQ(m.Info().num_row_, rows);
@@ -147,12 +144,12 @@ TEST(IterativeDeviceDMatrix, RowMajorMissing) {
*m.GetBatches<EllpackPage>(&ctx, BatchParam{256, tree::TrainParam::DftSparseThreshold()})
.begin();
auto impl = ellpack.Impl();
common::CompressedIterator<uint32_t> iterator(
impl->gidx_buffer.HostVector().data(), impl->NumSymbols());
EXPECT_EQ(iterator[1], impl->GetDeviceAccessor(ctx.Device()).NullValue());
EXPECT_EQ(iterator[5], impl->GetDeviceAccessor(ctx.Device()).NullValue());
std::vector<common::CompressedByteT> h_gidx;
auto h_accessor = impl->GetHostAccessor(&ctx, &h_gidx);
EXPECT_EQ(h_accessor.gidx_iter[1], impl->GetDeviceAccessor(ctx.Device()).NullValue());
EXPECT_EQ(h_accessor.gidx_iter[5], impl->GetDeviceAccessor(ctx.Device()).NullValue());
// null values get placed after valid values in a row
EXPECT_EQ(iterator[7], impl->GetDeviceAccessor(ctx.Device()).NullValue());
EXPECT_EQ(h_accessor.gidx_iter[7], impl->GetDeviceAccessor(ctx.Device()).NullValue());
EXPECT_EQ(m.Info().num_col_, cols);
EXPECT_EQ(m.Info().num_row_, rows);
EXPECT_EQ(m.Info().num_nonzero_, rows* cols - 3);

View File

@@ -154,13 +154,18 @@ TEST(SparsePageDMatrix, RetainEllpackPage) {
for (auto it = begin; it != end; ++it) {
iterators.push_back(it.Page());
gidx_buffers.emplace_back();
gidx_buffers.back().Resize((*it).Impl()->gidx_buffer.Size());
gidx_buffers.back().Copy((*it).Impl()->gidx_buffer);
gidx_buffers.back().SetDevice(ctx.Device());
gidx_buffers.back().Resize((*it).Impl()->gidx_buffer.size());
auto d_dst = gidx_buffers.back().DevicePointer();
auto const& d_src = (*it).Impl()->gidx_buffer;
dh::safe_cuda(cudaMemcpyAsync(d_dst, d_src.data(), d_src.size_bytes(), cudaMemcpyDefault));
}
ASSERT_GE(iterators.size(), 2);
for (size_t i = 0; i < iterators.size(); ++i) {
ASSERT_EQ((*iterators[i]).Impl()->gidx_buffer.HostVector(), gidx_buffers.at(i).HostVector());
std::vector<common::CompressedByteT> h_buf;
[[maybe_unused]] auto h_acc = (*iterators[i]).Impl()->GetHostAccessor(&ctx, &h_buf);
ASSERT_EQ(h_buf, gidx_buffers.at(i).HostVector());
ASSERT_EQ(iterators[i].use_count(), 1);
}
@@ -210,11 +215,11 @@ class TestEllpackPageExt : public ::testing::TestWithParam<std::tuple<bool, bool
size_t offset = 0;
for (auto& batch : p_ext_fmat->GetBatches<EllpackPage>(&ctx, param)) {
if (!impl_ext) {
impl_ext = std::make_unique<EllpackPageImpl>(
batch.Impl()->gidx_buffer.Device(), batch.Impl()->CutsShared(), batch.Impl()->is_dense,
batch.Impl()->row_stride, kRows);
impl_ext = std::make_unique<EllpackPageImpl>(&ctx, batch.Impl()->CutsShared(),
batch.Impl()->is_dense,
batch.Impl()->row_stride, kRows);
}
auto n_elems = impl_ext->Copy(ctx.Device(), batch.Impl(), offset);
auto n_elems = impl_ext->Copy(&ctx, batch.Impl(), offset);
offset += n_elems;
}
ASSERT_EQ(impl_ext->base_rowid, 0);
@@ -223,8 +228,10 @@ class TestEllpackPageExt : public ::testing::TestWithParam<std::tuple<bool, bool
ASSERT_EQ(impl_ext->row_stride, 2);
ASSERT_EQ(impl_ext->Cuts().TotalBins(), 4);
std::vector<common::CompressedByteT> buffer(impl->gidx_buffer.HostVector());
std::vector<common::CompressedByteT> buffer_ext(impl_ext->gidx_buffer.HostVector());
std::vector<common::CompressedByteT> buffer;
[[maybe_unused]] auto h_acc = impl->GetHostAccessor(&ctx, &buffer);
std::vector<common::CompressedByteT> buffer_ext;
[[maybe_unused]] auto h_ext_acc = impl_ext->GetHostAccessor(&ctx, &buffer_ext);
ASSERT_EQ(buffer, buffer_ext);
}
};