xgboost/tests/cpp/tree/gpu_hist/test_row_partitioner.cu
Philip Hyunsu Cho 9adb812a0a
RMM integration plugin (#5873)
* [CI] Add RMM as an optional dependency

* Replace caching allocator with pool allocator from RMM

* Revert "Replace caching allocator with pool allocator from RMM"

This reverts commit e15845d4e72e890c2babe31a988b26503a7d9038.

* Use rmm::mr::get_default_resource()

* Try setting default resource (doesn't work yet)

* Allocate pool_mr in the heap

* Prevent leaking pool_mr handle

* Separate EXPECT_DEATH() in separate test suite suffixed DeathTest

* Turn off death tests for RMM

* Address reviewer's feedback

* Prevent leaking of cuda_mr

* Fix Jenkinsfile syntax

* Remove unnecessary function in Jenkinsfile

* [CI] Install NCCL into RMM container

* Run Python tests

* Try building with RMM, CUDA 10.0

* Do not use RMM for CUDA 10.0 target

* Actually test for test_rmm flag

* Fix TestPythonGPU

* Use CNMeM allocator, since pool allocator doesn't yet support multi-GPU

* Use 10.0 container to build RMM-enabled XGBoost

* Revert "Use 10.0 container to build RMM-enabled XGBoost"

This reverts commit 789021fa31112e25b683aef39fff375403060141.

* Fix Jenkinsfile

* [CI] Assign larger /dev/shm to NCCL

* Use 10.2 artifact to run multi-GPU Python tests

* Add CUDA 10.0 -> 11.0 cross-version test; remove CUDA 10.0 target

* Rename Conda env rmm_test -> gpu_test

* Use env var to opt into CNMeM pool for C++ tests

* Use identical CUDA version for RMM builds and tests

* Use Pytest fixtures to enable RMM pool in Python tests

* Move RMM to plugin/CMakeLists.txt; use PLUGIN_RMM

* Use per-device MR; use command arg in gtest (per-device pool set-up sketched below)

* Set CMake prefix path to use Conda env

* Use 0.15 nightly version of RMM

* Remove unnecessary header

* Fix a unit test when cudf is missing

* Add RMM demos

* Remove print()

* Use HostDeviceVector in GPU predictor

* Simplify pytest setup; use LocalCUDACluster fixture

* Address reviewers' comments

Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
2020-08-12 01:26:02 -07:00
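
The memory-resource wiring described in the log above (a heap-allocated pool_mr installed as the per-device resource, with its upstream cuda_mr kept alive so neither handle leaks) would look roughly like the sketch below. This is only an illustration against RMM's per-device-resource API as of the 0.15 nightlies the PR targets, not the code merged in #5873; SetupRmmPool is a hypothetical helper name.

#include <memory>

#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>

// Heap-allocate both resources and keep them alive for the whole process:
// the pool stores a raw pointer to its upstream CUDA resource, so the
// upstream must outlive the pool.
void SetupRmmPool() {
  using CudaMr = rmm::mr::cuda_memory_resource;
  using PoolMr = rmm::mr::pool_memory_resource<CudaMr>;
  static auto cuda_mr = std::make_unique<CudaMr>();
  static auto pool_mr = std::make_unique<PoolMr>(cuda_mr.get());
  // Route every RMM allocation on the current device through the pool.
  rmm::mr::set_current_device_resource(pool_mr.get());
}

Per the "command arg in gtest" item above, the C++ test harness then opts into this pool via a command-line argument rather than an environment variable.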

#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>

#include <algorithm>
#include <vector>

#include "../../../../src/tree/gpu_hist/row_partitioner.cuh"
#include "../../helpers.h"
namespace xgboost {
namespace tree {
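// Verifies SortPosition: node positions come out sorted, and row indices
// stay ordered within the resulting left and right segments.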
void TestSortPosition(const std::vector<int>& position_in, int left_idx,
                      int right_idx) {
  dh::safe_cuda(cudaSetDevice(0));
  std::vector<int64_t> left_count = {
      std::count(position_in.begin(), position_in.end(), left_idx)};
  dh::caching_device_vector<int64_t> d_left_count = left_count;
  dh::caching_device_vector<int> position = position_in;
  dh::caching_device_vector<int> position_out(position.size());
  dh::caching_device_vector<RowPartitioner::RowIndexT> ridx(position.size());
  thrust::sequence(ridx.begin(), ridx.end());
  dh::caching_device_vector<RowPartitioner::RowIndexT> ridx_out(ridx.size());
  RowPartitioner rp(0, 10);
  rp.SortPosition(
      common::Span<int>(position.data().get(), position.size()),
      common::Span<int>(position_out.data().get(), position_out.size()),
      common::Span<RowPartitioner::RowIndexT>(ridx.data().get(), ridx.size()),
      common::Span<RowPartitioner::RowIndexT>(ridx_out.data().get(),
                                              ridx_out.size()),
      left_idx, right_idx, d_left_count.data().get(), nullptr);
  thrust::host_vector<int> position_result = position_out;
  thrust::host_vector<int> ridx_result = ridx_out;

  // Check the positions are sorted
  EXPECT_TRUE(std::is_sorted(position_result.begin(), position_result.end()));
  // Check row indices are sorted inside the left and right segments
  EXPECT_TRUE(
      std::is_sorted(ridx_result.begin(), ridx_result.begin() + left_count[0]));
  EXPECT_TRUE(
      std::is_sorted(ridx_result.begin() + left_count[0], ridx_result.end()));
  // Check key-value pairs are the same
  for (auto i = 0ull; i < ridx_result.size(); i++) {
    EXPECT_EQ(position_result[i], position_in[ridx_result[i]]);
  }
}
TEST(GpuHist, SortPosition) {
  TestSortPosition({1, 2, 1, 2, 1}, 1, 2);
  TestSortPosition({1, 1, 1, 1}, 1, 2);
  TestSortPosition({2, 2, 2, 2}, 1, 2);
  TestSortPosition({1, 2, 1, 2, 3}, 1, 2);
}
void TestUpdatePosition() {
  const int kNumRows = 10;
  RowPartitioner rp(0, kNumRows);
  auto rows = rp.GetRowsHost(0);
  EXPECT_EQ(rows.size(), kNumRows);
  for (auto i = 0ull; i < kNumRows; i++) {
    EXPECT_EQ(rows[i], i);
  }
  // Send the first five training instances to the right node
  // and the second five to the left node
  rp.UpdatePosition(0, 1, 2, [=] __device__(RowPartitioner::RowIndexT ridx) {
    if (ridx > 4) {
      return 1;
    } else {
      return 2;
    }
  });
  rows = rp.GetRowsHost(1);
  for (auto r : rows) {
    EXPECT_GT(r, 4);
  }
  rows = rp.GetRowsHost(2);
  for (auto r : rows) {
    EXPECT_LT(r, 5);
  }

  // Split the left node again
  rp.UpdatePosition(1, 3, 4, [=] __device__(RowPartitioner::RowIndexT ridx) {
    if (ridx < 7) {
      return 3;
    }
    return 4;
  });
  EXPECT_EQ(rp.GetRows(3).size(), 2);
  EXPECT_EQ(rp.GetRows(4).size(), 3);

  // Check position is as expected
  EXPECT_EQ(rp.GetPositionHost(),
            std::vector<bst_node_t>({3, 3, 4, 4, 4, 2, 2, 2, 2, 2}));
}
TEST(RowPartitioner, Basic) { TestUpdatePosition(); }
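
// Verifies FinalisePosition: the functor picks each row's final node; here
// every row is mapped to node 7.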
void TestFinalise() {
  const int kNumRows = 10;
  RowPartitioner rp(0, kNumRows);
  rp.FinalisePosition(
      [=] __device__(RowPartitioner::RowIndexT ridx, int position) {
        return 7;
      });
  auto position = rp.GetPositionHost();
  for (auto p : position) {
    EXPECT_EQ(p, 7);
  }
}
TEST(RowPartitioner, Finalise) { TestFinalise(); }
void TestIncorrectRow() {
  RowPartitioner rp(0, 1);
  rp.UpdatePosition(0, 1, 2, [=] __device__(RowPartitioner::RowIndexT ridx) {
    return 4;  // This is neither the left branch nor the right branch
  });
}
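
// Google Test convention: suites suffixed "DeathTest" group EXPECT_DEATH /
// ASSERT_DEATH cases so they can be filtered out, e.g. when RMM is enabled.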
TEST(RowPartitionerDeathTest, IncorrectRow) {
  ASSERT_DEATH({ TestIncorrectRow(); }, ".*");
}
} // namespace tree
} // namespace xgboost