Some comments for row partitioner. (#4832)
parent a5f232feb8
commit f90e7f9aa8
@@ -19,7 +19,9 @@ struct IndicateLeftTransform {
     return x == left_nidx ? 1 : 0;
   }
 };
+/*
+ * position: Position of rows belonging to the current split node.
+ */
 void RowPartitioner::SortPosition(common::Span<TreePositionT> position,
                                   common::Span<TreePositionT> position_out,
                                   common::Span<RowIndexT> ridx,
@@ -27,27 +29,37 @@ void RowPartitioner::SortPosition(common::Span<TreePositionT> position,
                                   TreePositionT left_nidx,
                                   TreePositionT right_nidx,
                                   int64_t* d_left_count, cudaStream_t stream) {
+  // radix sort over 1 bit, see:
+  // https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_ch39.html
   auto d_position_out = position_out.data();
   auto d_position_in = position.data();
   auto d_ridx_out = ridx_out.data();
   auto d_ridx_in = ridx.data();
   auto write_results = [=] __device__(size_t idx, int ex_scan_result) {
+    // ex_scan_result is the number of rows assigned to the left node so far
+    // during the scan.
     int scatter_address;
     if (d_position_in[idx] == left_nidx) {
       scatter_address = ex_scan_result;
     } else {
+      // current number of rows belonging to the right node + total number of
+      // rows belonging to the left node
       scatter_address = (idx - ex_scan_result) + *d_left_count;
     }
+    // copy the node id to the output
     d_position_out[scatter_address] = d_position_in[idx];
     d_ridx_out[scatter_address] = d_ridx_in[idx];
   };  // NOLINT

-  IndicateLeftTransform conversion_op(left_nidx);
+  IndicateLeftTransform is_left(left_nidx);
+  // an iterator that, given an old position, returns whether the row belongs
+  // to the left or the right node
   cub::TransformInputIterator<TreePositionT, IndicateLeftTransform,
                               TreePositionT*>
-      in_itr(d_position_in, conversion_op);
+      in_itr(d_position_in, is_left);
   dh::DiscardLambdaItr<decltype(write_results)> out_itr(write_results);
   size_t temp_storage_bytes = 0;
+  // position has the same size as the current split node's row segment
   cub::DeviceScan::ExclusiveSum(nullptr, temp_storage_bytes, in_itr, out_itr,
                                 position.size(), stream);
   dh::caching_device_vector<uint8_t> temp_storage(temp_storage_bytes);
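To make the scan-and-scatter concrete, here is a minimal host-side sketch of the same 1-bit partition (illustration only, not xgboost code; the node ids and row indices are invented). The exclusive scan of the left-indicator flags gives each left row its scatter address, and right rows land after all `left_count` left rows, so relative order is preserved on both sides:

```cpp
#include <iostream>
#include <vector>

int main() {
  const int kLeft = 1, kRight = 2;
  std::vector<int> position = {kLeft, kRight, kLeft, kRight, kLeft};
  std::vector<unsigned> ridx = {10, 11, 12, 13, 14};  // row indices

  // Exclusive scan of the left-indicator, as cub::DeviceScan::ExclusiveSum
  // does on the device via the TransformInputIterator.
  std::vector<int> ex_scan(position.size());
  int sum = 0;
  for (size_t i = 0; i < position.size(); ++i) {
    ex_scan[i] = sum;
    sum += (position[i] == kLeft) ? 1 : 0;
  }
  int left_count = sum;  // plays the role of *d_left_count

  std::vector<int> position_out(position.size());
  std::vector<unsigned> ridx_out(ridx.size());
  for (size_t idx = 0; idx < position.size(); ++idx) {
    int scatter = (position[idx] == kLeft)
                      ? ex_scan[idx]  // rank among left rows
                      : static_cast<int>(idx) - ex_scan[idx] + left_count;
    position_out[scatter] = position[idx];
    ridx_out[scatter] = ridx[idx];
  }
  for (unsigned r : ridx_out) std::cout << r << " ";  // prints: 10 12 14 11 13
  std::cout << "\n";
}
```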
@@ -125,11 +137,15 @@ void RowPartitioner::SortPositionAndCopy(const Segment& segment,
                                          int64_t* d_left_count,
                                          cudaStream_t stream) {
   SortPosition(
+      // position_in
       common::Span<TreePositionT>(position.Current() + segment.begin,
                                   segment.Size()),
+      // position_out
       common::Span<TreePositionT>(position.other() + segment.begin,
                                   segment.Size()),
+      // row index in
       common::Span<RowIndexT>(ridx.Current() + segment.begin, segment.Size()),
+      // row index out
       common::Span<RowIndexT>(ridx.other() + segment.begin, segment.Size()),
       left_nidx, right_nidx, d_left_count, stream);
   // Copy back key/value
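The position/ridx pairs above come from double buffers. A toy ping-pong buffer with the assumed semantics (loosely mirroring dh::DoubleBuffer's Current()/other() pair; the names and copy-back helper here are hypothetical) shows why an explicit "copy back" step follows the scatter:

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Kernels scatter from Current() into Other(); the segment is then copied
// back so that Current() stays the single source of truth.
template <typename T>
struct PingPong {
  std::vector<T> a, b;
  explicit PingPong(std::size_t n) : a(n), b(n) {}
  T* Current() { return a.data(); }
  T* Other() { return b.data(); }
  void CopyBack(std::size_t begin, std::size_t n) {
    std::copy_n(Other() + begin, n, Current() + begin);
  }
};

int main() {
  PingPong<unsigned> ridx(5);
  // Pretend SortPosition scattered the segment [2, 5) into the other half:
  ridx.Other()[2] = 14; ridx.Other()[3] = 11; ridx.Other()[4] = 13;
  ridx.CopyBack(2, 3);  // the "Copy back key/value" step
  return ridx.Current()[2] == 14 ? 0 : 1;
}
```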
@@ -30,19 +30,32 @@ __forceinline__ __device__ void AtomicIncrement(int64_t* d_count, bool increment
  * partition training rows into different leaf nodes. */
 class RowPartitioner {
  public:
-  using TreePositionT = int;
+  using TreePositionT = int32_t;
   using RowIndexT = bst_uint;
   struct Segment;

  private:
   int device_idx;
-  /*! \brief Range of rows for each node. */
+  /*! \brief To find the rows belonging to a node nid, first get the index
+   * segment from ridx_segments[nid], then use it to look up the row indices
+   * that give each row's position in the input data X.  RowPartitioner::GetRows
+   * is a good starting place to see what these vectors store.
+   *
+   * node id -> segment -> indices of rows belonging to the node
+   */
+  /*! \brief Range of row indices for each node; pointers into ridx below. */
   std::vector<Segment> ridx_segments;
   dh::caching_device_vector<RowIndexT> ridx_a;
   dh::caching_device_vector<RowIndexT> ridx_b;
   dh::caching_device_vector<TreePositionT> position_a;
   dh::caching_device_vector<TreePositionT> position_b;
+  /*! \brief mapping of node id -> rows.
+   * This looks like:
+   * node id  |    1    |    2   |
+   * rows idx | 3, 5, 1 | 13, 31 |
+   */
   dh::DoubleBuffer<RowIndexT> ridx;
+  /*! \brief mapping of row -> node id. */
   dh::DoubleBuffer<TreePositionT> position;
   dh::caching_device_vector<int64_t>
       left_counts;  // Useful to keep a bunch of zeroed memory for sort position
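The new comments describe an indirection: node id -> segment -> row indices. A small host-side sketch using the same toy values as the comment's table (the Segment layout with begin/end offsets is assumed here, and node 0's empty segment is invented for illustration):

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

struct Segment {
  std::size_t begin = 0, end = 0;
  std::size_t Size() const { return end - begin; }
};

int main() {
  // All row indices, grouped by node:   node 1 | node 2
  std::vector<unsigned> ridx = {3, 5, 1, 13, 31};
  std::vector<Segment> ridx_segments = {{0, 0},   // node 0 (already split away)
                                        {0, 3},   // node 1 -> rows 3, 5, 1
                                        {3, 5}};  // node 2 -> rows 13, 31
  Segment seg = ridx_segments[2];  // roughly what GetRows(2) would expose
  for (std::size_t i = seg.begin; i < seg.end; ++i)
    std::cout << ridx[i] << " ";   // prints: 13 31
  std::cout << "\n";
}
```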
@@ -95,20 +108,22 @@ class RowPartitioner {
   void UpdatePosition(TreePositionT nidx, TreePositionT left_nidx,
                       TreePositionT right_nidx, UpdatePositionOpT op) {
     dh::safe_cuda(cudaSetDevice(device_idx));
-    Segment segment = ridx_segments.at(nidx);
+    Segment segment = ridx_segments.at(nidx);  // rows belonging to node nidx
     auto d_ridx = ridx.CurrentSpan();
     auto d_position = position.CurrentSpan();
     if (left_counts.size() <= nidx) {
       left_counts.resize((nidx * 2) + 1);
       thrust::fill(left_counts.begin(), left_counts.end(), 0);
     }
+    // Now we divide the row segment into left and right nodes.
+
     int64_t* d_left_count = left_counts.data().get() + nidx;
     // Launch 1 thread for each row
     dh::LaunchN<1, 128>(device_idx, segment.Size(), [=] __device__(size_t idx) {
+      // LaunchN starts from zero, so we restore the row index by adding segment.begin
       idx += segment.begin;
       RowIndexT ridx = d_ridx[idx];
-      // Missing value
-      TreePositionT new_position = op(ridx);
+      TreePositionT new_position = op(ridx);  // new node id
       KERNEL_CHECK(new_position == left_nidx || new_position == right_nidx);
       AtomicIncrement(d_left_count, new_position == left_nidx);
       d_position[idx] = new_position;
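Functionally, the kernel launched above does the following per node. This is a sequential host-side model, not the real CUDA path; the toy split rule, row values, and node ids are invented:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  const int32_t left_nidx = 1, right_nidx = 2;
  std::vector<unsigned> ridx = {3, 5, 1, 13, 31};  // rows of the split node
  std::vector<int32_t> position(ridx.size(), 0);   // row slot -> node id
  auto op = [&](unsigned r) {                      // toy split rule
    return (r < 10) ? left_nidx : right_nidx;
  };
  int64_t left_count = 0;                          // d_left_count analogue
  for (size_t idx = 0; idx < ridx.size(); ++idx) {
    int32_t new_position = op(ridx[idx]);          // new node id for this row
    assert(new_position == left_nidx || new_position == right_nidx);
    left_count += (new_position == left_nidx);     // AtomicIncrement analogue
    position[idx] = new_position;
  }
  assert(left_count == 3);  // rows 3, 5, 1 go left
}
```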
@@ -152,8 +152,8 @@ struct ELLPackMatrix {

   XGBOOST_DEVICE size_t BinCount() const { return gidx_fvalue_map.size(); }

-  // Get a matrix element, uses binary search for look up
-  // Return NaN if missing
+  // Get a matrix element; uses binary search for lookup.  Returns NaN if missing.
+  // Given a row index and a feature index, returns the corresponding cut value.
   __device__ bst_float GetElement(size_t ridx, size_t fidx) const {
     auto row_begin = row_stride * ridx;
     auto row_end = row_begin + row_stride;
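A rough host-side analogue of that lookup, under the assumption (simplified from the real ELLPack layout) that a sparse row stores its global bin indices in sorted order and that feature fidx owns the bin range [feature_segments[fidx], feature_segments[fidx + 1]); the concrete bin boundaries and cut values below are made up:

```cpp
#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>

int main() {
  std::vector<int> feature_segments = {0, 4, 7, 10};  // 3 features
  std::vector<float> bin_cut_values = {0.1f, 0.2f, 0.3f, 0.4f,  // feature 0
                                       1.0f, 2.0f, 3.0f,        // feature 1
                                       -1.f, 0.f,  1.f};        // feature 2
  std::vector<int> row = {2, 8};  // this row has features 0 and 2 only

  auto get_element = [&](size_t fidx) -> float {
    // binary search for a bin of feature fidx within this row's entries
    auto it = std::lower_bound(row.begin(), row.end(), feature_segments[fidx]);
    if (it == row.end() || *it >= feature_segments[fidx + 1])
      return std::nanf("");  // feature is missing in this row
    return bin_cut_values[*it];
  };
  std::cout << get_element(0) << " "   // 0.3
            << get_element(1) << " "   // nan (missing)
            << get_element(2) << "\n"; // 0
}
```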
@@ -832,14 +832,15 @@ struct DeviceShard {
     row_partitioner->UpdatePosition(
         nidx, split_node.LeftChild(), split_node.RightChild(),
         [=] __device__(bst_uint ridx) {
-          bst_float element =
+          // given a row index, returns the node id the row belongs to
+          bst_float cut_value =
               d_matrix.GetElement(ridx, split_node.SplitIndex());
           // Missing value
           int new_position = 0;
-          if (isnan(element)) {
+          if (isnan(cut_value)) {
             new_position = split_node.DefaultChild();
           } else {
-            if (element <= split_node.SplitCond()) {
+            if (cut_value <= split_node.SplitCond()) {
               new_position = split_node.LeftChild();
             } else {
               new_position = split_node.RightChild();
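Stripped of the GPU context, the decision rule in this lambda reduces to the following worked host-side example (the feature values are invented, and the default direction for missing values is arbitrarily set to the left child):

```cpp
#include <cmath>
#include <iostream>

int main() {
  const int kLeft = 1, kRight = 2, kDefault = kLeft;  // assumed default child
  const float split_cond = 0.5f;
  const float values[] = {0.25f, 0.75f, std::nanf("")};
  for (float v : values) {
    // missing (NaN) -> default child; otherwise left iff value <= condition
    int new_position = std::isnan(v) ? kDefault
                                     : (v <= split_cond ? kLeft : kRight);
    std::cout << new_position << " ";  // prints: 1 2 1
  }
  std::cout << "\n";
}
```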