Copy data from Ellpack to GHist. (#8215)

Author:       Jiaming Yuan
Date:         2022-09-06 23:05:49 +08:00
Committed by: GitHub
Parent:       7ee10e3dbd
Commit:       441ffc017a

16 changed files with 466 additions and 112 deletions

src/common/algorithm.cuh (new file, +27)

@@ -0,0 +1,27 @@
/*!
* Copyright 2022 by XGBoost Contributors
*/
#pragma once
#include <thrust/binary_search.h> // thrust::upper_bound
#include <thrust/execution_policy.h> // thrust::seq
#include "xgboost/base.h"
#include "xgboost/span.h"
namespace xgboost {
namespace common {
namespace cuda {
template <typename It>
size_t XGBOOST_DEVICE SegmentId(It first, It last, size_t idx) {
size_t segment_id = thrust::upper_bound(thrust::seq, first, last, idx) - 1 - first;
return segment_id;
}
template <typename T>
size_t XGBOOST_DEVICE SegmentId(Span<T> segments_ptr, size_t idx) {
return SegmentId(segments_ptr.cbegin(), segments_ptr.cend(), idx);
}
} // namespace cuda
} // namespace common
} // namespace xgboost

src/common/algorithm.h (new file, +16)

@@ -0,0 +1,16 @@
/*!
* Copyright 2022 by XGBoost Contributors
*/
#pragma once
#include <algorithm> // std::upper_bound
#include <cinttypes> // std::size_t
namespace xgboost {
namespace common {
template <typename It, typename Idx>
auto SegmentId(It first, It last, Idx idx) {
std::size_t segment_id = std::upper_bound(first, last, idx) - 1 - first;
return segment_id;
}
} // namespace common
} // namespace xgboost
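
A hedged, standalone sketch (not part of this commit) of what SegmentId computes: given CSR-style segment pointers, it maps a global element index back to the segment that owns it, using the same upper_bound expression as the helper above.

// Illustration only: assumes a toy pointer layout and mirrors the SegmentId body above.
#include <algorithm>  // std::upper_bound
#include <cassert>
#include <cstddef>    // std::size_t
#include <vector>

int main() {
  // Segment 0 owns elements [0, 3), segment 1 owns [3, 5), segment 2 owns [5, 9).
  std::vector<std::size_t> ptrs{0, 3, 5, 9};
  auto segment_id = [&](std::size_t idx) {
    return std::upper_bound(ptrs.cbegin(), ptrs.cend(), idx) - 1 - ptrs.cbegin();
  };
  assert(segment_id(0) == 0);
  assert(segment_id(4) == 1);  // element 4 falls inside [3, 5)
  assert(segment_id(8) == 2);
  return 0;
}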

src/common/column_matrix.h

@@ -18,6 +18,7 @@
#include "../data/adapter.h"
#include "../data/gradient_index.h"
#include "algorithm.h"
#include "hist_util.h"
namespace xgboost {
@@ -135,6 +136,22 @@ class DenseColumnIter : public Column<BinIdxT> {
class ColumnMatrix {
void InitStorage(GHistIndexMatrix const& gmat, double sparse_threshold);
template <typename ColumnBinT, typename BinT, typename RIdx>
void SetBinSparse(BinT bin_id, RIdx rid, bst_feature_t fid, ColumnBinT* local_index) {
if (type_[fid] == kDenseColumn) {
ColumnBinT* begin = &local_index[feature_offsets_[fid]];
begin[rid] = bin_id - index_base_[fid];
// not thread-safe with bool vector. FIXME(jiamingy): We can directly assign
// kMissingId to the index to avoid missing flags.
missing_flags_[feature_offsets_[fid] + rid] = false;
} else {
ColumnBinT* begin = &local_index[feature_offsets_[fid]];
begin[num_nonzeros_[fid]] = bin_id - index_base_[fid];
row_ind_[feature_offsets_[fid] + num_nonzeros_[fid]] = rid;
++num_nonzeros_[fid];
}
}
public:
// get number of features
bst_feature_t GetNumFeature() const { return static_cast<bst_feature_t>(type_.size()); }
@@ -144,34 +161,66 @@ class ColumnMatrix {
this->InitStorage(gmat, sparse_threshold);
}
template <typename Batch>
void PushBatch(int32_t n_threads, Batch const& batch, float missing, GHistIndexMatrix const& gmat,
size_t base_rowid) {
// pre-fill index_ for dense columns
auto n_features = gmat.Features();
if (!any_missing_) {
missing_flags_.resize(feature_offsets_[n_features], false);
// row index is compressed, we need to dispatch it.
DispatchBinType(gmat.index.GetBinTypeSize(), [&, size = batch.Size(), n_features = n_features,
n_threads = n_threads](auto t) {
using RowBinIdxT = decltype(t);
SetIndexNoMissing(base_rowid, gmat.index.data<RowBinIdxT>(), size, n_features, n_threads);
});
} else {
missing_flags_.resize(feature_offsets_[n_features], true);
SetIndexMixedColumns(base_rowid, batch, gmat, n_features, missing);
}
}
// construct column matrix from GHistIndexMatrix
void Init(SparsePage const& page, const GHistIndexMatrix& gmat, double sparse_threshold,
int32_t n_threads) {
/**
* \brief Initialize ColumnMatrix from GHistIndexMatrix with reference to the original
* SparsePage.
*/
void InitFromSparse(SparsePage const& page, const GHistIndexMatrix& gmat, double sparse_threshold,
int32_t n_threads) {
auto batch = data::SparsePageAdapterBatch{page.GetView()};
this->InitStorage(gmat, sparse_threshold);
// Ignore the base row id here as we always have one column matrix for each sparse page.
this->PushBatch(n_threads, batch, std::numeric_limits<float>::quiet_NaN(), gmat, 0);
}
/**
* \brief Initialize ColumnMatrix from GHistIndexMatrix without reference to actual
* data.
*
* This function requires a search for each bin to recover the feature index it
* belongs to.
*/
void InitFromGHist(Context const* ctx, GHistIndexMatrix const& gmat) {
auto n_threads = ctx->Threads();
if (!any_missing_) {
// row index is compressed, we need to dispatch it.
DispatchBinType(gmat.index.GetBinTypeSize(), [&, size = gmat.Size(), n_threads = n_threads,
n_features = gmat.Features()](auto t) {
using RowBinIdxT = decltype(t);
SetIndexNoMissing(gmat.base_rowid, gmat.index.data<RowBinIdxT>(), size, n_features,
n_threads);
});
} else {
SetIndexMixedColumns(gmat);
}
}
/**
* \brief Push batch of data for Quantile DMatrix support.
*
* \param batch Input data wrapped inside an adapter batch.
* \param gmat The row-major histogram index that contains index for ALL data.
* \param base_rowid The beginning row index for current batch.
*/
template <typename Batch>
void PushBatch(int32_t n_threads, Batch const& batch, float missing, GHistIndexMatrix const& gmat,
size_t base_rowid) {
// pre-fill index_ for dense columns
if (!any_missing_) {
// row index is compressed, we need to dispatch it.
// Use base_rowid from the input parameter as gmat is a single matrix that contains
// the histogram index for all the data instead of only this batch.
DispatchBinType(gmat.index.GetBinTypeSize(), [&, size = batch.Size(), n_threads = n_threads,
n_features = gmat.Features()](auto t) {
using RowBinIdxT = decltype(t);
SetIndexNoMissing(base_rowid, gmat.index.data<RowBinIdxT>(), size, n_features, n_threads);
});
} else {
SetIndexMixedColumns(base_rowid, batch, gmat, missing);
}
}
/* Set the number of bytes based on numeric limit of maximum number of bins provided by user */
void SetTypeSize(size_t max_bin_per_feat) {
if ((max_bin_per_feat - 1) <= static_cast<int>(std::numeric_limits<uint8_t>::max())) {
@@ -210,6 +259,7 @@ class ColumnMatrix {
template <typename RowBinIdxT>
void SetIndexNoMissing(bst_row_t base_rowid, RowBinIdxT const* row_index, const size_t n_samples,
const size_t n_features, int32_t n_threads) {
missing_flags_.resize(feature_offsets_[n_features], false);
DispatchBinType(bins_type_size_, [&](auto t) {
using ColumnBinT = decltype(t);
auto column_index = Span<ColumnBinT>{reinterpret_cast<ColumnBinT*>(index_.data()),
@@ -232,29 +282,16 @@ class ColumnMatrix {
*/
template <typename Batch>
void SetIndexMixedColumns(size_t base_rowid, Batch const& batch, const GHistIndexMatrix& gmat,
size_t n_features, float missing) {
float missing) {
auto n_features = gmat.Features();
missing_flags_.resize(feature_offsets_[n_features], true);
auto const* row_index = gmat.index.data<uint32_t>() + gmat.row_ptr[base_rowid];
auto is_valid = data::IsValidFunctor {missing};
num_nonzeros_.resize(n_features, 0);
auto is_valid = data::IsValidFunctor{missing};
DispatchBinType(bins_type_size_, [&](auto t) {
using ColumnBinT = decltype(t);
ColumnBinT* local_index = reinterpret_cast<ColumnBinT*>(index_.data());
num_nonzeros_.resize(n_features, 0);
auto get_bin_idx = [&](auto bin_id, auto rid, bst_feature_t fid) {
if (type_[fid] == kDenseColumn) {
ColumnBinT* begin = reinterpret_cast<ColumnBinT*>(&local_index[feature_offsets_[fid]]);
begin[rid] = bin_id - index_base_[fid];
// not thread-safe with bool vector. FIXME(jiamingy): We can directly assign
// kMissingId to the index to avoid missing flags.
missing_flags_[feature_offsets_[fid] + rid] = false;
} else {
ColumnBinT* begin = reinterpret_cast<ColumnBinT*>(&local_index[feature_offsets_[fid]]);
begin[num_nonzeros_[fid]] = bin_id - index_base_[fid];
row_ind_[feature_offsets_[fid] + num_nonzeros_[fid]] = rid;
++num_nonzeros_[fid];
}
};
size_t const batch_size = batch.Size();
size_t k{0};
for (size_t rid = 0; rid < batch_size; ++rid) {
@@ -264,7 +301,7 @@ class ColumnMatrix {
if (is_valid(coo)) {
auto fid = coo.column_idx;
const uint32_t bin_id = row_index[k];
get_bin_idx(bin_id, rid + base_rowid, fid);
SetBinSparse(bin_id, rid + base_rowid, fid, local_index);
++k;
}
}
@@ -272,6 +309,40 @@ class ColumnMatrix {
});
}
/**
* \brief Set the column index for both dense and sparse columns when only the
* GHistIndexMatrix is available, which requires a search for each bin to recover
* its feature index.
*/
void SetIndexMixedColumns(const GHistIndexMatrix& gmat) {
auto n_features = gmat.Features();
missing_flags_.resize(feature_offsets_[n_features], true);
auto const* row_index = gmat.index.data<uint32_t>() + gmat.row_ptr[gmat.base_rowid];
num_nonzeros_.resize(n_features, 0);
auto const& ptrs = gmat.cut.Ptrs();
DispatchBinType(bins_type_size_, [&](auto t) {
using ColumnBinT = decltype(t);
ColumnBinT* local_index = reinterpret_cast<ColumnBinT*>(index_.data());
auto const batch_size = gmat.Size();
size_t k{0};
for (size_t ridx = 0; ridx < batch_size; ++ridx) {
auto r_beg = gmat.row_ptr[ridx];
auto r_end = gmat.row_ptr[ridx + 1];
bst_feature_t fidx{0};
for (size_t j = r_beg; j < r_end; ++j) {
const uint32_t bin_idx = row_index[k];
// find the feature index for current bin.
while (bin_idx >= ptrs[fidx + 1]) {
fidx++;
}
SetBinSparse(bin_idx, ridx, fidx, local_index);
++k;
}
}
});
}
BinTypeSize GetTypeSize() const { return bins_type_size_; }
auto GetColumnType(bst_feature_t fidx) const { return type_[fidx]; }
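
The new SetIndexMixedColumns(gmat) overload above has no adapter batch to consult, so it recovers each bin's feature index from the cut pointers alone. A minimal standalone sketch of that recovery step (toy ptrs layout and hypothetical names, not the class code itself):

// Map each global bin index in a row back to its feature, relying on bins within a
// row being ordered by feature, as in the while-loop of SetIndexMixedColumns(gmat).
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // ptrs[f] .. ptrs[f + 1] is the range of bin indices owned by feature f.
  std::vector<std::uint32_t> ptrs{0, 4, 9, 12};
  // One row's bin indices, ordered by feature as in GHistIndexMatrix.
  std::vector<std::uint32_t> row_bins{2, 5, 11};
  std::uint32_t fidx = 0;
  for (auto bin_idx : row_bins) {
    while (bin_idx >= ptrs[fidx + 1]) {
      ++fidx;  // advance to the feature that owns this bin
    }
    std::cout << "bin " << bin_idx << " -> feature " << fidx << "\n";
  }
  return 0;
}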

src/common/device_helpers.cuh

@@ -35,6 +35,7 @@
#include "xgboost/global_config.h"
#include "common.h"
#include "algorithm.cuh"
#ifdef XGBOOST_USE_NCCL
#include "nccl.h"
@@ -1556,17 +1557,7 @@ XGBOOST_DEVICE thrust::transform_iterator<FuncT, IterT, ReturnT> MakeTransformIt
return thrust::transform_iterator<FuncT, IterT, ReturnT>(iter, func);
}
template <typename It>
size_t XGBOOST_DEVICE SegmentId(It first, It last, size_t idx) {
size_t segment_id = thrust::upper_bound(thrust::seq, first, last, idx) -
1 - first;
return segment_id;
}
template <typename T>
size_t XGBOOST_DEVICE SegmentId(xgboost::common::Span<T> segments_ptr, size_t idx) {
return SegmentId(segments_ptr.cbegin(), segments_ptr.cend(), idx);
}
using xgboost::common::cuda::SegmentId; // import it for compatibility
namespace detail {
template <typename Key, typename KeyOutIt>

src/common/hist_util.h

@@ -22,6 +22,7 @@
#include "row_set.h"
#include "threading_utils.h"
#include "timer.h"
#include "algorithm.h" // SegmentId
namespace xgboost {
class GHistIndexMatrix;
@@ -130,19 +131,23 @@ class HistogramCuts {
/**
* \brief Search the bin index for categorical feature.
*/
bst_bin_t SearchCatBin(float value, bst_feature_t fidx) const {
auto const &ptrs = this->Ptrs();
auto const &vals = this->Values();
bst_bin_t SearchCatBin(float value, bst_feature_t fidx, std::vector<uint32_t> const& ptrs,
std::vector<float> const& vals) const {
auto end = ptrs.at(fidx + 1) + vals.cbegin();
auto beg = ptrs[fidx] + vals.cbegin();
// Truncates the value in case it's not perfectly rounded.
auto v = static_cast<float>(common::AsCat(value));
auto v = static_cast<float>(common::AsCat(value));
auto bin_idx = std::lower_bound(beg, end, v) - vals.cbegin();
if (bin_idx == ptrs.at(fidx + 1)) {
bin_idx -= 1;
}
return bin_idx;
}
bst_bin_t SearchCatBin(float value, bst_feature_t fidx) const {
auto const& ptrs = this->Ptrs();
auto const& vals = this->Values();
return this->SearchCatBin(value, fidx, ptrs, vals);
}
bst_bin_t SearchCatBin(Entry const& e) const { return SearchCatBin(e.fvalue, e.index); }
};
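
A standalone sketch of the search performed by SearchCatBin above (toy ptrs/vals and a hypothetical helper, not the class itself): within a feature's slice of the cut values, std::lower_bound locates the bin for the truncated category, clamped to the feature's last bin.

#include <algorithm>  // std::lower_bound
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // Cut value layout: feature 0 owns vals[0, 3), feature 1 owns vals[3, 5).
  std::vector<std::uint32_t> ptrs{0, 3, 5};
  std::vector<float> vals{0.f, 1.f, 2.f, 0.f, 1.f};
  auto search_cat_bin = [&](float value, std::uint32_t fidx) {
    auto beg = vals.cbegin() + ptrs[fidx];
    auto end = vals.cbegin() + ptrs[fidx + 1];
    // Truncate the value in case it's not perfectly rounded.
    auto v = static_cast<float>(static_cast<std::int64_t>(value));
    auto bin_idx = std::lower_bound(beg, end, v) - vals.cbegin();
    if (static_cast<std::uint32_t>(bin_idx) == ptrs[fidx + 1]) {
      bin_idx -= 1;  // clamp values past the last category of this feature
    }
    return bin_idx;
  };
  std::cout << search_cat_bin(1.0f, 0) << "\n";  // prints 1
  std::cout << search_cat_bin(1.0f, 1) << "\n";  // prints 4
  return 0;
}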
@@ -189,6 +194,28 @@ auto DispatchBinType(BinTypeSize type, Fn&& fn) {
* storage class.
*/
struct Index {
// Inside the compressor, bin_idx is the index of a cut value across all features. By
// subtracting the starting offset of its feature, we reduce it to a smaller value that
// can be stored with a narrower type. Usable only with dense data.
//
// For sparse input we have to store an additional feature index (similar to sparse
// matrix formats like CSR) for each bin in the index field to choose the right offset.
template <typename T>
struct CompressBin {
uint32_t const* offsets;
template <typename Bin, typename Feat>
auto operator()(Bin bin_idx, Feat fidx) const {
return static_cast<T>(bin_idx - offsets[fidx]);
}
};
template <typename T>
CompressBin<T> MakeCompressor() const {
uint32_t const* offsets = this->Offset();
return CompressBin<T>{offsets};
}
Index() { SetBinTypeSize(binTypeSize_); }
Index(const Index& i) = delete;
Index& operator=(Index i) = delete;
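
A minimal sketch of the compression described by the comment above (hypothetical offsets, not the Index API itself): subtracting the feature's starting offset turns a global bin index into a small per-feature local index that fits a narrower integer type.

#include <cstdint>
#include <iostream>

int main() {
  // Hypothetical per-feature starting bin offsets for three dense features.
  std::uint32_t const offsets[] = {0, 256, 512};
  auto compress = [&](std::uint32_t bin_idx, std::uint32_t fidx) {
    return static_cast<std::uint8_t>(bin_idx - offsets[fidx]);
  };
  // Global bin 300 of feature 1 becomes local bin 44, storable in a single byte.
  std::cout << static_cast<int>(compress(300, 1)) << "\n";  // prints 44
  return 0;
}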