Support hessian in host sketch container. (#7081)

Prepare for migrating approx onto hist's codebase.

parent 84d359efb8, commit 77f6cf2d13
@@ -39,6 +39,10 @@ struct GenericParameter : public XGBoostParameter<GenericParameter> {
    * \param require_gpu Whether GPU is explicitly required from user.
    */
   void ConfigureGpuId(bool require_gpu);
+  /*!
+   * Return automatically chosen threads.
+   */
+  int32_t Threads() const;

   // declare parameters
   DMLC_DECLARE_PARAMETER(GenericParameter) {
@@ -110,7 +110,8 @@ class HistogramCuts {
   }
 };

-inline HistogramCuts SketchOnDMatrix(DMatrix *m, int32_t max_bins) {
+inline HistogramCuts SketchOnDMatrix(DMatrix *m, int32_t max_bins,
+                                     std::vector<float> const &hessian = {}) {
   HistogramCuts out;
   auto const& info = m->Info();
   const auto threads = omp_get_max_threads();
@@ -127,9 +128,9 @@ inline HistogramCuts SketchOnDMatrix(DMatrix *m, int32_t max_bins,
     }
   }
   HostSketchContainer container(reduced, max_bins,
-                                HostSketchContainer::UseGroup(info));
+                                HostSketchContainer::UseGroup(info), threads);
   for (auto const &page : m->GetBatches<SparsePage>()) {
-    container.PushRowPage(page, info);
+    container.PushRowPage(page, info, hessian);
   }
   container.MakeCuts(&out);
   return out;
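The hessian argument defaults to an empty vector, so existing call sites keep their behavior. A minimal calling sketch (the wrapper function and include path are assumptions, not part of this commit):

```cpp
#include <vector>
#include "common/hist_util.h"  // assumed include path

// Build histogram cuts weighted by the current iteration's Hessian.
// `hessian` must have one entry per row of `dmat`.
xgboost::common::HistogramCuts CutsWithHessian(
    xgboost::DMatrix* dmat, std::vector<float> const& hessian) {
  return xgboost::common::SketchOnDMatrix(dmat, /*max_bins=*/256, hessian);
}
```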
@@ -10,19 +10,21 @@ namespace xgboost {
 namespace common {

 HostSketchContainer::HostSketchContainer(std::vector<bst_row_t> columns_size,
-                                         int32_t max_bins, bool use_group)
+                                         int32_t max_bins, bool use_group,
+                                         int32_t n_threads)
     : columns_size_{std::move(columns_size)}, max_bins_{max_bins},
-      use_group_ind_{use_group} {
+      use_group_ind_{use_group}, n_threads_{n_threads} {
   monitor_.Init(__func__);
   CHECK_NE(columns_size_.size(), 0);
   sketches_.resize(columns_size_.size());
-  for (size_t i = 0; i < sketches_.size(); ++i) {
+  CHECK_GE(n_threads_, 1);
+  ParallelFor(sketches_.size(), n_threads_, Sched::Auto(), [&](auto i) {
     auto n_bins = std::min(static_cast<size_t>(max_bins_), columns_size_[i]);
     n_bins = std::max(n_bins, static_cast<decltype(n_bins)>(1));
     auto eps = 1.0 / (static_cast<float>(n_bins) * WQSketch::kFactor);
     sketches_[i].Init(columns_size_[i], eps);
     sketches_[i].inqueue.queue.resize(sketches_[i].limit_size * 2);
-  }
+  });
 }

 std::vector<bst_row_t>
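For context, the per-column sketch precision set here follows the WQSketch convention eps = 1 / (n_bins * kFactor). A back-of-envelope check (kFactor = 8 is taken from WQSketch in this codebase; max_bins = 256 is just an example):

```cpp
// Rank-error budget of a single column sketch (illustrative only).
constexpr double kFactor = 8.0;    // assumption: WQSketch::kFactor
constexpr double kMaxBins = 256.0; // example bin count
constexpr double kEps = 1.0 / (kMaxBins * kFactor);  // ~= 4.88e-4
```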
@@ -89,40 +91,94 @@ std::vector<bst_feature_t> HostSketchContainer::LoadBalance(
   return cols_ptr;
 }

-void HostSketchContainer::PushRowPage(SparsePage const &page,
-                                      MetaInfo const &info) {
-  monitor_.Start(__func__);
-  int nthread = omp_get_max_threads();
-  CHECK_EQ(sketches_.size(), info.num_col_);
+namespace {
+// Function to merge hessian and sample weights
+std::vector<float> MergeWeights(MetaInfo const &info,
+                                std::vector<float> const &hessian,
+                                bool use_group, int32_t n_threads) {
+  CHECK_EQ(hessian.size(), info.num_row_);
+  std::vector<float> results(hessian.size());
+  auto const &group_ptr = info.group_ptr_;
+  if (use_group) {
+    auto const &group_weights = info.weights_.HostVector();
+    CHECK_GE(group_ptr.size(), 2);
+    CHECK_EQ(group_ptr.back(), hessian.size());
+    size_t cur_group = 0;
+    for (size_t i = 0; i < hessian.size(); ++i) {
+      results[i] = hessian[i] * group_weights[cur_group];
+      if (i == group_ptr[cur_group + 1]) {
+        cur_group++;
+      }
+    }
+  } else {
+    auto const &sample_weights = info.weights_.HostVector();
+    ParallelFor(hessian.size(), n_threads, Sched::Auto(),
+                [&](auto i) { results[i] = hessian[i] * sample_weights[i]; });
+  }
+  return results;
+}
+
+std::vector<float> UnrollGroupWeights(MetaInfo const &info) {
+  std::vector<float> const &group_weights = info.weights_.HostVector();
+  if (group_weights.empty()) {
+    return group_weights;
+  }
+
+  size_t n_samples = info.num_row_;
+  auto const &group_ptr = info.group_ptr_;
+  std::vector<float> results(n_samples);
+  CHECK_GE(group_ptr.size(), 2);
+  CHECK_EQ(group_ptr.back(), n_samples);
+  size_t cur_group = 0;
+  for (size_t i = 0; i < n_samples; ++i) {
+    results[i] = group_weights[cur_group];
+    if (i == group_ptr[cur_group + 1]) {
+      cur_group++;
+    }
+  }
+  return results;
+}
+}  // anonymous namespace
+
+void HostSketchContainer::PushRowPage(
+    SparsePage const &page, MetaInfo const &info, std::vector<float> const &hessian) {
+  monitor_.Start(__func__);
+  bst_feature_t n_columns = info.num_col_;
+  auto is_dense = info.num_nonzero_ == info.num_col_ * info.num_row_;
+  CHECK_GE(n_threads_, 1);
+  CHECK_EQ(sketches_.size(), n_columns);

-  // Data groups, used in ranking.
-  std::vector<bst_uint> const &group_ptr = info.group_ptr_;
-  // Use group index for weights?
+  // glue these conditions using ternary operator to avoid making data copies.
+  auto const &weights =
+      hessian.empty()
+          ? (use_group_ind_ ? UnrollGroupWeights(info)    // use group weight
+                            : info.weights_.HostVector()) // use sample weight
+          : MergeWeights(
+                info, hessian, use_group_ind_,
+                n_threads_);  // use hessian merged with group/sample weights
+  if (!weights.empty()) {
+    CHECK_EQ(weights.size(), info.num_row_);
+  }
+
   auto batch = page.GetView();
   // Parallel over columns.  Each thread owns a set of consecutive columns.
-  auto const ncol = static_cast<uint32_t>(info.num_col_);
-  auto const is_dense = info.num_nonzero_ == info.num_col_ * info.num_row_;
-  auto thread_columns_ptr = LoadBalance(page, info.num_col_, nthread);
+  auto const ncol = static_cast<bst_feature_t>(info.num_col_);
+  auto thread_columns_ptr = LoadBalance(page, info.num_col_, n_threads_);

   dmlc::OMPException exc;
-#pragma omp parallel num_threads(nthread)
+#pragma omp parallel num_threads(n_threads_)
   {
     exc.Run([&]() {
       auto tid = static_cast<uint32_t>(omp_get_thread_num());
       auto const begin = thread_columns_ptr[tid];
       auto const end = thread_columns_ptr[tid + 1];
-      size_t group_ind = 0;

       // do not iterate if no columns are assigned to the thread
       if (begin < end && end <= ncol) {
         for (size_t i = 0; i < batch.Size(); ++i) {
           size_t const ridx = page.base_rowid + i;
           SparsePage::Inst const inst = batch[i];
-          if (use_group_ind_) {
-            group_ind = this->SearchGroupIndFromRow(group_ptr, i + page.base_rowid);
-          }
-          size_t w_idx = use_group_ind_ ? group_ind : ridx;
-          auto w = info.GetWeight(w_idx);
+          auto w = weights.empty() ? 1.0f : weights[ridx];
           auto p_inst = inst.data();
           if (is_dense) {
             for (size_t ii = begin; ii < end; ii++) {
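To make the weighting rule concrete: a row's effective sketch weight is its Hessian value multiplied by its sample weight (or, in the ranking branch, by the weight of the query group it belongs to). A standalone, serial sketch of the non-ranking branch, simplified from the diff with no XGBoost types:

```cpp
#include <vector>

// Simplified, serial version of the non-ranking branch of MergeWeights:
// effective weight of row i = hessian[i] * sample_weights[i].
std::vector<float> MergeSampleWeights(std::vector<float> const& hessian,
                                      std::vector<float> const& sample_weights) {
  std::vector<float> results(hessian.size());
  for (size_t i = 0; i < hessian.size(); ++i) {
    results[i] = hessian[i] * sample_weights[i];
  }
  return results;
}
```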
@@ -201,6 +257,8 @@ void HostSketchContainer::AllReduce(
   monitor_.Start(__func__);
   auto& num_cuts = *p_num_cuts;
   CHECK_EQ(num_cuts.size(), 0);
+  num_cuts.resize(sketches_.size());
+
   auto &reduced = *p_reduced;
   reduced.resize(sketches_.size());

@@ -212,25 +270,23 @@ void HostSketchContainer::AllReduce(
   std::vector<bst_row_t> global_column_size(columns_size_);
   rabit::Allreduce<rabit::op::Sum>(global_column_size.data(), global_column_size.size());

-  size_t nbytes = 0;
-  for (size_t i = 0; i < sketches_.size(); ++i) {
-    int32_t intermediate_num_cuts = static_cast<int32_t>(std::min(
-        global_column_size[i], static_cast<size_t>(max_bins_ * WQSketch::kFactor)));
+  ParallelFor(sketches_.size(), n_threads_, [&](size_t i) {
+    int32_t intermediate_num_cuts = static_cast<int32_t>(
+        std::min(global_column_size[i],
+                 static_cast<size_t>(max_bins_ * WQSketch::kFactor)));
     if (global_column_size[i] != 0) {
       WQSketch::SummaryContainer out;
       sketches_[i].GetSummary(&out);
       reduced[i].Reserve(intermediate_num_cuts);
       CHECK(reduced[i].data);
       reduced[i].SetPrune(out, intermediate_num_cuts);
-      nbytes = std::max(
-          WQSketch::SummaryContainer::CalcMemCost(intermediate_num_cuts),
-          nbytes);
     }
-    num_cuts.push_back(intermediate_num_cuts);
-  }
+    num_cuts[i] = intermediate_num_cuts;
+  });

   auto world = rabit::GetWorldSize();
   if (world == 1) {
+    monitor_.Stop(__func__);
     return;
   }
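Note the switch from `num_cuts.push_back(...)` in a serial loop to an indexed write inside `ParallelFor`: concurrent `push_back` on a shared `std::vector` is a data race, while writing to distinct pre-sized slots is safe. In miniature (an illustrative sketch; `ComputeNumCuts` and the counts are hypothetical stand-ins):

```cpp
#include <cstdint>
#include <vector>

int32_t ComputeNumCuts(size_t i);  // hypothetical per-column computation

// Pre-size once, then let each parallel iteration own exactly one slot.
void FillNumCuts(std::vector<int32_t>* p_num_cuts, size_t n_columns, size_t n_threads) {
  auto& num_cuts = *p_num_cuts;
  num_cuts.resize(n_columns);  // concurrent push_back here would be a race
  xgboost::common::ParallelFor(n_columns, n_threads,
                               [&](size_t i) { num_cuts[i] = ComputeNumCuts(i); });
}
```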
@@ -242,7 +298,7 @@ size_t nbytes = 0;
                   &global_sketches);

   std::vector<WQSketch::SummaryContainer> final_sketches(n_columns);
-  ParallelFor(omp_ulong(n_columns), [&](omp_ulong fidx) {
+  ParallelFor(n_columns, n_threads_, [&](auto fidx) {
     int32_t intermediate_num_cuts = num_cuts[fidx];
     auto nbytes =
         WQSketch::SummaryContainer::CalcMemCost(intermediate_num_cuts);
@@ -276,7 +332,7 @@ void AddCutPoint(WQuantileSketch<float, float>::SummaryContainer const &summary,
   auto& cut_values = cuts->cut_values_.HostVector();
   for (size_t i = 1; i < required_cuts; ++i) {
     bst_float cpt = summary.data[i].value;
-    if (i == 1 || cpt > cuts->cut_values_.ConstHostVector().back()) {
+    if (i == 1 || cpt > cut_values.back()) {
       cut_values.push_back(cpt);
     }
   }
@@ -289,23 +345,28 @@ void HostSketchContainer::MakeCuts(HistogramCuts* cuts) {
   this->AllReduce(&reduced, &num_cuts);

   cuts->min_vals_.HostVector().resize(sketches_.size(), 0.0f);
+  std::vector<WQSketch::SummaryContainer> final_summaries(reduced.size());

-  for (size_t fid = 0; fid < reduced.size(); ++fid) {
-    WQSketch::SummaryContainer a;
-    size_t max_num_bins = std::min(num_cuts[fid], max_bins_);
+  ParallelFor(reduced.size(), n_threads_, Sched::Guided(), [&](size_t fidx) {
+    WQSketch::SummaryContainer &a = final_summaries[fidx];
+    size_t max_num_bins = std::min(num_cuts[fidx], max_bins_);
     a.Reserve(max_num_bins + 1);
     CHECK(a.data);
-    if (num_cuts[fid] != 0) {
-      a.SetPrune(reduced[fid], max_num_bins + 1);
-      CHECK(a.data && reduced[fid].data);
+    if (num_cuts[fidx] != 0) {
+      a.SetPrune(reduced[fidx], max_num_bins + 1);
+      CHECK(a.data && reduced[fidx].data);
       const bst_float mval = a.data[0].value;
-      cuts->min_vals_.HostVector()[fid] = mval - fabs(mval) - 1e-5f;
+      cuts->min_vals_.HostVector()[fidx] = mval - fabs(mval) - 1e-5f;
     } else {
       // Empty column.
       const float mval = 1e-5f;
-      cuts->min_vals_.HostVector()[fid] = mval;
+      cuts->min_vals_.HostVector()[fidx] = mval;
     }
+  });
+
+  for (size_t fid = 0; fid < reduced.size(); ++fid) {
+    size_t max_num_bins = std::min(num_cuts[fid], max_bins_);
+    WQSketch::SummaryContainer const& a = final_summaries[fid];
     AddCutPoint(a, max_num_bins, cuts);
     // push a value that is greater than anything
     const bst_float cpt
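The pruning work is now parallel, but the cut values are still appended in a second, serial loop: `AddCutPoint` grows the shared `cut_values` vector with `push_back`, and each feature's cuts must land after the previous feature's, so that step has to stay ordered. Buffering the pruned summaries in `final_summaries` is what lets the two phases be split.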
@@ -710,6 +710,7 @@ class HostSketchContainer {
   std::vector<bst_row_t> columns_size_;
   int32_t max_bins_;
   bool use_group_ind_{false};
+  int32_t n_threads_;
   Monitor monitor_;

  public:
@@ -720,7 +721,7 @@ class HostSketchContainer {
    * \param use_group whether is assigned to group to data instance.
    */
   HostSketchContainer(std::vector<bst_row_t> columns_size, int32_t max_bins,
-                      bool use_group);
+                      bool use_group, int32_t n_threads);

   static bool UseGroup(MetaInfo const &info) {
     size_t const num_groups =
@@ -758,7 +759,8 @@ class HostSketchContainer {
                  std::vector<int32_t>* p_num_cuts);

   /* \brief Push a CSR matrix. */
-  void PushRowPage(SparsePage const& page, MetaInfo const& info);
+  void PushRowPage(SparsePage const &page, MetaInfo const &info,
+                   std::vector<float> const &hessian = {});

   void MakeCuts(HistogramCuts* cuts);
 };
@@ -9,6 +9,7 @@
 #include <dmlc/common.h>
 #include <vector>
 #include <algorithm>
+#include <type_traits>  // std::is_signed
 #include "xgboost/logging.h"

 namespace xgboost {
@@ -133,19 +134,92 @@ void ParallelFor2d(const BlockedSpace2d& space, int nthreads, Func func) {
   exc.Rethrow();
 }

+/**
+ * OpenMP schedule
+ */
+struct Sched {
+  enum {
+    kAuto,
+    kDynamic,
+    kStatic,
+    kGuided,
+  } sched;
+  size_t chunk{0};
+
+  Sched static Auto() { return Sched{kAuto}; }
+  Sched static Dyn(size_t n = 0) { return Sched{kDynamic, n}; }
+  Sched static Static(size_t n = 0) { return Sched{kStatic, n}; }
+  Sched static Guided() { return Sched{kGuided}; }
+};
+
 template <typename Index, typename Func>
-void ParallelFor(Index size, size_t nthreads, Func fn) {
+void ParallelFor(Index size, size_t n_threads, Sched sched, Func fn) {
+#if defined(_MSC_VER)
+  // msvc doesn't support unsigned integer as openmp index.
+  using OmpInd = std::conditional_t<std::is_signed<Index>::value, Index, omp_ulong>;
+#else
+  using OmpInd = Index;
+#endif
+  OmpInd length = static_cast<OmpInd>(size);
+
   dmlc::OMPException exc;
-#pragma omp parallel for num_threads(nthreads) schedule(static)
-  for (Index i = 0; i < size; ++i) {
-    exc.Run(fn, i);
+  switch (sched.sched) {
+  case Sched::kAuto: {
+#pragma omp parallel for num_threads(n_threads)
+    for (OmpInd i = 0; i < length; ++i) {
+      exc.Run(fn, i);
+    }
+    break;
+  }
+  case Sched::kDynamic: {
+    if (sched.chunk == 0) {
+#pragma omp parallel for num_threads(n_threads) schedule(dynamic)
+      for (OmpInd i = 0; i < length; ++i) {
+        exc.Run(fn, i);
+      }
+    } else {
+#pragma omp parallel for num_threads(n_threads) schedule(dynamic, sched.chunk)
+      for (OmpInd i = 0; i < length; ++i) {
+        exc.Run(fn, i);
+      }
+    }
+    break;
+  }
+  case Sched::kStatic: {
+    if (sched.chunk == 0) {
+#pragma omp parallel for num_threads(n_threads) schedule(static)
+      for (OmpInd i = 0; i < length; ++i) {
+        exc.Run(fn, i);
+      }
+    } else {
+#pragma omp parallel for num_threads(n_threads) schedule(static, sched.chunk)
+      for (OmpInd i = 0; i < length; ++i) {
+        exc.Run(fn, i);
+      }
+    }
+    break;
+  }
+  case Sched::kGuided: {
+#pragma omp parallel for num_threads(n_threads) schedule(guided)
+    for (OmpInd i = 0; i < length; ++i) {
+      exc.Run(fn, i);
+    }
+    break;
+  }
   }
   exc.Rethrow();
 }

+template <typename Index, typename Func>
+void ParallelFor(Index size, size_t n_threads, Func fn) {
+  ParallelFor(size, n_threads, Sched::Static(), fn);
+}
+
+// FIXME(jiamingy): Remove this function to get rid of `omp_set_num_threads`, which sets a
+// global variable in runtime and affects other programs in the same process.
 template <typename Index, typename Func>
 void ParallelFor(Index size, Func fn) {
-  ParallelFor(size, omp_get_max_threads(), fn);
+  ParallelFor(size, omp_get_max_threads(), Sched::Static(), fn);
 }

 /* \brief Configure parallel threads.
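A minimal usage sketch of the new overload (the workload here is arbitrary and only for illustration). `Sched::Guided()` suits loops with uneven per-item cost, such as pruning columns of very different sizes, while `Sched::Static()` remains the default for uniform work:

```cpp
#include <cmath>
#include <vector>

void Example(size_t n_threads) {
  std::vector<double> out(1024);
  // Guided schedule: chunk sizes shrink as iterations are handed out,
  // which balances threads when iteration costs vary.
  xgboost::common::ParallelFor(
      out.size(), n_threads, xgboost::common::Sched::Guided(),
      [&](auto i) { out[i] = std::sqrt(static_cast<double>(i)); });
}
```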
@@ -174,6 +248,12 @@ inline int32_t OmpSetNumThreadsWithoutHT(int32_t* p_threads) {
   return nthread_original;
 }

+inline int32_t OmpGetNumThreads(int32_t n_threads) {
+  if (n_threads <= 0) {
+    n_threads = omp_get_num_procs();
+  }
+  return n_threads;
+}
 }  // namespace common
 }  // namespace xgboost
@@ -238,6 +238,10 @@ void GenericParameter::ConfigureGpuId(bool require_gpu) {
 #endif  // defined(XGBOOST_USE_CUDA)
 }

+int32_t GenericParameter::Threads() const {
+  return common::OmpGetNumThreads(nthread);
+}
+
 using LearnerAPIThreadLocalStore =
     dmlc::ThreadLocalStore<std::map<Learner const *, XGBAPIThreadLocalEntry>>;
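With `Threads()` in place, thread-count resolution becomes explicit instead of relying on `omp_get_max_threads()` at each call site: a non-positive `nthread` resolves to the number of processors. A wiring sketch (`columns_size` and `max_bins` are placeholder arguments, not from this commit):

```cpp
// Resolve the user-facing `nthread` once, then pass the concrete
// count downstream to the sketch container.
void ConfigureSketch(std::vector<bst_row_t> columns_size, int32_t max_bins) {
  GenericParameter ctx;
  ctx.UpdateAllowUnknown(Args{{"nthread", "0"}});  // 0 -> use all processors
  int32_t n_threads = ctx.Threads();               // always >= 1 after resolution
  common::HostSketchContainer sketch(columns_size, max_bins,
                                     /*use_group=*/false, n_threads);
}
```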
@@ -226,6 +226,39 @@ TEST(HistUtil, DenseCutsAccuracyTestWeights) {
   }
 }

+TEST(HistUtil, QuantileWithHessian) {
+  int bin_sizes[] = {2, 16, 256, 512};
+  int sizes[] = {1000, 1500};
+  int num_columns = 5;
+  for (auto num_rows : sizes) {
+    auto x = GenerateRandom(num_rows, num_columns);
+    auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
+    auto w = GenerateRandomWeights(num_rows);
+    auto hessian = GenerateRandomWeights(num_rows);
+    std::mt19937 rng(0);
+    std::shuffle(hessian.begin(), hessian.end(), rng);
+    dmat->Info().weights_.HostVector() = w;
+
+    for (auto num_bins : bin_sizes) {
+      HistogramCuts cuts_hess = SketchOnDMatrix(dmat.get(), num_bins, hessian);
+      for (size_t i = 0; i < w.size(); ++i) {
+        dmat->Info().weights_.HostVector()[i] = w[i] * hessian[i];
+      }
+      ValidateCuts(cuts_hess, dmat.get(), num_bins);
+
+      HistogramCuts cuts_wh = SketchOnDMatrix(dmat.get(), num_bins);
+      ValidateCuts(cuts_wh, dmat.get(), num_bins);
+
+      ASSERT_EQ(cuts_hess.Values().size(), cuts_wh.Values().size());
+      for (size_t i = 0; i < cuts_hess.Values().size(); ++i) {
+        ASSERT_NEAR(cuts_wh.Values()[i], cuts_hess.Values()[i], kRtEps);
+      }
+
+      dmat->Info().weights_.HostVector() = w;
+    }
+  }
+}
+
 TEST(HistUtil, DenseCutsExternalMemory) {
   int bin_sizes[] = {2, 16, 256, 512};
   int sizes[] = {100, 1000, 1500};
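The test pins down the intended semantics: sketching with weights `w` and hessian `h` must produce the same cuts as sketching with per-row weights `w[i] * h[i]`, which is why the two sets of cut values are compared element-wise within `kRtEps`.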
@@ -43,7 +43,7 @@ void TestDistributedQuantile(size_t rows, size_t cols) {
   // Generate cuts for distributed environment.
   auto sparsity = 0.5f;
   auto rank = rabit::GetRank();
-  HostSketchContainer sketch_distributed(column_size, n_bins, false);
+  HostSketchContainer sketch_distributed(column_size, n_bins, false, OmpGetNumThreads(0));
   auto m = RandomDataGenerator{rows, cols, sparsity}
                .Seed(rank)
                .Lower(.0f)
@@ -59,7 +59,7 @@ void TestDistributedQuantile(size_t rows, size_t cols) {
   rabit::Finalize();
   CHECK_EQ(rabit::GetWorldSize(), 1);
   std::for_each(column_size.begin(), column_size.end(), [=](auto& size) { size *= world; });
-  HostSketchContainer sketch_on_single_node(column_size, n_bins, false);
+  HostSketchContainer sketch_on_single_node(column_size, n_bins, false, OmpGetNumThreads(0));
   for (auto rank = 0; rank < world; ++rank) {
     auto m = RandomDataGenerator{rows, cols, sparsity}
                  .Seed(rank)