Use matrix for gradient. (#9508)

- Use `linalg::Matrix` for storing gradients.
- New API for the custom objective.
- Custom objective for multi-class/multi-target is now required to return the correct shape.
- Custom objectives implemented in Python can now accept gradient arrays with arbitrary strides (row-major or column-major).
This commit is contained in:
Jiaming Yuan
2023-08-24 05:29:52 +08:00
committed by GitHub
parent 6103dca0bb
commit 972730cde0
77 changed files with 1052 additions and 651 deletions

View File

@@ -1,5 +1,5 @@
/*!
* Copyright 2019-2022 by Contributors
/**
* Copyright 2019-2023, XGBoost Contributors
* \file aft_obj.cu
* \brief Definition of AFT loss for survival analysis.
* \author Avinash Barnwal, Hyunsu Cho and Toby Hocking
@@ -41,11 +41,9 @@ class AFTObj : public ObjFunction {
ObjInfo Task() const override { return ObjInfo::kSurvival; }
template <typename Distribution>
void GetGradientImpl(const HostDeviceVector<bst_float> &preds,
const MetaInfo &info,
HostDeviceVector<GradientPair> *out_gpair,
size_t ndata, int device, bool is_null_weight,
float aft_loss_distribution_scale) {
void GetGradientImpl(const HostDeviceVector<bst_float>& preds, const MetaInfo& info,
linalg::Matrix<GradientPair>* out_gpair, size_t ndata, int device,
bool is_null_weight, float aft_loss_distribution_scale) {
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<GradientPair> _out_gpair,
@@ -66,16 +64,17 @@ class AFTObj : public ObjFunction {
_out_gpair[_idx] = GradientPair(grad * w, hess * w);
},
common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval(
out_gpair, &preds, &info.labels_lower_bound_, &info.labels_upper_bound_,
out_gpair->Data(), &preds, &info.labels_lower_bound_, &info.labels_upper_bound_,
&info.weights_);
}
void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, int /*iter*/,
HostDeviceVector<GradientPair>* out_gpair) override {
linalg::Matrix<GradientPair>* out_gpair) override {
const size_t ndata = preds.Size();
CHECK_EQ(info.labels_lower_bound_.Size(), ndata);
CHECK_EQ(info.labels_upper_bound_.Size(), ndata);
out_gpair->Resize(ndata);
out_gpair->SetDevice(ctx_->Device());
out_gpair->Reshape(ndata, 1);
const int device = ctx_->gpu_id;
const float aft_loss_distribution_scale = param_.aft_loss_distribution_scale;
const bool is_null_weight = info.weights_.Size() == 0;

View File

@@ -27,8 +27,8 @@ class HingeObj : public ObjFunction {
void Configure(Args const&) override {}
ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, int /*iter*/,
HostDeviceVector<GradientPair> *out_gpair) override {
void GetGradient(const HostDeviceVector<bst_float> &preds, const MetaInfo &info,
std::int32_t /*iter*/, linalg::Matrix<GradientPair> *out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size())
<< "labels are not correctly provided"
@@ -41,7 +41,8 @@ class HingeObj : public ObjFunction {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
out_gpair->Resize(ndata);
CHECK_EQ(info.labels.Shape(1), 1) << "Multi-target for `binary:hinge` is not yet supported.";
out_gpair->Reshape(ndata, 1);
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<GradientPair> _out_gpair,
@@ -63,7 +64,7 @@ class HingeObj : public ObjFunction {
},
common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(),
ctx_->gpu_id).Eval(
out_gpair, &preds, info.labels.Data(), &info.weights_);
out_gpair->Data(), &preds, info.labels.Data(), &info.weights_);
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {

View File

@@ -21,7 +21,7 @@ void FitIntercept::InitEstimation(MetaInfo const& info, linalg::Vector<float>* b
}
// Avoid altering any state in child objective.
HostDeviceVector<float> dummy_predt(info.labels.Size(), 0.0f, this->ctx_->gpu_id);
HostDeviceVector<GradientPair> gpair(info.labels.Size(), GradientPair{}, this->ctx_->gpu_id);
linalg::Matrix<GradientPair> gpair(info.labels.Shape(), this->ctx_->gpu_id);
Json config{Object{}};
this->SaveConfig(&config);

View File

@@ -165,9 +165,8 @@ class LambdaRankObj : public FitIntercept {
void CalcLambdaForGroup(std::int32_t iter, common::Span<float const> g_predt,
linalg::VectorView<float const> g_label, float w,
common::Span<std::size_t const> g_rank, bst_group_t g, Delta delta,
common::Span<GradientPair> g_gpair) {
std::fill_n(g_gpair.data(), g_gpair.size(), GradientPair{});
auto p_gpair = g_gpair.data();
linalg::VectorView<GradientPair> g_gpair) {
std::fill_n(g_gpair.Values().data(), g_gpair.Size(), GradientPair{});
auto ti_plus = ti_plus_.HostView();
auto tj_minus = tj_minus_.HostView();
@@ -198,8 +197,8 @@ class LambdaRankObj : public FitIntercept {
std::size_t idx_high = g_rank[rank_high];
std::size_t idx_low = g_rank[rank_low];
p_gpair[idx_high] += pg;
p_gpair[idx_low] += ng;
g_gpair(idx_high) += pg;
g_gpair(idx_low) += ng;
if (unbiased) {
auto k = ti_plus.Size();
@@ -225,12 +224,13 @@ class LambdaRankObj : public FitIntercept {
MakePairs(ctx_, iter, p_cache_, g, g_label, g_rank, loop);
if (sum_lambda > 0.0) {
double norm = std::log2(1.0 + sum_lambda) / sum_lambda;
std::transform(g_gpair.data(), g_gpair.data() + g_gpair.size(), g_gpair.data(),
[norm](GradientPair const& g) { return g * norm; });
std::transform(g_gpair.Values().data(), g_gpair.Values().data() + g_gpair.Size(),
g_gpair.Values().data(), [norm](GradientPair const& g) { return g * norm; });
}
auto w_norm = p_cache_->WeightNorm();
std::transform(g_gpair.begin(), g_gpair.end(), g_gpair.begin(),
std::transform(g_gpair.Values().data(), g_gpair.Values().data() + g_gpair.Size(),
g_gpair.Values().data(),
[&](GradientPair const& gpair) { return gpair * w * w_norm; });
}
@@ -301,7 +301,7 @@ class LambdaRankObj : public FitIntercept {
}
void GetGradient(HostDeviceVector<float> const& predt, MetaInfo const& info, std::int32_t iter,
HostDeviceVector<GradientPair>* out_gpair) override {
linalg::Matrix<GradientPair>* out_gpair) override {
CHECK_EQ(info.labels.Size(), predt.Size()) << error::LabelScoreSize();
// init/renew cache
@@ -339,7 +339,7 @@ class LambdaRankNDCG : public LambdaRankObj<LambdaRankNDCG, ltr::NDCGCache> {
void CalcLambdaForGroupNDCG(std::int32_t iter, common::Span<float const> g_predt,
linalg::VectorView<float const> g_label, float w,
common::Span<std::size_t const> g_rank,
common::Span<GradientPair> g_gpair,
linalg::VectorView<GradientPair> g_gpair,
linalg::VectorView<double const> inv_IDCG,
common::Span<double const> discount, bst_group_t g) {
auto delta = [&](auto y_high, auto y_low, std::size_t rank_high, std::size_t rank_low,
@@ -351,7 +351,7 @@ class LambdaRankNDCG : public LambdaRankObj<LambdaRankNDCG, ltr::NDCGCache> {
}
void GetGradientImpl(std::int32_t iter, const HostDeviceVector<float>& predt,
const MetaInfo& info, HostDeviceVector<GradientPair>* out_gpair) {
const MetaInfo& info, linalg::Matrix<GradientPair>* out_gpair) {
if (ctx_->IsCUDA()) {
cuda_impl::LambdaRankGetGradientNDCG(
ctx_, iter, predt, info, GetCache(), ti_plus_.View(ctx_->gpu_id),
@@ -363,8 +363,10 @@ class LambdaRankNDCG : public LambdaRankObj<LambdaRankNDCG, ltr::NDCGCache> {
bst_group_t n_groups = p_cache_->Groups();
auto gptr = p_cache_->DataGroupPtr(ctx_);
out_gpair->Resize(info.num_row_);
auto h_gpair = out_gpair->HostSpan();
out_gpair->SetDevice(ctx_->Device());
out_gpair->Reshape(info.num_row_, 1);
auto h_gpair = out_gpair->HostView();
auto h_predt = predt.ConstHostSpan();
auto h_label = info.labels.HostView();
auto h_weight = common::MakeOptionalWeights(ctx_, info.weights_);
@@ -378,7 +380,8 @@ class LambdaRankNDCG : public LambdaRankObj<LambdaRankNDCG, ltr::NDCGCache> {
std::size_t cnt = gptr[g + 1] - gptr[g];
auto w = h_weight[g];
auto g_predt = h_predt.subspan(gptr[g], cnt);
auto g_gpair = h_gpair.subspan(gptr[g], cnt);
auto g_gpair =
h_gpair.Slice(linalg::Range(static_cast<std::size_t>(gptr[g]), gptr[g] + cnt), 0);
auto g_label = h_label.Slice(make_range(g), 0);
auto g_rank = rank_idx.subspan(gptr[g], cnt);
@@ -420,7 +423,7 @@ void LambdaRankGetGradientNDCG(Context const*, std::int32_t, HostDeviceVector<fl
linalg::VectorView<double const>, // input bias ratio
linalg::VectorView<double const>, // input bias ratio
linalg::VectorView<double>, linalg::VectorView<double>,
HostDeviceVector<GradientPair>*) {
linalg::Matrix<GradientPair>*) {
common::AssertGPUSupport();
}
@@ -470,7 +473,7 @@ void MAPStat(Context const* ctx, linalg::VectorView<float const> label,
class LambdaRankMAP : public LambdaRankObj<LambdaRankMAP, ltr::MAPCache> {
public:
void GetGradientImpl(std::int32_t iter, const HostDeviceVector<float>& predt,
const MetaInfo& info, HostDeviceVector<GradientPair>* out_gpair) {
const MetaInfo& info, linalg::Matrix<GradientPair>* out_gpair) {
CHECK(param_.ndcg_exp_gain) << "NDCG gain can not be set for the MAP objective.";
if (ctx_->IsCUDA()) {
return cuda_impl::LambdaRankGetGradientMAP(
@@ -482,8 +485,11 @@ class LambdaRankMAP : public LambdaRankObj<LambdaRankMAP, ltr::MAPCache> {
auto gptr = p_cache_->DataGroupPtr(ctx_).data();
bst_group_t n_groups = p_cache_->Groups();
out_gpair->Resize(info.num_row_);
auto h_gpair = out_gpair->HostSpan();
CHECK_EQ(info.labels.Shape(1), 1) << "multi-target for learning to rank is not yet supported.";
out_gpair->SetDevice(ctx_->Device());
out_gpair->Reshape(info.num_row_, this->Targets(info));
auto h_gpair = out_gpair->HostView();
auto h_label = info.labels.HostView().Slice(linalg::All(), 0);
auto h_predt = predt.ConstHostSpan();
auto rank_idx = p_cache_->SortedIdx(ctx_, h_predt);
@@ -514,7 +520,7 @@ class LambdaRankMAP : public LambdaRankObj<LambdaRankMAP, ltr::MAPCache> {
auto cnt = gptr[g + 1] - gptr[g];
auto w = h_weight[g];
auto g_predt = h_predt.subspan(gptr[g], cnt);
auto g_gpair = h_gpair.subspan(gptr[g], cnt);
auto g_gpair = h_gpair.Slice(linalg::Range(gptr[g], gptr[g] + cnt), 0);
auto g_label = h_label.Slice(make_range(g));
auto g_rank = rank_idx.subspan(gptr[g], cnt);
@@ -545,7 +551,7 @@ void LambdaRankGetGradientMAP(Context const*, std::int32_t, HostDeviceVector<flo
linalg::VectorView<double const>, // input bias ratio
linalg::VectorView<double const>, // input bias ratio
linalg::VectorView<double>, linalg::VectorView<double>,
HostDeviceVector<GradientPair>*) {
linalg::Matrix<GradientPair>*) {
common::AssertGPUSupport();
}
} // namespace cuda_impl
@@ -557,7 +563,7 @@ void LambdaRankGetGradientMAP(Context const*, std::int32_t, HostDeviceVector<flo
class LambdaRankPairwise : public LambdaRankObj<LambdaRankPairwise, ltr::RankingCache> {
public:
void GetGradientImpl(std::int32_t iter, const HostDeviceVector<float>& predt,
const MetaInfo& info, HostDeviceVector<GradientPair>* out_gpair) {
const MetaInfo& info, linalg::Matrix<GradientPair>* out_gpair) {
CHECK(param_.ndcg_exp_gain) << "NDCG gain can not be set for the pairwise objective.";
if (ctx_->IsCUDA()) {
return cuda_impl::LambdaRankGetGradientPairwise(
@@ -569,8 +575,10 @@ class LambdaRankPairwise : public LambdaRankObj<LambdaRankPairwise, ltr::Ranking
auto gptr = p_cache_->DataGroupPtr(ctx_);
bst_group_t n_groups = p_cache_->Groups();
out_gpair->Resize(info.num_row_);
auto h_gpair = out_gpair->HostSpan();
out_gpair->SetDevice(ctx_->Device());
out_gpair->Reshape(info.num_row_, this->Targets(info));
auto h_gpair = out_gpair->HostView();
auto h_label = info.labels.HostView().Slice(linalg::All(), 0);
auto h_predt = predt.ConstHostSpan();
auto h_weight = common::MakeOptionalWeights(ctx_, info.weights_);
@@ -585,7 +593,7 @@ class LambdaRankPairwise : public LambdaRankObj<LambdaRankPairwise, ltr::Ranking
auto cnt = gptr[g + 1] - gptr[g];
auto w = h_weight[g];
auto g_predt = h_predt.subspan(gptr[g], cnt);
auto g_gpair = h_gpair.subspan(gptr[g], cnt);
auto g_gpair = h_gpair.Slice(linalg::Range(gptr[g], gptr[g] + cnt), 0);
auto g_label = h_label.Slice(make_range(g));
auto g_rank = rank_idx.subspan(gptr[g], cnt);
@@ -611,7 +619,7 @@ void LambdaRankGetGradientPairwise(Context const*, std::int32_t, HostDeviceVecto
linalg::VectorView<double const>, // input bias ratio
linalg::VectorView<double const>, // input bias ratio
linalg::VectorView<double>, linalg::VectorView<double>,
HostDeviceVector<GradientPair>*) {
linalg::Matrix<GradientPair>*) {
common::AssertGPUSupport();
}
} // namespace cuda_impl

View File

@@ -93,7 +93,7 @@ struct GetGradOp {
// obtain group segment data.
auto g_label = args.labels.Slice(linalg::Range(data_group_begin, data_group_begin + n_data), 0);
auto g_predt = args.predts.subspan(data_group_begin, n_data);
auto g_gpair = args.gpairs.subspan(data_group_begin, n_data).data();
auto g_gpair = args.gpairs.Slice(linalg::Range(data_group_begin, data_group_begin + n_data));
auto g_rank = args.d_sorted_idx.subspan(data_group_begin, n_data);
auto [i, j] = make_pair(idx, g);
@@ -128,8 +128,8 @@ struct GetGradOp {
auto ngt = GradientPair{common::TruncateWithRounding(gr.GetGrad(), ng.GetGrad()),
common::TruncateWithRounding(gr.GetHess(), ng.GetHess())};
dh::AtomicAddGpair(g_gpair + idx_high, pgt);
dh::AtomicAddGpair(g_gpair + idx_low, ngt);
dh::AtomicAddGpair(&g_gpair(idx_high), pgt);
dh::AtomicAddGpair(&g_gpair(idx_low), ngt);
}
if (unbiased && need_update) {
@@ -266,16 +266,16 @@ void CalcGrad(Context const* ctx, MetaInfo const& info, std::shared_ptr<ltr::Ran
*/
auto d_weights = common::MakeOptionalWeights(ctx, info.weights_);
auto w_norm = p_cache->WeightNorm();
thrust::for_each_n(ctx->CUDACtx()->CTP(), thrust::make_counting_iterator(0ul), d_gpair.size(),
[=] XGBOOST_DEVICE(std::size_t i) {
thrust::for_each_n(ctx->CUDACtx()->CTP(), thrust::make_counting_iterator(0ul), d_gpair.Size(),
[=] XGBOOST_DEVICE(std::size_t i) mutable {
auto g = dh::SegmentId(d_gptr, i);
auto sum_lambda = thrust::get<2>(d_max_lambdas[g]);
// Normalization
if (sum_lambda > 0.0) {
double norm = std::log2(1.0 + sum_lambda) / sum_lambda;
d_gpair[i] *= norm;
d_gpair(i, 0) *= norm;
}
d_gpair[i] *= (d_weights[g] * w_norm);
d_gpair(i, 0) *= (d_weights[g] * w_norm);
});
}
@@ -288,7 +288,7 @@ void Launch(Context const* ctx, std::int32_t iter, HostDeviceVector<float> const
linalg::VectorView<double const> ti_plus, // input bias ratio
linalg::VectorView<double const> tj_minus, // input bias ratio
linalg::VectorView<double> li, linalg::VectorView<double> lj,
HostDeviceVector<GradientPair>* out_gpair) {
linalg::Matrix<GradientPair>* out_gpair) {
// boilerplate
std::int32_t device_id = ctx->gpu_id;
dh::safe_cuda(cudaSetDevice(device_id));
@@ -296,8 +296,8 @@ void Launch(Context const* ctx, std::int32_t iter, HostDeviceVector<float> const
info.labels.SetDevice(device_id);
preds.SetDevice(device_id);
out_gpair->SetDevice(device_id);
out_gpair->Resize(preds.Size());
out_gpair->SetDevice(ctx->Device());
out_gpair->Reshape(preds.Size(), 1);
CHECK(p_cache);
@@ -308,8 +308,9 @@ void Launch(Context const* ctx, std::int32_t iter, HostDeviceVector<float> const
auto label = info.labels.View(ctx->gpu_id);
auto predts = preds.ConstDeviceSpan();
auto gpairs = out_gpair->DeviceSpan();
thrust::fill_n(ctx->CUDACtx()->CTP(), gpairs.data(), gpairs.size(), GradientPair{0.0f, 0.0f});
auto gpairs = out_gpair->View(ctx->Device());
thrust::fill_n(ctx->CUDACtx()->CTP(), gpairs.Values().data(), gpairs.Size(),
GradientPair{0.0f, 0.0f});
auto const d_threads_group_ptr = p_cache->CUDAThreadsGroupPtr();
auto const d_gptr = p_cache->DataGroupPtr(ctx);
@@ -371,7 +372,7 @@ void LambdaRankGetGradientNDCG(Context const* ctx, std::int32_t iter,
linalg::VectorView<double const> ti_plus, // input bias ratio
linalg::VectorView<double const> tj_minus, // input bias ratio
linalg::VectorView<double> li, linalg::VectorView<double> lj,
HostDeviceVector<GradientPair>* out_gpair) {
linalg::Matrix<GradientPair>* out_gpair) {
// boilerplate
std::int32_t device_id = ctx->gpu_id;
dh::safe_cuda(cudaSetDevice(device_id));
@@ -440,7 +441,7 @@ void LambdaRankGetGradientMAP(Context const* ctx, std::int32_t iter,
linalg::VectorView<double const> ti_plus, // input bias ratio
linalg::VectorView<double const> tj_minus, // input bias ratio
linalg::VectorView<double> li, linalg::VectorView<double> lj,
HostDeviceVector<GradientPair>* out_gpair) {
linalg::Matrix<GradientPair>* out_gpair) {
std::int32_t device_id = ctx->gpu_id;
dh::safe_cuda(cudaSetDevice(device_id));
@@ -479,7 +480,7 @@ void LambdaRankGetGradientPairwise(Context const* ctx, std::int32_t iter,
linalg::VectorView<double const> ti_plus, // input bias ratio
linalg::VectorView<double const> tj_minus, // input bias ratio
linalg::VectorView<double> li, linalg::VectorView<double> lj,
HostDeviceVector<GradientPair>* out_gpair) {
linalg::Matrix<GradientPair>* out_gpair) {
std::int32_t device_id = ctx->gpu_id;
dh::safe_cuda(cudaSetDevice(device_id));

View File

@@ -61,7 +61,7 @@ struct KernelInputs {
linalg::MatrixView<float const> labels;
common::Span<float const> predts;
common::Span<GradientPair> gpairs;
linalg::MatrixView<GradientPair> gpairs;
linalg::VectorView<GradientPair const> d_roundings;
double const *d_cost_rounding;
@@ -79,8 +79,8 @@ struct MakePairsOp {
/**
* \brief Make pair for the topk pair method.
*/
XGBOOST_DEVICE std::tuple<std::size_t, std::size_t> WithTruncation(std::size_t idx,
bst_group_t g) const {
[[nodiscard]] XGBOOST_DEVICE std::tuple<std::size_t, std::size_t> WithTruncation(
std::size_t idx, bst_group_t g) const {
auto thread_group_begin = args.d_threads_group_ptr[g];
auto idx_in_thread_group = idx - thread_group_begin;

View File

@@ -154,7 +154,7 @@ void LambdaRankGetGradientNDCG(Context const* ctx, std::int32_t iter,
linalg::VectorView<double const> t_plus, // input bias ratio
linalg::VectorView<double const> t_minus, // input bias ratio
linalg::VectorView<double> li, linalg::VectorView<double> lj,
HostDeviceVector<GradientPair>* out_gpair);
linalg::Matrix<GradientPair>* out_gpair);
/**
* \brief Generate statistic for MAP used for calculating \Delta Z in lambda mart.
@@ -168,7 +168,7 @@ void LambdaRankGetGradientMAP(Context const* ctx, std::int32_t iter,
linalg::VectorView<double const> t_plus, // input bias ratio
linalg::VectorView<double const> t_minus, // input bias ratio
linalg::VectorView<double> li, linalg::VectorView<double> lj,
HostDeviceVector<GradientPair>* out_gpair);
linalg::Matrix<GradientPair>* out_gpair);
void LambdaRankGetGradientPairwise(Context const* ctx, std::int32_t iter,
HostDeviceVector<float> const& predt, const MetaInfo& info,
@@ -176,7 +176,7 @@ void LambdaRankGetGradientPairwise(Context const* ctx, std::int32_t iter,
linalg::VectorView<double const> ti_plus, // input bias ratio
linalg::VectorView<double const> tj_minus, // input bias ratio
linalg::VectorView<double> li, linalg::VectorView<double> lj,
HostDeviceVector<GradientPair>* out_gpair);
linalg::Matrix<GradientPair>* out_gpair);
void LambdaRankUpdatePositionBias(Context const* ctx, linalg::VectorView<double const> li_full,
linalg::VectorView<double const> lj_full,

View File

@@ -1,5 +1,5 @@
/*!
* Copyright 2015-2022 by XGBoost Contributors
/**
* Copyright 2015-2023, XGBoost Contributors
* \file multi_class.cc
* \brief Definition of multi-class classification objectives.
* \author Tianqi Chen
@@ -48,13 +48,8 @@ class SoftmaxMultiClassObj : public ObjFunction {
ObjInfo Task() const override { return ObjInfo::kClassification; }
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
int iter,
HostDeviceVector<GradientPair>* out_gpair) override {
// Remove unused parameter compiler warning.
(void) iter;
void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, std::int32_t,
linalg::Matrix<GradientPair>* out_gpair) override {
if (info.labels.Size() == 0) {
return;
}
@@ -77,7 +72,7 @@ class SoftmaxMultiClassObj : public ObjFunction {
label_correct_.Resize(1);
label_correct_.SetDevice(device);
out_gpair->Resize(preds.Size());
out_gpair->Reshape(info.num_row_, static_cast<std::uint64_t>(nclass));
label_correct_.Fill(1);
const bool is_null_weight = info.weights_.Size() == 0;
@@ -115,7 +110,7 @@ class SoftmaxMultiClassObj : public ObjFunction {
gpair[idx * nclass + k] = GradientPair(p * wt, h);
}
}, common::Range{0, ndata}, ctx_->Threads(), device)
.Eval(out_gpair, info.labels.Data(), &preds, &info.weights_, &label_correct_);
.Eval(out_gpair->Data(), info.labels.Data(), &preds, &info.weights_, &label_correct_);
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {

View File

@@ -27,13 +27,12 @@
#endif // defined(XGBOOST_USE_CUDA)
namespace xgboost {
namespace obj {
namespace xgboost::obj {
class QuantileRegression : public ObjFunction {
common::QuantileLossParam param_;
HostDeviceVector<float> alpha_;
bst_target_t Targets(MetaInfo const& info) const override {
[[nodiscard]] bst_target_t Targets(MetaInfo const& info) const override {
auto const& alpha = param_.quantile_alpha.Get();
CHECK_EQ(alpha.size(), alpha_.Size()) << "The objective is not yet configured.";
if (info.ShouldHaveLabels()) {
@@ -50,7 +49,7 @@ class QuantileRegression : public ObjFunction {
public:
void GetGradient(HostDeviceVector<float> const& preds, const MetaInfo& info, std::int32_t iter,
HostDeviceVector<GradientPair>* out_gpair) override {
linalg::Matrix<GradientPair>* out_gpair) override {
if (iter == 0) {
CheckInitInputs(info);
}
@@ -65,10 +64,11 @@ class QuantileRegression : public ObjFunction {
auto labels = info.labels.View(ctx_->gpu_id);
out_gpair->SetDevice(ctx_->gpu_id);
out_gpair->Resize(n_targets * info.num_row_);
auto gpair =
linalg::MakeTensorView(ctx_, out_gpair, info.num_row_, n_alphas, n_targets / n_alphas);
out_gpair->SetDevice(ctx_->Device());
CHECK_EQ(info.labels.Shape(1), 1)
<< "Multi-target for quantile regression is not yet supported.";
out_gpair->Reshape(info.num_row_, n_targets);
auto gpair = out_gpair->View(ctx_->Device());
info.weights_.SetDevice(ctx_->gpu_id);
common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan()
@@ -85,15 +85,16 @@ class QuantileRegression : public ObjFunction {
ctx_, gpair, [=] XGBOOST_DEVICE(std::size_t i, GradientPair const&) mutable {
auto [sample_id, quantile_id, target_id] =
linalg::UnravelIndex(i, n_samples, alpha.size(), n_targets / alpha.size());
assert(target_id == 0);
auto d = predt(i) - labels(sample_id, target_id);
auto h = weight[sample_id];
if (d >= 0) {
auto g = (1.0f - alpha[quantile_id]) * weight[sample_id];
gpair(sample_id, quantile_id, target_id) = GradientPair{g, h};
gpair(sample_id, quantile_id) = GradientPair{g, h};
} else {
auto g = (-alpha[quantile_id] * weight[sample_id]);
gpair(sample_id, quantile_id, target_id) = GradientPair{g, h};
gpair(sample_id, quantile_id) = GradientPair{g, h};
}
});
}
@@ -192,7 +193,7 @@ class QuantileRegression : public ObjFunction {
param_.Validate();
this->alpha_.HostVector() = param_.quantile_alpha.Get();
}
ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; }
[[nodiscard]] ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; }
static char const* Name() { return "reg:quantileerror"; }
void SaveConfig(Json* p_out) const override {
@@ -206,8 +207,8 @@ class QuantileRegression : public ObjFunction {
alpha_.HostVector() = param_.quantile_alpha.Get();
}
const char* DefaultEvalMetric() const override { return "quantile"; }
Json DefaultMetricConfig() const override {
[[nodiscard]] const char* DefaultEvalMetric() const override { return "quantile"; }
[[nodiscard]] Json DefaultMetricConfig() const override {
CHECK(param_.GetInitialised());
Json config{Object{}};
config["name"] = String{this->DefaultEvalMetric()};
@@ -223,5 +224,4 @@ XGBOOST_REGISTER_OBJECTIVE(QuantileRegression, QuantileRegression::Name())
#if defined(XGBOOST_USE_CUDA)
DMLC_REGISTRY_FILE_TAG(quantile_obj_gpu);
#endif // defined(XGBOOST_USE_CUDA)
} // namespace obj
} // namespace xgboost
} // namespace xgboost::obj

View File

@@ -36,12 +36,12 @@
#include "xgboost/tree_model.h" // RegTree
#if defined(XGBOOST_USE_CUDA)
#include "../common/cuda_context.cuh" // for CUDAContext
#include "../common/device_helpers.cuh"
#include "../common/linalg_op.cuh"
#endif // defined(XGBOOST_USE_CUDA)
namespace xgboost {
namespace obj {
namespace xgboost::obj {
namespace {
void CheckRegInputs(MetaInfo const& info, HostDeviceVector<bst_float> const& preds) {
CheckInitInputs(info);
@@ -68,33 +68,60 @@ class RegLossObj : public FitIntercept {
HostDeviceVector<float> additional_input_;
public:
// 0 - label_correct flag, 1 - scale_pos_weight, 2 - is_null_weight
RegLossObj(): additional_input_(3) {}
void ValidateLabel(MetaInfo const& info) {
auto label = info.labels.View(ctx_->Ordinal());
auto valid = ctx_->DispatchDevice(
[&] {
return std::all_of(linalg::cbegin(label), linalg::cend(label),
[](float y) -> bool { return Loss::CheckLabel(y); });
},
[&] {
#if defined(XGBOOST_USE_CUDA)
auto cuctx = ctx_->CUDACtx();
auto it = dh::MakeTransformIterator<bool>(
thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(std::size_t i) -> bool {
auto [m, n] = linalg::UnravelIndex(i, label.Shape());
return Loss::CheckLabel(label(m, n));
});
return dh::Reduce(cuctx->CTP(), it, it + label.Size(), true, thrust::logical_and<>{});
#else
common::AssertGPUSupport();
return false;
#endif // defined(XGBOOST_USE_CUDA)
});
if (!valid) {
LOG(FATAL) << Loss::LabelErrorMsg();
}
}
// 0 - scale_pos_weight, 1 - is_null_weight
RegLossObj(): additional_input_(2) {}
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
}
ObjInfo Task() const override { return Loss::Info(); }
[[nodiscard]] ObjInfo Task() const override { return Loss::Info(); }
bst_target_t Targets(MetaInfo const& info) const override {
[[nodiscard]] bst_target_t Targets(MetaInfo const& info) const override {
// Multi-target regression.
return std::max(static_cast<size_t>(1), info.labels.Shape(1));
return std::max(static_cast<std::size_t>(1), info.labels.Shape(1));
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair>* out_gpair) override {
void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo& info,
std::int32_t iter, linalg::Matrix<GradientPair>* out_gpair) override {
CheckRegInputs(info, preds);
if (iter == 0) {
ValidateLabel(info);
}
size_t const ndata = preds.Size();
out_gpair->Resize(ndata);
out_gpair->SetDevice(ctx_->Device());
auto device = ctx_->gpu_id;
additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag
bool is_null_weight = info.weights_.Size() == 0;
auto scale_pos_weight = param_.scale_pos_weight;
additional_input_.HostVector().begin()[1] = scale_pos_weight;
additional_input_.HostVector().begin()[2] = is_null_weight;
additional_input_.HostVector().begin()[0] = scale_pos_weight;
additional_input_.HostVector().begin()[1] = is_null_weight;
const size_t nthreads = ctx_->Threads();
bool on_device = device >= 0;
@@ -102,7 +129,8 @@ class RegLossObj : public FitIntercept {
// for better performance.
const size_t n_data_blocks = std::max(static_cast<size_t>(1), (on_device ? ndata : nthreads));
const size_t block_size = ndata / n_data_blocks + !!(ndata % n_data_blocks);
auto const n_targets = std::max(info.labels.Shape(1), static_cast<size_t>(1));
auto const n_targets = this->Targets(info);
out_gpair->Reshape(info.num_row_, n_targets);
common::Transform<>::Init(
[block_size, ndata, n_targets] XGBOOST_DEVICE(
@@ -117,8 +145,8 @@ class RegLossObj : public FitIntercept {
GradientPair* out_gpair_ptr = _out_gpair.data();
const size_t begin = data_block_idx*block_size;
const size_t end = std::min(ndata, begin + block_size);
const float _scale_pos_weight = _additional_input[1];
const bool _is_null_weight = _additional_input[2];
const float _scale_pos_weight = _additional_input[0];
const bool _is_null_weight = _additional_input[1];
for (size_t idx = begin; idx < end; ++idx) {
bst_float p = Loss::PredTransform(preds_ptr[idx]);
@@ -127,26 +155,17 @@ class RegLossObj : public FitIntercept {
if (label == 1.0f) {
w *= _scale_pos_weight;
}
if (!Loss::CheckLabel(label)) {
// If there is an incorrect label, the host code will know.
_additional_input[0] = 0;
}
out_gpair_ptr[idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w,
Loss::SecondOrderGradient(p, label) * w);
}
},
common::Range{0, static_cast<int64_t>(n_data_blocks)}, nthreads, device)
.Eval(&additional_input_, out_gpair, &preds, info.labels.Data(),
.Eval(&additional_input_, out_gpair->Data(), &preds, info.labels.Data(),
&info.weights_);
auto const flag = additional_input_.HostVector().begin()[0];
if (flag == 0) {
LOG(FATAL) << Loss::LabelErrorMsg();
}
}
public:
const char* DefaultEvalMetric() const override {
[[nodiscard]] const char* DefaultEvalMetric() const override {
return Loss::DefaultEvalMetric();
}
@@ -160,7 +179,7 @@ class RegLossObj : public FitIntercept {
.Eval(io_preds);
}
float ProbToMargin(float base_score) const override {
[[nodiscard]] float ProbToMargin(float base_score) const override {
return Loss::ProbToMargin(base_score);
}
@@ -215,21 +234,21 @@ class PseudoHuberRegression : public FitIntercept {
public:
void Configure(Args const& args) override { param_.UpdateAllowUnknown(args); }
ObjInfo Task() const override { return ObjInfo::kRegression; }
bst_target_t Targets(MetaInfo const& info) const override {
return std::max(static_cast<size_t>(1), info.labels.Shape(1));
[[nodiscard]] ObjInfo Task() const override { return ObjInfo::kRegression; }
[[nodiscard]] bst_target_t Targets(MetaInfo const& info) const override {
return std::max(static_cast<std::size_t>(1), info.labels.Shape(1));
}
void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/,
HostDeviceVector<GradientPair>* out_gpair) override {
linalg::Matrix<GradientPair>* out_gpair) override {
CheckRegInputs(info, preds);
auto slope = param_.huber_slope;
CHECK_NE(slope, 0.0) << "slope for pseudo huber cannot be 0.";
auto labels = info.labels.View(ctx_->gpu_id);
out_gpair->SetDevice(ctx_->gpu_id);
out_gpair->Resize(info.labels.Size());
auto gpair = linalg::MakeVec(out_gpair);
out_gpair->Reshape(info.num_row_, this->Targets(info));
auto gpair = out_gpair->View(ctx_->Device());
preds.SetDevice(ctx_->gpu_id);
auto predt = linalg::MakeVec(&preds);
@@ -252,7 +271,7 @@ class PseudoHuberRegression : public FitIntercept {
});
}
const char* DefaultEvalMetric() const override { return "mphe"; }
[[nodiscard]] const char* DefaultEvalMetric() const override { return "mphe"; }
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
@@ -292,15 +311,15 @@ class PoissonRegression : public FitIntercept {
param_.UpdateAllowUnknown(args);
}
ObjInfo Task() const override { return ObjInfo::kRegression; }
[[nodiscard]] ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, int,
linalg::Matrix<GradientPair>* out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
size_t const ndata = preds.Size();
out_gpair->Resize(ndata);
out_gpair->SetDevice(ctx_->Device());
out_gpair->Reshape(info.num_row_, this->Targets(info));
auto device = ctx_->gpu_id;
label_correct_.Resize(1);
label_correct_.Fill(1);
@@ -328,7 +347,7 @@ class PoissonRegression : public FitIntercept {
expf(p + max_delta_step) * w};
},
common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval(
&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_);
&label_correct_, out_gpair->Data(), &preds, info.labels.Data(), &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
@@ -349,10 +368,10 @@ class PoissonRegression : public FitIntercept {
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
[[nodiscard]] float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
[[nodiscard]] const char* DefaultEvalMetric() const override {
return "poisson-nloglik";
}
@@ -383,16 +402,15 @@ XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson")
class CoxRegression : public FitIntercept {
public:
void Configure(Args const&) override {}
ObjInfo Task() const override { return ObjInfo::kRegression; }
[[nodiscard]] ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, int,
linalg::Matrix<GradientPair>* out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
const auto& preds_h = preds.HostVector();
out_gpair->Resize(preds_h.size());
auto& gpair = out_gpair->HostVector();
out_gpair->Reshape(info.num_row_, this->Targets(info));
auto gpair = out_gpair->HostView();
const std::vector<size_t> &label_order = info.LabelAbsSort(ctx_);
const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*)
@@ -440,8 +458,8 @@ class CoxRegression : public FitIntercept {
}
const double grad = exp_p*r_k - static_cast<bst_float>(y > 0);
const double hess = exp_p*r_k - exp_p*exp_p * s_k;
gpair.at(ind) = GradientPair(grad * w, hess * w);
const double hess = exp_p * r_k - exp_p * exp_p * s_k;
gpair(ind) = GradientPair(grad * w, hess * w);
last_abs_y = abs_y;
last_exp_p = exp_p;
@@ -457,10 +475,10 @@ class CoxRegression : public FitIntercept {
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
[[nodiscard]] float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
[[nodiscard]] const char* DefaultEvalMetric() const override {
return "cox-nloglik";
}
@@ -480,16 +498,16 @@ XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox")
class GammaRegression : public FitIntercept {
public:
void Configure(Args const&) override {}
ObjInfo Task() const override { return ObjInfo::kRegression; }
[[nodiscard]] ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float> &preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, std::int32_t,
linalg::Matrix<GradientPair>* out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
const size_t ndata = preds.Size();
auto device = ctx_->gpu_id;
out_gpair->Resize(ndata);
out_gpair->SetDevice(ctx_->Device());
out_gpair->Reshape(info.num_row_, this->Targets(info));
label_correct_.Resize(1);
label_correct_.Fill(1);
@@ -514,7 +532,7 @@ class GammaRegression : public FitIntercept {
_out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w);
},
common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval(
&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_);
&label_correct_, out_gpair->Data(), &preds, info.labels.Data(), &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
@@ -536,10 +554,10 @@ class GammaRegression : public FitIntercept {
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
[[nodiscard]] float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
[[nodiscard]] const char* DefaultEvalMetric() const override {
return "gamma-nloglik";
}
void SaveConfig(Json* p_out) const override {
@@ -578,15 +596,15 @@ class TweedieRegression : public FitIntercept {
metric_ = os.str();
}
ObjInfo Task() const override { return ObjInfo::kRegression; }
[[nodiscard]] ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, std::int32_t,
linalg::Matrix<GradientPair>* out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
const size_t ndata = preds.Size();
out_gpair->Resize(ndata);
out_gpair->SetDevice(ctx_->Device());
out_gpair->Reshape(info.num_row_, this->Targets(info));
auto device = ctx_->gpu_id;
label_correct_.Resize(1);
@@ -619,7 +637,7 @@ class TweedieRegression : public FitIntercept {
_out_gpair[_idx] = GradientPair(grad * w, hess * w);
},
common::Range{0, static_cast<int64_t>(ndata), 1}, this->ctx_->Threads(), device)
.Eval(&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_);
.Eval(&label_correct_, out_gpair->Data(), &preds, info.labels.Data(), &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
@@ -639,11 +657,11 @@ class TweedieRegression : public FitIntercept {
.Eval(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
[[nodiscard]] float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
[[nodiscard]] const char* DefaultEvalMetric() const override {
return metric_.c_str();
}
@@ -672,19 +690,19 @@ XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie")
class MeanAbsoluteError : public ObjFunction {
public:
void Configure(Args const&) override {}
ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; }
bst_target_t Targets(MetaInfo const& info) const override {
return std::max(static_cast<size_t>(1), info.labels.Shape(1));
[[nodiscard]] ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; }
[[nodiscard]] bst_target_t Targets(MetaInfo const& info) const override {
return std::max(static_cast<std::size_t>(1), info.labels.Shape(1));
}
void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/,
HostDeviceVector<GradientPair>* out_gpair) override {
void GetGradient(HostDeviceVector<float> const& preds, const MetaInfo& info,
std::int32_t /*iter*/, linalg::Matrix<GradientPair>* out_gpair) override {
CheckRegInputs(info, preds);
auto labels = info.labels.View(ctx_->gpu_id);
out_gpair->SetDevice(ctx_->gpu_id);
out_gpair->Resize(info.labels.Size());
auto gpair = linalg::MakeVec(out_gpair);
out_gpair->SetDevice(ctx_->Device());
out_gpair->Reshape(info.num_row_, this->Targets(info));
auto gpair = out_gpair->View(ctx_->Device());
preds.SetDevice(ctx_->gpu_id);
auto predt = linalg::MakeVec(&preds);
@@ -692,14 +710,14 @@ class MeanAbsoluteError : public ObjFunction {
common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan()
: info.weights_.ConstDeviceSpan()};
linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable {
linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(std::size_t i, float y) mutable {
auto sign = [](auto x) {
return (x > static_cast<decltype(x)>(0)) - (x < static_cast<decltype(x)>(0));
};
auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape()));
auto [sample_id, target_id] = linalg::UnravelIndex(i, labels.Shape());
auto grad = sign(predt(i) - y) * weight[sample_id];
auto hess = weight[sample_id];
gpair(i) = GradientPair{grad, hess};
gpair(sample_id, target_id) = GradientPair{grad, hess};
});
}
@@ -748,7 +766,7 @@ class MeanAbsoluteError : public ObjFunction {
p_tree);
}
const char* DefaultEvalMetric() const override { return "mae"; }
[[nodiscard]] const char* DefaultEvalMetric() const override { return "mae"; }
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
@@ -763,5 +781,4 @@ class MeanAbsoluteError : public ObjFunction {
XGBOOST_REGISTER_OBJECTIVE(MeanAbsoluteError, "reg:absoluteerror")
.describe("Mean absoluate error.")
.set_body([]() { return new MeanAbsoluteError(); });
} // namespace obj
} // namespace xgboost
} // namespace xgboost::obj