Multi-target support for L1 error. (#8652)
- Add matrix support to the median function.
- Iterate through each target for quantile computation.
This commit is contained in:
@@ -1,11 +1,13 @@
|
||||
/*!
|
||||
* Copyright 2022 by XGBoost Contributors
|
||||
/**
|
||||
* Copyright 2022-2023 by XGBoost Contributors
|
||||
*/
|
||||
#include "stats.h"
|
||||
|
||||
#include <cstddef> // std::size_t
|
||||
#include <numeric> // std::accumulate
|
||||
|
||||
#include "common.h" // OptionalWeights
|
||||
#include "linalg_op.h"
|
||||
#include "threading_utils.h" // ParallelFor, MemStackAllocator
|
||||
#include "transform_iterator.h" // MakeIndexTransformIter
|
||||
#include "xgboost/context.h" // Context
|
||||
@@ -15,32 +17,32 @@
|
||||
|
||||
namespace xgboost {
|
||||
namespace common {
|
||||
float Median(Context const* ctx, linalg::Tensor<float, 2> const& t,
|
||||
HostDeviceVector<float> const& weights) {
|
||||
CHECK_LE(t.Shape(1), 1) << "Matrix is not yet supported.";
|
||||
void Median(Context const* ctx, linalg::Tensor<float, 2> const& t,
|
||||
HostDeviceVector<float> const& weights, linalg::Tensor<float, 1>* out) {
|
||||
if (!ctx->IsCPU()) {
|
||||
weights.SetDevice(ctx->gpu_id);
|
||||
auto opt_weights = OptionalWeights(weights.ConstDeviceSpan());
|
||||
auto t_v = t.View(ctx->gpu_id);
|
||||
return cuda_impl::Median(ctx, t_v, opt_weights);
|
||||
cuda_impl::Median(ctx, t_v, opt_weights, out);
|
||||
}
|
||||
|
||||
auto opt_weights = OptionalWeights(weights.ConstHostSpan());
|
||||
auto t_v = t.HostView();
|
||||
auto iter = common::MakeIndexTransformIter(
|
||||
[&](size_t i) { return linalg::detail::Apply(t_v, linalg::UnravelIndex(i, t_v.Shape())); });
|
||||
float q{0};
|
||||
if (opt_weights.Empty()) {
|
||||
q = common::Quantile(0.5, iter, iter + t_v.Size());
|
||||
} else {
|
||||
CHECK_NE(t_v.Shape(1), 0);
|
||||
auto w_it = common::MakeIndexTransformIter([&](size_t i) {
|
||||
auto sample_idx = i / t_v.Shape(1);
|
||||
return opt_weights[sample_idx];
|
||||
});
|
||||
q = common::WeightedQuantile(0.5, iter, iter + t_v.Size(), w_it);
|
||||
out->Reshape(t.Shape(1));
|
||||
auto h_out = out->HostView();
|
||||
for (std::size_t i{0}; i < t.Shape(1); ++i) {
|
||||
auto ti_v = t_v.Slice(linalg::All(), i);
|
||||
auto iter = linalg::cbegin(ti_v);
|
||||
float q{0};
|
||||
if (opt_weights.Empty()) {
|
||||
q = common::Quantile(0.5, iter, iter + ti_v.Size());
|
||||
} else {
|
||||
CHECK_NE(t_v.Shape(1), 0);
|
||||
auto w_it = common::MakeIndexTransformIter([&](std::size_t i) { return opt_weights[i]; });
|
||||
q = common::WeightedQuantile(0.5, iter, iter + ti_v.Size(), w_it);
|
||||
}
|
||||
h_out(i) = q;
|
||||
}
|
||||
return q;
|
||||
}
|
||||
|
||||
void Mean(Context const* ctx, linalg::Vector<float> const& v, linalg::Vector<float>* out) {
|
||||
|
||||
Reference in New Issue
Block a user