Implement slope for Pseudo-Huber. (#7727)
* Add objective and metric. * Some refactoring for CPU/GPU dispatching using the linalg module.
This commit is contained in:
@@ -188,6 +188,16 @@ std::vector<Idx> ArgSort(Container const &array, Comp comp = std::less<V>{}) {
|
||||
XGBOOST_PARALLEL_STABLE_SORT(result.begin(), result.end(), op);
|
||||
return result;
|
||||
}
|
||||
|
||||
struct OptionalWeights {
|
||||
Span<float const> weights;
|
||||
float dft{1.0f};
|
||||
|
||||
explicit OptionalWeights(Span<float const> w) : weights{w} {}
|
||||
explicit OptionalWeights(float w) : dft{w} {}
|
||||
|
||||
XGBOOST_DEVICE float operator[](size_t i) const { return weights.empty() ? dft : weights[i]; }
|
||||
};
|
||||
} // namespace common
|
||||
} // namespace xgboost
|
||||
#endif // XGBOOST_COMMON_COMMON_H_
|
||||
|
||||
@@ -1,15 +1,33 @@
|
||||
/*!
|
||||
* Copyright 2021 by XGBoost Contributors
|
||||
* Copyright 2021-2022 by XGBoost Contributors
|
||||
*/
|
||||
#ifndef XGBOOST_COMMON_LINALG_OP_CUH_
|
||||
#define XGBOOST_COMMON_LINALG_OP_CUH_
|
||||
|
||||
#include "xgboost/generic_parameters.h"
|
||||
#include "device_helpers.cuh"
|
||||
#include "linalg_op.h"
|
||||
#include "xgboost/linalg.h"
|
||||
|
||||
namespace xgboost {
|
||||
namespace linalg {
|
||||
template <typename T, int32_t D, typename Fn>
|
||||
void ElementWiseKernelDevice(linalg::TensorView<T, D> t, Fn&& fn, cudaStream_t s = nullptr) {
|
||||
static_assert(std::is_void<std::result_of_t<Fn(size_t, T&)>>::value,
|
||||
"For function with return, use transform instead.");
|
||||
if (t.Contiguous()) {
|
||||
auto ptr = t.Values().data();
|
||||
dh::LaunchN(t.Size(), s, [=] __device__(size_t i) mutable { fn(i, ptr[i]); });
|
||||
} else {
|
||||
dh::LaunchN(t.Size(), s, [=] __device__(size_t i) mutable {
|
||||
T& v = detail::Apply(t, linalg::UnravelIndex(i, t.Shape()));
|
||||
fn(i, v);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T, int32_t D, typename Fn>
|
||||
void ElementWiseTransformDevice(linalg::TensorView<T, D> t, Fn&& fn, cudaStream_t s = nullptr) {
|
||||
if (t.Contiguous()) {
|
||||
auto ptr = t.Values().data();
|
||||
dh::LaunchN(t.Size(), s, [=] __device__(size_t i) { ptr[i] = fn(i, ptr[i]); });
|
||||
@@ -20,6 +38,11 @@ void ElementWiseKernelDevice(linalg::TensorView<T, D> t, Fn&& fn, cudaStream_t s
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T, int32_t D, typename Fn>
|
||||
void ElementWiseKernel(GenericParameter const* ctx, linalg::TensorView<T, D> t, Fn&& fn) {
|
||||
ctx->IsCPU() ? ElementWiseKernelHost(t, ctx->Threads(), fn) : ElementWiseKernelDevice(t, fn);
|
||||
}
|
||||
} // namespace linalg
|
||||
} // namespace xgboost
|
||||
#endif // XGBOOST_COMMON_LINALG_OP_CUH_
|
||||
|
||||
@@ -1,15 +1,19 @@
|
||||
/*!
|
||||
* Copyright 2021 by XGBoost Contributors
|
||||
* Copyright 2021-2022 by XGBoost Contributors
|
||||
*/
|
||||
#ifndef XGBOOST_COMMON_LINALG_OP_H_
|
||||
#define XGBOOST_COMMON_LINALG_OP_H_
|
||||
#include <type_traits>
|
||||
|
||||
#include "common.h"
|
||||
#include "threading_utils.h"
|
||||
#include "xgboost/generic_parameters.h"
|
||||
#include "xgboost/linalg.h"
|
||||
|
||||
namespace xgboost {
|
||||
namespace linalg {
|
||||
template <typename T, int32_t D, typename Fn>
|
||||
void ElementWiseKernelHost(linalg::TensorView<T, D> t, int32_t n_threads, Fn&& fn) {
|
||||
void ElementWiseTransformHost(linalg::TensorView<T, D> t, int32_t n_threads, Fn&& fn) {
|
||||
if (t.Contiguous()) {
|
||||
auto ptr = t.Values().data();
|
||||
common::ParallelFor(t.Size(), n_threads, [&](size_t i) { ptr[i] = fn(i, ptr[i]); });
|
||||
@@ -20,6 +24,41 @@ void ElementWiseKernelHost(linalg::TensorView<T, D> t, int32_t n_threads, Fn&& f
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T, int32_t D, typename Fn>
|
||||
void ElementWiseKernelHost(linalg::TensorView<T, D> t, int32_t n_threads, Fn&& fn) {
|
||||
static_assert(std::is_void<std::result_of_t<Fn(size_t, T&)>>::value,
|
||||
"For function with return, use transform instead.");
|
||||
if (t.Contiguous()) {
|
||||
auto ptr = t.Values().data();
|
||||
common::ParallelFor(t.Size(), n_threads, [&](size_t i) { fn(i, ptr[i]); });
|
||||
} else {
|
||||
common::ParallelFor(t.Size(), n_threads, [&](size_t i) {
|
||||
auto& v = detail::Apply(t, linalg::UnravelIndex(i, t.Shape()));
|
||||
fn(i, v);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
#if !defined(XGBOOST_USE_CUDA)
|
||||
template <typename T, int32_t D, typename Fn>
|
||||
void ElementWiseKernelDevice(linalg::TensorView<T, D> t, Fn&& fn, void* s = nullptr) {
|
||||
common::AssertGPUSupport();
|
||||
}
|
||||
|
||||
template <typename T, int32_t D, typename Fn>
|
||||
void ElementWiseTransformDevice(linalg::TensorView<T, D> t, Fn&& fn, void* s = nullptr) {
|
||||
common::AssertGPUSupport();
|
||||
}
|
||||
|
||||
template <typename T, int32_t D, typename Fn>
|
||||
void ElementWiseKernel(GenericParameter const* ctx, linalg::TensorView<T, D> t, Fn&& fn) {
|
||||
if (!ctx->IsCPU()) {
|
||||
common::AssertGPUSupport();
|
||||
}
|
||||
ElementWiseKernelHost(t, ctx->Threads(), fn);
|
||||
}
|
||||
#endif // !defined(XGBOOST_USE_CUDA)
|
||||
} // namespace linalg
|
||||
} // namespace xgboost
|
||||
#endif // XGBOOST_COMMON_LINALG_OP_H_
|
||||
|
||||
@@ -23,7 +23,11 @@ namespace common {
|
||||
* \return the transformed value.
|
||||
*/
|
||||
XGBOOST_DEVICE inline float Sigmoid(float x) {
  // Clamp the exponent: expf(y) overflows float for y > ~88.72 (expf(88.7f) is
  // just below FLT_MAX), which would otherwise produce +inf in the denominator.
  x = std::min(-x, 88.7f);
  // denom >= 1 since expf(x) >= 0, so the division cannot hit zero.  The old
  // kEps = 1e-16 guard was a no-op in float arithmetic (below one ulp of 1.0)
  // and has been dropped; the result is bit-identical.
  return 1.0f / (expf(x) + 1.0f);
}
|
||||
|
||||
template <typename T>
|
||||
|
||||
7
src/common/pseudo_huber.cc
Normal file
7
src/common/pseudo_huber.cc
Normal file
@@ -0,0 +1,7 @@
|
||||
/*!
 * Copyright 2022, by XGBoost Contributors
 */
#include "pseudo_huber.h"

namespace xgboost {
// NOTE(review): the struct name is spelled "Pesudo" in the header; the
// registration must match the declaration, so the typo is preserved here.
DMLC_REGISTER_PARAMETER(PesudoHuberParam);
}  // namespace xgboost
|
||||
19
src/common/pseudo_huber.h
Normal file
19
src/common/pseudo_huber.h
Normal file
@@ -0,0 +1,19 @@
|
||||
#ifndef XGBOOST_COMMON_PSEUDO_HUBER_H_
#define XGBOOST_COMMON_PSEUDO_HUBER_H_
/*!
 * Copyright 2022, by XGBoost Contributors
 */
#include "xgboost/parameter.h"

namespace xgboost {
/*!
 * \brief Parameter for the Pseudo-Huber objective and metric.
 *
 * NOTE(review): "Pesudo" is a typo, but renaming would break the registered
 * parameter type name — keep it until a coordinated rename.
 */
struct PesudoHuberParam : public XGBoostParameter<PesudoHuberParam> {
  // Use a float literal to match set_default(1.0f) and avoid a double->float
  // conversion in the brace initializer.
  float huber_slope{1.0f};

  DMLC_DECLARE_PARAMETER(PesudoHuberParam) {
    DMLC_DECLARE_FIELD(huber_slope)
        .set_default(1.0f)
        .describe("The delta term in Pseudo-Huber loss.");
  }
};
}  // namespace xgboost
#endif  // XGBOOST_COMMON_PSEUDO_HUBER_H_
|
||||
Reference in New Issue
Block a user