From 02882a4b336d07e7a4c0e416d6b29196fc306065 Mon Sep 17 00:00:00 2001
From: Hendrik Groove
Date: Tue, 22 Oct 2024 00:05:53 +0200
Subject: [PATCH] reset regression_obj.cu

Drop the local debugging changes (try/catch wrappers, stderr logging,
explicit GPU error queries, and non-finite label tracing) and restore
the upstream implementations of RegLossObj::ValidateLabel and
MeanAbsoluteError::GetGradient.
---
 src/objective/regression_obj.cu | 87 +++++++++++----------------------
 1 file changed, 28 insertions(+), 59 deletions(-)

diff --git a/src/objective/regression_obj.cu b/src/objective/regression_obj.cu
index e0fa0101e..a3749068d 100644
--- a/src/objective/regression_obj.cu
+++ b/src/objective/regression_obj.cu
@@ -36,10 +36,6 @@
 #include "xgboost/tree_model.h"  // RegTree
 
 #include "regression_param.h"
-#include <iostream>
-#include <cmath>
-#include <exception>
-#include "../common/gpu_error_check.h"
 
 #if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
 #include "../common/cuda_context.cuh"  // for CUDAContext
@@ -67,53 +63,31 @@ class RegLossObj : public FitIntercept {
   HostDeviceVector<float> additional_input_;
 
  public:
-void ValidateLabel(MetaInfo const& info) {
-  auto label = info.labels.View(ctx_->Device());
-
-  bool valid = false;
-  try {
-    valid = ctx_->DispatchDevice(
-      [&] {
-        return std::all_of(linalg::cbegin(label), linalg::cend(label),
-                           [](float y) -> bool {
-                             if (!std::isfinite(y)) {
-                               return false;
-                             }
-                             return Loss::CheckLabel(y);
-                           });
-      },
-      [&] {
+  void ValidateLabel(MetaInfo const& info) {
+    auto label = info.labels.View(ctx_->Device());
+    auto valid = ctx_->DispatchDevice(
+        [&] {
+          return std::all_of(linalg::cbegin(label), linalg::cend(label),
+                             [](float y) -> bool { return Loss::CheckLabel(y); });
+        },
+        [&] {
 #if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
-        auto cuctx = ctx_->CUDACtx();
-
-        auto it = dh::MakeTransformIterator<bool>(
-            thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(std::size_t i) -> bool {
-              auto [m, n] = linalg::UnravelIndex(i, label.Shape());
-              float y = label(m, n);
-              if (!isfinite(y)) {
-                printf("Non-finite label value found on GPU: %f\n", y);
-                return false;
-              }
-              return Loss::CheckLabel(y);
-            });
-
-        bool result = dh::Reduce(cuctx->CTP(), it, it + label.Size(), true, thrust::logical_and<>{});
-        return result;
+          auto cuctx = ctx_->CUDACtx();
+          auto it = dh::MakeTransformIterator<bool>(
+              thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(std::size_t i) -> bool {
+                auto [m, n] = linalg::UnravelIndex(i, label.Shape());
+                return Loss::CheckLabel(label(m, n));
+              });
+          return dh::Reduce(cuctx->CTP(), it, it + label.Size(), true, thrust::logical_and<>{});
 #else
-        common::AssertGPUSupport();
-        return false;
-#endif  // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
-      });
-  } catch (const std::exception& e) {
-    std::cerr << "Exception during label validation: " << e.what() << std::endl;
-    valid = false;
+          common::AssertGPUSupport();
+          return false;
+#endif  // defined(XGBOOST_USE_CUDA)
+        });
+    if (!valid) {
+      LOG(FATAL) << Loss::LabelErrorMsg();
+    }
   }
-
-  if (!valid) {
-    hipError_t error = hipGetLastError();
-    LOG(FATAL) << Loss::LabelErrorMsg();
-  }
-}
 
   // 0 - scale_pos_weight, 1 - is_null_weight
   RegLossObj(): additional_input_(2) {}
@@ -646,20 +620,21 @@ class MeanAbsoluteError : public ObjFunction {
     return std::max(static_cast<std::size_t>(1), info.labels.Shape(1));
   }
 
-void GetGradient(HostDeviceVector<float> const& preds, const MetaInfo& info,
-                 std::int32_t iter, linalg::Matrix<GradientPair>* out_gpair) override {
-  try {
+  void GetGradient(HostDeviceVector<float> const& preds, const MetaInfo& info,
+                   std::int32_t /*iter*/, linalg::Matrix<GradientPair>* out_gpair) override {
     CheckRegInputs(info, preds);
     auto labels = info.labels.View(ctx_->Device());
+
     out_gpair->SetDevice(ctx_->Device());
     out_gpair->Reshape(info.num_row_, this->Targets(info));
     auto gpair = out_gpair->View(ctx_->Device());
+
     preds.SetDevice(ctx_->Device());
     auto predt = linalg::MakeTensorView(ctx_, &preds, info.num_row_, this->Targets(info));
-
     info.weights_.SetDevice(ctx_->Device());
     common::OptionalWeights weight{ctx_->IsCUDA() ? info.weights_.ConstDeviceSpan()
                                                   : info.weights_.ConstHostSpan()};
+
     linalg::ElementWiseKernel(
         ctx_, labels, [=] XGBOOST_DEVICE(std::size_t i, std::size_t j) mutable {
           auto sign = [](auto x) {
@@ -670,13 +645,7 @@ void GetGradient(HostDeviceVector<float> const& preds, const MetaInfo& info,
           auto grad = sign(predt(i, j) - y) * hess;
           gpair(i, j) = GradientPair{grad, hess};
         });
-
-  } catch (const std::exception& e) {
-    std::cerr << "Exception in GetGradient: " << e.what() << std::endl;
-    GPU_CHECK_LAST();  // Check for GPU errors in case of exception
-    throw;
   }
-}
 
   void InitEstimation(MetaInfo const& info, linalg::Tensor<float, 1>* base_margin) const override {
     CheckInitInputs(info);
@@ -742,4 +711,4 @@ void GetGradient(HostDeviceVector<float> const& preds, const MetaInfo& info,
 XGBOOST_REGISTER_OBJECTIVE(MeanAbsoluteError, "reg:absoluteerror")
     .describe("Mean absoluate error.")
     .set_body([]() { return new MeanAbsoluteError(); });
-}  // namespace xgboost::obj
+}  // namespace xgboost::obj
\ No newline at end of file
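
Note for reviewers (not part of the patch): a minimal, standalone sketch of
the validation pattern this reset restores -- a single all_of pass with the
loss-specific predicate that fails fast, instead of try/catch plus ad-hoc
isfinite filtering. CheckLabel below is a hypothetical stand-in for
Loss::CheckLabel (for squared error any finite value is a valid label), and
only the CPU branch of ValidateLabel is mirrored.

    #include <algorithm>
    #include <cmath>
    #include <iostream>
    #include <vector>

    namespace {
    // Stand-in for Loss::CheckLabel: accept any finite value.
    bool CheckLabel(float y) { return std::isfinite(y); }
    }  // namespace

    int main() {
      std::vector<float> labels{0.5f, -1.25f, 3.0f};
      // One pass over the labels; short-circuits on the first invalid value.
      bool valid = std::all_of(labels.cbegin(), labels.cend(), CheckLabel);
      if (!valid) {
        // Analogous to LOG(FATAL) << Loss::LabelErrorMsg() in the patched code.
        std::cerr << "Invalid (non-finite) label encountered.\n";
        return 1;
      }
      return 0;
    }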
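
A similar standalone sketch of the gradient the restored
MeanAbsoluteError::GetGradient computes: for L1 loss the gradient w.r.t. the
prediction is sign(pred - label) scaled by the instance weight, and the weight
is reused as the (constant) hessian slot. GradientPair and MaeGradient below
are simplified stand-ins, not the xgboost types.

    #include <cstdio>

    // Simplified stand-in for xgboost::GradientPair.
    struct GradientPair {
      float grad, hess;
    };

    // Mirrors the kernel body: grad = sign(predt(i, j) - y) * hess, hess = weight[i].
    GradientPair MaeGradient(float pred, float label, float weight) {
      float diff = pred - label;
      int sign = (diff > 0.0f) - (diff < 0.0f);  // -1, 0, or +1
      return GradientPair{sign * weight, weight};
    }

    int main() {
      GradientPair g = MaeGradient(/*pred=*/2.0f, /*label=*/3.5f, /*weight=*/1.0f);
      std::printf("grad=%.1f hess=%.1f\n", g.grad, g.hess);  // grad=-1.0 hess=1.0
      return 0;
    }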