commit 02882a4b33
parent 0d92f6ca9c

    reset regression_obj.cu
@@ -36,10 +36,6 @@
 #include "xgboost/tree_model.h" // RegTree
 
 #include "regression_param.h"
-#include <iostream>
-#include <cmath>
-#include <exception>
-#include "../common/gpu_error_check.h"
 
 #if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
 #include "../common/cuda_context.cuh" // for CUDAContext
@@ -67,53 +63,31 @@ class RegLossObj : public FitIntercept {
   HostDeviceVector<float> additional_input_;
 
  public:
   void ValidateLabel(MetaInfo const& info) {
     auto label = info.labels.View(ctx_->Device());
-    bool valid = false;
-    try {
-      valid = ctx_->DispatchDevice(
+    auto valid = ctx_->DispatchDevice(
         [&] {
           return std::all_of(linalg::cbegin(label), linalg::cend(label),
-                             [](float y) -> bool {
-                               if (!std::isfinite(y)) {
-                                 return false;
-                               }
-                               return Loss::CheckLabel(y);
-                             });
+                             [](float y) -> bool { return Loss::CheckLabel(y); });
         },
         [&] {
 #if defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
           auto cuctx = ctx_->CUDACtx();
 
           auto it = dh::MakeTransformIterator<bool>(
               thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(std::size_t i) -> bool {
                 auto [m, n] = linalg::UnravelIndex(i, label.Shape());
-                float y = label(m, n);
-                if (!isfinite(y)) {
-                  printf("Non-finite label value found on GPU: %f\n", y);
-                  return false;
-                }
-                return Loss::CheckLabel(y);
+                return Loss::CheckLabel(label(m, n));
               });
-          bool result = dh::Reduce(cuctx->CTP(), it, it + label.Size(), true, thrust::logical_and<>{});
-          return result;
+          return dh::Reduce(cuctx->CTP(), it, it + label.Size(), true, thrust::logical_and<>{});
 #else
           common::AssertGPUSupport();
           return false;
-#endif  // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
+#endif  // defined(XGBOOST_USE_CUDA)
         });
-    } catch (const std::exception& e) {
-      std::cerr << "Exception during label validation: " << e.what() << std::endl;
-      valid = false;
-    }
 
     if (!valid) {
-      hipError_t error = hipGetLastError();
       LOG(FATAL) << Loss::LabelErrorMsg();
     }
   }
   // 0 - scale_pos_weight, 1 - is_null_weight
   RegLossObj(): additional_input_(2) {}
 
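Note: the restored ValidateLabel applies a single Loss::CheckLabel predicate per label and lets the context choose the CPU or GPU branch. Below is a minimal CPU-only sketch of that dispatch-and-validate pattern; the Context, DispatchDevice, and LogisticLoss definitions here are simplified stand-ins for illustration, not XGBoost's actual types.

// CPU-only sketch of the dispatch-and-validate pattern above: one predicate
// is applied to every label, and the execution context picks the branch.
// These types are hypothetical stand-ins, not XGBoost's real API.
#include <algorithm>
#include <vector>

struct Context {
  bool is_cuda{false};
  // Hypothetical dispatcher mirroring ctx_->DispatchDevice(cpu_fn, cuda_fn).
  template <typename CpuFn, typename CudaFn>
  bool DispatchDevice(CpuFn&& cpu_fn, CudaFn&& cuda_fn) const {
    return is_cuda ? cuda_fn() : cpu_fn();
  }
};

struct LogisticLoss {
  // Logistic labels must lie in [0, 1]; NaN fails both comparisons,
  // so a separate isfinite guard is not needed for this check.
  static bool CheckLabel(float y) { return y >= 0.0f && y <= 1.0f; }
};

bool ValidateLabel(Context const& ctx, std::vector<float> const& label) {
  return ctx.DispatchDevice(
      [&] { return std::all_of(label.begin(), label.end(), LogisticLoss::CheckLabel); },
      [&] { return false; /* the device reduction would run here */ });
}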
@@ -646,20 +620,21 @@ class MeanAbsoluteError : public ObjFunction {
     return std::max(static_cast<std::size_t>(1), info.labels.Shape(1));
   }
 
   void GetGradient(HostDeviceVector<float> const& preds, const MetaInfo& info,
-                   std::int32_t iter, linalg::Matrix<GradientPair>* out_gpair) override {
-    try {
+                   std::int32_t /*iter*/, linalg::Matrix<GradientPair>* out_gpair) override {
     CheckRegInputs(info, preds);
     auto labels = info.labels.View(ctx_->Device());
 
     out_gpair->SetDevice(ctx_->Device());
     out_gpair->Reshape(info.num_row_, this->Targets(info));
     auto gpair = out_gpair->View(ctx_->Device());
 
     preds.SetDevice(ctx_->Device());
     auto predt = linalg::MakeTensorView(ctx_, &preds, info.num_row_, this->Targets(info));
 
     info.weights_.SetDevice(ctx_->Device());
     common::OptionalWeights weight{ctx_->IsCUDA() ? info.weights_.ConstDeviceSpan()
                                                   : info.weights_.ConstHostSpan()};
 
     linalg::ElementWiseKernel(
         ctx_, labels, [=] XGBOOST_DEVICE(std::size_t i, std::size_t j) mutable {
           auto sign = [](auto x) {
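Note: the weight handling above keeps one code path whether or not instance weights are supplied. A simplified sketch of that idea follows; the name matches the common::OptionalWeights class used above, but this body is an illustrative assumption, not the real implementation.

// Simplified sketch (assumed behavior, not XGBoost's implementation) of the
// optional-weight idea above: wrap a possibly empty weight array and fall
// back to 1.0f when no instance weights were provided.
#include <cstddef>
#include <vector>

struct OptionalWeights {
  std::vector<float> const& weights;  // may be empty
  float operator[](std::size_t i) const {
    return weights.empty() ? 1.0f : weights[i];
  }
};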
@@ -670,13 +645,7 @@ void GetGradient(HostDeviceVector<float> const& preds, const MetaInfo& info,
           auto grad = sign(predt(i, j) - y) * hess;
           gpair(i, j) = GradientPair{grad, hess};
         });
 
-    } catch (const std::exception& e) {
-      std::cerr << "Exception in GetGradient: " << e.what() << std::endl;
-      GPU_CHECK_LAST();  // Check for GPU errors in case of exception
-      throw;
-    }
   }
-  }
 
   void InitEstimation(MetaInfo const& info, linalg::Tensor<float, 1>* base_margin) const override {
     CheckInitInputs(info);
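Note: the per-element work in the ElementWiseKernel above reduces to the L1 subgradient sign(pred - label) scaled by the row weight, with the same weight standing in for the Hessian. A standalone sketch under those assumptions, using plain loops instead of linalg views:

// Standalone sketch of the MAE gradient computed above (assumes hess is the
// row weight, as the visible kernel suggests): the L1 (sub)gradient is
// sign(pred - label) * weight, and the weight doubles as a Hessian stand-in
// because |x| has no usable second derivative.
#include <cstddef>
#include <vector>

struct GradientPair { float grad, hess; };

std::vector<GradientPair> MaeGradient(std::vector<float> const& pred,
                                      std::vector<float> const& label,
                                      std::vector<float> const& weight) {
  auto sign = [](float x) { return static_cast<float>((x > 0.0f) - (x < 0.0f)); };
  std::vector<GradientPair> out(pred.size());
  for (std::size_t i = 0; i < pred.size(); ++i) {
    float hess = weight.empty() ? 1.0f : weight[i];  // unweighted rows count as 1
    out[i] = GradientPair{sign(pred[i] - label[i]) * hess, hess};
  }
  return out;
}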