GPU implementation of AFT survival objective and metric (#5714)

* Add interval accuracy

* De-virtualize AFT functions

* Lint

* Refactor AFT metric using GPU-CPU reducer

* Fix R build

* Fix build on Windows

* Fix copyright header

* Clang-tidy

* Fix crashing demo

* Fix typos in comment; explain GPU ID

* Remove unnecessary #include

* Add C++ test for interval accuracy

* Fix a bug in accuracy metric: use log pred

* Refactor AFT objective using GPU-CPU Transform

* Lint

* Fix lint

* Use Ninja to speed up build

* Use time, not /usr/bin/time

* Add cpu_build worker class, with concurrency = 1

* Use concurrency = 1 only for CUDA build

* concurrency = 1 for clang-tidy

* Address reviewer's feedback

* Update link to AFT paper
This commit is contained in:
Philip Hyunsu Cho
2020-07-17 01:18:13 -07:00
committed by GitHub
parent 7c2686146e
commit 71b0528a2f
20 changed files with 1050 additions and 822 deletions

View File

@@ -1,107 +0,0 @@
/*!
* Copyright 2020 by Contributors
* \file probability_distribution.cc
* \brief Implementation of a few useful probability distributions
* \author Avinash Barnwal and Hyunsu Cho
*/
#include <xgboost/logging.h>
#include <cmath>
#include "probability_distribution.h"
namespace xgboost {
namespace common {
// Factory: allocate the concrete distribution object for the requested type.
// The caller takes ownership of the returned raw pointer (wrapped in a
// unique_ptr by AFTLoss).
ProbabilityDistribution* ProbabilityDistribution::Create(ProbabilityDistributionType dist) {
switch (dist) {
case ProbabilityDistributionType::kNormal:
return new NormalDist;
case ProbabilityDistributionType::kLogistic:
return new LogisticDist;
case ProbabilityDistributionType::kExtreme:
return new ExtremeDist;
default:
// Abort on an unrecognized enum value.
LOG(FATAL) << "Unknown distribution";
}
// Unreachable after LOG(FATAL); present to satisfy the compiler's return-path check.
return nullptr;
}
// Standard normal density: phi(z) = exp(-z^2 / 2) / sqrt(2 * pi).
double NormalDist::PDF(double z) {
  return std::exp(-z * z / 2) / std::sqrt(2 * probability_constant::kPI);
}
// Standard normal CDF via the error function: Phi(z) = (1 + erf(z / sqrt(2))) / 2.
double NormalDist::CDF(double z) {
  return 0.5 * (1 + std::erf(z / std::sqrt(2)));
}
// First derivative of the normal density: phi'(z) = -z * phi(z).
double NormalDist::GradPDF(double z) {
  const double density = this->PDF(z);
  return -z * density;
}
// Second derivative of the normal density: phi''(z) = (z^2 - 1) * phi(z).
double NormalDist::HessPDF(double z) {
  return (z * z - 1) * this->PDF(z);
}
// Logistic density: f(z) = e^z / (1 + e^z)^2.
// Returns 0 once e^z (or its square) overflows to infinity, where the true value underflows.
double LogisticDist::PDF(double z) {
  const double e_z = std::exp(z);
  if (std::isinf(e_z) || std::isinf(e_z * e_z)) {
    return 0.0;
  }
  const double denom_root = 1 + e_z;
  return e_z / (denom_root * denom_root);
}
// Logistic CDF: F(z) = e^z / (1 + e^z); saturates at 1 once e^z overflows.
double LogisticDist::CDF(double z) {
  const double e_z = std::exp(z);
  if (std::isinf(e_z)) {
    return 1.0;
  }
  return e_z / (1 + e_z);
}
// First derivative of the logistic density: f'(z) = f(z) * (1 - e^z) / (1 + e^z).
// 0 in the overflow regime, matching the PDF.
double LogisticDist::GradPDF(double z) {
  const double density = this->PDF(z);
  const double e_z = std::exp(z);
  if (std::isinf(e_z)) {
    return 0.0;
  }
  return density * (1 - e_z) / (1 + e_z);
}
// Second derivative of the logistic density:
// f''(z) = f(z) * (e^{2z} - 4 e^z + 1) / (1 + e^z)^2; 0 once e^z or e^{2z} overflows.
double LogisticDist::HessPDF(double z) {
  const double density = this->PDF(z);
  const double e_z = std::exp(z);
  if (std::isinf(e_z) || std::isinf(e_z * e_z)) {
    return 0.0;
  }
  const double denom_root = 1 + e_z;
  return density * (e_z * e_z - 4 * e_z + 1) / (denom_root * denom_root);
}
// Gumbel (minimum) density: f(z) = e^z * exp(-e^z); 0 once e^z overflows.
double ExtremeDist::PDF(double z) {
  const double e_z = std::exp(z);
  return std::isinf(e_z) ? 0.0 : (e_z * std::exp(-e_z));
}
// Gumbel (minimum) CDF: F(z) = 1 - exp(-e^z).
double ExtremeDist::CDF(double z) {
  const double e_z = std::exp(z);
  return 1 - std::exp(-e_z);
}
// First derivative of the Gumbel density: f'(z) = (1 - e^z) * f(z); 0 in the overflow regime.
double ExtremeDist::GradPDF(double z) {
  const double density = this->PDF(z);
  const double e_z = std::exp(z);
  return std::isinf(e_z) ? 0.0 : ((1 - e_z) * density);
}
// Second derivative of the Gumbel density:
// f''(z) = (e^{2z} - 3 e^z + 1) * f(z); 0 once e^z or e^{2z} overflows.
double ExtremeDist::HessPDF(double z) {
  const double density = this->PDF(z);
  const double e_z = std::exp(z);
  if (std::isinf(e_z) || std::isinf(e_z * e_z)) {
    return 0.0;
  }
  return (e_z * e_z - 3 * e_z + 1) * density;
}
} // namespace common
} // namespace xgboost

View File

@@ -1,5 +1,5 @@
/*!
* Copyright 2020 by Contributors
* Copyright 2019-2020 by Contributors
* \file probability_distribution.h
* \brief Implementation of a few useful probability distributions
* \author Avinash Barnwal and Hyunsu Cho
@@ -8,85 +8,115 @@
#ifndef XGBOOST_COMMON_PROBABILITY_DISTRIBUTION_H_
#define XGBOOST_COMMON_PROBABILITY_DISTRIBUTION_H_
#include <cmath>
namespace xgboost {
namespace common {
namespace probability_constant {
#ifndef __CUDACC__
using std::exp;
using std::sqrt;
using std::isinf;
using std::isnan;
#endif // __CUDACC__
/*! \brief Constant PI */
const double kPI = 3.14159265358979323846;
constexpr double kPI = 3.14159265358979323846;
/*! \brief The Euler-Mascheroni constant */
const double kEulerMascheroni = 0.57721566490153286060651209008240243104215933593992;
} // namespace probability_constant
constexpr double kEulerMascheroni = 0.57721566490153286060651209008240243104215933593992;
/*! \brief Enum encoding possible choices of probability distribution */
enum class ProbabilityDistributionType : int {
kNormal = 0, kLogistic = 1, kExtreme = 2
};
/*! \brief Interface for a probability distribution */
class ProbabilityDistribution {
public:
/*!
* \brief Evaluate Probability Density Function (PDF) at a particular point
* \param z point at which to evaluate PDF
* \return Value of PDF evaluated
*/
virtual double PDF(double z) = 0;
/*!
* \brief Evaluate Cumulative Distribution Function (CDF) at a particular point
* \param z point at which to evaluate CDF
* \return Value of CDF evaluated
*/
virtual double CDF(double z) = 0;
/*!
* \brief Evaluate first derivative of PDF at a particular point
* \param z point at which to evaluate first derivative of PDF
* \return Value of first derivative of PDF evaluated
*/
virtual double GradPDF(double z) = 0;
/*!
* \brief Evaluate second derivative of PDF at a particular point
* \param z point at which to evaluate second derivative of PDF
* \return Value of second derivative of PDF evaluated
*/
virtual double HessPDF(double z) = 0;
struct NormalDistribution {
XGBOOST_DEVICE static double PDF(double z) {
return exp(-z * z / 2.0) / sqrt(2.0 * kPI);
}
/*!
* \brief Factory function to instantiate a new probability distribution object
* \param dist kind of probability distribution
* \return Reference to the newly created probability distribution object
*/
static ProbabilityDistribution* Create(ProbabilityDistributionType dist);
virtual ~ProbabilityDistribution() = default;
XGBOOST_DEVICE static double CDF(double z) {
return 0.5 * (1 + erf(z / sqrt(2.0)));
}
XGBOOST_DEVICE static double GradPDF(double z) {
return -z * PDF(z);
}
XGBOOST_DEVICE static double HessPDF(double z) {
return (z * z - 1.0) * PDF(z);
}
XGBOOST_DEVICE static ProbabilityDistributionType Type() {
return ProbabilityDistributionType::kNormal;
}
};
/*! \brief The (standard) normal distribution */
class NormalDist : public ProbabilityDistribution {
public:
double PDF(double z) override;
double CDF(double z) override;
double GradPDF(double z) override;
double HessPDF(double z) override;
/*! \brief The (standard) logistic distribution, with static members usable in
 *  both host and device code (XGBOOST_DEVICE annotation). */
struct LogisticDistribution {
  XGBOOST_DEVICE static double PDF(double z) {
    // f(z) = e^z / (1 + e^z)^2; collapses to 0 once e^z (or its square) overflows.
    const double e_z = exp(z);
    const double denom_root = 1 + e_z;
    return (isinf(e_z) || isinf(e_z * e_z)) ? 0.0 : (e_z / (denom_root * denom_root));
  }
  XGBOOST_DEVICE static double CDF(double z) {
    // F(z) = e^z / (1 + e^z); saturates at 1 in the overflow regime.
    const double e_z = exp(z);
    if (isinf(e_z)) {
      return 1.0;
    }
    return e_z / (1 + e_z);
  }
  XGBOOST_DEVICE static double GradPDF(double z) {
    // f'(z) = f(z) * (1 - e^z) / (1 + e^z); 0 once e^z overflows.
    const double e_z = exp(z);
    if (isinf(e_z)) {
      return 0.0;
    }
    return PDF(z) * (1 - e_z) / (1 + e_z);
  }
  XGBOOST_DEVICE static double HessPDF(double z) {
    // f''(z) = f(z) * (e^{2z} - 4 e^z + 1) / (1 + e^z)^2; 0 in the overflow regime.
    const double e_z = exp(z);
    if (isinf(e_z) || isinf(e_z * e_z)) {
      return 0.0;
    }
    const double denom_root = 1 + e_z;
    return PDF(z) * (e_z * e_z - 4 * e_z + 1) / (denom_root * denom_root);
  }
  XGBOOST_DEVICE static ProbabilityDistributionType Type() {
    return ProbabilityDistributionType::kLogistic;
  }
};
/*! \brief The (standard) logistic distribution */
class LogisticDist : public ProbabilityDistribution {
public:
double PDF(double z) override;
double CDF(double z) override;
double GradPDF(double z) override;
double HessPDF(double z) override;
};
struct ExtremeDistribution {
XGBOOST_DEVICE static double PDF(double z) {
const double w = exp(z);
return isinf(w) ? 0.0 : (w * exp(-w));
}
/*! \brief The extreme distribution, also known as the Gumbel (minimum) distribution */
class ExtremeDist : public ProbabilityDistribution {
public:
double PDF(double z) override;
double CDF(double z) override;
double GradPDF(double z) override;
double HessPDF(double z) override;
XGBOOST_DEVICE static double CDF(double z) {
const double w = exp(z);
return 1 - exp(-w);
}
XGBOOST_DEVICE static double GradPDF(double z) {
const double w = exp(z);
return isinf(w) ? 0.0 : ((1 - w) * PDF(z));
}
XGBOOST_DEVICE static double HessPDF(double z) {
const double w = exp(z);
if (isinf(w) || isinf(w * w)) {
return 0.0;
} else {
return (w * w - 3 * w + 1) * PDF(z);
}
}
XGBOOST_DEVICE static ProbabilityDistributionType Type() {
return ProbabilityDistributionType::kExtreme;
}
};
} // namespace common

View File

@@ -1,5 +1,5 @@
/*!
* Copyright 2019 by Contributors
* Copyright 2019-2020 by Contributors
* \file survival_util.cc
* \brief Utility functions, useful for implementing objective and metric functions for survival
* analysis
@@ -7,258 +7,12 @@
*/
#include <dmlc/registry.h>
#include <algorithm>
#include <cmath>
#include "survival_util.h"
/*
- Formulas are motivated from document -
http://members.cbio.mines-paristech.fr/~thocking/survival.pdf
- Detailed Derivation of Loss/Gradient/Hessian -
https://github.com/avinashbarnwal/GSOC-2019/blob/master/doc/Accelerated_Failure_Time.pdf
*/
namespace {
// Allowable range for gradient and hessian. Used for regularization
constexpr double kMinGradient = -15.0;
constexpr double kMaxGradient = 15.0;
constexpr double kMinHessian = 1e-16; // Ensure that no data point gets zero hessian
constexpr double kMaxHessian = 15.0;
constexpr double kEps = 1e-12; // A denominator in a fraction should not be too small
// Clip (limit) x to fit range [x_min, x_max].
// If x < x_min, return x_min; if x > x_max, return x_max; if x_min <= x <= x_max, return x.
// This function assumes x_min < x_max; behavior is undefined if this assumption does not hold.
inline double Clip(double x, double x_min, double x_max) {
  // Compose from std::max/std::min (equivalent to C++17 std::clamp) instead of
  // hand-rolled branches; <algorithm> is already included by this file.
  return std::max(x_min, std::min(x, x_max));
}
using xgboost::common::ProbabilityDistributionType;
enum class CensoringType : uint8_t {
kUncensored, kRightCensored, kLeftCensored, kIntervalCensored
};
using xgboost::GradientPairPrecise;
// Analytic limits of the (gradient, hessian) pair as the prediction diverges to
// +/- infinity. When the finite-precision gradient/hessian formula produces
// NaN/Inf (denominator underflow), the caller substitutes these limits.
// `sigma` is the scale parameter of the noise distribution.
// NOTE(review): `sign` is declared double but only used as a truth value
// (sign of the z-score); presumably callers pass a 0/1 flag -- confirm.
inline GradientPairPrecise GetLimitAtInfPred(ProbabilityDistributionType dist_type,
CensoringType censor_type,
double sign, double sigma) {
// Outer switch: censoring type of the data point; inner switch: noise distribution.
switch (censor_type) {
case CensoringType::kUncensored:
switch (dist_type) {
case ProbabilityDistributionType::kNormal:
return sign ? GradientPairPrecise{ kMinGradient, 1.0 / (sigma * sigma) }
: GradientPairPrecise{ kMaxGradient, 1.0 / (sigma * sigma) };
case ProbabilityDistributionType::kLogistic:
return sign ? GradientPairPrecise{ -1.0 / sigma, kMinHessian }
: GradientPairPrecise{ 1.0 / sigma, kMinHessian };
case ProbabilityDistributionType::kExtreme:
return sign ? GradientPairPrecise{ kMinGradient, kMaxHessian }
: GradientPairPrecise{ 1.0 / sigma, kMinHessian };
default:
LOG(FATAL) << "Unknown distribution type";
}
case CensoringType::kRightCensored:
switch (dist_type) {
case ProbabilityDistributionType::kNormal:
return sign ? GradientPairPrecise{ kMinGradient, 1.0 / (sigma * sigma) }
: GradientPairPrecise{ 0.0, kMinHessian };
case ProbabilityDistributionType::kLogistic:
return sign ? GradientPairPrecise{ -1.0 / sigma, kMinHessian }
: GradientPairPrecise{ 0.0, kMinHessian };
case ProbabilityDistributionType::kExtreme:
return sign ? GradientPairPrecise{ kMinGradient, kMaxHessian }
: GradientPairPrecise{ 0.0, kMinHessian };
default:
LOG(FATAL) << "Unknown distribution type";
}
case CensoringType::kLeftCensored:
switch (dist_type) {
case ProbabilityDistributionType::kNormal:
return sign ? GradientPairPrecise{ 0.0, kMinHessian }
: GradientPairPrecise{ kMaxGradient, 1.0 / (sigma * sigma) };
case ProbabilityDistributionType::kLogistic:
return sign ? GradientPairPrecise{ 0.0, kMinHessian }
: GradientPairPrecise{ 1.0 / sigma, kMinHessian };
case ProbabilityDistributionType::kExtreme:
return sign ? GradientPairPrecise{ 0.0, kMinHessian }
: GradientPairPrecise{ 1.0 / sigma, kMinHessian };
default:
LOG(FATAL) << "Unknown distribution type";
}
case CensoringType::kIntervalCensored:
switch (dist_type) {
case ProbabilityDistributionType::kNormal:
return sign ? GradientPairPrecise{ kMinGradient, 1.0 / (sigma * sigma) }
: GradientPairPrecise{ kMaxGradient, 1.0 / (sigma * sigma) };
case ProbabilityDistributionType::kLogistic:
return sign ? GradientPairPrecise{ -1.0 / sigma, kMinHessian }
: GradientPairPrecise{ 1.0 / sigma, kMinHessian };
case ProbabilityDistributionType::kExtreme:
return sign ? GradientPairPrecise{ kMinGradient, kMaxHessian }
: GradientPairPrecise{ 1.0 / sigma, kMinHessian };
default:
LOG(FATAL) << "Unknown distribution type";
}
default:
LOG(FATAL) << "Unknown censoring type";
}
// Unreachable after LOG(FATAL); keeps the return path well-formed.
return { 0.0, 0.0 };
}
} // anonymous namespace
namespace xgboost {
namespace common {
DMLC_REGISTER_PARAMETER(AFTParam);
// Negative log likelihood of the AFT model for a single data point.
// y_lower / y_upper: bounds of the label interval. Equal bounds => uncensored;
// y_upper = +inf => right-censored; y_lower = inf marker => left-censored
// (see isinf checks below); otherwise interval-censored.
// y_pred is on the log scale (it is compared against log(y)); sigma is the
// scale of the noise distribution.
double AFTLoss::Loss(double y_lower, double y_upper, double y_pred, double sigma) {
const double log_y_lower = std::log(y_lower);
const double log_y_upper = std::log(y_upper);
double cost;
if (y_lower == y_upper) { // uncensored
const double z = (log_y_lower - y_pred) / sigma;
const double pdf = dist_->PDF(z);
// Regularize the denominator with eps, to avoid INF or NAN
cost = -std::log(std::max(pdf / (sigma * y_lower), kEps));
} else { // censored; now check what type of censorship we have
double z_u, z_l, cdf_u, cdf_l;
if (std::isinf(y_upper)) { // right-censored
cdf_u = 1;
} else { // left-censored or interval-censored
z_u = (log_y_upper - y_pred) / sigma;
cdf_u = dist_->CDF(z_u);
}
if (std::isinf(y_lower)) { // left-censored
cdf_l = 0;
} else { // right-censored or interval-censored
z_l = (log_y_lower - y_pred) / sigma;
cdf_l = dist_->CDF(z_l);
}
// Censored likelihood is the probability mass in the interval: CDF(z_u) - CDF(z_l).
// Regularize the denominator with eps, to avoid INF or NAN
cost = -std::log(std::max(cdf_u - cdf_l, kEps));
}
return cost;
}
// Gradient of the AFT negative log likelihood w.r.t. y_pred (log-scale prediction).
// Falls back to an analytic limit when the finite formula over/underflows, then
// clips to [kMinGradient, kMaxGradient] for regularization.
double AFTLoss::Gradient(double y_lower, double y_upper, double y_pred, double sigma) {
const double log_y_lower = std::log(y_lower);
const double log_y_upper = std::log(y_upper);
double numerator, denominator, gradient; // numerator and denominator of gradient
CensoringType censor_type;
bool z_sign; // sign of z-score
if (y_lower == y_upper) { // uncensored
const double z = (log_y_lower - y_pred) / sigma;
const double pdf = dist_->PDF(z);
const double grad_pdf = dist_->GradPDF(z);
censor_type = CensoringType::kUncensored;
numerator = grad_pdf;
denominator = sigma * pdf;
z_sign = (z > 0);
} else { // censored; now check what type of censorship we have
double z_u = 0.0, z_l = 0.0, pdf_u, pdf_l, cdf_u, cdf_l;
censor_type = CensoringType::kIntervalCensored;
if (std::isinf(y_upper)) { // right-censored
pdf_u = 0;
cdf_u = 1;
censor_type = CensoringType::kRightCensored;
} else { // interval-censored or left-censored
z_u = (log_y_upper - y_pred) / sigma;
pdf_u = dist_->PDF(z_u);
cdf_u = dist_->CDF(z_u);
}
if (std::isinf(y_lower)) { // left-censored
pdf_l = 0;
cdf_l = 0;
censor_type = CensoringType::kLeftCensored;
} else { // interval-censored or right-censored
z_l = (log_y_lower - y_pred) / sigma;
pdf_l = dist_->PDF(z_l);
cdf_l = dist_->CDF(z_l);
}
z_sign = (z_u > 0 || z_l > 0);
numerator = pdf_u - pdf_l;
denominator = sigma * (cdf_u - cdf_l);
}
gradient = numerator / denominator;
// Vanishing denominator means the prediction diverged; substitute the analytic limit.
if (denominator < kEps && (std::isnan(gradient) || std::isinf(gradient))) {
gradient = GetLimitAtInfPred(dist_type_, censor_type, z_sign, sigma).GetGrad();
}
return Clip(gradient, kMinGradient, kMaxGradient);
}
// Hessian (second derivative) of the AFT negative log likelihood w.r.t. y_pred.
// Mirrors Gradient(): analytic-limit fallback on over/underflow, then clipping
// to [kMinHessian, kMaxHessian] so every point keeps a strictly positive hessian.
double AFTLoss::Hessian(double y_lower, double y_upper, double y_pred, double sigma) {
const double log_y_lower = std::log(y_lower);
const double log_y_upper = std::log(y_upper);
double numerator, denominator, hessian; // numerator and denominator of hessian
CensoringType censor_type;
bool z_sign; // sign of z-score
if (y_lower == y_upper) { // uncensored
const double z = (log_y_lower - y_pred) / sigma;
const double pdf = dist_->PDF(z);
const double grad_pdf = dist_->GradPDF(z);
const double hess_pdf = dist_->HessPDF(z);
censor_type = CensoringType::kUncensored;
numerator = -(pdf * hess_pdf - grad_pdf * grad_pdf);
denominator = sigma * sigma * pdf * pdf;
z_sign = (z > 0);
} else { // censored; now check what type of censorship we have
double z_u = 0.0, z_l = 0.0, grad_pdf_u, grad_pdf_l, pdf_u, pdf_l, cdf_u, cdf_l;
censor_type = CensoringType::kIntervalCensored;
if (std::isinf(y_upper)) { // right-censored
pdf_u = 0;
cdf_u = 1;
grad_pdf_u = 0;
censor_type = CensoringType::kRightCensored;
} else { // interval-censored or left-censored
z_u = (log_y_upper - y_pred) / sigma;
pdf_u = dist_->PDF(z_u);
cdf_u = dist_->CDF(z_u);
grad_pdf_u = dist_->GradPDF(z_u);
}
if (std::isinf(y_lower)) { // left-censored
pdf_l = 0;
cdf_l = 0;
grad_pdf_l = 0;
censor_type = CensoringType::kLeftCensored;
} else { // interval-censored or right-censored
z_l = (log_y_lower - y_pred) / sigma;
pdf_l = dist_->PDF(z_l);
cdf_l = dist_->CDF(z_l);
grad_pdf_l = dist_->GradPDF(z_l);
}
const double cdf_diff = cdf_u - cdf_l;
const double pdf_diff = pdf_u - pdf_l;
const double grad_diff = grad_pdf_u - grad_pdf_l;
const double sqrt_denominator = sigma * cdf_diff;
z_sign = (z_u > 0 || z_l > 0);
numerator = -(cdf_diff * grad_diff - pdf_diff * pdf_diff);
denominator = sqrt_denominator * sqrt_denominator;
}
hessian = numerator / denominator;
// Vanishing denominator means the prediction diverged; substitute the analytic limit.
if (denominator < kEps && (std::isnan(hessian) || std::isinf(hessian))) {
hessian = GetLimitAtInfPred(dist_type_, censor_type, z_sign, sigma).GetHess();
}
return Clip(hessian, kMinHessian, kMaxHessian);
}
} // namespace common
} // namespace xgboost

View File

@@ -1,5 +1,5 @@
/*!
* Copyright 2019 by Contributors
* Copyright 2019-2020 by Contributors
* \file survival_util.h
* \brief Utility functions, useful for implementing objective and metric functions for survival
* analysis
@@ -8,8 +8,16 @@
#ifndef XGBOOST_COMMON_SURVIVAL_UTIL_H_
#define XGBOOST_COMMON_SURVIVAL_UTIL_H_
/*
* For the derivation of the loss, gradient, and hessian for the Accelerated Failure Time model,
* refer to the paper "Survival regression with accelerated failure time model in XGBoost"
* at https://arxiv.org/abs/2006.04920.
*/
#include <xgboost/parameter.h>
#include <memory>
#include <algorithm>
#include <limits>
#include "probability_distribution.h"
DECLARE_FIELD_ENUM_CLASS(xgboost::common::ProbabilityDistributionType);
@@ -17,6 +25,51 @@ DECLARE_FIELD_ENUM_CLASS(xgboost::common::ProbabilityDistributionType);
namespace xgboost {
namespace common {
#ifndef __CUDACC__
using std::log;
using std::fmax;
#endif // __CUDACC__
enum class CensoringType : uint8_t {
kUncensored, kRightCensored, kLeftCensored, kIntervalCensored
};
namespace aft {
// Allowable range for gradient and hessian. Used for regularization
constexpr double kMinGradient = -15.0;
constexpr double kMaxGradient = 15.0;
constexpr double kMinHessian = 1e-16; // Ensure that no data point gets zero hessian
constexpr double kMaxHessian = 15.0;
constexpr double kEps = 1e-12; // A denominator in a fraction should not be too small
// Clip (limit) x to fit range [x_min, x_max].
// If x < x_min, return x_min; if x > x_max, return x_max; if x_min <= x <= x_max, return x.
// This function assumes x_min < x_max; behavior is undefined if this assumption does not hold.
// XGBOOST_DEVICE marks the function for compilation in both CPU and (when CUDA is
// enabled) GPU builds, per the pattern used throughout this header.
XGBOOST_DEVICE
inline double Clip(double x, double x_min, double x_max) {
if (x < x_min) {
return x_min;
}
if (x > x_max) {
return x_max;
}
return x;
}
template<typename Distribution>
XGBOOST_DEVICE inline double
GetLimitGradAtInfPred(CensoringType censor_type, bool sign, double sigma);
template<typename Distribution>
XGBOOST_DEVICE inline double
GetLimitHessAtInfPred(CensoringType censor_type, bool sign, double sigma);
} // namespace aft
/*! \brief Parameter structure for AFT loss and metric */
struct AFTParam : public XGBoostParameter<AFTParam> {
/*! \brief Choice of probability distribution for the noise term in AFT */
@@ -39,47 +92,245 @@ struct AFTParam : public XGBoostParameter<AFTParam> {
};
/*! \brief The AFT loss function */
class AFTLoss {
private:
std::unique_ptr<ProbabilityDistribution> dist_;
ProbabilityDistributionType dist_type_;
template<typename Distribution>
struct AFTLoss {
XGBOOST_DEVICE inline static
double Loss(double y_lower, double y_upper, double y_pred, double sigma) {
const double log_y_lower = log(y_lower);
const double log_y_upper = log(y_upper);
public:
/*!
* \brief Constructor for AFT loss function
* \param dist_type Choice of probability distribution for the noise term in AFT
*/
explicit AFTLoss(ProbabilityDistributionType dist_type)
: dist_(ProbabilityDistribution::Create(dist_type)),
dist_type_(dist_type) {}
double cost;
public:
/*!
* \brief Compute the AFT loss
* \param y_lower Lower bound for the true label
* \param y_upper Upper bound for the true label
* \param y_pred Predicted label
* \param sigma Scaling factor to be applied to the distribution of the noise term
*/
double Loss(double y_lower, double y_upper, double y_pred, double sigma);
/*!
* \brief Compute the gradient of the AFT loss
* \param y_lower Lower bound for the true label
* \param y_upper Upper bound for the true label
* \param y_pred Predicted label
* \param sigma Scaling factor to be applied to the distribution of the noise term
*/
double Gradient(double y_lower, double y_upper, double y_pred, double sigma);
/*!
* \brief Compute the hessian of the AFT loss
* \param y_lower Lower bound for the true label
* \param y_upper Upper bound for the true label
* \param y_pred Predicted label
* \param sigma Scaling factor to be applied to the distribution of the noise term
*/
double Hessian(double y_lower, double y_upper, double y_pred, double sigma);
if (y_lower == y_upper) { // uncensored
const double z = (log_y_lower - y_pred) / sigma;
const double pdf = Distribution::PDF(z);
// Regularize the denominator with eps, to avoid INF or NAN
cost = -log(fmax(pdf / (sigma * y_lower), aft::kEps));
} else { // censored; now check what type of censorship we have
double z_u, z_l, cdf_u, cdf_l;
if (isinf(y_upper)) { // right-censored
cdf_u = 1;
} else { // left-censored or interval-censored
z_u = (log_y_upper - y_pred) / sigma;
cdf_u = Distribution::CDF(z_u);
}
if (y_lower <= 0.0) { // left-censored
cdf_l = 0;
} else { // right-censored or interval-censored
z_l = (log_y_lower - y_pred) / sigma;
cdf_l = Distribution::CDF(z_l);
}
// Regularize the denominator with eps, to avoid INF or NAN
cost = -log(fmax(cdf_u - cdf_l, aft::kEps));
}
return cost;
}
/*!
 * \brief Gradient of the AFT negative log likelihood w.r.t. y_pred (log scale),
 *        specialized at compile time for the noise `Distribution`.
 * Left-censoring is detected by a nonpositive lower bound (log undefined there);
 * on numeric over/underflow the per-distribution analytic limit is substituted,
 * and the result is clipped for regularization.
 */
XGBOOST_DEVICE inline static
double Gradient(double y_lower, double y_upper, double y_pred, double sigma) {
const double log_y_lower = log(y_lower);
const double log_y_upper = log(y_upper);
double numerator, denominator, gradient; // numerator and denominator of gradient
CensoringType censor_type;
bool z_sign; // sign of z-score
if (y_lower == y_upper) { // uncensored
const double z = (log_y_lower - y_pred) / sigma;
const double pdf = Distribution::PDF(z);
const double grad_pdf = Distribution::GradPDF(z);
censor_type = CensoringType::kUncensored;
numerator = grad_pdf;
denominator = sigma * pdf;
z_sign = (z > 0);
} else { // censored; now check what type of censorship we have
double z_u = 0.0, z_l = 0.0, pdf_u, pdf_l, cdf_u, cdf_l;
censor_type = CensoringType::kIntervalCensored;
if (isinf(y_upper)) { // right-censored
pdf_u = 0;
cdf_u = 1;
censor_type = CensoringType::kRightCensored;
} else { // interval-censored or left-censored
z_u = (log_y_upper - y_pred) / sigma;
pdf_u = Distribution::PDF(z_u);
cdf_u = Distribution::CDF(z_u);
}
if (y_lower <= 0.0) { // left-censored
pdf_l = 0;
cdf_l = 0;
censor_type = CensoringType::kLeftCensored;
} else { // interval-censored or right-censored
z_l = (log_y_lower - y_pred) / sigma;
pdf_l = Distribution::PDF(z_l);
cdf_l = Distribution::CDF(z_l);
}
z_sign = (z_u > 0 || z_l > 0);
numerator = pdf_u - pdf_l;
denominator = sigma * (cdf_u - cdf_l);
}
gradient = numerator / denominator;
// Vanishing denominator means the prediction diverged; use the analytic limit.
if (denominator < aft::kEps && (isnan(gradient) || isinf(gradient))) {
gradient = aft::GetLimitGradAtInfPred<Distribution>(censor_type, z_sign, sigma);
}
return aft::Clip(gradient, aft::kMinGradient, aft::kMaxGradient);
}
/*!
 * \brief Hessian of the AFT negative log likelihood w.r.t. y_pred (log scale),
 *        specialized at compile time for the noise `Distribution`.
 * Mirrors Gradient(): left-censoring detected by a nonpositive lower bound,
 * analytic-limit fallback on over/underflow, then clipping so every data point
 * keeps a strictly positive hessian.
 */
XGBOOST_DEVICE inline static
double Hessian(double y_lower, double y_upper, double y_pred, double sigma) {
const double log_y_lower = log(y_lower);
const double log_y_upper = log(y_upper);
double numerator, denominator, hessian; // numerator and denominator of hessian
CensoringType censor_type;
bool z_sign; // sign of z-score
if (y_lower == y_upper) { // uncensored
const double z = (log_y_lower - y_pred) / sigma;
const double pdf = Distribution::PDF(z);
const double grad_pdf = Distribution::GradPDF(z);
const double hess_pdf = Distribution::HessPDF(z);
censor_type = CensoringType::kUncensored;
numerator = -(pdf * hess_pdf - grad_pdf * grad_pdf);
denominator = sigma * sigma * pdf * pdf;
z_sign = (z > 0);
} else { // censored; now check what type of censorship we have
double z_u = 0.0, z_l = 0.0, grad_pdf_u, grad_pdf_l, pdf_u, pdf_l, cdf_u, cdf_l;
censor_type = CensoringType::kIntervalCensored;
if (isinf(y_upper)) { // right-censored
pdf_u = 0;
cdf_u = 1;
grad_pdf_u = 0;
censor_type = CensoringType::kRightCensored;
} else { // interval-censored or left-censored
z_u = (log_y_upper - y_pred) / sigma;
pdf_u = Distribution::PDF(z_u);
cdf_u = Distribution::CDF(z_u);
grad_pdf_u = Distribution::GradPDF(z_u);
}
if (y_lower <= 0.0) { // left-censored
pdf_l = 0;
cdf_l = 0;
grad_pdf_l = 0;
censor_type = CensoringType::kLeftCensored;
} else { // interval-censored or right-censored
z_l = (log_y_lower - y_pred) / sigma;
pdf_l = Distribution::PDF(z_l);
cdf_l = Distribution::CDF(z_l);
grad_pdf_l = Distribution::GradPDF(z_l);
}
const double cdf_diff = cdf_u - cdf_l;
const double pdf_diff = pdf_u - pdf_l;
const double grad_diff = grad_pdf_u - grad_pdf_l;
const double sqrt_denominator = sigma * cdf_diff;
z_sign = (z_u > 0 || z_l > 0);
numerator = -(cdf_diff * grad_diff - pdf_diff * pdf_diff);
denominator = sqrt_denominator * sqrt_denominator;
}
hessian = numerator / denominator;
// Vanishing denominator means the prediction diverged; use the analytic limit.
if (denominator < aft::kEps && (isnan(hessian) || isinf(hessian))) {
hessian = aft::GetLimitHessAtInfPred<Distribution>(censor_type, z_sign, sigma);
}
return aft::Clip(hessian, aft::kMinHessian, aft::kMaxHessian);
}
};
namespace aft {
// Analytic limits of the AFT gradient/hessian as the prediction diverges to
// +/- infinity, specialized per noise distribution. `sign` is true when the
// z-score is positive; `sigma` is the scale parameter. Each switch covers all
// CensoringType values; the trailing NaN return is reached only on an invalid
// enum value and keeps the (device-compilable) return path well-formed.

// Normal distribution: gradient limit saturates at the clipping bounds.
template <>
XGBOOST_DEVICE inline double
GetLimitGradAtInfPred<NormalDistribution>(CensoringType censor_type, bool sign, double sigma) {
switch (censor_type) {
case CensoringType::kUncensored:
return sign ? kMinGradient : kMaxGradient;
case CensoringType::kRightCensored:
return sign ? kMinGradient : 0.0;
case CensoringType::kLeftCensored:
return sign ? 0.0 : kMaxGradient;
case CensoringType::kIntervalCensored:
return sign ? kMinGradient : kMaxGradient;
}
return std::numeric_limits<double>::quiet_NaN();
}

// Normal distribution: hessian limit is 1/sigma^2 on the informative side.
template <>
XGBOOST_DEVICE inline double
GetLimitHessAtInfPred<NormalDistribution>(CensoringType censor_type, bool sign, double sigma) {
switch (censor_type) {
case CensoringType::kUncensored:
return 1.0 / (sigma * sigma);
case CensoringType::kRightCensored:
return sign ? (1.0 / (sigma * sigma)) : kMinHessian;
case CensoringType::kLeftCensored:
return sign ? kMinHessian : (1.0 / (sigma * sigma));
case CensoringType::kIntervalCensored:
return 1.0 / (sigma * sigma);
}
return std::numeric_limits<double>::quiet_NaN();
}

// Logistic distribution: gradient limit is +/- 1/sigma.
template <>
XGBOOST_DEVICE inline double
GetLimitGradAtInfPred<LogisticDistribution>(CensoringType censor_type, bool sign, double sigma) {
switch (censor_type) {
case CensoringType::kUncensored:
return sign ? (-1.0 / sigma) : (1.0 / sigma);
case CensoringType::kRightCensored:
return sign ? (-1.0 / sigma) : 0.0;
case CensoringType::kLeftCensored:
return sign ? 0.0 : (1.0 / sigma);
case CensoringType::kIntervalCensored:
return sign ? (-1.0 / sigma) : (1.0 / sigma);
}
return std::numeric_limits<double>::quiet_NaN();
}

// Logistic distribution: hessian vanishes in the limit; return the floor value.
template <>
XGBOOST_DEVICE inline double
GetLimitHessAtInfPred<LogisticDistribution>(CensoringType censor_type, bool sign, double sigma) {
switch (censor_type) {
case CensoringType::kUncensored:
case CensoringType::kRightCensored:
case CensoringType::kLeftCensored:
case CensoringType::kIntervalCensored:
return kMinHessian;
}
return std::numeric_limits<double>::quiet_NaN();
}

// Extreme (Gumbel) distribution: asymmetric limits between the two tails.
template <>
XGBOOST_DEVICE inline double
GetLimitGradAtInfPred<ExtremeDistribution>(CensoringType censor_type, bool sign, double sigma) {
switch (censor_type) {
case CensoringType::kUncensored:
return sign ? kMinGradient : (1.0 / sigma);
case CensoringType::kRightCensored:
return sign ? kMinGradient : 0.0;
case CensoringType::kLeftCensored:
return sign ? 0.0 : (1.0 / sigma);
case CensoringType::kIntervalCensored:
return sign ? kMinGradient : (1.0 / sigma);
}
return std::numeric_limits<double>::quiet_NaN();
}

// Extreme (Gumbel) distribution: hessian saturates at the ceiling on the positive side.
template <>
XGBOOST_DEVICE inline double
GetLimitHessAtInfPred<ExtremeDistribution>(CensoringType censor_type, bool sign, double sigma) {
switch (censor_type) {
case CensoringType::kUncensored:
case CensoringType::kRightCensored:
return sign ? kMaxHessian : kMinHessian;
case CensoringType::kLeftCensored:
return kMinHessian;
case CensoringType::kIntervalCensored:
return sign ? kMaxHessian : kMinHessian;
}
return std::numeric_limits<double>::quiet_NaN();
}
} // namespace aft
} // namespace common
} // namespace xgboost

View File

@@ -1,5 +1,5 @@
/*!
* Copyright 2015-2019 by Contributors
* Copyright 2015-2020 by Contributors
* \file metric_registry.cc
* \brief Registry of objective functions.
*/
@@ -80,6 +80,7 @@ namespace metric {
// List of files that will be force linked in static links.
DMLC_REGISTRY_LINK_TAG(elementwise_metric);
DMLC_REGISTRY_LINK_TAG(multiclass_metric);
DMLC_REGISTRY_LINK_TAG(survival_metric);
DMLC_REGISTRY_LINK_TAG(rank_metric);
#ifdef XGBOOST_USE_CUDA
DMLC_REGISTRY_LINK_TAG(rank_metric_gpu);

View File

@@ -1,105 +1,11 @@
/*!
* Copyright 2019 by Contributors
* Copyright 2019-2020 by Contributors
* \file survival_metric.cc
* \brief Metrics for survival analysis
* \author Avinash Barnwal, Hyunsu Cho and Toby Hocking
*/
#include <rabit/rabit.h>
#include <xgboost/metric.h>
#include <xgboost/host_device_vector.h>
#include <dmlc/registry.h>
#include <cmath>
#include <memory>
#include <vector>
#include <limits>
#include "xgboost/json.h"
#include "../common/math.h"
#include "../common/survival_util.h"
using AFTParam = xgboost::common::AFTParam;
using AFTLoss = xgboost::common::AFTLoss;
namespace xgboost {
namespace metric {
// Tag this file; used by the forced static-link mechanism later.
DMLC_REGISTRY_FILE_TAG(survival_metric);
/*! \brief Negative log likelihood of Accelerated Failure Time model */
struct EvalAFT : public Metric {
 public:
  explicit EvalAFT(const char* param) {}

  void Configure(const Args& args) override {
    param_.UpdateAllowUnknown(args);
    loss_.reset(new AFTLoss(param_.aft_loss_distribution));
  }

  void SaveConfig(Json* p_out) const override {
    auto& out = *p_out;
    out["name"] = String(this->Name());
    out["aft_loss_param"] = ToJson(param_);
  }

  void LoadConfig(Json const& in) override {
    FromJson(in["aft_loss_param"], &param_);
  }

  /*!
   * \brief Weighted average of per-point AFT negative log likelihood.
   * \param preds Predicted labels (log scale, per AFTLoss)
   * \param info Meta info holding label interval bounds and optional weights
   * \param distributed Whether to Allreduce partial sums across workers
   */
  bst_float Eval(const HostDeviceVector<bst_float> &preds,
                 const MetaInfo &info,
                 bool distributed) override {
    CHECK_NE(info.labels_lower_bound_.Size(), 0U)
      << "y_lower cannot be empty";
    CHECK_NE(info.labels_upper_bound_.Size(), 0U)
      << "y_higher cannot be empty";
    CHECK_EQ(preds.Size(), info.labels_lower_bound_.Size());
    CHECK_EQ(preds.Size(), info.labels_upper_bound_.Size());

    /* Compute negative log likelihood for each data point and compute weighted average */
    const auto& yhat = preds.HostVector();
    const auto& y_lower = info.labels_lower_bound_.HostVector();
    const auto& y_upper = info.labels_upper_bound_.HostVector();
    const auto& weights = info.weights_.HostVector();
    const bool is_null_weight = weights.empty();
    const float aft_loss_distribution_scale = param_.aft_loss_distribution_scale;
    CHECK_LE(yhat.size(), static_cast<size_t>(std::numeric_limits<omp_ulong>::max()))
      << "yhat is too big";
    const omp_ulong nsize = static_cast<omp_ulong>(yhat.size());
    double nloglik_sum = 0.0;
    double weight_sum = 0.0;
    #pragma omp parallel for \
      shared(weights, y_lower, y_upper, yhat) reduction(+:nloglik_sum, weight_sum)
    for (omp_ulong i = 0; i < nsize; ++i) {
      // If weights are empty, data is unweighted so we use 1.0 everywhere
      const double w = is_null_weight ? 1.0 : weights[i];
      const double loss
        = loss_->Loss(y_lower[i], y_upper[i], yhat[i], aft_loss_distribution_scale);
      // BUG FIX: weight each point's loss. Previously the numerator summed raw
      // losses while the denominator summed weights, skewing the weighted mean
      // whenever non-uniform weights were supplied.
      nloglik_sum += w * loss;
      weight_sum += w;
    }

    // Aggregate partial sums across workers before taking the ratio.
    double dat[2]{nloglik_sum, weight_sum};
    if (distributed) {
      rabit::Allreduce<rabit::op::Sum>(dat, 2);
    }
    return static_cast<bst_float>(dat[0] / dat[1]);
  }

  const char* Name() const override {
    return "aft-nloglik";
  }

 private:
  AFTParam param_;                 // distribution choice and scale parameter
  std::unique_ptr<AFTLoss> loss_;  // per-point AFT loss evaluator
};
// Register the CPU AFT metric under the canonical name "aft-nloglik".
XGBOOST_REGISTER_METRIC(AFT, "aft-nloglik")
.describe("Negative log likelihood of Accelerated Failure Time model.")
.set_body([](const char* param) { return new EvalAFT(param); });
} // namespace metric
} // namespace xgboost
// Dummy file to keep the CUDA conditional compile trick.
#if !defined(XGBOOST_USE_CUDA)
#include "survival_metric.cu"
#endif // !defined(XGBOOST_USE_CUDA)

View File

@@ -0,0 +1,304 @@
/*!
* Copyright 2019-2020 by Contributors
* \file survival_metric.cu
* \brief Metrics for survival analysis
* \author Avinash Barnwal, Hyunsu Cho and Toby Hocking
*/
#include <rabit/rabit.h>
#include <dmlc/registry.h>
#include <memory>
#include <vector>
#include "xgboost/json.h"
#include "xgboost/metric.h"
#include "xgboost/host_device_vector.h"
#include "metric_common.h"
#include "../common/math.h"
#include "../common/survival_util.h"
#if defined(XGBOOST_USE_CUDA)
#include <thrust/execution_policy.h> // thrust::cuda::par
#include "../common/device_helpers.cuh"
#endif // XGBOOST_USE_CUDA
using AFTParam = xgboost::common::AFTParam;
using ProbabilityDistributionType = xgboost::common::ProbabilityDistributionType;
template <typename Distribution>
using AFTLoss = xgboost::common::AFTLoss<Distribution>;
namespace xgboost {
namespace metric {
// Tag this file; needed by the force-static-link trick used later.
DMLC_REGISTRY_FILE_TAG(survival_metric);
/*! \brief CPU/GPU reducer for element-wise survival metrics.
 *
 * EvalRow is a policy type (e.g. EvalAFTNLogLik or
 * EvalIntervalRegressionAccuracy) whose EvalRow() scores a single data
 * point. Reduce() returns the pair (sum of weighted per-row scores, sum of
 * weights) packed into a PackedReduceResult.
 */
template <typename EvalRow>
class ElementWiseSurvivalMetricsReduction {
 public:
  ElementWiseSurvivalMetricsReduction() = default;

  // Store the per-row evaluation policy. It is held by value so it can later
  // be copied into a device lambda.
  void Configure(EvalRow policy) {
    policy_ = policy;
  }

  // Host-side reduction: accumulate weighted per-row scores and the weight
  // sum with an OpenMP parallel-for.
  PackedReduceResult CpuReduceMetrics(
      const HostDeviceVector<bst_float>& weights,
      const HostDeviceVector<bst_float>& labels_lower_bound,
      const HostDeviceVector<bst_float>& labels_upper_bound,
      const HostDeviceVector<bst_float>& preds) const {
    size_t ndata = labels_lower_bound.Size();
    CHECK_EQ(ndata, labels_upper_bound.Size());

    const auto& h_labels_lower_bound = labels_lower_bound.HostVector();
    const auto& h_labels_upper_bound = labels_upper_bound.HostVector();
    const auto& h_weights = weights.HostVector();
    const auto& h_preds = preds.HostVector();

    double residue_sum = 0;
    double weights_sum = 0;
#pragma omp parallel for reduction(+: residue_sum, weights_sum) schedule(static)
    for (omp_ulong i = 0; i < ndata; ++i) {
      // Empty weight vector means unweighted data: use weight 1.0 everywhere.
      const double wt = h_weights.empty() ? 1.0 : static_cast<double>(h_weights[i]);
      residue_sum += policy_.EvalRow(
          static_cast<double>(h_labels_lower_bound[i]),
          static_cast<double>(h_labels_upper_bound[i]),
          static_cast<double>(h_preds[i])) * wt;
      weights_sum += wt;
    }
    PackedReduceResult res{residue_sum, weights_sum};
    return res;
  }

#if defined(XGBOOST_USE_CUDA)
  // Device-side reduction: a single thrust::transform_reduce produces both
  // the weighted score sum and the weight sum as a PackedReduceResult.
  PackedReduceResult DeviceReduceMetrics(
      const HostDeviceVector<bst_float>& weights,
      const HostDeviceVector<bst_float>& labels_lower_bound,
      const HostDeviceVector<bst_float>& labels_upper_bound,
      const HostDeviceVector<bst_float>& preds) {
    size_t ndata = labels_lower_bound.Size();
    CHECK_EQ(ndata, labels_upper_bound.Size());
    thrust::counting_iterator<size_t> begin(0);
    thrust::counting_iterator<size_t> end = begin + ndata;

    auto s_label_lower_bound = labels_lower_bound.DeviceSpan();
    auto s_label_upper_bound = labels_upper_bound.DeviceSpan();
    auto s_preds = preds.DeviceSpan();
    auto s_weights = weights.DeviceSpan();

    const bool is_null_weight = (weights.Size() == 0);
    // Local copy so the [=] lambda captures the policy by value rather than
    // capturing `this`.
    auto d_policy = policy_;

    dh::XGBCachingDeviceAllocator<char> alloc;
    PackedReduceResult result = thrust::transform_reduce(
        thrust::cuda::par(alloc),
        begin, end,
        [=] XGBOOST_DEVICE(size_t idx) {
          double weight = is_null_weight ? 1.0 : static_cast<double>(s_weights[idx]);
          double residue = d_policy.EvalRow(
              static_cast<double>(s_label_lower_bound[idx]),
              static_cast<double>(s_label_upper_bound[idx]),
              static_cast<double>(s_preds[idx]));
          residue *= weight;
          return PackedReduceResult{residue, weight};
        },
        PackedReduceResult(),
        thrust::plus<PackedReduceResult>());
    return result;
  }
#endif  // XGBOOST_USE_CUDA

  // Dispatch to the CPU (device < 0) or GPU reduction; the GPU path first
  // migrates all four vectors to the requested device.
  PackedReduceResult Reduce(
      int device,
      const HostDeviceVector<bst_float>& weights,
      const HostDeviceVector<bst_float>& labels_lower_bound,
      const HostDeviceVector<bst_float>& labels_upper_bound,
      const HostDeviceVector<bst_float>& preds) {
    PackedReduceResult result;
    if (device < 0) {
      result = CpuReduceMetrics(weights, labels_lower_bound, labels_upper_bound, preds);
    }
#if defined(XGBOOST_USE_CUDA)
    else {  // NOLINT
      device_ = device;
      preds.SetDevice(device_);
      labels_lower_bound.SetDevice(device_);
      labels_upper_bound.SetDevice(device_);
      weights.SetDevice(device_);

      dh::safe_cuda(cudaSetDevice(device_));
      result = DeviceReduceMetrics(weights, labels_lower_bound, labels_upper_bound, preds);
    }
#endif  // defined(XGBOOST_USE_CUDA)
    return result;
  }

 private:
  EvalRow policy_;  // per-row evaluation policy
#if defined(XGBOOST_USE_CUDA)
  int device_{-1};  // CUDA ordinal used by the most recent GPU reduction
#endif  // defined(XGBOOST_USE_CUDA)
};
/*! \brief Interval regression accuracy: fraction of predictions that fall
 *  inside their label interval [lower, upper]. */
struct EvalIntervalRegressionAccuracy {
  // This metric has no hyperparameters to parse.
  void Configure(const Args& args) {}

  const char* Name() const { return "interval-regression-accuracy"; }

  // Score one row: 1.0 when exp(log_pred) lies within the closed interval
  // [label_lower_bound, label_upper_bound], otherwise 0.0. Predictions are
  // given on the log scale; callable from both host and device.
  XGBOOST_DEVICE double EvalRow(
      double label_lower_bound, double label_upper_bound, double log_pred) const {
    const double pred = exp(log_pred);
    const bool inside = (pred >= label_lower_bound) && (pred <= label_upper_bound);
    return inside ? 1.0 : 0.0;
  }

  // Weighted average of per-row scores; fall back to the raw sum when the
  // weight sum is zero.
  static double GetFinal(double esum, double wsum) {
    if (wsum == 0) {
      return esum;
    }
    return esum / wsum;
  }
};
/*! \brief Negative log likelihood of Accelerated Failure Time model.
 *
 * Policy class plugged into ElementWiseSurvivalMetricsReduction via
 * EvalEWiseSurvivalBase. The Distribution template parameter fixes the error
 * distribution (normal, logistic, or extreme) at compile time.
 */
template <typename Distribution>
struct EvalAFTNLogLik {
  // Parse AFT hyperparameters (distribution type and scale).
  void Configure(const Args& args) {
    param_.UpdateAllowUnknown(args);
  }

  const char* Name() const {
    return "aft-nloglik";
  }

  // Per-row negative log likelihood; callable from both host and device.
  XGBOOST_DEVICE double EvalRow(
      double label_lower_bound, double label_upper_bound, double pred) const {
    return AFTLoss<Distribution>::Loss(
        label_lower_bound, label_upper_bound, pred, param_.aft_loss_distribution_scale);
  }

  // Final metric: weighted mean of per-row losses (esum already includes the
  // weights); fall back to the raw sum when the weight sum is zero.
  static double GetFinal(double esum, double wsum) {
    return wsum == 0 ? esum : esum / wsum;
  }

 private:
  AFTParam param_;  // distribution type and scale parameter
};
/*! \brief Generic element-wise survival metric driven by a Policy class.
 *
 * Policy supplies Configure(), Name(), a per-row EvalRow(), and a static
 * GetFinal() that turns the reduced (sum, weight) pair into the final value.
 */
template<typename Policy>
struct EvalEWiseSurvivalBase : public Metric {
  EvalEWiseSurvivalBase() = default;

  // Configure the policy, pick up the GPU ID, and hand the configured policy
  // to the reducer. The GPU ID arrives through the args — it is injected by
  // AFTNLogLikDispatcher for the AFT metric (see its Configure) — because
  // tparam_ is not propagated to this object.
  void Configure(const Args& args) override {
    policy_.Configure(args);
    for (const auto& e : args) {
      if (e.first == "gpu_id") {
        device_ = dmlc::ParseSignedInt<int>(e.second.c_str(), nullptr, 10);
      }
    }
    reducer_.Configure(policy_);
  }

  // Reduce per-row scores on CPU or GPU, all-reduce the (sum, weight) pair
  // across workers in distributed mode, then let the policy finalize.
  bst_float Eval(const HostDeviceVector<bst_float>& preds,
                 const MetaInfo& info,
                 bool distributed) override {
    CHECK_NE(info.labels_lower_bound_.Size(), 0U)
      << "labels_lower_bound cannot be empty";
    CHECK_NE(info.labels_upper_bound_.Size(), 0U)
      << "labels_upper_bound cannot be empty";
    CHECK_EQ(preds.Size(), info.labels_lower_bound_.Size());
    CHECK_EQ(preds.Size(), info.labels_upper_bound_.Size());

    auto result = reducer_.Reduce(
        device_, info.weights_, info.labels_lower_bound_, info.labels_upper_bound_, preds);

    double dat[2] {result.Residue(), result.Weights()};
    if (distributed) {
      rabit::Allreduce<rabit::op::Sum>(dat, 2);
    }
    return static_cast<bst_float>(Policy::GetFinal(dat[0], dat[1]));
  }

  const char* Name() const override {
    return policy_.Name();
  }

 private:
  Policy policy_;
  ElementWiseSurvivalMetricsReduction<Policy> reducer_;
  int device_{-1};  // used only for GPU metric
};
// This class exists because we want to perform dispatch according to the distribution type at
// configuration time, not at prediction time.
struct AFTNLogLikDispatcher : public Metric {
  const char* Name() const override {
    return "aft-nloglik";
  }

  // Forward evaluation to the distribution-specialized inner metric.
  bst_float Eval(const HostDeviceVector<bst_float>& preds,
                 const MetaInfo& info,
                 bool distributed) override {
    CHECK(metric_) << "AFT metric must be configured first, with distribution type and scale";
    return metric_->Eval(preds, info, distributed);
  }

  // Instantiate the inner metric specialized for the configured distribution
  // type, then configure it, forwarding the GPU ID from the learner params.
  void Configure(const Args& args) override {
    param_.UpdateAllowUnknown(args);
    switch (param_.aft_loss_distribution) {
    case common::ProbabilityDistributionType::kNormal:
      metric_.reset(new EvalEWiseSurvivalBase<EvalAFTNLogLik<common::NormalDistribution>>());
      break;
    case common::ProbabilityDistributionType::kLogistic:
      metric_.reset(new EvalEWiseSurvivalBase<EvalAFTNLogLik<common::LogisticDistribution>>());
      break;
    case common::ProbabilityDistributionType::kExtreme:
      metric_.reset(new EvalEWiseSurvivalBase<EvalAFTNLogLik<common::ExtremeDistribution>>());
      break;
    default:
      LOG(FATAL) << "Unknown probability distribution";
    }
    Args new_args{args};
    // tparam_ doesn't get propagated to the inner metric object because we didn't use
    // Metric::Create(). I don't think it's a good idea to pollute the metric registry with
    // specialized versions of the AFT metric, so as a work-around, manually pass the GPU ID
    // into the inner metric via configuration.
    new_args.emplace_back("gpu_id", std::to_string(tparam_->gpu_id));
    metric_->Configure(new_args);
  }

  void SaveConfig(Json* p_out) const override {
    auto& out = *p_out;
    out["name"] = String(this->Name());
    out["aft_loss_param"] = ToJson(param_);
  }

  void LoadConfig(const Json& in) override {
    FromJson(in["aft_loss_param"], &param_);
  }

 private:
  AFTParam param_;                  // distribution type and scale parameter
  std::unique_ptr<Metric> metric_;  // distribution-specialized implementation
};
// Register the dispatching AFT metric under the canonical name "aft-nloglik".
XGBOOST_REGISTER_METRIC(AFTNLogLik, "aft-nloglik")
.describe("Negative log likelihood of Accelerated Failure Time model.")
.set_body([](const char* param) {
  return new AFTNLogLikDispatcher();
});

// Register the interval accuracy metric. Was previously registered with an
// empty description; give it one for consistency with the sibling metric.
XGBOOST_REGISTER_METRIC(IntervalRegressionAccuracy, "interval-regression-accuracy")
.describe("Fraction of data points whose predicted labels fall inside the label interval.")
.set_body([](const char* param) {
  return new EvalEWiseSurvivalBase<EvalIntervalRegressionAccuracy>();
});
} // namespace metric
} // namespace xgboost

View File

@@ -1,116 +1,21 @@
/*!
 * Copyright 2015-2020 by Contributors
 * \file aft_obj.cc
 * \brief Definition of AFT loss for survival analysis.
 * \author Avinash Barnwal, Hyunsu Cho and Toby Hocking
 */
#include <dmlc/omp.h>
#include <xgboost/logging.h>
#include <xgboost/objective.h>
#include <vector>
#include <limits>
#include <algorithm>
#include <memory>
#include <utility>
#include <cmath>
#include "xgboost/json.h"
#include "../common/math.h"
#include "../common/random.h"
#include "../common/survival_util.h"
using AFTParam = xgboost::common::AFTParam;
using AFTLoss = xgboost::common::AFTLoss;
// Dummy file to keep the CUDA conditional compile trick.
#include <dmlc/registry.h>
namespace xgboost {
namespace obj {
DMLC_REGISTRY_FILE_TAG(aft_obj);
/*! \brief CPU implementation of the AFT (Accelerated Failure Time) objective. */
class AFTObj : public ObjFunction {
 public:
  // Parse AFT hyperparameters and rebuild the loss object for the configured
  // distribution.
  void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
    param_.UpdateAllowUnknown(args);
    loss_.reset(new AFTLoss(param_.aft_loss_distribution));
  }

  // Compute per-row (gradient, hessian) pairs of the AFT loss on the host.
  void GetGradient(const HostDeviceVector<bst_float>& preds,
                   const MetaInfo& info,
                   int iter,
                   HostDeviceVector<GradientPair>* out_gpair) override {
    /* Boilerplate */
    CHECK_EQ(preds.Size(), info.labels_lower_bound_.Size());
    CHECK_EQ(preds.Size(), info.labels_upper_bound_.Size());

    const auto& yhat = preds.HostVector();
    const auto& y_lower = info.labels_lower_bound_.HostVector();
    const auto& y_upper = info.labels_upper_bound_.HostVector();
    const auto& weights = info.weights_.HostVector();
    const bool is_null_weight = weights.empty();
    if (!is_null_weight) {
      // Guard against out-of-bounds reads of the weight vector below.
      CHECK_EQ(weights.size(), yhat.size())
          << "Number of weights should be equal to number of data points.";
    }
    out_gpair->Resize(yhat.size());
    std::vector<GradientPair>& gpair = out_gpair->HostVector();
    // The row count must fit in the OpenMP loop index type.
    CHECK_LE(yhat.size(), static_cast<size_t>(std::numeric_limits<omp_ulong>::max()))
      << "yhat is too big";
    const omp_ulong nsize = static_cast<omp_ulong>(yhat.size());
    const float aft_loss_distribution_scale = param_.aft_loss_distribution_scale;
#pragma omp parallel for \
  shared(weights, y_lower, y_upper, yhat, gpair)
    for (omp_ulong i = 0; i < nsize; ++i) {
      // If weights are empty, data is unweighted so we use 1.0 everywhere
      const double w = is_null_weight ? 1.0 : weights[i];
      const double grad = loss_->Gradient(y_lower[i], y_upper[i],
                                          yhat[i], aft_loss_distribution_scale);
      const double hess = loss_->Hessian(y_lower[i], y_upper[i],
                                         yhat[i], aft_loss_distribution_scale);
      gpair[i] = GradientPair(grad * w, hess * w);
    }
  }

  void PredTransform(HostDeviceVector<bst_float> *io_preds) override {
    // Trees give us a prediction in log scale, so exponentiate
    std::vector<bst_float> &preds = io_preds->HostVector();
    const long ndata = static_cast<long>(preds.size()); // NOLINT(*)
#pragma omp parallel for shared(preds)
    for (long j = 0; j < ndata; ++j) {  // NOLINT(*)
      preds[j] = std::exp(preds[j]);
    }
  }

  void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
    // do nothing here, since the AFT metric expects untransformed prediction score
  }

  // Inverse of PredTransform for the base score: margins live in log space.
  bst_float ProbToMargin(bst_float base_score) const override {
    return std::log(base_score);
  }

  const char* DefaultEvalMetric() const override {
    return "aft-nloglik";
  }

  void SaveConfig(Json* p_out) const override {
    auto& out = *p_out;
    out["name"] = String("survival:aft");
    out["aft_loss_param"] = ToJson(param_);
  }

  void LoadConfig(Json const& in) override {
    FromJson(in["aft_loss_param"], &param_);
    loss_.reset(new AFTLoss(param_.aft_loss_distribution));
  }

 private:
  AFTParam param_;                 // distribution type and scale parameter
  std::unique_ptr<AFTLoss> loss_;  // gradient/hessian evaluator
};
// Register the AFT objective under the name "survival:aft".
XGBOOST_REGISTER_OBJECTIVE(AFTObj, "survival:aft")
.describe("AFT loss function")
.set_body([]() { return new AFTObj(); });
} // namespace obj
} // namespace xgboost
#ifndef XGBOOST_USE_CUDA
#include "aft_obj.cu"
#endif // XGBOOST_USE_CUDA

147
src/objective/aft_obj.cu Normal file
View File

@@ -0,0 +1,147 @@
/*!
* Copyright 2019-2020 by Contributors
* \file aft_obj.cu
* \brief Definition of AFT loss for survival analysis.
* \author Avinash Barnwal, Hyunsu Cho and Toby Hocking
*/
#include <vector>
#include <limits>
#include <memory>
#include <utility>
#include "xgboost/host_device_vector.h"
#include "xgboost/json.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/logging.h"
#include "xgboost/objective.h"
#include "../common/transform.h"
#include "../common/survival_util.h"
using AFTParam = xgboost::common::AFTParam;
using ProbabilityDistributionType = xgboost::common::ProbabilityDistributionType;
template <typename Distribution>
using AFTLoss = xgboost::common::AFTLoss<Distribution>;
namespace xgboost {
namespace obj {
#if defined(XGBOOST_USE_CUDA)
DMLC_REGISTRY_FILE_TAG(aft_obj_gpu);
#endif // defined(XGBOOST_USE_CUDA)
/*! \brief AFT (Accelerated Failure Time) objective; runs on CPU or GPU via
 *  the common::Transform helper. */
class AFTObj : public ObjFunction {
 public:
  // Parse AFT hyperparameters (distribution type and scale).
  void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
    param_.UpdateAllowUnknown(args);
  }

  // Shared CPU/GPU gradient kernel. Distribution is fixed at compile time so
  // the per-row Gradient/Hessian calls can be inlined into the lambda.
  template <typename Distribution>
  void GetGradientImpl(const HostDeviceVector<bst_float> &preds,
                       const MetaInfo &info,
                       HostDeviceVector<GradientPair> *out_gpair,
                       size_t ndata, int device, bool is_null_weight,
                       float aft_loss_distribution_scale) {
    common::Transform<>::Init(
      [=] XGBOOST_DEVICE(size_t _idx,
                         common::Span<GradientPair> _out_gpair,
                         common::Span<const bst_float> _preds,
                         common::Span<const bst_float> _labels_lower_bound,
                         common::Span<const bst_float> _labels_upper_bound,
                         common::Span<const bst_float> _weights) {
        const double pred = static_cast<double>(_preds[_idx]);
        const double label_lower_bound = static_cast<double>(_labels_lower_bound[_idx]);
        const double label_upper_bound = static_cast<double>(_labels_upper_bound[_idx]);
        const float grad = static_cast<float>(
            AFTLoss<Distribution>::Gradient(label_lower_bound, label_upper_bound,
                                            pred, aft_loss_distribution_scale));
        const float hess = static_cast<float>(
            AFTLoss<Distribution>::Hessian(label_lower_bound, label_upper_bound,
                                           pred, aft_loss_distribution_scale));
        // Empty weight vector means unweighted data: use weight 1.0.
        const bst_float w = is_null_weight ? 1.0f : _weights[_idx];
        _out_gpair[_idx] = GradientPair(grad * w, hess * w);
      },
      common::Range{0, static_cast<int64_t>(ndata)}, device).Eval(
          out_gpair, &preds, &info.labels_lower_bound_, &info.labels_upper_bound_,
          &info.weights_);
  }

  // Validate inputs, then dispatch on the configured distribution type at
  // compile time via GetGradientImpl<Distribution>.
  void GetGradient(const HostDeviceVector<bst_float>& preds,
                   const MetaInfo& info,
                   int iter,
                   HostDeviceVector<GradientPair>* out_gpair) override {
    const size_t ndata = preds.Size();
    CHECK_EQ(info.labels_lower_bound_.Size(), ndata);
    CHECK_EQ(info.labels_upper_bound_.Size(), ndata);
    out_gpair->Resize(ndata);
    const int device = tparam_->gpu_id;
    const float aft_loss_distribution_scale = param_.aft_loss_distribution_scale;
    const bool is_null_weight = info.weights_.Size() == 0;
    if (!is_null_weight) {
      CHECK_EQ(info.weights_.Size(), ndata)
          << "Number of weights should be equal to number of data points.";
    }
    switch (param_.aft_loss_distribution) {
    case common::ProbabilityDistributionType::kNormal:
      GetGradientImpl<common::NormalDistribution>(preds, info, out_gpair, ndata, device,
                                                  is_null_weight, aft_loss_distribution_scale);
      break;
    case common::ProbabilityDistributionType::kLogistic:
      GetGradientImpl<common::LogisticDistribution>(preds, info, out_gpair, ndata, device,
                                                    is_null_weight, aft_loss_distribution_scale);
      break;
    case common::ProbabilityDistributionType::kExtreme:
      GetGradientImpl<common::ExtremeDistribution>(preds, info, out_gpair, ndata, device,
                                                   is_null_weight, aft_loss_distribution_scale);
      break;
    default:
      LOG(FATAL) << "Unrecognized distribution";
    }
  }

  void PredTransform(HostDeviceVector<bst_float> *io_preds) override {
    // Trees give us a prediction in log scale, so exponentiate
    common::Transform<>::Init(
        [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
          _preds[_idx] = exp(_preds[_idx]);
        }, common::Range{0, static_cast<int64_t>(io_preds->Size())},
        tparam_->gpu_id)
        .Eval(io_preds);
  }

  void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
    // do nothing here, since the AFT metric expects untransformed prediction score
  }

  // Inverse of PredTransform for the base score: margins live in log space.
  bst_float ProbToMargin(bst_float base_score) const override {
    return std::log(base_score);
  }

  const char* DefaultEvalMetric() const override {
    return "aft-nloglik";
  }

  void SaveConfig(Json* p_out) const override {
    auto& out = *p_out;
    out["name"] = String("survival:aft");
    out["aft_loss_param"] = ToJson(param_);
  }

  void LoadConfig(Json const& in) override {
    FromJson(in["aft_loss_param"], &param_);
  }

 private:
  AFTParam param_;  // distribution type and scale parameter
};
// Register the AFT objective under the name "survival:aft".
XGBOOST_REGISTER_OBJECTIVE(AFTObj, "survival:aft")
.describe("AFT loss function")
.set_body([]() { return new AFTObj(); });
} // namespace obj
} // namespace xgboost