cleanup of evaluation metrics, move C++11 code into sample.h for backup, add lambda in a clean way later

tqchen 2014-05-01 11:00:50 -07:00
parent cce96e8f41
commit 439d4725a0
4 changed files with 279 additions and 329 deletions


@ -13,8 +13,6 @@
#include "../utils/xgboost_omp.h"
#include "../utils/xgboost_random.h"
#include "xgboost_regrank_data.h"
#include <functional>
#include <tuple>
namespace xgboost{
namespace regrank{
@ -36,6 +34,9 @@ namespace xgboost{
inline static bool CmpFirst(const std::pair<float, unsigned> &a, const std::pair<float, unsigned> &b){
return a.first > b.first;
}
inline static bool CmpSecond(const std::pair<float, unsigned> &a, const std::pair<float, unsigned> &b){
return a.second > b.second;
}
/*! \brief RMSE */
struct EvalRMSE : public IEvaluator{
@ -43,7 +44,7 @@ namespace xgboost{
const DMatrix::Info &info) const {
const unsigned ndata = static_cast<unsigned>(preds.size());
float sum = 0.0, wsum = 0.0;
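// weighted RMSE: the loop accumulates w_i * (label_i - pred_i)^2 into sum and w_i into wsum;
// the reported value is presumably sqrt(sum / wsum)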
#pragma omp parallel for reduction(+:sum,wsum) schedule( static )
for (unsigned i = 0; i < ndata; ++i){
const float wt = info.GetWeight(i);
const float diff = info.labels[i] - preds[i];
@ -63,7 +64,7 @@ namespace xgboost{
const DMatrix::Info &info) const {
const unsigned ndata = static_cast<unsigned>(preds.size());
float sum = 0.0f, wsum = 0.0f;
#pragma omp parallel for reduction(+:sum,wsum) schedule( static )
for (unsigned i = 0; i < ndata; ++i){
const float y = info.labels[i];
const float py = preds[i];
@ -84,7 +85,7 @@ namespace xgboost{
const DMatrix::Info &info) const {
const unsigned ndata = static_cast<unsigned>(preds.size());
float sum = 0.0f, wsum = 0.0f;
#pragma omp parallel for reduction(+:sum,wsum) schedule( static )
for (unsigned i = 0; i < ndata; ++i){
const float wt = info.GetWeight(i);
if (preds[i] > 0.5f){
@ -112,11 +113,11 @@ namespace xgboost{
const unsigned ngroup = static_cast<unsigned>(gptr.size() - 1);
double sum_auc = 0.0f;
#pragma omp parallel reduction(+:sum_auc)
{
// each thread takes a local rec
std::vector< std::pair<float, unsigned> > rec;
#pragma omp for schedule(static)
for (unsigned k = 0; k < ngroup; ++k){
rec.clear();
for (unsigned j = gptr[k]; j < gptr[k + 1]; ++j){
@ -153,142 +154,109 @@ namespace xgboost{
}
};
/*! \brief Precision at N, for both classification and rank */
struct EvalPrecision : public IEvaluator{
unsigned topn_;
std::string name_;
EvalPrecision(const char *name){
name_ = name;
utils::Assert(sscanf(name, "pre@%u", &topn_));
}
/*! \brief Evaluate rank list */
struct EvalRankList : public IEvaluator{
public:
virtual float Eval(const std::vector<float> &preds,
const DMatrix::Info &info) const {
const std::vector<unsigned> &gptr = info.group_ptr;
utils::Assert(gptr.size() != 0 && gptr.back() == preds.size(), "EvalRankList: group structure must match the number of predictions");
const unsigned ngroup = static_cast<unsigned>(gptr.size() - 1);
double sum_pre = 0.0f;
#pragma omp parallel reduction(+:sum_pre)
double sum_metric = 0.0f;
#pragma omp parallel reduction(+:sum_metric)
{
// each thread takes a local rec
std::vector< std::pair<float, unsigned> > rec;
#pragma omp for schedule(static)
for (unsigned k = 0; k < ngroup; ++k){
rec.clear();
for (unsigned j = gptr[k]; j < gptr[k + 1]; ++j){
rec.push_back(std::make_pair(preds[j], (int)info.labels[j]));
}
std::sort(rec.begin(), rec.end(), CmpFirst);
// calculate Precision
unsigned nhit = 0;
for (size_t j = 0; j < rec.size() && j < topn_; ++j){
nhit += rec[j].second;
}
sum_pre += ((float)nhit) / topn_;
sum_metric += this->EvalMetric( rec );
}
}
return static_cast<float>(sum_pre) / ngroup;
return static_cast<float>(sum_metric) / ngroup;
}
virtual const char *Name(void) const{
return name_.c_str();
}
protected:
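// the metric name is expected in the form "<metric>@N" (e.g. "pre@5"): the numeric suffix
// becomes the cutoff topn_; a name without an "@N" suffix leaves topn_ = UINT_MAX, i.e. no truncation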
EvalRankList(const char *name){
name_ = name;
if( sscanf(name, "%*[^@]@%u", &topn_) != 1 ){
topn_ = UINT_MAX;
}
}
/*! \return evaluation metric, given the pair_sort record, (pred,label) */
virtual float EvalMetric( std::vector< std::pair<float, unsigned> > &pair_sort ) const = 0;
protected:
unsigned topn_;
std::string name_;
};
/*! \brief Normalized DCG */
class EvalNDCG : public IEvaluator {
/*! \brief Precision at N, for both classification and rank */
struct EvalPrecision : public EvalRankList{
public:
virtual float Eval(const std::vector<float> &preds,
const DMatrix::Info &info) const{
if (info.group_ptr.size() <= 1) return 0;
float acc = 0;
std::vector< std::pair<float, float> > pairs_sort;
for (int i = 0; i < info.group_ptr.size() - 1; i++){
for (int j = info.group_ptr[i]; j < info.group_ptr[i + 1]; j++){
pairs_sort.push_back(std::make_pair(preds[j], info.labels[j]));
EvalPrecision(const char *name):EvalRankList(name){}
protected:
virtual float EvalMetric( std::vector< std::pair<float, unsigned> > &rec ) const {
// calculate Precision
std::sort(rec.begin(), rec.end(), CmpFirst);
unsigned nhit = 0;
for (size_t j = 0; j < rec.size() && j < this->topn_; ++j){
nhit += (rec[j].second != 0 );
}
acc += NDCG(pairs_sort);
}
return acc / (info.group_ptr.size() - 1);
}
static float DCG(const std::vector<float> &labels){
float ans = 0.0;
for (int i = 0; i < labels.size(); i++){
ans += (pow(2, labels[i]) - 1) / log(i + 2);
}
return ans;
}
virtual const char *Name(void) const {
return "NDCG";
}
private:
/*\brief Obtain NDCG given the list of labels and predictions
* \param pairs_sort the first field is prediction and the second is label
*/
float NDCG(std::vector< std::pair<float, float> > pairs_sort) const{
std::sort(pairs_sort.begin(), pairs_sort.end(), [](std::pair<float, float> a, std::pair<float, float> b){
return std::get<0>(a) > std::get<0>(b);
});
float dcg = DCG(pairs_sort);
std::sort(pairs_sort.begin(), pairs_sort.end(), [](std::pair<float, float> a, std::pair<float, float> b){
return std::get<1>(a) > std::get<1>(b);
});
float IDCG = DCG(pairs_sort);
if (IDCG == 0) return 0;
return dcg / IDCG;
}
float DCG(std::vector< std::pair<float, float> > pairs_sort) const{
std::vector<float> labels;
for (int i = 1; i < pairs_sort.size(); i++){
labels.push_back(std::get<1>(pairs_sort[i]));
}
return DCG(labels);
return static_cast<float>( nhit ) / topn_;
}
};
/*! \brief Mean Average Precision */
class EvalMAP : public IEvaluator {
/*! \brief NDCG */
struct EvalNDCG : public EvalRankList{
public:
virtual float Eval(const std::vector<float> &preds,
const DMatrix::Info &info) const{
if (info.group_ptr.size() <= 1) return 0;
float acc = 0;
std::vector<std::pair<float,float>> pairs_sort;
for (int i = 0; i < info.group_ptr.size() - 1; i++){
for (int j = info.group_ptr[i]; j < info.group_ptr[i + 1]; j++){
pairs_sort.push_back(std::make_pair(preds[j], info.labels[j]));
EvalNDCG(const char *name):EvalRankList(name){}
protected:
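// DCG over the first topn_ entries of rec (the caller sorts rec beforehand):
// DCG = sum_i (2^rel_i - 1) / log2(i + 2), written below as log(2) * (2^rel - 1) / log(i + 2)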
inline float CalcDCG( const std::vector< std::pair<float,unsigned> > &rec ) const {
double sumdcg = 0.0;
for( size_t i = 0; i < rec.size() && i < this->topn_; i ++ ){
const unsigned rel = rec[i].second;
if( rel != 0 ){
sumdcg += logf( 2.0f ) * ((1<<rel)-1) / logf( i + 2.0f );
}
acc += average_precision(pairs_sort);
}
return acc / (info.group_ptr.size() - 1);
return static_cast<float>(sumdcg);
}
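// NDCG = DCG of the prediction-sorted list divided by the DCG of the ideal (label-sorted)
// ordering; a group without any relevant document evaluates to 0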
virtual float EvalMetric( std::vector< std::pair<float, unsigned> > &rec ) const {
std::sort(rec.begin(), rec.end(), CmpFirst);
float dcg = this->CalcDCG(rec);
std::sort(rec.begin(), rec.end(), CmpSecond);
float idcg = this->CalcDCG(rec);
if( idcg == 0.0f ) return 0.0f;
else return dcg/idcg;
}
};
virtual const char *Name(void) const {
return "MAP";
}
private:
/*\brief Obtain average precision given the list of labels and predictions
* \param pairs_sort the first field is prediction and the second is label
*/
float average_precision(std::vector< std::pair<float,float> > pairs_sort) const{
std::sort(pairs_sort.begin(), pairs_sort.end(), [](std::pair<float, float> a, std::pair<float, float> b){
return std::get<0>(a) > std::get<0>(b);
});
float hits = 0;
float average_precision = 0;
for (int j = 0; j < pairs_sort.size(); j++){
if (std::get<1>(pairs_sort[j]) == 1){
hits++;
average_precision += hits / (j + 1);
/*! \brief Mean Average Precision (MAP), for both classification and rank */
struct EvalMAP : public EvalRankList{
public:
EvalMAP(const char *name):EvalRankList(name){}
protected:
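// average precision per group: for every relevant document ranked inside the topn_ cutoff,
// add precision at its rank (relevant docs seen so far / rank), then divide by the total
// number of relevant documents in the group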
virtual float EvalMetric( std::vector< std::pair<float, unsigned> > &rec ) const {
std::sort(rec.begin(), rec.end(), CmpFirst);
unsigned nhits = 0;
double sumap = 0.0;
for( size_t i = 0; i < rec.size(); ++i){
if( rec[i].second != 0 ){
nhits += 1;
if( i < this->topn_ ){
sumap += static_cast<float>(nhits) / (i+1);
}
}
if (hits != 0) average_precision /= hits;
return average_precision;
}
if (nhits != 0) sumap /= nhits;
return static_cast<float>(sumap);
}
};
};
@ -306,6 +274,8 @@ namespace xgboost{
if (!strcmp(name, "logloss")) evals_.push_back(new EvalLogLoss());
if (!strcmp(name, "auc")) evals_.push_back(new EvalAuc());
if (!strncmp(name, "pre@", 4)) evals_.push_back(new EvalPrecision(name));
if (!strncmp(name, "map", 3)) evals_.push_back(new EvalMAP(name));
if (!strncmp(name, "ndcg", 3)) evals_.push_back(new EvalNDCG(name));
}
~EvalSet(){
for (size_t i = 0; i < evals_.size(); ++i){


@ -5,10 +5,9 @@
* \brief implementation of objective functions
* \author Tianqi Chen, Kailong Chen
*/
#include "xgboost_regrank_sample.h"
#include <tuple>
//#include "xgboost_regrank_sample.h"
#include <vector>
#include <functional>
namespace xgboost{
namespace regrank{
class RegressionObj : public IObjFunction{
@ -206,208 +205,5 @@ namespace xgboost{
LossType loss;
};
};
namespace regrank{
// simple pairwise rank
class LambdaRankObj : public IObjFunction{
public:
LambdaRankObj(void){}
virtual ~LambdaRankObj(){}
virtual void SetParam(const char *name, const char *val){
if (!strcmp("loss_type", name)) loss_.loss_type = atoi(val);
if (!strcmp("sampler", name)) sampler_.AssignSampler(atoi(val));
if (!strcmp("lambda", name)) lambda_ = atoi(val);
}
virtual void GetGradient(const std::vector<float>& preds,
const DMatrix::Info &info,
int iter,
std::vector<float> &grad,
std::vector<float> &hess) {
grad.resize(preds.size()); hess.resize(preds.size());
const std::vector<unsigned> &group_index = info.group_ptr;
utils::Assert(group_index.size() != 0 && group_index.back() == preds.size(), "rank loss must have group file");
for (int i = 0; i < group_index.size() - 1; i++){
sample::Pairs pairs = sampler_.GenPairs(preds, info.labels, group_index[i], group_index[i + 1]);
//pairs.GetPairs()
std::vector< std::tuple<float, float, int> > sorted_triple = GetSortedTuple(preds, info.labels, group_index, i);
std::vector<int> index_remap = GetIndexMap(sorted_triple, group_index[i]);
GetGroupGradient(preds, info.labels, group_index,
grad, hess, sorted_triple, index_remap, pairs, i);
}
}
virtual const char* DefaultEvalMetric(void) {
return "auc";
}
private:
int lambda_;
const static int PAIRWISE = 0;
const static int MAP = 1;
const static int NDCG = 2;
sample::PairSamplerWrapper sampler_;
LossType loss_;
/* \brief Sort the tuples of a group by prediction; the fields of the returned
* tuples are, in order, prediction, label, and the index of the instance
*/
inline std::vector< std::tuple<float, float, int> > GetSortedTuple(const std::vector<float> &preds,
const std::vector<float> &labels,
const std::vector<unsigned> &group_index,
int group){
std::vector< std::tuple<float, float, int> > sorted_triple;
for (int j = group_index[group]; j < group_index[group + 1]; j++){
sorted_triple.push_back(std::tuple<float, float, int>(preds[j], labels[j], j));
}
std::sort(sorted_triple.begin(), sorted_triple.end(),
[](std::tuple<float, float, int> a, std::tuple<float, float, int> b){
return std::get<0>(a) > std::get<0>(b);
});
return sorted_triple;
}
inline std::vector<int> GetIndexMap(std::vector< std::tuple<float, float, int> > sorted_triple, int start){
std::vector<int> index_remap;
index_remap.resize(sorted_triple.size());
for (int i = 0; i < sorted_triple.size(); i++){
index_remap[std::get<2>(sorted_triple[i]) - start] = i;
}
return index_remap;
}
inline float GetLambdaMAP(const std::vector< std::tuple<float, float, int> > sorted_triple,
int index1, int index2,
std::vector< std::tuple<float, float, float, float> > map_acc){
if (index1 > index2) std::swap(index1, index2);
float original = std::get<0>(map_acc[index2]);
if (index1 != 0) original -= std::get<0>(map_acc[index1 - 1]);
float changed = 0;
if (std::get<1>(sorted_triple[index1]) < std::get<1>(sorted_triple[index2])){
changed += std::get<2>(map_acc[index2 - 1]) - std::get<2>(map_acc[index1]);
changed += (std::get<3>(map_acc[index1])+ 1.0f) / (index1 + 1);
}
else{
changed += std::get<1>(map_acc[index2 - 1]) - std::get<1>(map_acc[index1]);
changed += std::get<3>(map_acc[index2]) / (index2 + 1);
}
float ans = (changed - original) / (std::get<3>(map_acc[map_acc.size() - 1]));
if (ans < 0) ans = -ans;
return ans;
}
inline float GetLambdaNDCG(const std::vector< std::tuple<float, float, int> > sorted_triple,
int index1,
int index2, float IDCG){
float original = pow(2, std::get<1>(sorted_triple[index1])) / log(index1 + 2)
+ pow(2, std::get<1>(sorted_triple[index2])) / log(index2 + 2);
float changed = pow(2, std::get<1>(sorted_triple[index2])) / log(index1 + 2)
+ pow(2, std::get<1>(sorted_triple[index1])) / log(index2 + 2);
float ans = (original - changed) / IDCG;
if (ans < 0) ans = -ans;
return ans;
}
inline float GetIDCG(const std::vector< std::tuple<float, float, int> > sorted_triple){
std::vector<float> labels;
for (int i = 0; i < sorted_triple.size(); i++){
labels.push_back(std::get<1>(sorted_triple[i]));
}
std::sort(labels.begin(), labels.end(), std::greater<float>());
return EvalNDCG::DCG(labels);
}
inline std::vector< std::tuple<float, float, float, float> > GetMAPAcc(const std::vector< std::tuple<float, float, int> > sorted_triple){
std::vector< std::tuple<float, float, float, float> > map_acc;
float hit = 0, acc1 = 0, acc2 = 0, acc3 = 0;
for (int i = 0; i < sorted_triple.size(); i++){
if (std::get<1>(sorted_triple[i]) == 1) {
hit++;
acc1 += hit / (i + 1);
acc2 += (hit - 1) / (i + 1);
acc3 += (hit + 1) / (i + 1);
}
map_acc.push_back(std::make_tuple(acc1, acc2, acc3, hit));
}
return map_acc;
}
inline void GetGroupGradient(const std::vector<float> &preds,
const std::vector<float> &labels,
const std::vector<unsigned> &group_index,
std::vector<float> &grad,
std::vector<float> &hess,
const std::vector< std::tuple<float, float, int> > sorted_triple,
const std::vector<int> index_remap,
const sample::Pairs& pairs,
int group){
bool j_better;
float IDCG, pred_diff, pred_diff_exp, delta;
float first_order_gradient, second_order_gradient;
std::vector< std::tuple<float, float, float, float> > map_acc;
if (lambda_ == NDCG){
IDCG = GetIDCG(sorted_triple);
}
else if (lambda_ == MAP){
map_acc = GetMAPAcc(sorted_triple);
}
for (int j = group_index[group]; j < group_index[group + 1]; j++){
std::vector<int> pair_instance = pairs.GetPairs(j);
for (int k = 0; k < pair_instance.size(); k++){
j_better = labels[j] > labels[pair_instance[k]];
if (j_better){
switch (lambda_){
case PAIRWISE: delta = 1.0; break;
case MAP: delta = GetLambdaMAP(sorted_triple, index_remap[j - group_index[group]], index_remap[pair_instance[k] - group_index[group]], map_acc); break;
case NDCG: delta = GetLambdaNDCG(sorted_triple, index_remap[j - group_index[group]], index_remap[pair_instance[k] - group_index[group]], IDCG); break;
default: utils::Error("Cannot find the specified loss type");
}
pred_diff = preds[j] - preds[pair_instance[k]];
pred_diff_exp = j_better ? expf(-pred_diff) : expf(pred_diff);
first_order_gradient = delta * FirstOrderGradient(pred_diff_exp);
second_order_gradient = 2 * delta * SecondOrderGradient(pred_diff_exp);
hess[j] += second_order_gradient;
grad[j] += first_order_gradient;
hess[pair_instance[k]] += second_order_gradient;
grad[pair_instance[k]] += -first_order_gradient;
}
}
}
}
/*!
* \brief calculate the first order gradient of the pairwise loss function f(x) = ln(1+exp(-x)),
* given the exponential of the difference of the untransformed pair predictions
* \param pred_diff_exp exponential of the negated prediction difference of the (better, worse) pair
* \return first order gradient
*/
inline float FirstOrderGradient(float pred_diff_exp) const {
return -pred_diff_exp / (1 + pred_diff_exp);
}
/*!
* \brief calculate the second order gradient of the pairwise loss function f(x) = ln(1+exp(-x)),
* given the exponential of the difference of the untransformed pair predictions
* \param pred_diff_exp exponential of the negated prediction difference of the (better, worse) pair
* \return second order gradient
*/
inline float SecondOrderGradient(float pred_diff_exp) const {
return pred_diff_exp / pow(1 + pred_diff_exp, 2);
}
};
};
};
#endif


@ -125,5 +125,191 @@ namespace xgboost {
};
}
}
namespace regrank{
// simple pairwise rank
class LambdaRankObj : public IObjFunction{
public:
LambdaRankObj(void){}
virtual ~LambdaRankObj(){}
virtual void SetParam(const char *name, const char *val){
if (!strcmp("loss_type", name)) loss_.loss_type = atoi(val);
if (!strcmp("sampler", name)) sampler_.AssignSampler(atoi(val));
if (!strcmp("lambda", name)) lambda_ = atoi(val);
}
virtual void GetGradient(const std::vector<float>& preds,
const DMatrix::Info &info,
int iter,
std::vector<float> &grad,
std::vector<float> &hess) {
grad.resize(preds.size()); hess.resize(preds.size());
const std::vector<unsigned> &group_index = info.group_ptr;
utils::Assert(group_index.size() != 0 && group_index.back() == preds.size(), "rank loss must have group file");
for (int i = 0; i < group_index.size() - 1; i++){
sample::Pairs pairs = sampler_.GenPairs(preds, info.labels, group_index[i], group_index[i + 1]);
//pairs.GetPairs()
std::vector< std::tuple<float, float, int> > sorted_triple = GetSortedTuple(preds, info.labels, group_index, i);
std::vector<int> index_remap = GetIndexMap(sorted_triple, group_index[i]);
GetGroupGradient(preds, info.labels, group_index,
grad, hess, sorted_triple, index_remap, pairs, i);
}
}
virtual const char* DefaultEvalMetric(void) {
return "auc";
}
private:
int lambda_;
const static int PAIRWISE = 0;
const static int MAP = 1;
const static int NDCG = 2;
sample::PairSamplerWrapper sampler_;
LossType loss_;
/* \brief Sort the tuples of a group by prediction; the fields of the returned
* tuples are, in order, prediction, label, and the index of the instance
*/
inline std::vector< std::tuple<float, float, int> > GetSortedTuple(const std::vector<float> &preds,
const std::vector<float> &labels,
const std::vector<unsigned> &group_index,
int group){
std::vector< std::tuple<float, float, int> > sorted_triple;
for (int j = group_index[group]; j < group_index[group + 1]; j++){
sorted_triple.push_back(std::tuple<float, float, int>(preds[j], labels[j], j));
}
std::sort(sorted_triple.begin(), sorted_triple.end(),
[](std::tuple<float, float, int> a, std::tuple<float, float, int> b){
return std::get<0>(a) > std::get<0>(b);
});
return sorted_triple;
}
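// index_remap maps an instance's offset within the group to its rank position
// after sorting by prediction (i.e. its index in sorted_triple)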
inline std::vector<int> GetIndexMap(std::vector< std::tuple<float, float, int> > sorted_triple, int start){
std::vector<int> index_remap;
index_remap.resize(sorted_triple.size());
for (int i = 0; i < sorted_triple.size(); i++){
index_remap[std::get<2>(sorted_triple[i]) - start] = i;
}
return index_remap;
}
inline float GetLambdaMAP(const std::vector< std::tuple<float, float, int> > sorted_triple,
int index1, int index2,
std::vector< std::tuple<float, float, float, float> > map_acc){
if (index1 > index2) std::swap(index1, index2);
float original = std::get<0>(map_acc[index2]);
if (index1 != 0) original -= std::get<0>(map_acc[index1 - 1]);
float changed = 0;
if (std::get<1>(sorted_triple[index1]) < std::get<1>(sorted_triple[index2])){
changed += std::get<2>(map_acc[index2 - 1]) - std::get<2>(map_acc[index1]);
changed += (std::get<3>(map_acc[index1])+ 1.0f) / (index1 + 1);
}
else{
changed += std::get<1>(map_acc[index2 - 1]) - std::get<1>(map_acc[index1]);
changed += std::get<3>(map_acc[index2]) / (index2 + 1);
}
float ans = (changed - original) / (std::get<3>(map_acc[map_acc.size() - 1]));
if (ans < 0) ans = -ans;
return ans;
}
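// |delta NDCG| of swapping the documents at rank positions index1 and index2: only the two
// swapped positions change their DCG contribution, so just those two terms are recomputed
// and the absolute difference is normalized by the group's ideal DCG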
inline float GetLambdaNDCG(const std::vector< std::tuple<float, float, int> > sorted_triple,
int index1,
int index2, float IDCG){
float original = pow(2, std::get<1>(sorted_triple[index1])) / log(index1 + 2)
+ pow(2, std::get<1>(sorted_triple[index2])) / log(index2 + 2);
float changed = pow(2, std::get<1>(sorted_triple[index2])) / log(index1 + 2)
+ pow(2, std::get<1>(sorted_triple[index1])) / log(index2 + 2);
float ans = (original - changed) / IDCG;
if (ans < 0) ans = -ans;
return ans;
}
inline float GetIDCG(const std::vector< std::tuple<float, float, int> > sorted_triple){
std::vector<float> labels;
for (int i = 0; i < sorted_triple.size(); i++){
labels.push_back(std::get<1>(sorted_triple[i]));
}
std::sort(labels.begin(), labels.end(), std::greater<float>());
return EvalNDCG::DCG(labels);
}
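// running MAP statistics per rank position, consumed by GetLambdaMAP; the tuple fields appear
// to be (AP sum with the actual hit count, AP sum as if one hit fewer, AP sum as if one hit more,
// number of hits so far)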
inline std::vector< std::tuple<float, float, float, float> > GetMAPAcc(const std::vector< std::tuple<float, float, int> > sorted_triple){
std::vector< std::tuple<float, float, float, float> > map_acc;
float hit = 0, acc1 = 0, acc2 = 0, acc3 = 0;
for (int i = 0; i < sorted_triple.size(); i++){
if (std::get<1>(sorted_triple[i]) == 1) {
hit++;
acc1 += hit / (i + 1);
acc2 += (hit - 1) / (i + 1);
acc3 += (hit + 1) / (i + 1);
}
map_acc.push_back(std::make_tuple(acc1, acc2, acc3, hit));
}
return map_acc;
}
inline void GetGroupGradient(const std::vector<float> &preds,
const std::vector<float> &labels,
const std::vector<unsigned> &group_index,
std::vector<float> &grad,
std::vector<float> &hess,
const std::vector< std::tuple<float, float, int> > sorted_triple,
const std::vector<int> index_remap,
const sample::Pairs& pairs,
int group){
bool j_better;
float IDCG, pred_diff, pred_diff_exp, delta;
float first_order_gradient, second_order_gradient;
std::vector< std::tuple<float, float, float, float> > map_acc;
if (lambda_ == NDCG){
IDCG = GetIDCG(sorted_triple);
}
else if (lambda_ == MAP){
map_acc = GetMAPAcc(sorted_triple);
}
for (int j = group_index[group]; j < group_index[group + 1]; j++){
std::vector<int> pair_instance = pairs.GetPairs(j);
for (int k = 0; k < pair_instance.size(); k++){
j_better = labels[j] > labels[pair_instance[k]];
if (j_better){
switch (lambda_){
case PAIRWISE: delta = 1.0; break;
case MAP: delta = GetLambdaMAP(sorted_triple, index_remap[j - group_index[group]], index_remap[pair_instance[k] - group_index[group]], map_acc); break;
case NDCG: delta = GetLambdaNDCG(sorted_triple, index_remap[j - group_index[group]], index_remap[pair_instance[k] - group_index[group]], IDCG); break;
default: utils::Error("Cannot find the specified loss type");
}
pred_diff = preds[j] - preds[pair_instance[k]];
pred_diff_exp = j_better ? expf(-pred_diff) : expf(pred_diff);
first_order_gradient = delta * FirstOrderGradient(pred_diff_exp);
second_order_gradient = 2 * delta * SecondOrderGradient(pred_diff_exp);
hess[j] += second_order_gradient;
grad[j] += first_order_gradient;
hess[pair_instance[k]] += second_order_gradient;
grad[pair_instance[k]] += -first_order_gradient;
}
}
}
}
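// pairwise logistic loss f(x) = ln(1 + exp(-x)) on the prediction difference x of a
// (better, worse) pair; pred_diff_exp carries exp(-x), so
//   f'(x)  = -exp(-x) / (1 + exp(-x))
//   f''(x) =  exp(-x) / (1 + exp(-x))^2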
inline float FirstOrderGradient(float pred_diff_exp) const {
return -pred_diff_exp / (1 + pred_diff_exp);
}
inline float SecondOrderGradient(float pred_diff_exp) const {
return pred_diff_exp / pow(1 + pred_diff_exp, 2);
}
};
}
#endif


@ -129,6 +129,7 @@ namespace xgboost{
};
namespace random{
/*! \brief random number generator with independent random number seed*/
struct Random{
/*! \brief set random number seed */
inline void Seed( unsigned sd ){
@ -136,10 +137,7 @@ namespace xgboost{
}
/*! \brief return a real number uniform in [0,1) */
inline double RandDouble( void ){
// return static_cast<double>( rand_( &rseed ) ) / (static_cast<double>( RAND_MAX )+1.0);
return static_cast<double>(rand()) / (static_cast<double>(RAND_MAX)+1.0);
return static_cast<double>( rand_r( &rseed ) ) / (static_cast<double>( RAND_MAX )+1.0);
}
// random number seed
unsigned rseed;