fix omp for bug in obj
This commit is contained in:
parent
fa5afe2141
commit
f8cacc7308
@ -200,6 +200,11 @@ namespace xgboost{
|
|||||||
fprintf(fo, "[%d]", iter);
|
fprintf(fo, "[%d]", iter);
|
||||||
for (size_t i = 0; i < evals.size(); ++i){
|
for (size_t i = 0; i < evals.size(); ++i){
|
||||||
this->PredictRaw(preds_, *evals[i]);
|
this->PredictRaw(preds_, *evals[i]);
|
||||||
|
for( size_t j = 0 ; j < preds_.size(); ++ j){
|
||||||
|
if( fabsf(preds_[j]- 0.5f)>1e-6f){
|
||||||
|
printf("p[%lu]=%f\n", j,preds_[j]);
|
||||||
|
}
|
||||||
|
}
|
||||||
obj_->PredTransform(preds_);
|
obj_->PredTransform(preds_);
|
||||||
evaluator_.Eval(fo, evname[i].c_str(), preds_, evals[i]->info);
|
evaluator_.Eval(fo, evname[i].c_str(), preds_, evals[i]->info);
|
||||||
}
|
}
|
||||||
@ -283,6 +288,10 @@ namespace xgboost{
|
|||||||
#pragma omp parallel for schedule( static )
|
#pragma omp parallel for schedule( static )
|
||||||
for (unsigned j = 0; j < ndata; ++j){
|
for (unsigned j = 0; j < ndata; ++j){
|
||||||
preds[j] = mparam.base_score + base_gbm.Predict(data.data, j, buffer_offset + j, data.info.GetRoot(j), bst_group );
|
preds[j] = mparam.base_score + base_gbm.Predict(data.data, j, buffer_offset + j, data.info.GetRoot(j), bst_group );
|
||||||
|
if( preds[j] != 0.5f ){
|
||||||
|
printf("pred[%d:%u]=%f\n", bst_group, j, preds[j]);
|
||||||
|
}
|
||||||
|
utils::Assert( preds[j] == 0.5f, "BUG");
|
||||||
}
|
}
|
||||||
}else
|
}else
|
||||||
#pragma omp parallel for schedule( static )
|
#pragma omp parallel for schedule( static )
|
||||||
|
|||||||
@ -83,7 +83,7 @@ namespace xgboost{
|
|||||||
float sum = 0.0f, wsum = 0.0f;
|
float sum = 0.0f, wsum = 0.0f;
|
||||||
#pragma omp parallel for reduction(+:sum,wsum) schedule( static )
|
#pragma omp parallel for reduction(+:sum,wsum) schedule( static )
|
||||||
for (unsigned i = 0; i < ndata; ++i){
|
for (unsigned i = 0; i < ndata; ++i){
|
||||||
const float wt = info.GetWeight(i);
|
const float wt = info.GetWeight(i);
|
||||||
if (preds[i] > 0.5f){
|
if (preds[i] > 0.5f){
|
||||||
if (info.labels[i] < 0.5f) sum += wt;
|
if (info.labels[i] < 0.5f) sum += wt;
|
||||||
}
|
}
|
||||||
@ -99,6 +99,28 @@ namespace xgboost{
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/*! \brief Error */
|
||||||
|
struct EvalMatchError : public IEvaluator{
|
||||||
|
virtual float Eval(const std::vector<float> &preds,
|
||||||
|
const DMatrix::Info &info) const {
|
||||||
|
const unsigned ndata = static_cast<unsigned>(preds.size());
|
||||||
|
float sum = 0.0f, wsum = 0.0f;
|
||||||
|
#pragma omp parallel for reduction(+:sum,wsum) schedule( static )
|
||||||
|
for (unsigned i = 0; i < ndata; ++i){
|
||||||
|
const float wt = info.GetWeight(i);
|
||||||
|
if (static_cast<int>(preds[i]) != static_cast<int>(info.labels[i]) ){
|
||||||
|
sum += wt;
|
||||||
|
}
|
||||||
|
wsum += wt;
|
||||||
|
}
|
||||||
|
return sum / wsum;
|
||||||
|
}
|
||||||
|
virtual const char *Name(void) const{
|
||||||
|
return "merror";
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
/*! \brief Area under curve, for both classification and rank */
|
/*! \brief Area under curve, for both classification and rank */
|
||||||
struct EvalAuc : public IEvaluator{
|
struct EvalAuc : public IEvaluator{
|
||||||
virtual float Eval(const std::vector<float> &preds,
|
virtual float Eval(const std::vector<float> &preds,
|
||||||
@ -270,6 +292,7 @@ namespace xgboost{
|
|||||||
}
|
}
|
||||||
if (!strcmp(name, "rmse")) evals_.push_back(new EvalRMSE());
|
if (!strcmp(name, "rmse")) evals_.push_back(new EvalRMSE());
|
||||||
if (!strcmp(name, "error")) evals_.push_back(new EvalError());
|
if (!strcmp(name, "error")) evals_.push_back(new EvalError());
|
||||||
|
if (!strcmp(name, "merror")) evals_.push_back(new EvalMatchError());
|
||||||
if (!strcmp(name, "logloss")) evals_.push_back(new EvalLogLoss());
|
if (!strcmp(name, "logloss")) evals_.push_back(new EvalLogLoss());
|
||||||
if (!strcmp(name, "auc")) evals_.push_back(new EvalAuc());
|
if (!strcmp(name, "auc")) evals_.push_back(new EvalAuc());
|
||||||
if (!strncmp(name, "pre@", 4)) evals_.push_back(new EvalPrecision(name));
|
if (!strncmp(name, "pre@", 4)) evals_.push_back(new EvalPrecision(name));
|
||||||
|
|||||||
@ -75,7 +75,7 @@ namespace xgboost{
|
|||||||
#pragma omp parallel
|
#pragma omp parallel
|
||||||
{
|
{
|
||||||
std::vector< float > rec;
|
std::vector< float > rec;
|
||||||
#pragma for schedule(static)
|
#pragma omp for schedule(static)
|
||||||
for (unsigned k = 0; k < ngroup; ++k){
|
for (unsigned k = 0; k < ngroup; ++k){
|
||||||
rec.clear();
|
rec.clear();
|
||||||
int nhit = 0;
|
int nhit = 0;
|
||||||
@ -125,7 +125,7 @@ namespace xgboost{
|
|||||||
#pragma omp parallel
|
#pragma omp parallel
|
||||||
{
|
{
|
||||||
std::vector<float> rec(nclass);
|
std::vector<float> rec(nclass);
|
||||||
#pragma for schedule(static)
|
#pragma omp for schedule(static)
|
||||||
for (unsigned j = 0; j < ndata; ++j){
|
for (unsigned j = 0; j < ndata; ++j){
|
||||||
for( int k = 0; k < nclass; ++ k ){
|
for( int k = 0; k < nclass; ++ k ){
|
||||||
rec[k] = preds[j + k * ndata];
|
rec[k] = preds[j + k * ndata];
|
||||||
@ -149,22 +149,22 @@ namespace xgboost{
|
|||||||
utils::Assert( nclass != 0, "must set num_class to use softmax" );
|
utils::Assert( nclass != 0, "must set num_class to use softmax" );
|
||||||
utils::Assert( preds.size() % nclass == 0, "SoftmaxMultiClassObj: label size and pred size does not match" );
|
utils::Assert( preds.size() % nclass == 0, "SoftmaxMultiClassObj: label size and pred size does not match" );
|
||||||
const unsigned ndata = static_cast<unsigned>(preds.size()/nclass);
|
const unsigned ndata = static_cast<unsigned>(preds.size()/nclass);
|
||||||
|
|
||||||
#pragma omp parallel
|
#pragma omp parallel
|
||||||
{
|
{
|
||||||
std::vector<float> rec(nclass);
|
std::vector<float> rec(nclass);
|
||||||
#pragma for schedule(static)
|
#pragma omp for schedule(static)
|
||||||
for (unsigned j = 0; j < ndata; ++j){
|
for (unsigned j = 0; j < ndata; ++j){
|
||||||
for( int k = 0; k < nclass; ++ k ){
|
for( int k = 0; k < nclass; ++ k ){
|
||||||
rec[k] = preds[j + k * ndata];
|
rec[k] = preds[j + k * ndata];
|
||||||
}
|
}
|
||||||
Softmax( rec );
|
|
||||||
preds[j] = FindMaxIndex( rec );
|
preds[j] = FindMaxIndex( rec );
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
preds.resize( ndata );
|
preds.resize( ndata );
|
||||||
}
|
}
|
||||||
virtual const char* DefaultEvalMetric(void) {
|
virtual const char* DefaultEvalMetric(void) {
|
||||||
return "error";
|
return "merror";
|
||||||
}
|
}
|
||||||
private:
|
private:
|
||||||
int nclass;
|
int nclass;
|
||||||
@ -201,7 +201,7 @@ namespace xgboost{
|
|||||||
// thread use its own random number generator, seed by thread id and current iteration
|
// thread use its own random number generator, seed by thread id and current iteration
|
||||||
random::Random rnd; rnd.Seed( iter * 1111 + omp_get_thread_num() );
|
random::Random rnd; rnd.Seed( iter * 1111 + omp_get_thread_num() );
|
||||||
std::vector< std::pair<float,unsigned> > rec;
|
std::vector< std::pair<float,unsigned> > rec;
|
||||||
#pragma for schedule(static)
|
#pragma omp for schedule(static)
|
||||||
for (unsigned k = 0; k < ngroup; ++k){
|
for (unsigned k = 0; k < ngroup; ++k){
|
||||||
rec.clear();
|
rec.clear();
|
||||||
for(unsigned j = gptr[k]; j < gptr[k+1]; ++j ){
|
for(unsigned j = gptr[k]; j < gptr[k+1]; ++j ){
|
||||||
|
|||||||
@ -26,7 +26,9 @@ namespace xgboost{
|
|||||||
inline static int FindMaxIndex( std::vector<float>& rec ){
|
inline static int FindMaxIndex( std::vector<float>& rec ){
|
||||||
size_t mxid = 0;
|
size_t mxid = 0;
|
||||||
for( size_t i = 1; i < rec.size(); ++ i ){
|
for( size_t i = 1; i < rec.size(); ++ i ){
|
||||||
if( rec[i] > rec[mxid] ) mxid = i;
|
if( rec[i] > rec[mxid]+1e-6f ){
|
||||||
|
mxid = i;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return (int)mxid;
|
return (int)mxid;
|
||||||
}
|
}
|
||||||
|
|||||||
Loading…
x
Reference in New Issue
Block a user