diff --git a/python/Makefile b/python/Makefile
index 0db0a1ed0..f21957a2e 100644
--- a/python/Makefile
+++ b/python/Makefile
@@ -1,6 +1,6 @@
 export CC  = gcc
 export CXX = g++
-export CFLAGS = -Wall -msse2 -Wno-unknown-pragmas -fopenmp
+export CFLAGS = -Wall -O3 -msse2 -Wno-unknown-pragmas -fopenmp
 
 # specify tensor path
 SLIB = libxgboostpy.so
diff --git a/python/xgboost.py b/python/xgboost.py
index 922ca085d..d37566065 100644
--- a/python/xgboost.py
+++ b/python/xgboost.py
@@ -22,6 +22,13 @@
 xglib.XGDMatrixGetLabel.restype = ctypes.POINTER( ctypes.c_float )
 xglib.XGDMatrixGetRow.restype = ctypes.POINTER( REntry )
 xglib.XGBoosterPredict.restype = ctypes.POINTER( ctypes.c_float )
+def ctypes2numpy( cptr, length ):
+    # convert a ctypes float pointer array to a numpy array
+    assert isinstance( cptr, ctypes.POINTER( ctypes.c_float ) )
+    res = numpy.zeros( length, dtype='float32' )
+    assert ctypes.memmove( res.ctypes.data, cptr, length * res.strides[0] )
+    return res
+
 # data matrix used in xgboost
 class DMatrix:
     # constructor
@@ -73,7 +80,7 @@ class DMatrix:
     def get_label(self):
         length = ctypes.c_ulong()
         labels = xglib.XGDMatrixGetLabel(self.handle, ctypes.byref(length))
-        return numpy.array( [labels[i] for i in xrange(length.value)] )
+        return ctypes2numpy( labels, length.value )
     # clear everything
     def clear(self):
         xglib.XGDMatrixClear(self.handle)
@@ -138,7 +145,7 @@ class Booster:
     def predict(self, data, bst_group = -1):
         length = ctypes.c_ulong()
         preds = xglib.XGBoosterPredict( self.handle, data.handle, ctypes.byref(length), bst_group)
-        return numpy.array( [ preds[i] for i in xrange(length.value)])
+        return ctypes2numpy( preds, length.value )
     def save_model(self, fname):
         """ save model to file """
         xglib.XGBoosterSaveModel( self.handle, ctypes.c_char_p(fname) )
diff --git a/python/xgboost_python.cpp b/python/xgboost_python.cpp
index ee80429f8..d5442c8c3 100644
--- a/python/xgboost_python.cpp
+++ b/python/xgboost_python.cpp
@@ -75,6 +75,7 @@ namespace xgboost{
         inline void CheckInit(void){
             if(!init_col_){
                 this->data.InitData();
+                init_col_ = true;
             }
             utils::Assert( this->data.NumRow() == this->info.labels.size(), "DMatrix: number of labels must match number of rows in matrix");
         }
diff --git a/regrank/xgboost_regrank.h b/regrank/xgboost_regrank.h
index b06280b2c..d0148ab1a 100644
--- a/regrank/xgboost_regrank.h
+++ b/regrank/xgboost_regrank.h
@@ -283,6 +283,7 @@ namespace xgboost{
                 #pragma omp parallel for schedule( static )
                 for (unsigned j = 0; j < ndata; ++j){
                     preds[j] = mparam.base_score + base_gbm.Predict(data.data, j, buffer_offset + j, data.info.GetRoot(j), bst_group );
+                }
             }else
                 #pragma omp parallel for schedule( static )
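Reviewer note: the ctypes2numpy helper above replaces the old per-element list comprehension with a single memmove into a preallocated float32 buffer, so get_label and predict now copy in one C-level call instead of length.value Python iterations, and the returned array owns its memory rather than aliasing a pointer the library may reuse. A minimal self-contained sketch of the same trick; buf below is a hypothetical stand-in for a pointer such as the one xglib.XGDMatrixGetLabel returns:

    import ctypes
    import numpy

    def ctypes2numpy( cptr, length ):
        # bulk-copy a C float buffer into a numpy-owned float32 array
        assert isinstance( cptr, ctypes.POINTER( ctypes.c_float ) )
        res = numpy.zeros( length, dtype='float32' )
        # memmove returns the destination address, so the assert
        # doubles as a cheap success check
        assert ctypes.memmove( res.ctypes.data, cptr, length * res.strides[0] )
        return res

    buf = ( ctypes.c_float * 4 )( 0.0, 1.0, 0.5, 2.0 )
    cptr = ctypes.cast( buf, ctypes.POINTER( ctypes.c_float ) )
    print( ctypes2numpy( cptr, 4 ) )    # [ 0.   1.   0.5  2. ]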
diff --git a/regrank/xgboost_regrank_eval.h b/regrank/xgboost_regrank_eval.h
index a2a3b0012..c999c3849 100644
--- a/regrank/xgboost_regrank_eval.h
+++ b/regrank/xgboost_regrank_eval.h
@@ -83,7 +83,7 @@ namespace xgboost{
                 float sum = 0.0f, wsum = 0.0f;
                 #pragma omp parallel for reduction(+:sum,wsum) schedule( static )
                 for (unsigned i = 0; i < ndata; ++i){
-                    const float wt = info.GetWeight(i); 
+                    const float wt = info.GetWeight(i);
                     if (preds[i] > 0.5f){
                         if (info.labels[i] < 0.5f) sum += wt;
                     }
@@ -99,6 +99,39 @@
             }
         };
+
+        /*! \brief match error, for multi-class classification */
+        struct EvalMatchError : public IEvaluator{
+        public:
+            EvalMatchError(const char *name){
+                name_ = name;
+                abs_ = 0;
+                if(!strcmp("mabserror", name)) abs_ = 1;
+            }
+            virtual float Eval(const std::vector<float> &preds,
+                               const DMatrix::Info &info) const {
+                const unsigned ndata = static_cast<unsigned>(preds.size());
+                float sum = 0.0f, wsum = 0.0f;
+                #pragma omp parallel for reduction(+:sum,wsum) schedule( static )
+                for (unsigned i = 0; i < ndata; ++i){
+                    const float wt = info.GetWeight(i);
+                    int label = static_cast<int>(info.labels[i]);
+                    if( label < 0 && abs_ != 0 ) label = -label-1;
+                    if (static_cast<int>(preds[i]) != label ){
+                        sum += wt;
+                    }
+                    wsum += wt;
+                }
+                return sum / wsum;
+            }
+            virtual const char *Name(void) const{
+                return name_.c_str();
+            }
+            int abs_;
+            std::string name_;
+        };
+
+
         /*! \brief Area under curve, for both classification and rank */
         struct EvalAuc : public IEvaluator{
             virtual float Eval(const std::vector<float> &preds,
@@ -281,6 +314,8 @@ namespace xgboost{
             }
             if (!strcmp(name, "rmse"))  evals_.push_back(new EvalRMSE());
             if (!strcmp(name, "error")) evals_.push_back(new EvalError());
+            if (!strcmp(name, "merror")) evals_.push_back(new EvalMatchError("merror"));
+            if (!strcmp(name, "mabserror")) evals_.push_back(new EvalMatchError("mabserror"));
             if (!strcmp(name, "logloss")) evals_.push_back(new EvalLogLoss());
             if (!strcmp(name, "auc"))   evals_.push_back(new EvalAuc());
             if (!strncmp(name, "pre@", 4)) evals_.push_back(new EvalPrecision(name));
diff --git a/regrank/xgboost_regrank_obj.hpp b/regrank/xgboost_regrank_obj.hpp
index 2a0ad80b2..5fded0aa2 100644
--- a/regrank/xgboost_regrank_obj.hpp
+++ b/regrank/xgboost_regrank_obj.hpp
@@ -77,7 +77,7 @@ namespace xgboost{
             #pragma omp parallel
             {
                 std::vector< float > rec;
-                #pragma for schedule(static)
+                #pragma omp for schedule(static)
                 for (unsigned k = 0; k < ngroup; ++k){
                     rec.clear();
                     int nhit = 0;
@@ -127,13 +127,16 @@
             #pragma omp parallel
             {
                 std::vector<float> rec(nclass);
-                #pragma for schedule(static)
+                #pragma omp for schedule(static)
                 for (unsigned j = 0; j < ndata; ++j){
                     for( int k = 0; k < nclass; ++ k ){
                         rec[k] = preds[j + k * ndata];
                     }
                     Softmax( rec );
                     int label = static_cast<int>(info.labels[j]);
+                    if( label < 0 ){
+                        label = -label - 1;
+                    }
                     utils::Assert( label < nclass, "SoftmaxMultiClassObj: label exceed num_class" );
                     for( int k = 0; k < nclass; ++ k ){
                         float p = rec[ k ];
@@ -151,22 +154,22 @@
             utils::Assert( nclass != 0, "must set num_class to use softmax" );
             utils::Assert( preds.size() % nclass == 0, "SoftmaxMultiClassObj: label size and pred size does not match" );
             const unsigned ndata = static_cast<unsigned>(preds.size()/nclass);
+
             #pragma omp parallel
             {
                 std::vector<float> rec(nclass);
-                #pragma for schedule(static)
+                #pragma omp for schedule(static)
                 for (unsigned j = 0; j < ndata; ++j){
                     for( int k = 0; k < nclass; ++ k ){
                         rec[k] = preds[j + k * ndata];
                     }
-                    Softmax( rec );
                     preds[j] = FindMaxIndex( rec );
                 }
             }
             preds.resize( ndata );
         }
         virtual const char* DefaultEvalMetric(void) {
-            return "error";
+            return "merror";
         }
     private:
         int nclass;
@@ -203,7 +206,7 @@
                 // each thread uses its own random number generator, seeded by thread id and current iteration
                 random::Random rnd; rnd.Seed( iter * 1111 + omp_get_thread_num() );
                 std::vector< std::pair<float,unsigned> > rec;
-                #pragma for schedule(static)
+                #pragma omp for schedule(static)
                 for (unsigned k = 0; k < ngroup; ++k){
                     rec.clear();
                     for(unsigned j = gptr[k]; j < gptr[k+1]; ++j ){
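Two reviewer notes on the hunks above. First, the repeated #pragma for → #pragma omp for fix matters more than it looks: #pragma for is not an OpenMP directive, and with -Wno-unknown-pragmas (set in python/Makefile) compilers dropped it silently, so inside the enclosing #pragma omp parallel every thread executed the whole loop instead of a share of it. Second, removing Softmax( rec ) before FindMaxIndex in PredTransform is safe because softmax is strictly increasing and cannot change which class attains the maximum. A short sketch of that argument, with softmax written as a hypothetical numpy port of the Softmax helper:

    import numpy

    def softmax( x ):
        # numerically stable softmax: shift by the max before exponentiating
        e = numpy.exp( x - numpy.max( x ) )
        return e / e.sum()

    # softmax preserves the ordering of its inputs, so the argmax of the
    # raw class scores equals the argmax of the softmax probabilities
    scores = numpy.array( [0.3, 2.1, -1.0, 2.0], dtype='float32' )
    assert softmax( scores ).argmax() == scores.argmax()
    print( scores.argmax() )    # 1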
diff --git a/regrank/xgboost_regrank_utils.h b/regrank/xgboost_regrank_utils.h
index 6f7ce596d..49ab715d5 100644
--- a/regrank/xgboost_regrank_utils.h
+++ b/regrank/xgboost_regrank_utils.h
@@ -26,7 +26,9 @@ namespace xgboost{
         inline static int FindMaxIndex( std::vector<float>& rec ){
             size_t mxid = 0;
             for( size_t i = 1; i < rec.size(); ++ i ){
-                if( rec[i] > rec[mxid] ) mxid = i;
+                if( rec[i] > rec[mxid]+1e-6f ){
+                    mxid = i;
+                }
             }
             return (int)mxid;
         }
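Finally, the new merror metric (now the default for the softmax objective) is the weighted fraction of rows whose predicted class index differs from the integer label; the mabserror variant first remaps a negative label l to -l-1, matching the remap added in the softmax gradient code. A minimal numpy sketch of the computation; the function name and signature are illustrative, not part of the library API:

    import numpy

    def merror( preds, labels, weights=None ):
        # weighted share of rows where the predicted class index
        # differs from the label, mirroring EvalMatchError::Eval
        preds  = numpy.asarray( preds,  dtype='int32' )
        labels = numpy.asarray( labels, dtype='int32' )
        if weights is None:
            weights = numpy.ones( len( labels ), dtype='float32' )
        miss = ( preds != labels ).astype( 'float32' )
        return float( numpy.dot( miss, weights ) / weights.sum() )

    print( merror( [0, 2, 1, 1], [0, 2, 2, 1] ) )    # 0.25

The 1e-6f slack added to FindMaxIndex pairs with this: class scores that differ only by float noise now resolve to the lowest index, presumably so the reported merror stays stable across runs.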