Comments added

kalenhaha 2014-02-13 13:04:55 +08:00
parent 06ce8c9f3a
commit f22139c659
7 changed files with 315 additions and 153 deletions

View File

@@ -9,6 +9,7 @@
 #include <vector>
 #include "../utils/xgboost_utils.h"
+#include "../utils/xgboost_stream.h"
 namespace xgboost{
     namespace booster{
@@ -143,7 +144,7 @@ namespace xgboost{
          * the function is not consistent between 64bit and 32bit machine
          * \param fo output stream
          */
-        inline void SaveBinary( utils::IStream &fo ) const{
+        inline void SaveBinary(utils::IStream &fo ) const{
            size_t nrow = this->NumRow();
            fo.Write( &nrow, sizeof(size_t) );
            fo.Write( &row_ptr[0], row_ptr.size() * sizeof(size_t) );
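The serialized layout is the row count followed by the raw row_ptr array; the column/value entries follow in the part of the function the hunk cuts off. For orientation, here is a hedged sketch of the matching reader. The name LoadBinarySketch is hypothetical, and it assumes utils::IStream pairs the Write above with a Read(void*, size_t), and that row_ptr follows the usual CSR convention of nrow+1 offsets:

    // hypothetical inverse of SaveBinary above; assumes
    // utils::IStream provides Read(void *ptr, size_t size) mirroring Write
    inline void LoadBinarySketch( utils::IStream &fi, std::vector<size_t> &row_ptr ){
        size_t nrow;
        fi.Read( &nrow, sizeof(size_t) );        // row count comes first
        row_ptr.resize( nrow + 1 );              // nrow+1 offsets in CSR form
        fi.Read( &row_ptr[0], row_ptr.size() * sizeof(size_t) );
        // ...the remaining payload (column indices and values) would be read here
    }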

View File

@@ -1,10 +1,10 @@
 #ifndef _XGBOOST_REG_H_
 #define _XGBOOST_REG_H_
 /*!
  * \file xgboost_reg.h
  * \brief class for gradient boosted regression
  * \author Kailong Chen: chenkl198812@gmail.com, Tianqi Chen: tianqi.tchen@gmail.com
  */
 #include <cmath>
 #include "xgboost_regdata.h"
 #include "../booster/xgboost_gbmbase.h"
@@ -16,6 +16,11 @@ namespace xgboost{
         /*! \brief class for gradient boosted regression */
         class RegBoostLearner{
         public:
+            RegBoostLearner(bool silent = false){
+                this->silent = silent;
+            }
             /*!
              * \brief a regression booster associated with training and evaluating data
              * \param train pointer to the training data
@@ -24,12 +29,33 @@
              */
             RegBoostLearner( const DMatrix *train,
                              std::vector<const DMatrix *> evals,
-                             std::vector<std::string> evname ){
+                             std::vector<std::string> evname, bool silent = false ){
+                this->silent = silent;
+                SetData(train,evals,evname);
+            }
+            /*!
+             * \brief associate the regression booster with training and evaluating data
+             * \param train pointer to the training data
+             * \param evals array of evaluating data
+             * \param evname name of each evaluation data set, used to print statistics
+             */
+            inline void SetData(const DMatrix *train,
+                                std::vector<const DMatrix *> evals,
+                                std::vector<std::string> evname){
                 this->train_ = train;
                 this->evals_ = evals;
                 this->evname_ = evname;
-                //TODO: assign buffer index
-            }
+                //assign buffer index
+                int buffer_size = (*train).size();
+                for(int i = 0; i < evals.size(); i++){
+                    buffer_size += (*evals[i]).size();
+                }
+                char str[25];
+                sprintf(str,"%d",buffer_size);
+                base_model.SetParam("num_pbuffer",str);
+            }
             /*!
              * \brief set parameters from outside
              * \param name name of the parameter
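The num_pbuffer bookkeeping above reserves one prediction-buffer slot per instance across all data sets; UpdateOneIter later walks these slots via buffer_index_offset. A small illustration of the layout, with made-up sizes (all numbers hypothetical):

    // illustration only: train has 1000 rows, two eval sets have 200 and 300;
    // num_pbuffer is then 1500 and each set owns a contiguous slice:
    //   train    -> slots [   0, 1000)   (offset 0)
    //   evals[0] -> slots [1000, 1200)   (offset = train size)
    //   evals[1] -> slots [1200, 1500)   (offset advances by each eval size)
    int train_size = 1000, eval0 = 200, eval1 = 300;
    int num_pbuffer = train_size + eval0 + eval1;   // 1500, as SetData computes
    int offset0 = train_size;                       // start of evals[0]'s slice
    int offset1 = offset0 + eval0;                  // start of evals[1]'s slice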
@@ -47,6 +73,14 @@
                 base_model.InitTrainer();
                 mparam.AdjustBase();
             }
+            /*!
+             * \brief initialize the data storage of the model; call this function when the model is used for the first time
+             */
+            inline void InitModel( void ){
+                base_model.InitModel();
+            }
             /*!
              * \brief load model from stream
              * \param fi input stream
@@ -63,23 +97,66 @@
                 fo.Write( &mparam, sizeof(ModelParam) );
                 base_model.SaveModel( fo );
             }
             /*!
              * \brief update the model for one iteration
+             * \param iteration the current iteration number
              */
-            inline void UpdateOneIter( void ){
-                //TODO
-            }
-            /*! \brief predict the results, given data */
-            inline void Predict( std::vector<float> &preds, const DMatrix &data ){
-                //TODO
-            }
+            inline void UpdateOneIter( int iteration ){
+                std::vector<float> grad,hess,preds;
+                std::vector<unsigned> root_index;
+                booster::FMatrixS::Image train_image((*train_).data);
+                Predict(preds,*train_,0);
+                Gradient(preds,(*train_).labels,grad,hess);
+                base_model.DoBoost(grad,hess,train_image,root_index);
+                int buffer_index_offset = (*train_).size();
+                float loss = 0.0;
+                for(int i = 0; i < evals_.size(); i++){
+                    Predict(preds, *evals_[i], buffer_index_offset);
+                    loss = mparam.Loss(preds,(*evals_[i]).labels);
+                    if(!silent){
+                        printf("The loss of %s data set in the %d-th iteration is %f\n",
+                               evname_[i].c_str(),iteration,loss);
+                    }
+                    buffer_index_offset += (*evals_[i]).size();
+                }
+            }
+            /*! \brief get the transformed predictions, given data */
+            inline void Predict( std::vector<float> &preds, const DMatrix &data,int buffer_index_offset = 0 ){
+                int data_size = data.size();
+                preds.resize(data_size);
+                for(int j = 0; j < data_size; j++){
+                    preds[j] = mparam.PredTransform(mparam.base_score +
+                        base_model.Predict(data.data[j],buffer_index_offset + j));
+                }
+            }
         private:
+            /*! \brief get the first order and second order gradients, given the transformed predictions and labels */
+            inline void Gradient(const std::vector<float> &preds, const std::vector<float> &labels,
+                                 std::vector<float> &grad, std::vector<float> &hess){
+                grad.clear();
+                hess.clear();
+                for(int j = 0; j < preds.size(); j++){
+                    grad.push_back(mparam.FirstOrderGradient(preds[j],labels[j]));
+                    hess.push_back(mparam.SecondOrderGradient(preds[j],labels[j]));
+                }
+            }
+            enum LOSS_TYPE_LIST{
+                LINEAR_SQUARE,
+                LOGISTIC_NEGLOGLIKELIHOOD,
+            };
             /*! \brief training parameter for regression */
             struct ModelParam{
                 /* \brief global bias */
                 float base_score;
                 /* \brief type of loss function */
                 int loss_type;
                 ModelParam( void ){
                     base_score = 0.5f;
                     loss_type = 0;
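Taken together, the additions in this hunk give RegBoostLearner a complete train/predict cycle. A minimal driver sketch follows; the variable names are illustrative rather than from the commit, the data sets are assumed to be loaded already, and any required SetParam configuration is assumed to have happened before InitTrainer:

    // hypothetical usage of the API added in this commit
    DMatrix train, eval;                                 // assume loaded elsewhere
    std::vector<const DMatrix*> evals; evals.push_back( &eval );
    std::vector<std::string> evnames; evnames.push_back( "eval" );

    RegBoostLearner learner( &train, evals, evnames );   // also sizes num_pbuffer
    learner.InitTrainer();
    learner.InitModel();                                 // first use of the model
    for( int i = 1; i <= 10; ++i ){
        learner.UpdateOneIter( i );                      // boost one round, report eval loss
    }
    std::vector<float> preds;
    learner.Predict( preds, train );                     // transformed predictions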
@@ -110,7 +187,7 @@
                  */
                 inline float FirstOrderGradient( float predt, float label ) const{
                     switch( loss_type ){
-                    case 0: return predt - label;
+                    case LINEAR_SQUARE: return predt - label;
                     case 1: return predt - label;
                     default: utils::Error("unknown loss_type"); return 0.0f;
                     }
@@ -123,11 +200,53 @@
                  */
                 inline float SecondOrderGradient( float predt, float label ) const{
                     switch( loss_type ){
-                    case 0: return 1.0f;
-                    case 1: return predt * ( 1 - predt );
+                    case LINEAR_SQUARE: return 1.0f;
+                    case LOGISTIC_NEGLOGLIKELIHOOD: return predt * ( 1 - predt );
                     default: utils::Error("unknown loss_type"); return 0.0f;
                     }
                 }
+                /*!
+                 * \brief calculate the loss, given the predictions, labels and the loss type
+                 * \param preds the given predictions
+                 * \param labels the given labels
+                 * \return the specified loss
+                 */
+                inline float Loss(const std::vector<float> &preds, const std::vector<float> &labels) const{
+                    switch( loss_type ){
+                    case LINEAR_SQUARE: return SquareLoss(preds,labels);
+                    case LOGISTIC_NEGLOGLIKELIHOOD: return NegLoglikelihoodLoss(preds,labels);
+                    default: utils::Error("unknown loss_type"); return 0.0f;
+                    }
+                }
+                /*!
+                 * \brief calculate the square loss, given the predictions and labels
+                 * \param preds the given predictions
+                 * \param labels the given labels
+                 * \return the summation of square loss
+                 */
+                inline float SquareLoss(const std::vector<float> &preds, const std::vector<float> &labels) const{
+                    float ans = 0.0;
+                    for(int i = 0; i < preds.size(); i++)
+                        ans += pow(preds[i] - labels[i], 2);
+                    return ans;
+                }
+                /*!
+                 * \brief calculate the negative log-likelihood loss, given the predictions and labels
+                 * \param preds the given predictions
+                 * \param labels the given labels
+                 * \return the summation of negative log-likelihood loss
+                 */
+                inline float NegLoglikelihoodLoss(const std::vector<float> &preds, const std::vector<float> &labels) const{
+                    float ans = 0.0;
+                    for(int i = 0; i < preds.size(); i++)
+                        ans -= labels[i] * log(preds[i]) + ( 1 - labels[i] ) * log(1 - preds[i]);
+                    return ans;
+                }
                 /*!
                  * \brief transform the linear sum to prediction
                  * \param x linear sum of boosting ensemble
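Why both loss types return predt - label from FirstOrderGradient: predt is the transformed prediction, while the gradients are taken with respect to the linear sum x that DoBoost fits. A short derivation, using the 1/2 convention for the square loss that the gradient code implies (SquareLoss itself omits the factor):

    \ell_{sq}(y,x) = \tfrac{1}{2}(x-y)^2, \qquad
    \frac{\partial \ell_{sq}}{\partial x} = x - y, \qquad
    \frac{\partial^2 \ell_{sq}}{\partial x^2} = 1

    p = \sigma(x) = \frac{1}{1+e^{-x}}, \qquad
    \ell_{log}(y,x) = -\bigl[\, y\log p + (1-y)\log(1-p) \,\bigr], \qquad
    \frac{\partial \ell_{log}}{\partial x} = p - y, \qquad
    \frac{\partial^2 \ell_{log}}{\partial x^2} = p\,(1-p)

Since PredTransform is the identity for LINEAR_SQUARE and the sigmoid for LOGISTIC_NEGLOGLIKELIHOOD, predt equals x in the first case and p in the second, so both first-order branches reduce to predt - label, and the second-order branches match 1.0f and predt * ( 1 - predt ) in the code.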
@@ -135,11 +254,13 @@
                  */
                 inline float PredTransform( float x ){
                     switch( loss_type ){
-                    case 0: return x;
-                    case 1: return 1.0f/(1.0f + expf(-x));
+                    case LINEAR_SQUARE: return x;
+                    case LOGISTIC_NEGLOGLIKELIHOOD: return 1.0f/(1.0f + expf(-x));
                     default: utils::Error("unknown loss_type"); return 0.0f;
                     }
                 }
             };
         private:
             booster::GBMBaseModel base_model;
@@ -147,8 +268,9 @@
             const DMatrix *train_;
             std::vector<const DMatrix *> evals_;
             std::vector<std::string> evname_;
+            bool silent;
         };
-    };
+    }
 };
 #endif

View File

@@ -1,4 +1,4 @@
-#include"xgbooost_reg_train.h"
+#include"xgboost_reg_train.h"
 #include"xgboost_reg_test.h"
 using namespace xgboost::regression;

View File

@@ -12,8 +12,20 @@
 using namespace xgboost::utils;
 namespace xgboost{
     namespace regression{
+        /*!
+         * \brief wrapper around the testing process of the gradient
+         *        boosting regression model, given the configuration
+         * \author Kailong Chen: chenkl198812@gmail.com
+         */
         class RegBoostTest{
         public:
+            /*!
+             * \brief start the testing process of the gradient boosting regression
+             *        model given the configuration, and save the prediction
+             *        results to the specified paths
+             * \param config_path the location of the configuration
+             * \param silent whether to print feedback messages
+             */
             void test(char* config_path,bool silent = false){
                 reg_boost_learner = new xgboost::regression::RegBoostLearner(silent);
                 ConfigIterator config_itr(config_path);

View File

@@ -10,10 +10,23 @@
 #include"../utils/xgboost_string.h"
 using namespace xgboost::utils;
 namespace xgboost{
     namespace regression{
+        /*!
+         * \brief wrapper around the training process of the gradient
+         *        boosting regression model, given the configuration
+         * \author Kailong Chen: chenkl198812@gmail.com
+         */
         class RegBoostTrain{
         public:
+            /*!
+             * \brief start the training process of the gradient boosting regression
+             *        model given the configuration, and save the models
+             *        to the specified model directory
+             * \param config_path the location of the configuration
+             * \param silent whether to print feedback messages
+             */
             void train(char* config_path,bool silent = false){
                 reg_boost_learner = new xgboost::regression::RegBoostLearner(silent);
                 ConfigIterator config_itr(config_path);
@@ -39,28 +52,31 @@ namespace xgboost{
                 //begin training
                 reg_boost_learner->InitTrainer();
-                char model_path[256];
+                char suffix[256];
                 for(int i = 1; i <= train_param.boost_iterations; i++){
                     reg_boost_learner->UpdateOneIter(i);
+                    //save the models during the iterations
                     if(train_param.save_period != 0 && i % train_param.save_period == 0){
-                        sscanf(model_path,"%s/%d.model",train_param.model_dir_path,i);
-                        FILE* file = fopen(model_path,"w");
-                        FileStream fin(file);
-                        reg_boost_learner->SaveModel(fin);
-                        fin.Close();
+                        sprintf(suffix,"%d.model",i);
+                        SaveModel(suffix);
                     }
                 }
-                //save the final model
-                sscanf(model_path,"%s/final.model",train_param.model_dir_path);
-                FILE* file = fopen(model_path,"w");
-                FileStream fin(file);
-                reg_boost_learner->SaveModel(fin);
-                fin.Close();
+                //save the final round model
+                SaveModel("final.model");
             }
         private:
+            /*! \brief save the model in the model directory with the specified suffix */
+            void SaveModel(const char* suffix){
+                char model_path[256];
+                //build the output path from the model directory and the suffix
+                sprintf(model_path,"%s/%s",train_param.model_dir_path,suffix);
+                FILE* file = fopen(model_path,"w");
+                FileStream fout(file);
+                reg_boost_learner->SaveModel(fout);
+                fout.Close();
+            }
             struct TrainParam{
                 /* \brief upperbound of the number of boosters */
                 int boost_iterations;
@@ -99,7 +115,10 @@
                 }
             };
+            /*! \brief the parameters of the training process */
             TrainParam train_param;
+            /*! \brief the gradient boosting regression tree model */
             xgboost::regression::RegBoostLearner* reg_boost_learner;
         };
     }
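One caveat on the SaveModel helper above: sprintf into a fixed 256-byte model_path overflows silently if model_dir_path is long. A bounds-checked variant is a small change; this is a sketch, not part of the commit, and it reuses utils::Error as seen elsewhere in the diff:

    // sketch: bounds-checked path construction for SaveModel
    char model_path[256];
    int n = snprintf( model_path, sizeof(model_path), "%s/%s",
                      train_param.model_dir_path, suffix );
    if( n < 0 || n >= (int)sizeof(model_path) ){
        utils::Error( "model path too long" );
    }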

View File

@@ -30,6 +30,13 @@ namespace xgboost{
         public:
             /*! \brief default constructor */
             DMatrix( void ){}
+            /*! \brief get the number of instances */
+            inline int size() const{
+                return labels.size();
+            }
             /*!
              * \brief load from text file
              * \param fname name of text data

View File

@@ -10,6 +10,7 @@
 #include <cstring>
 #include <string>
 #include "xgboost_utils.h"
+#include <vector>
 namespace xgboost{
     namespace utils{