Merge branch 'dev' of https://github.com/tqchen/xgboost into dev
Conflicts:
	demo/rank/mq2008.conf
	demo/rank/runexp.sh
	regrank/xgboost_regrank_obj.h
@@ -2,7 +2,7 @@
# choose the tree booster, 0: tree, 1: linear
booster_type = 0
# choose logistic regression loss function for binary classification
-loss_type = 2
+objective = binary:logistic

# Tree Booster Parameters
# step size shrinkage
@@ -17,4 +17,4 @@ make

+speedtest.py compares xgboost's speed on this dataset with sklearn.GBM

@@ -31,8 +31,9 @@ xgmat = xgb.DMatrix( data, label=label, missing = -999.0, weight=weight )

# setup parameters for xgboost
param = {}
-# use logistic regression loss
-param['loss_type'] = 3
+# use logistic regression loss, use raw prediction before logistic transformation
+# since we only need the rank
+param['objective'] = 'binary:logitraw'
# scale weight of positive examples
param['scale_pos_weight'] = sum_wneg/sum_wpos
param['bst:eta'] = 0.1

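Aside (not part of the diff): the hunk above swaps the old numeric loss_type code for a named objective string. A minimal sketch of the new parameter style, with hypothetical placeholder arrays standing in for the Higgs csv the script actually loads:

    import numpy as np
    import xgboost as xgb

    # hypothetical stand-in for the features/labels loaded by higgs-numpy.py
    data = np.random.rand(100, 30)
    label = np.random.randint(2, size=100)

    xgmat = xgb.DMatrix(data, label=label)
    # named objective replaces the old numeric loss_type code
    param = {'objective': 'binary:logitraw', 'bst:eta': 0.1, 'silent': 1}
    bst = xgb.train(list(param.items()), xgmat, 10, [(xgmat, 'train')])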
@@ -1,4 +1,4 @@
#!/bin/bash

-./higgs-numpy.py
-./higgs-pred.py
+python higgs-numpy.py
+python higgs-pred.py
demo/kaggle-higgs/speedtest.py (new executable file, 66 lines)
@@ -0,0 +1,66 @@
#!/usr/bin/python
# this is the example script to use xgboost to train
import sys
import numpy as np
# add path of xgboost python module
sys.path.append('../../python/')
import xgboost as xgb
from sklearn.ensemble import GradientBoostingClassifier
import time
test_size = 550000

# path to where the data lies
dpath = 'data'

# load in training data, directly use numpy
dtrain = np.loadtxt( dpath+'/training.csv', delimiter=',', skiprows=1, converters={32: lambda x:int(x=='s') } )
print 'finish loading from csv '

label = dtrain[:,32]
data = dtrain[:,1:31]
# rescale weight to make it same as test set
weight = dtrain[:,31] * float(test_size) / len(label)

sum_wpos = sum( weight[i] for i in xrange(len(label)) if label[i] == 1.0 )
sum_wneg = sum( weight[i] for i in xrange(len(label)) if label[i] == 0.0 )

# print weight statistics
print 'weight statistics: wpos=%g, wneg=%g, ratio=%g' % ( sum_wpos, sum_wneg, sum_wneg/sum_wpos )

# construct xgboost.DMatrix from numpy array, treat -999.0 as missing value
xgmat = xgb.DMatrix( data, label=label, missing = -999.0, weight=weight )

# setup parameters for xgboost
param = {}
# use logistic regression loss
param['objective'] = 'binary:logitraw'
# scale weight of positive examples
param['scale_pos_weight'] = sum_wneg/sum_wpos
param['bst:eta'] = 0.1
param['bst:max_depth'] = 6
param['eval_metric'] = 'auc'
param['silent'] = 1
param['nthread'] = 4

plst = param.items()+[('eval_metric', 'ams@0.15')]

watchlist = [ (xgmat,'train') ]
# boost 10 trees
num_round = 10
print 'loading data end, start to boost trees'
print "training GBM from sklearn"
tmp = time.time()
gbm = GradientBoostingClassifier(n_estimators=num_round, max_depth=6, verbose=2)
gbm.fit(data, label)
print "sklearn.GBM costs: %s seconds" % str(time.time() - tmp)
#raw_input()
print "training xgboost"
threads = [1, 2, 4, 16]
for i in threads:
    param['nthread'] = i
    tmp = time.time()
    plst = param.items()+[('eval_metric', 'ams@0.15')]
    bst = xgb.train( plst, xgmat, num_round, watchlist )
    print "XGBoost with %d thread costs: %s seconds" % (i, str(time.time() - tmp))

print 'finish training'
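Aside (not part of the diff): speedtest.py times each run with the bare time.time() pattern. A minimal sketch of the same pattern factored into a reusable helper, in the script's own Python 2 style; the timed function is hypothetical, not something this commit adds:

    import time

    def timed(label, fn, *args):
        # wall-clock timing, mirroring the time.time() pattern used above
        start = time.time()
        result = fn(*args)
        print '%s costs: %s seconds' % (label, str(time.time() - start))
        return result

    # e.g. timed('sklearn.GBM', gbm.fit, data, label)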
@@ -1 +1,13 @@
-The dataset for ranking demo is from LETOR04 MQ2008 fold1,http://research.microsoft.com/en-us/um/beijing/projects/letor/letor4download.aspx
+Instructions:
+The dataset for ranking demo is from LETOR04 MQ2008 fold1,
+you can use the following commands to run the example:
+
+Get the data: ./wgetdata.sh
+Run the example: ./runexp.sh
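Aside (not part of the diff): the MQ2008 files fetched by ./wgetdata.sh are in svmlight/LETOR text format, with a qid field grouping documents by query. A minimal sketch of inspecting one fold from Python, assuming scikit-learn is available and that wgetdata.sh has placed train.txt in the working directory:

    from sklearn.datasets import load_svmlight_file

    # query_id=True also returns the qid column used for per-query grouping
    X, y, qid = load_svmlight_file('train.txt', query_id=True)
    print 'loaded %d documents across %d queries' % (X.shape[0], len(set(qid)))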
@@ -2,10 +2,8 @@
# choose the tree booster, 0: tree, 1: linear
booster_type = 0

# so far, we have pairwise rank
-#objective="rank:softmax"
-#objective="rank:map"
-#objective="rank:ndcg"
+objective="rank:pairwise"

# Tree Booster Parameters
# step size shrinkage
@@ -16,8 +14,7 @@ bst:gamma = 1.0
bst:min_child_weight = 0.1
# maximum depth of a tree
bst:max_depth = 6
-eval_metric = "ndcg"
+eval_metric = "map"

# Task parameters
# the number of round to do boosting
num_round = 4
demo/rank/runexp.sh (mode changed: normal file → executable file)
demo/rank/wgetdata.sh (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash
wget http://research.microsoft.com/en-us/um/beijing/projects/letor/LETOR4.0/Data/MQ2008.rar
unrar x MQ2008.rar
mv -f MQ2008/Fold1/*.txt .
@@ -1,9 +1,9 @@
# General Parameters, see comment for each definition
# choose the tree booster, 0: tree, 1: linear
booster_type = 0
-# this is the only difference with classification, use 0: linear regression
-# when labels are in [0,1] we can also use 1: logistic regression
-loss_type = 0
+# this is the only difference with classification, use reg:linear to do linear regression
+# when labels are in [0,1] we can also use reg:logistic
+objective = reg:linear

# Tree Booster Parameters
# step size shrinkage