From 6648a15817902a39994dbd68e3d69c90a4a6e2aa Mon Sep 17 00:00:00 2001 From: kalenhaha Date: Sun, 11 May 2014 14:25:30 +0800 Subject: [PATCH] small change --- demo/rank/runexp.sh | 16 ---------------- demo/rank/toy.conf | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 16 deletions(-) delete mode 100644 demo/rank/runexp.sh create mode 100644 demo/rank/toy.conf diff --git a/demo/rank/runexp.sh b/demo/rank/runexp.sh deleted file mode 100644 index 900a80cce..000000000 --- a/demo/rank/runexp.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -# map the data to features. For convenience we only use 7 original attributes and encode them as features in a trivial way -python mapfeat.py -# split train and test -python mknfold.py machine.txt 1 -# training and output the models -../../xgboost machine.conf -# output predictions of test data -../../xgboost machine.conf task=pred model_in=0002.model -# print the boosters of 0002.model in dump.raw.txt -../../xgboost machine.conf task=dump model_in=0002.model name_dump=dump.raw.txt -# print the boosters of 0002.model in dump.nice.txt with feature map -../../xgboost machine.conf task=dump model_in=0002.model fmap=featmap.txt name_dump=dump.nice.txt - -# cat the result -cat dump.nice.txt diff --git a/demo/rank/toy.conf b/demo/rank/toy.conf new file mode 100644 index 000000000..3379826d6 --- /dev/null +++ b/demo/rank/toy.conf @@ -0,0 +1,35 @@ +# General Parameters, see comment for each definition +# choose the tree booster, 0: tree, 1: linear +booster_type = 0 +# this is the only difference with classification, use 0: linear regression +# when labels are in [0,1] we can also use 1: logistic regression +loss_type = 0 + +objective="rank:pairwise" +#objective="rank:softmax" +#objective="lambdarank:map" +#objective="lambdarank:ndcg" + +# Tree Booster Parameters +# step size shrinkage +bst:eta = 1.0 +# minimum loss reduction required to make a further partition +bst:gamma = 1.0 +# minimum sum of instance 
weight(hessian) needed in a child +bst:min_child_weight = 1 +# maximum depth of a tree +bst:max_depth = 3 + +# Task parameters +# the number of rounds to do boosting +num_round = 2 +# 0 means do not save any model except the final round model +save_period = 0 +# The path of training data +data = "toy.train" +# The path of validation data, used to monitor training process, here [test] sets name of the validation set +eval[test] = "toy.eval" +# The path of test data +test:data = "toy.test" + 