Compare commits


261 Commits
v0.60...v0.21

Author SHA1 Message Date
tqchen
76c44072d1 fix sometimes python cachelist problem 2014-05-20 15:42:19 -07:00
tqchen
ccde443590 more clean demo 2014-05-20 08:33:35 -07:00
tqchen
cf710bfa59 fix bug in classification, scale_pos_weight initialization 2014-05-20 08:30:19 -07:00
tqchen
be2c3d299e chg 2014-05-19 10:02:01 -07:00
Tianqi Chen
2eba59000a Merge pull request #7 from jrings/master
Compatibility with both Python 2(.7) and 3
2014-05-19 09:48:34 -07:00
Joerg Rings
a958fe8d52 Compatibility with both Python 2(.7) and 3 2014-05-19 11:23:53 -05:00
Tianqi Chen
96667b8bad Merge pull request #6 from tqchen/dev
Fix the bug in MAC
2014-05-17 11:07:42 -07:00
tqchen
95f4052aae add omp flag back 2014-05-17 11:07:12 -07:00
tqchen
e9e3e0281d use back g++ 2014-05-17 11:06:36 -07:00
tqchen
c23d8c8b88 force handle as void_p, seems fix mac problem 2014-05-17 11:03:21 -07:00
Tianqi Chen
e59f4d5a18 Merge pull request #5 from tqchen/dev
add return type for xgboost, don't know if it is mac problem. #4
2014-05-17 09:19:20 -07:00
tqchen
e267f4c5f9 add return type for xgboost, don't know if it is mac problem 2014-05-17 09:13:54 -07:00
Tianqi Chen
505e65ac08 Update README.md 2014-05-16 22:54:24 -07:00
Tianqi Chen
13fc48623e Merge pull request #2 from tqchen/dev
fix loss_type
2014-05-16 21:30:09 -07:00
tqchen
591a43ac0e some cleanup 2014-05-16 21:29:14 -07:00
tqchen
5375ac5c23 fix for loss_type problem in outside reset base 2014-05-16 21:28:03 -07:00
tqchen
6930758294 Merge branch 'master' of ssh://github.com/tqchen/xgboost 2014-05-16 20:58:03 -07:00
tqchen
e09d6ab9de chg 2014-05-16 20:57:54 -07:00
antinucleon
db4a100f6b del 2014-05-17 03:57:38 +00:00
Tianqi Chen
495e37e0dc Merge pull request #1 from tqchen/dev
2.0 version, lots of changes
2014-05-16 20:53:19 -07:00
Tianqi Chen
b56b34944e Update README.md 2014-05-16 20:49:05 -07:00
tqchen
d4530b7a47 Merge branch 'dev' of ssh://github.com/tqchen/xgboost into dev 2014-05-16 20:46:18 -07:00
tqchen
334cf5de9b add ignore 2014-05-16 20:46:08 -07:00
tqchen
004e8d811e final check 2014-05-16 20:44:02 -07:00
Tianqi Chen
4baefd857e Update README.md 2014-05-16 20:41:59 -07:00
Tianqi Chen
b52f01d61d Update README.md 2014-05-16 20:41:43 -07:00
Tianqi Chen
35f9ef684a Update README.md 2014-05-16 20:41:21 -07:00
Tianqi Chen
6f34096613 Update README.md 2014-05-16 20:41:05 -07:00
tqchen
31c5d7843f Merge branch 'dev' of ssh://github.com/tqchen/xgboost into dev 2014-05-16 20:37:55 -07:00
tqchen
f60dbe299e ok 2014-05-16 20:37:45 -07:00
yepyao
a77debc0c5 Merge branch 'dev' of https://github.com/tqchen/xgboost into dev 2014-05-17 11:36:12 +08:00
yepyao
dc2b9c86e6 small change 2014-05-17 11:35:43 +08:00
yepyao
73bc8c0de4 small change 2014-05-17 11:34:24 +08:00
tqchen
ad8eb21fcd Merge branch 'dev' of ssh://github.com/tqchen/xgboost into dev 2014-05-16 20:29:17 -07:00
tqchen
416050d5c0 fix softmax 2014-05-16 20:28:07 -07:00
antinucleon
d5f6fba82d chg 2014-05-16 21:27:37 -06:00
tqchen
23f4c41035 chg 2014-05-16 20:18:34 -07:00
Tianqi Chen
7ea988a76b Update train.py 2014-05-16 20:16:10 -07:00
tqchen
d3c0ed14f3 multi class 2014-05-16 20:12:04 -07:00
antinucleon
2fcd875675 demo 2014-05-16 21:05:11 -06:00
antinucleon
615074efb6 Merge branch 'dev' of github.com:tqchen/xgboost into dev 2014-05-16 21:03:32 -06:00
Tianqi Chen
945b336fc6 Update README.md 2014-05-16 20:00:20 -07:00
antinucleon
8e8b8a8ee3 demo 2014-05-17 02:59:10 +00:00
antinucleon
42267807f5 demo 2014-05-16 20:57:42 -06:00
tqchen
df23464a20 do not need to dump in rank 2014-05-16 19:52:39 -07:00
tqchen
2ea8d9c511 Merge branch 'dev' of ssh://github.com/tqchen/xgboost into dev 2014-05-16 19:51:41 -07:00
tqchen
3206235a5e before commit 2014-05-16 19:51:33 -07:00
yepyao
956fc09da0 small change 2014-05-17 10:50:15 +08:00
yepyao
da482500c7 Merge branch 'dev' of https://github.com/tqchen/xgboost into dev
Conflicts:
	demo/rank/mq2008.conf
	demo/rank/runexp.sh
	regrank/xgboost_regrank_obj.h
2014-05-17 10:40:12 +08:00
yepyao
b19f2bfda8 fix small bug 2014-05-17 10:35:10 +08:00
tqchen
21b21e69de add bing to author list 2014-05-16 19:33:59 -07:00
Tianqi Chen
b90d1dc92b Update demo.py 2014-05-16 19:30:32 -07:00
tqchen
3429ab3447 chgs 2014-05-16 19:24:53 -07:00
tqchen
ebcce4a2bf chg all settings to obj 2014-05-16 19:10:52 -07:00
tqchen
1839e6efe9 pre-release version 2014-05-16 18:49:02 -07:00
tqchen
9bc6e83afe chg scripts 2014-05-16 18:46:43 -07:00
tqchen
fd2774e133 cleanup 2014-05-16 18:40:46 -07:00
tqchen
72d3a6a3cc chg rank demo 2014-05-16 18:38:40 -07:00
tqchen
5febbecd88 Merge branch 'dev' of ssh://github.com/tqchen/xgboost into dev 2014-05-16 18:29:37 -07:00
tqchen
b3c3ecd9c9 chng few things 2014-05-16 18:25:01 -07:00
tqchen
c28a1be34c minor changes 2014-05-16 18:19:57 -07:00
antinucleon
ae70b9b152 new speed test 2014-05-16 18:05:17 -06:00
antinucleon
e0a0343ae6 speedtest 2014-05-16 17:48:03 -06:00
yepyao
0e0d3efd6a use ndcg@all in lambdarank for ndcg 2014-05-16 23:06:24 +08:00
yepyao
a3bd5000ba small change 2014-05-16 21:20:41 +08:00
yepyao
dd71c0e070 Download data set from web site 2014-05-16 21:18:32 +08:00
kalenhaha
d9ea324057 Impement new Lambda rank interface 2014-05-16 20:42:46 +08:00
tqchen
0d29610c40 new lambda rank interface 2014-05-16 00:02:26 -07:00
Bing Xu
0af2c92d3b Update README.md 2014-05-16 01:30:29 -04:00
tqchen
f9cdce077b ok 2014-05-15 21:17:17 -07:00
tqchen
59183b9ed8 a correct version 2014-05-15 21:11:46 -07:00
tqchen
6ff272eec6 fix numpy convert 2014-05-15 20:28:34 -07:00
tqchen
c8073e13e4 ok 2014-05-15 20:05:22 -07:00
tqchen
698fa87bc3 ok 2014-05-15 18:56:28 -07:00
tqchen
8f56671901 bug fix in pairwise rank 2014-05-15 15:37:58 -07:00
tqchen
9ea9a7a01e cleanup code 2014-05-15 15:01:41 -07:00
tqchen
d59940f1d5 add xgcombine_buffer with weights 2014-05-15 14:41:11 -07:00
tqchen
6aa190e10c change data format to include weight in binary file, add get weight to python 2014-05-15 14:37:56 -07:00
tqchen
54c486bcf1 ok 2014-05-15 14:25:44 -07:00
tqchen
88ff293de5 add ams 2014-05-14 23:23:27 -07:00
tqchen
50af92e29e some fix 2014-05-14 16:55:59 -07:00
tqchen
bbe4957cd2 add AMS metric 2014-05-14 11:30:45 -07:00
kalenhaha
789ad18d36 add in grad and hess rescale in lambdarank 2014-05-14 23:13:27 +08:00
kalenhaha
2b34d5a25e small bug in ndcg eval 2014-05-13 14:30:42 +08:00
kalenhaha
bd574e4967 Merge branch 'dev' of https://github.com/tqchen/xgboost into dev 2014-05-12 22:22:32 +08:00
kalenhaha
e8d81c1da5 Add LETOR MQ2008 for rank demo 2014-05-12 22:21:07 +08:00
kalenhaha
c84bbc91d1 remove sampler 2014-05-11 14:31:57 +08:00
kalenhaha
61e3d1562c small change 2014-05-11 14:25:30 +08:00
kalenhaha
97db8c29f2 small change 2014-05-11 14:03:21 +08:00
tqchen
f2552f8ef2 simple chgs 2014-05-09 20:39:15 -07:00
kalenhaha
2563b6d2d6 fix some warnings 2014-05-09 14:14:43 +08:00
kalenhaha
e90ffece67 Merge branch 'dev' of https://github.com/tqchen/xgboost into dev 2014-05-09 14:07:06 +08:00
kalenhaha
85f92681f9 Separating Lambda MAP and Lambda NDCG 2014-05-09 14:05:52 +08:00
tqchen
5e0d52cb8c add python o3 2014-05-08 20:15:23 -07:00
tqchen
c9d156d99e faster convert to numpy array 2014-05-08 19:35:06 -07:00
tqchen
ecf6e8f49f commit the fix 2014-05-08 19:31:32 -07:00
tqchen
93778aa4aa Merge branch 'dev' of ssh://github.com/tqchen/xgboost into dev 2014-05-07 12:00:17 -07:00
tqchen
f8cacc7308 fix omp for bug in obj 2014-05-07 11:52:12 -07:00
kalenhaha
c0e1e9fe7a Merge branch 'dev' of https://github.com/tqchen/xgboost into dev
Conflicts:
	regrank/xgboost_regrank_obj.hpp
2014-05-07 22:15:59 +08:00
tqchen
fa5afe2141 fix 2014-05-06 16:53:37 -07:00
tqchen
f7789ecf14 Merge branch 'dev' of ssh://github.com/tqchen/xgboost into dev 2014-05-06 16:51:18 -07:00
tqchen
a57fbe091a Merge branch 'dev' of ssh://github.com/tqchen/xgboost into dev
Conflicts:
	regrank/xgboost_regrank_data.h
2014-05-06 16:51:11 -07:00
tqchen
9f82b53366 add regrank utils 2014-05-06 16:50:46 -07:00
tqchen
248b2cf74d right group size 2014-05-06 16:49:10 -07:00
tqchen
5fb9376af4 add cutomized training 2014-05-04 13:57:10 -07:00
tqchen
9c2bb12cd1 add cutomized training 2014-05-04 13:55:58 -07:00
tqchen
ebde99bde8 add boost group support to xgboost. now have beta multi-class classification 2014-05-04 12:10:03 -07:00
kalenhaha
ef7be5398d c++11 features removed 2014-05-04 16:58:44 +08:00
kalenhaha
2ef61bf982 c++11 features removed 2014-05-04 16:56:57 +08:00
tqchen
d4d141347a fix 2014-05-04 00:09:16 -07:00
tqchen
e18ba04751 add interact mode 2014-05-03 23:24:22 -07:00
tqchen
3388d1a8b5 add python interface for xgboost 2014-05-03 23:04:02 -07:00
tqchen
65917bb831 finish python lib 2014-05-03 22:18:25 -07:00
tqchen
140499ac9e finish matrix 2014-05-03 17:12:25 -07:00
tqchen
ccd037292d good 2014-05-03 16:15:44 -07:00
tqchen
59939d0b14 ok 2014-05-03 14:24:00 -07:00
tqchen
9a2c00554d important change to regrank interface, need some more test 2014-05-03 14:20:27 -07:00
tqchen
ee30c1728b try python 2014-05-03 10:54:08 -07:00
tqchen
8f75b0ef75 pass test 2014-05-02 18:04:45 -07:00
tqchen
3128e718e2 add new combine tool as promised 2014-05-02 12:55:34 -07:00
tqchen
657c617215 Merge branch 'dev' of ssh://github.com/tqchen/xgboost into dev 2014-05-01 11:01:05 -07:00
tqchen
439d4725a0 cleanup of evaluation metric, move c++11 codes into sample.h for backup, add lambda in a clean way latter 2014-05-01 11:00:50 -07:00
Tianqi Chen
8491bb3651 Update xgboost_omp.h 2014-05-01 10:16:05 -07:00
kalenhaha
cce96e8f41 fix some bugs in linux 2014-05-02 00:16:12 +08:00
kalenhaha
f02dd68713 lambda rank added 2014-05-01 22:17:26 +08:00
tqchen
ec14d32756 add softmax 2014-04-30 22:11:26 -07:00
tqchen
38577d45b0 add pre @ n 2014-04-30 22:00:53 -07:00
tqchen
ab0e7a3ddc use omp parallel sortting 2014-04-30 09:48:41 -07:00
tqchen
bbd952a021 add rank 2014-04-30 09:32:42 -07:00
tqchen
77e3051b1d add pairwise rank first version 2014-04-29 21:12:30 -07:00
tqchen
924e164c14 new AUC code 2014-04-29 17:26:58 -07:00
tqchen
25ff5ef169 new AUC evaluator, now compatible with weighted loss 2014-04-29 17:03:34 -07:00
tqchen
3ea29eccae make regression module compatible with rank loss, now support weighted loss 2014-04-29 16:16:02 -07:00
tqchen
0f8a3d21a5 chg fmap format 2014-04-29 09:59:10 -07:00
tqchen
7487c2f668 add auc evaluation metric 2014-04-24 22:20:40 -07:00
tqchen
88787b8573 remove unwanted private field 2014-04-21 10:42:19 -07:00
tqchen
17559a90f9 expose fmatrixs 2014-04-18 18:18:19 -07:00
tqchen
24696071a8 Merge branch 'master' of ssh://github.com/tqchen/xgboost
Conflicts:
	regression/xgboost_reg_data.h
2014-04-18 17:46:44 -07:00
tqchen
cca67af8d7 simplify data 2014-04-18 17:43:44 -07:00
kalenhaha
2beb92745f Lambda rank added 2014-04-11 10:50:13 +08:00
kalenhaha
d6b582dc70 Merge branch 'master' of https://github.com/tqchen/xgboost 2014-04-11 10:48:45 +08:00
kalenhaha
218320daf2 Lambda rank added 2014-04-10 22:11:15 +08:00
kalenhaha
f83942d3e9 lambda rank added 2014-04-10 22:09:19 +08:00
Tianqi Chen
60d79eb2e7 Update xgboost_utils.h 2014-04-07 16:25:21 -07:00
kalenhaha
1136c71e64 rank pass toy 2014-04-07 23:25:35 +08:00
tqchen
1bbbb0cf7f add deleted main back 2014-04-06 09:32:27 -07:00
kalenhaha
1756fde0c6 small fix 2014-04-06 22:54:41 +08:00
kalenhaha
7f30fc1468 compiled 2014-04-06 22:51:52 +08:00
tqchen
d5607fbb55 add dev 2014-04-04 10:42:13 -07:00
kalenhaha
05d984d83d pairwise ranking implemented 2014-04-05 00:14:55 +08:00
kalenhaha
1110ae7421 Adding ranking task 2014-04-03 16:22:55 +08:00
tqchen
2aa1031d24 add dump nice to regression demo 2014-03-26 16:47:01 -07:00
tqchen
1440dc9c8f update regression 2014-03-26 16:25:44 -07:00
kalenhaha
27bd5496a8 small fix 2014-03-27 00:08:47 +08:00
kalenhaha
81b32525e0 Merge branch 'master' of https://github.com/tqchen/xgboost 2014-03-26 23:50:56 +08:00
tqchen
6fa0948461 Merge branch 'master' of ssh://github.com/tqchen/xgboost 2014-03-25 17:18:27 -07:00
tqchen
61123f86aa small fix 2014-03-25 17:17:00 -07:00
Tianqi Chen
110b97fea2 Update README.md 2014-03-26 08:01:47 +08:00
Tianqi Chen
b2eb4e956b Update README.md 2014-03-26 08:01:24 +08:00
Tianqi Chen
56ae0e32e3 Update README 2014-03-26 07:21:15 +08:00
kalenhaha
e350c38483 change the regression demo data set 2014-03-24 23:23:11 +08:00
tqchen
e59ed018e6 fix test to pred 2014-03-24 00:31:53 -07:00
kalenhaha
3123d11655 remove test directory 2014-03-23 00:05:46 +08:00
kalenhaha
ca74cba9ec adding regression demo 2014-03-22 21:52:29 +08:00
kalenhaha
a84d4f3e68 Merge branch 'master' of https://github.com/tqchen/xgboost 2014-03-22 21:50:31 +08:00
kalenhaha
76cd1561a0 separate binary classification and regression demo 2014-03-22 21:48:27 +08:00
Tianqi Chen
5b4f77488c Update README.md 2014-03-20 23:12:41 -07:00
Tianqi Chen
b0676fc682 Update README.md 2014-03-20 23:12:16 -07:00
tqchen
97418b113e add batch running 2014-03-20 16:27:24 -07:00
tqchen
d56394d2ef add feature constraint 2014-03-19 10:47:56 -07:00
tqchen
6a91438634 fixed remove bug 2014-03-13 13:42:40 -07:00
tqchen
da3b3c8136 neglok 2014-03-12 20:28:21 -07:00
tqchen
fcf06a7164 support int type 2014-03-12 17:58:14 -07:00
tqchen
8f9efa2725 more compact 2014-03-11 13:07:20 -07:00
tqchen
6e48a938c6 add accuracy 2014-03-11 13:06:22 -07:00
tqchen
19b28b978d fix delete 2014-03-11 12:40:51 -07:00
tqchen
8f16ef8e75 add remove tree 2014-03-11 11:25:50 -07:00
tqchen
d2377b26bd add name dumpath 2014-03-06 11:23:51 -08:00
tqchen
70f3f31206 add add and remove 2014-03-05 16:39:07 -08:00
tqchen
f62c5dc3c1 try interact mode 2014-03-05 15:28:53 -08:00
tqchen
2d67377a96 add a test folder 2014-03-05 15:20:11 -08:00
tqchen
d982be9dca complete row maker 2014-03-05 14:38:13 -08:00
tqchen
98114cabce add row tree maker, to be finished 2014-03-05 11:00:03 -08:00
tqchen
2910bdedf4 split new base treemaker, not very good abstraction, but ok 2014-03-05 10:20:36 -08:00
tqchen
128e94be1a fix reg model_out 2014-03-05 09:34:37 -08:00
tqchen
eade6ddf7c reupdate data 2014-03-04 22:47:39 -08:00
tqchen
9b45210fa7 fix text 2014-03-04 16:22:24 -08:00
tqchen
ddd61b43be fix fmatrix 2014-03-04 11:45:22 -08:00
tqchen
98e851d80f add simple text loader 2014-03-04 11:33:33 -08:00
tqchen
3d223232e3 ok fix 2014-03-03 22:20:45 -08:00
tqchen
b689b4525a big change, change interface to template, everything still OK 2014-03-03 22:16:37 -08:00
tqchen
a3ca03cfc1 backup makefile 2014-03-03 15:21:50 -08:00
tqchen
2aa1978cb6 compatibility issue with openmp 2014-03-03 15:11:41 -08:00
tqchen
e3b7abfb47 ok 2014-03-03 12:26:40 -08:00
tqchen
2adf905dcf maptree is not needed 2014-03-03 11:06:24 -08:00
tqchen
cfbeeef9c1 fix fmap 2014-03-03 11:05:10 -08:00
tqchen
8ae1d37828 auto do reboost 2014-03-02 16:42:22 -08:00
tqchen
0fc64d1c2a chg file name of reg 2014-03-02 16:39:00 -08:00
tqchen
1eca127f69 chg file name of reg 2014-03-02 16:38:59 -08:00
tqchen
c7b29774c2 change test task to pred 2014-03-02 16:20:42 -08:00
tqchen
a8f69878eb make style more like Google style 2014-03-02 13:30:24 -08:00
tqchen
51b6d86c17 add smart decision of nfeatures 2014-03-01 21:49:29 -08:00
tqchen
082a57ba0b fix type 2014-03-01 21:29:07 -08:00
tqchen
f3c98d0c4b add smart load 2014-03-01 21:15:54 -08:00
tqchen
1748e4517a full omp support for regression 2014-03-01 20:56:25 -08:00
tqchen
328e41244c fix col maker, make it default 2014-03-01 15:16:30 -08:00
tqchen
155b593984 add col maker 2014-03-01 14:00:09 -08:00
Tianqi Chen
76cbc754c9 Update README.md 2014-02-28 20:13:01 -08:00
Tianqi Chen
97ca3bf739 Update README.md 2014-02-28 20:10:57 -08:00
tqchen
752f336cb3 chg license, README 2014-02-28 20:09:40 -08:00
tqchen
fffad41e53 start add coltree maker 2014-02-28 11:44:50 -08:00
tqchen
10382f6365 add dump2json 2014-02-26 18:54:12 -08:00
tqchen
7b2fe1bf5d add pathdump 2014-02-26 17:08:23 -08:00
tqchen
88c982012a modify tree so that training is standalone 2014-02-26 16:03:00 -08:00
tqchen
b6f98bf37a modify tree so that training is standalone 2014-02-26 16:02:58 -08:00
tqchen
3a4d0f28d9 change input data structure 2014-02-26 11:51:58 -08:00
tqchen
e58daa6d52 fix mushroom 2014-02-24 23:19:58 -08:00
tqchen
a5b37e0395 finish mushroom 2014-02-24 23:06:57 -08:00
tqchen
e75488b578 add mushroom classification 2014-02-24 22:25:43 -08:00
tqchen
1160a38323 add mushroom 2014-02-24 22:19:40 -08:00
tqchen
4401d549f1 pass simple test 2014-02-20 22:28:05 -08:00
tqchen
fd120a8f5c changes to reg booster 2014-02-20 22:08:31 -08:00
kalenhaha
00add6dd1d tab eliminated 2014-02-19 13:25:01 +08:00
kalenhaha
cd009f2541 add toy data 2014-02-19 13:01:15 +08:00
kalenhaha
582be45810 add in reg.conf for configuration demo 2014-02-18 16:49:23 +08:00
kalenhaha
3c93216850 Merge branch 'master' of https://github.com/tqchen/xgboost 2014-02-16 14:34:35 +08:00
kalenhaha
787f76e952 fix some bugs 2014-02-16 11:44:03 +08:00
tqchen
91c170e463 fix nboosters 2014-02-15 19:42:02 -08:00
tqchen
0c44347e82 update license 2014-02-15 17:45:48 -08:00
tqchen
603704287d Merge branch 'master' of ssh://github.com/tqchen/xgboost 2014-02-15 17:42:31 -08:00
tqchen
c933625f94 update license 2014-02-15 17:42:23 -08:00
tqchen
cebf39ea47 Update README.md 2014-02-15 11:22:50 -08:00
kalenhaha
f22139c659 Comments added 2014-02-13 13:04:55 +08:00
kalenhaha
06ce8c9f3a GBRT Train and Test Phase added 2014-02-12 23:30:32 +08:00
tqchen
98a60b3610 Update README.md 2014-02-11 20:38:06 -08:00
tqchen
2dc6c9c683 chg fmt to libsvm 2014-02-10 21:41:43 -08:00
tqchen
3e53fcf465 cleanup reg 2014-02-10 21:09:09 -08:00
tqchen
cb0fa75252 add regression data 2014-02-10 20:32:23 -08:00
kalenhaha
51a63d80d0 Merge branch 'master' of https://github.com/tqchen/xgboost 2014-02-11 11:19:27 +08:00
kalenhaha
1e356c5bd2 gbrt modified 2014-02-11 11:07:00 +08:00
kalenhaha
c5ada79be5 gbrt implemented 2014-02-10 23:40:38 +08:00
tqchen
dd924becd8 Update README.md 2014-02-08 19:02:33 -08:00
tqchen
7fa301a8ce Update README.md 2014-02-08 13:01:10 -08:00
tqchen
3d1e0badd3 Update README.md 2014-02-08 13:00:49 -08:00
tqchen
7e605306ad Update README.md 2014-02-08 12:50:24 -08:00
tqchen
5e5acdc121 finish readme 2014-02-08 11:47:37 -08:00
tqchen
7302a4e1b5 add linear booster 2014-02-08 11:24:35 -08:00
tqchen
21dd4b5904 add ok 2014-02-07 22:51:16 -08:00
tqchen
61e5410789 chg makefile 2014-02-07 22:43:13 -08:00
tqchen
0febb1a443 adapt tree booster 2014-02-07 22:41:32 -08:00
tqchen
36a04f17df adapt svdfeature tree 2014-02-07 22:38:26 -08:00
tqchen
3dd477c4b2 add detailed comment about gbmcore 2014-02-07 20:30:39 -08:00
tqchen
779d6a34de add empty folder for regression. TODO 2014-02-07 20:20:09 -08:00
tqchen
4535ab7e5c move core code to booster 2014-02-07 20:13:27 -08:00
tqchen
75c36a0667 add base code 2014-02-07 18:40:53 -08:00
tqchen
790c76e814 sync everything 2014-02-06 21:28:47 -08:00
tqchen
a81ea03022 add config 2014-02-06 21:26:27 -08:00
tqchen
a198759df6 update this folder 2014-02-06 16:06:59 -08:00
tqchen
a607444038 update this folder 2014-02-06 16:06:18 -08:00
tqchen
ee6a0c7f4a initial cleanup of interface 2014-02-06 16:03:04 -08:00
tqchen
57fef8bc54 init commit 2014-02-06 15:50:50 -08:00
419 changed files with 15370 additions and 53320 deletions

.gitignore (67 changed lines)

@@ -2,80 +2,25 @@
*.slo
*.lo
*.o
*.page
# Compiled Dynamic libraries
*.so
*.dylib
*.page
# Compiled Static libraries
*.lai
*.la
*.a
*~
*.Rcheck
*.rds
*.tar.gz
*txt*
*conf
*buffer
*model
xgboost
*pyc
*.train
*.test
*.tar
*train
*test
*group
*rar
*vali
*sdf
Release
*exe*
*exp
ipch
*.filters
*.user
*log
Debug
*suo
.Rhistory
*.dll
*i386
*x64
*dump
*save
*csv
.Rproj.user
*.cpage.col
*.cpage
*.Rproj
./xgboost
./xgboost.mpi
./xgboost.mock
#.Rbuildignore
R-package.Rproj
*.cache*
#java
java/xgboost4j/target
java/xgboost4j/tmp
java/xgboost4j-demo/target
java/xgboost4j-demo/data/
java/xgboost4j-demo/tmp/
java/xgboost4j-demo/model/
nb-configuration*
# Eclipse
.project
.cproject
.pydevproject
.settings/
build
config.mk
xgboost
*.data
build_plugin
dmlc-core
.idea
recommonmark/
tags
*.iml
*.class
target
*.swp
*data

.gitmodules (6 changed lines)

@@ -1,6 +0,0 @@
[submodule "dmlc-core"]
path = dmlc-core
url = https://github.com/dmlc/dmlc-core
[submodule "rabit"]
path = rabit
url = https://github.com/dmlc/rabit


@@ -1,74 +0,0 @@
# disable sudo for container build.
sudo: false
# Enabling test on Linux and OS X
os:
- linux
- osx
# Use Build Matrix to do lint and build separately
env:
matrix:
# code lint
- TASK=lint
# r package test
- TASK=r_test
# python package test
- TASK=python_test
- TASK=python_lightweight_test
# java package test
- TASK=java_test
# cmake test
- TASK=cmake_test
os:
- linux
- osx
matrix:
exclude:
- os: osx
env: TASK=lint
- os: linux
env: TASK=r_test
- os: osx
env: TASK=java_test
- os: osx
env: TASK=python_lightweight_test
# dependent apt packages
addons:
apt:
packages:
- doxygen
- wget
- libcurl4-openssl-dev
- unzip
- graphviz
before_install:
- source dmlc-core/scripts/travis/travis_setup_env.sh
- export PYTHONPATH=${PYTHONPATH}:${PWD}/python-package
- echo "MAVEN_OPTS='-Xmx2048m -XX:MaxPermSize=1024m -XX:ReservedCodeCacheSize=512m'" > ~/.mavenrc
install:
- source tests/travis/setup.sh
script:
- tests/travis/run_test.sh
cache:
directories:
- ${HOME}/.cache/usr
- ${HOME}/.cache/pip
before_cache:
- dmlc-core/scripts/travis/travis_before_cache.sh
after_failure:
- tests/travis/travis_after_failure.sh
notifications:
email:
on_success: change
on_failure: always


@@ -1,79 +0,0 @@
cmake_minimum_required (VERSION 2.6)
project (xgboost)
find_package(OpenMP)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS} -fPIC")
# Make sure we are using C++11
# Visual Studio 12.0 and newer supports enough c++11 to make this work
if(MSVC AND MSVC_VERSION LESS 1800)
message(STATUS "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. Please use a different C++ compiler.")
else()
# GCC 4.6 with c++0x supports enough to make this work
include(CheckCXXCompilerFlag)
CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11)
CHECK_CXX_COMPILER_FLAG("-std=c++0x" COMPILER_SUPPORTS_CXX0X)
if(COMPILER_SUPPORTS_CXX11)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
elseif(COMPILER_SUPPORTS_CXX0X)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x")
else()
message(STATUS "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. Please use a different C++ compiler.")
endif()
endif()
#Make sure we are using the static runtime
if(MSVC)
set(variables
CMAKE_C_FLAGS_DEBUG
CMAKE_C_FLAGS_MINSIZEREL
CMAKE_C_FLAGS_RELEASE
CMAKE_C_FLAGS_RELWITHDEBINFO
CMAKE_CXX_FLAGS_DEBUG
CMAKE_CXX_FLAGS_MINSIZEREL
CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_RELWITHDEBINFO
)
foreach(variable ${variables})
if(${variable} MATCHES "/MD")
string(REGEX REPLACE "/MD" "/MT" ${variable} "${${variable}}")
endif()
endforeach()
endif()
include_directories (
${PROJECT_SOURCE_DIR}/include
${PROJECT_SOURCE_DIR}/dmlc-core/include
${PROJECT_SOURCE_DIR}/rabit/include
)
file(GLOB SOURCES
src/c_api/*.cc
src/common/*.cc
src/data/*.cc
src/gbm/*.cc
src/metric/*.cc
src/objective/*.cc
src/tree/*.cc
src/*.cc
)
set(RABIT_SOURCES
rabit/src/allreduce_base.cc
rabit/src/allreduce_robust.cc
rabit/src/engine.cc
rabit/src/c_api.cc
)
add_subdirectory(dmlc-core)
add_library(rabit STATIC ${RABIT_SOURCES})
add_executable(xgboost ${SOURCES})
add_library(libxgboost SHARED ${SOURCES})
target_link_libraries(xgboost dmlccore rabit)
target_link_libraries(libxgboost dmlccore rabit)


@@ -1,62 +0,0 @@
Contributors of DMLC/XGBoost
============================
XGBoost has been developed and used by a group of active community members. Everyone is more than welcome to contribute; it is a great way to make the project better and more accessible to more users.
Committers
----------
Committers are people who have made substantial contributions to the project and have been granted write access to the project.
* [Tianqi Chen](https://github.com/tqchen), University of Washington
- Tianqi is a PhD student working on large-scale machine learning; he is the creator of the project.
* [Tong He](https://github.com/hetong007), Simon Fraser University
- Tong is a master's student working on data mining; he is the maintainer of the xgboost R package.
* [Bing Xu](https://github.com/antinucleon)
- Bing is the original creator of xgboost python package and currently the maintainer of [XGBoost.jl](https://github.com/antinucleon/XGBoost.jl).
* [Michael Benesty](https://github.com/pommedeterresautee)
- Michael is a lawyer and data scientist in France; he is the creator of the xgboost interactive analysis module in R.
* [Yuan Tang](https://github.com/terrytangyuan)
- Yuan is a data scientist in Chicago, US. He has contributed mostly to the R and Python packages.
Become a Committer
------------------
XGBoost is an open-source project and we are actively looking for new committers who are willing to help maintain and lead the project.
Committers come from contributors who:
* Made substantial contributions to the project.
* Are willing to spend time maintaining and leading the project.
New committers will be proposed by current committer members, with support from more than two current committers.
List of Contributors
--------------------
* [Full List of Contributors](https://github.com/dmlc/xgboost/graphs/contributors)
- To contributors: please add your name to the list when you submit a patch to the project:)
* [Kailong Chen](https://github.com/kalenhaha)
- Kailong is an early contributor to xgboost; he is the creator of the ranking objectives in xgboost.
* [Skipper Seabold](https://github.com/jseabold)
- Skipper is the major contributor to the scikit-learn module of xgboost.
* [Zygmunt Zając](https://github.com/zygmuntz)
- Zygmunt is the master behind the early stopping feature frequently used by kagglers.
* [Ajinkya Kale](https://github.com/ajkl)
* [Boliang Chen](https://github.com/cblsjtu)
* [Vadim Khotilovich](https://github.com/khotilov)
* [Yangqing Men](https://github.com/yanqingmen)
- Yangqing is the creator of the xgboost Java package.
* [Engpeng Yao](https://github.com/yepyao)
* [Giulio](https://github.com/giuliohome)
- Giulio is the creator of the Windows project of xgboost.
* [Jamie Hall](https://github.com/nerdcha)
- Jamie is the initial creator of the xgboost sklearn module.
* [Yen-Ying Lee](https://github.com/white1033)
* [Masaaki Horikoshi](https://github.com/sinhrks)
- Masaaki is the initial creator of xgboost python plotting module.
* [Hongliang Liu](https://github.com/phunterlau)
- Hongliang is the maintainer of xgboost python PyPI package for pip installation.
* [daiyl0320](https://github.com/daiyl0320)
- daiyl0320 contributed patches to make the xgboost distributed version more robust and scale stably on TB-scale datasets.
* [Huayi Zhang](https://github.com/irachex)
* [Johan Manders](https://github.com/johanmanders)
* [yoori](https://github.com/yoori)
* [Mathias Müller](https://github.com/far0n)
* [Sam Thomson](https://github.com/sammthomson)
* [ganesh-krishnan](https://github.com/ganesh-krishnan)
* [Damien Carol](https://github.com/damiencarol)
* [Alex Bain](https://github.com/convexquad)


@@ -1,4 +1,4 @@
Copyright (c) 2016 by Contributors
Copyright (c) 2014 by Tianqi Chen and Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

Makefile (198 changed lines)

@@ -1,188 +1,26 @@
ifndef config
ifneq ("$(wildcard ./config.mk)","")
config = config.mk
else
config = make/config.mk
endif
endif
ifndef DMLC_CORE
DMLC_CORE = dmlc-core
endif
ifndef RABIT
RABIT = rabit
endif
ROOTDIR = $(CURDIR)
ifeq ($(OS), Windows_NT)
UNAME="Windows"
else
UNAME=$(shell uname)
endif
include $(config)
ifeq ($(USE_OPENMP), 0)
export NO_OPENMP = 1
endif
include $(DMLC_CORE)/make/dmlc.mk
# include the plugins
include $(XGB_PLUGINS)
# use customized config file
ifndef CC
export CC = $(if $(shell which gcc-5),gcc-5,gcc)
endif
ifndef CXX
export CXX = $(if $(shell which g++-5),g++-5,g++)
endif
export LDFLAGS= -pthread -lm $(ADD_LDFLAGS) $(DMLC_LDFLAGS) $(PLUGIN_LDFLAGS)
export CFLAGS= -std=c++0x -Wall -O3 -msse2 -Wno-unknown-pragmas -funroll-loops -Iinclude $(ADD_CFLAGS) $(PLUGIN_CFLAGS)
CFLAGS += -I$(DMLC_CORE)/include -I$(RABIT)/include
#java include path
export JAVAINCFLAGS = -I${JAVA_HOME}/include -I./java
ifndef LINT_LANG
LINT_LANG= "all"
endif
ifneq ($(UNAME), Windows)
CFLAGS += -fPIC
XGBOOST_DYLIB = lib/libxgboost.so
else
XGBOOST_DYLIB = lib/libxgboost.dll
endif
ifeq ($(UNAME), Linux)
LDFLAGS += -lrt
JAVAINCFLAGS += -I${JAVA_HOME}/include/linux
endif
ifeq ($(UNAME), Darwin)
JAVAINCFLAGS += -I${JAVA_HOME}/include/darwin
endif
ifeq ($(USE_OPENMP), 1)
CFLAGS += -fopenmp
else
CFLAGS += -DDISABLE_OPENMP
endif
export CC = gcc
export CXX = g++
export CFLAGS = -Wall -O3 -msse2 -Wno-unknown-pragmas -fopenmp
# specify tensor path
.PHONY: clean all lint clean_all doxygen rcpplint pypack Rpack Rbuild Rcheck java pylint
BIN = xgboost
OBJ =
.PHONY: clean all
all: $(BIN) $(OBJ)
export LDFLAGS= -pthread -lm
xgboost: regrank/xgboost_regrank_main.cpp regrank/*.h regrank/*.hpp booster/*.h booster/*/*.hpp booster/*.hpp
all: lib/libxgboost.a $(XGBOOST_DYLIB) xgboost
$(BIN) :
$(CXX) $(CFLAGS) $(LDFLAGS) -o $@ $(filter %.cpp %.o %.c, $^)
$(DMLC_CORE)/libdmlc.a: $(wildcard $(DMLC_CORE)/src/*.cc $(DMLC_CORE)/src/*/*.cc)
+ cd $(DMLC_CORE); $(MAKE) libdmlc.a config=$(ROOTDIR)/$(config); cd $(ROOTDIR)
$(OBJ) :
$(CXX) -c $(CFLAGS) -o $@ $(firstword $(filter %.cpp %.c, $^) )
$(RABIT)/lib/$(LIB_RABIT): $(wildcard $(RABIT)/src/*.cc)
+ cd $(RABIT); $(MAKE) lib/$(LIB_RABIT); cd $(ROOTDIR)
install:
cp -f -r $(BIN) $(INSTALL_PATH)
jvm: jvm-packages/lib/libxgboost4j.so
SRC = $(wildcard src/*.cc src/*/*.cc)
ALL_OBJ = $(patsubst src/%.cc, build/%.o, $(SRC)) $(PLUGIN_OBJS)
AMALGA_OBJ = amalgamation/xgboost-all0.o
LIB_DEP = $(DMLC_CORE)/libdmlc.a $(RABIT)/lib/$(LIB_RABIT)
ALL_DEP = $(filter-out build/cli_main.o, $(ALL_OBJ)) $(LIB_DEP)
CLI_OBJ = build/cli_main.o
build/%.o: src/%.cc
@mkdir -p $(@D)
$(CXX) $(CFLAGS) -MM -MT build/$*.o $< >build/$*.d
$(CXX) -c $(CFLAGS) -c $< -o $@
build_plugin/%.o: plugin/%.cc
@mkdir -p $(@D)
$(CXX) $(CFLAGS) -MM -MT build_plugin/$*.o $< >build_plugin/$*.d
$(CXX) -c $(CFLAGS) -c $< -o $@
# The should be equivalent to $(ALL_OBJ) except for build/cli_main.o
amalgamation/xgboost-all0.o: amalgamation/xgboost-all0.cc
$(CXX) -c $(CFLAGS) -c $< -o $@
# Equivalent to lib/libxgboost_all.so
lib/libxgboost_all.so: $(AMALGA_OBJ) $(LIB_DEP)
@mkdir -p $(@D)
$(CXX) $(CFLAGS) -shared -o $@ $(filter %.o %.a, $^) $(LDFLAGS)
lib/libxgboost.a: $(ALL_DEP)
@mkdir -p $(@D)
ar crv $@ $(filter %.o, $?)
lib/libxgboost.dll lib/libxgboost.so: $(ALL_DEP)
@mkdir -p $(@D)
$(CXX) $(CFLAGS) -shared -o $@ $(filter %.o %a, $^) $(LDFLAGS)
jvm-packages/lib/libxgboost4j.so: jvm-packages/xgboost4j/src/native/xgboost4j.cpp $(ALL_DEP)
@mkdir -p $(@D)
$(CXX) $(CFLAGS) $(JAVAINCFLAGS) -shared -o $@ $(filter %.cpp %.o %.a, $^) $(LDFLAGS)
xgboost: $(CLI_OBJ) $(ALL_DEP)
$(CXX) $(CFLAGS) -o $@ $(filter %.o %.a, $^) $(LDFLAGS)
rcpplint:
python2 dmlc-core/scripts/lint.py xgboost ${LINT_LANG} R-package/src
lint: rcpplint
python2 dmlc-core/scripts/lint.py xgboost ${LINT_LANG} include src plugin python-package
pylint:
flake8 --ignore E501 python-package
flake8 --ignore E501 tests/python
clean:
$(RM) -rf build build_plugin lib bin *~ */*~ */*/*~ */*/*/*~ */*.o */*/*.o */*/*/*.o xgboost
clean_all: clean
cd $(DMLC_CORE); $(MAKE) clean; cd $(ROODIR)
cd $(RABIT); $(MAKE) clean; cd $(ROODIR)
doxygen:
doxygen doc/Doxyfile
# create standalone python tar file.
pypack: ${XGBOOST_DYLIB}
cp ${XGBOOST_DYLIB} python-package/xgboost
cd python-package; tar cf xgboost.tar xgboost; cd ..
# Script to make a clean installable R package.
Rpack:
$(MAKE) clean_all
rm -rf xgboost xgboost*.tar.gz
cp -r R-package xgboost
rm -rf xgboost/src/*.o xgboost/src/*.so xgboost/src/*.dll
rm -rf xgboost/src/*/*.o
rm -rf xgboost/demo/*.model xgboost/demo/*.buffer xgboost/demo/*.txt
rm -rf xgboost/demo/runall.R
cp -r src xgboost/src/src
cp -r include xgboost/src/include
cp -r amalgamation xgboost/src/amalgamation
mkdir -p xgboost/src/rabit
cp -r rabit/include xgboost/src/rabit/include
cp -r rabit/src xgboost/src/rabit/src
rm -rf xgboost/src/rabit/src/*.o
mkdir -p xgboost/src/dmlc-core
cp -r dmlc-core/include xgboost/src/dmlc-core/include
cp -r dmlc-core/src xgboost/src/dmlc-core/src
cp ./LICENSE xgboost
cat R-package/src/Makevars|sed '2s/.*/PKGROOT=./' | sed '3s/.*/ENABLE_STD_THREAD=0/' > xgboost/src/Makevars
cp xgboost/src/Makevars xgboost/src/Makevars.win
Rbuild:
$(MAKE) Rpack
R CMD build --no-build-vignettes xgboost
rm -rf xgboost
Rcheck:
$(MAKE) Rbuild
R CMD check xgboost*.tar.gz
-include build/*.d
-include build/*/*.d
-include build_plugin/*/*.d
$(RM) $(OBJ) $(BIN) *~

NEWS.md (104 changed lines)

@@ -1,104 +0,0 @@
XGBoost Change Log
==================
This file records the changes in xgboost library in reverse chronological order.
## v0.6 (2016.07.29)
* Version 0.5 is skipped due to major improvements in the core
* Major refactor of core library.
- Goal: more flexible and modular code as a portable library.
- Switch to use of c++11 standard code.
- Random number generator defaults to ```std::mt19937```.
- Share the data loading pipeline and logging module from dmlc-core.
- Enable a registry pattern to allow optional plugins for objective, metric, tree constructor, and data loader.
- Future plugin modules can be put into xgboost/plugin and register back to the library.
- Replace most raw pointers with smart pointers, for RAII safety.
* Add an official option, the `tree_method` parameter, to select the approximate algorithm.
- Change the default behavior to prefer the faster algorithm.
- The user will get a message when the approximate algorithm is chosen.
* Change library name to libxgboost.so
* Backward compatibility
- The binary buffer file is not backward compatible with previous versions.
- The model file is backward compatible on 64-bit platforms.
* The model file is compatible between 64/32-bit platforms (not yet tested).
* External memory version and other advanced features will be exposed to R library as well on linux.
- Previously some of the features are blocked due to C++11 and threading limits.
- The Windows version is still blocked because Rtools does not support ```std::thread```.
* rabit and dmlc-core are maintained through git submodule
- Anyone can open PR to update these dependencies now.
* Improvements
- Rabit and xgboost libs are now thread-safe and use thread-local PRNGs
- This could fix some of the previous problems when running xgboost on multiple threads.
* JVM Package
- Enable xgboost4j for java and scala
- XGBoost distributed now runs on Flink and Spark.
* Support model attributes listing for meta data.
- https://github.com/dmlc/xgboost/pull/1198
- https://github.com/dmlc/xgboost/pull/1166
* Support callback API
- https://github.com/dmlc/xgboost/issues/892
- https://github.com/dmlc/xgboost/pull/1211
- https://github.com/dmlc/xgboost/pull/1264
* Support new booster DART(dropout in tree boosting)
- https://github.com/dmlc/xgboost/pull/1220
* Add CMake build system
- https://github.com/dmlc/xgboost/pull/1314
## v0.47 (2016.01.14)
* Changes in R library
- fixed possible problem of poisson regression.
- switched from 0 to NA for missing values.
- exposed access to additional model parameters.
* Changes in Python library
- throws an exception instead of crashing the terminal when a parameter error happens.
- has importance plot and tree plot functions.
- accepts different learning rates for each boosting round.
- allows model training continuation from previously saved model.
- allows early stopping in CV.
- allows feval to return a list of tuples.
- allows eval_metric to handle additional format.
- improved compatibility in sklearn module.
- additional parameters added for sklearn wrapper.
- added pip installation functionality.
- supports more Pandas DataFrame dtypes.
- added best_ntree_limit attribute, in addition to best_score and best_iteration.
* Java api is ready for use
* Added more test cases and continuous integration to make each build more robust.
## v0.4 (2015.05.11)
* Distributed version of xgboost that runs on YARN, scales to billions of examples
* Direct save/load data and model from/to S3 and HDFS
* Feature importance visualization in R module, by Michael Benesty
* Predict leaf index
* Poisson regression for counts data
* Early stopping option in training
* Native save load support in R and python
- xgboost models now can be saved using save/load in R
- xgboost python model is now picklable
* sklearn wrapper is supported in python module
* Experimental External memory version
## v0.3 (2014.09.07)
* Faster tree construction module
- Allows subsample columns during tree construction via ```bst:col_samplebytree=ratio```
* Support for boosting from initial predictions
* Experimental version of LambdaRank
* Linear booster is now parallelized, using parallel coordinate descent.
* Add [Code Guide](src/README.md) for customizing objective function and evaluation
* Add R module
## v0.2x (2014.05.20)
* Python module
* Weighted sample instances
* Initial version of pairwise rank
## v0.1 (2014.03.26)
* Initial release


@@ -1,6 +0,0 @@
\.o$
\.so$
\.dll$
^.*\.Rproj$
^\.Rproj\.user$
README.md


@@ -1,39 +0,0 @@
Package: xgboost
Type: Package
Title: Extreme Gradient Boosting
Version: 0.6-0
Date: 2015-08-01
Author: Tianqi Chen <tianqi.tchen@gmail.com>, Tong He <hetong007@gmail.com>,
Michael Benesty <michael@benesty.fr>, Vadim Khotilovich <khotilovich@gmail.com>,
Yuan Tang <terrytangyuan@gmail.com>
Maintainer: Tong He <hetong007@gmail.com>
Description: Extreme Gradient Boosting, which is an efficient implementation
of gradient boosting framework. This package is its R interface. The package
includes efficient linear model solver and tree learning algorithms. The package
can automatically do parallel computation on a single machine which could be
more than 10 times faster than existing gradient boosting packages. It supports
various objective functions, including regression, classification and ranking.
The package is made to be extensible, so that users are also allowed to define
their own objectives easily.
License: Apache License (== 2.0) | file LICENSE
URL: https://github.com/dmlc/xgboost
BugReports: https://github.com/dmlc/xgboost/issues
VignetteBuilder: knitr
Suggests:
knitr,
rmarkdown,
ggplot2 (>= 1.0.1),
DiagrammeR (>= 0.8.1),
Ckmeans.1d.dp (>= 3.3.1),
vcd (>= 1.3),
testthat,
igraph (>= 1.0.1)
Depends:
R (>= 2.10)
Imports:
Matrix (>= 1.1-0),
methods,
data.table (>= 1.9.6),
magrittr (>= 1.5),
stringi (>= 0.5.2)
RoxygenNote: 5.0.1


@@ -1,13 +0,0 @@
Copyright (c) 2014 by Tianqi Chen and Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -1,71 +0,0 @@
# Generated by roxygen2: do not edit by hand
S3method("[",xgb.DMatrix)
S3method("dimnames<-",xgb.DMatrix)
S3method(dim,xgb.DMatrix)
S3method(dimnames,xgb.DMatrix)
S3method(getinfo,xgb.DMatrix)
S3method(predict,xgb.Booster)
S3method(predict,xgb.Booster.handle)
S3method(print,xgb.Booster)
S3method(print,xgb.DMatrix)
S3method(print,xgb.cv.synchronous)
S3method(setinfo,xgb.DMatrix)
S3method(slice,xgb.DMatrix)
export("xgb.attr<-")
export("xgb.attributes<-")
export("xgb.parameters<-")
export(cb.cv.predict)
export(cb.early.stop)
export(cb.evaluation.log)
export(cb.print.evaluation)
export(cb.reset.parameters)
export(cb.save.model)
export(getinfo)
export(setinfo)
export(slice)
export(xgb.DMatrix)
export(xgb.DMatrix.save)
export(xgb.attr)
export(xgb.attributes)
export(xgb.create.features)
export(xgb.cv)
export(xgb.dump)
export(xgb.ggplot.deepness)
export(xgb.ggplot.importance)
export(xgb.importance)
export(xgb.load)
export(xgb.model.dt.tree)
export(xgb.plot.deepness)
export(xgb.plot.importance)
export(xgb.plot.multi.trees)
export(xgb.plot.tree)
export(xgb.save)
export(xgb.save.raw)
export(xgb.train)
export(xgboost)
import(methods)
importClassesFrom(Matrix,dgCMatrix)
importClassesFrom(Matrix,dgeMatrix)
importFrom(Matrix,cBind)
importFrom(Matrix,colSums)
importFrom(Matrix,sparse.model.matrix)
importFrom(Matrix,sparseVector)
importFrom(data.table,":=")
importFrom(data.table,as.data.table)
importFrom(data.table,data.table)
importFrom(data.table,rbindlist)
importFrom(data.table,setkey)
importFrom(data.table,setkeyv)
importFrom(data.table,setnames)
importFrom(magrittr,"%>%")
importFrom(stats,predict)
importFrom(stringi,stri_detect_regex)
importFrom(stringi,stri_match_first_regex)
importFrom(stringi,stri_replace_all_regex)
importFrom(stringi,stri_replace_first_regex)
importFrom(stringi,stri_split_regex)
importFrom(utils,object.size)
importFrom(utils,str)
importFrom(utils,tail)
useDynLib(xgboost)


@@ -1,608 +0,0 @@
#' Callback closures for booster training.
#'
#' These are used to perform various service tasks either during boosting iterations or at the end.
#' This approach helps to modularize many such tasks without bloating the main training methods.
#'
#' @details
#' By default, a callback function is run after each boosting iteration.
#' An R-attribute \code{is_pre_iteration} could be set for a callback to define a pre-iteration function.
#'
#' When a callback function has \code{finalize} parameter, its finalizer part will also be run after
#' the boosting is completed.
#'
#' WARNING: side-effects!!! Be aware that these callback functions access and modify things in
#' the environment from which they are called, which is a fairly uncommon thing to do in R.
#'
#' To write a custom callback closure, make sure you first understand the main concepts about R environments.
#' Check either R documentation on \code{\link[base]{environment}} or the
#' \href{http://adv-r.had.co.nz/Environments.html}{Environments chapter} from the "Advanced R"
#' book by Hadley Wickham. Further, the best option is to read the code of some of the existing callbacks -
#' choose ones that do something similar to what you want to achieve. Also, you would need to get familiar
#' with the objects available inside of the \code{xgb.train} and \code{xgb.cv} internal environments.
#'
#' @seealso
#' \code{\link{cb.print.evaluation}},
#' \code{\link{cb.evaluation.log}},
#' \code{\link{cb.reset.parameters}},
#' \code{\link{cb.early.stop}},
#' \code{\link{cb.save.model}},
#' \code{\link{cb.cv.predict}},
#' \code{\link{xgb.train}},
#' \code{\link{xgb.cv}}
#'
#' @name callbacks
NULL
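
To make the callback mechanism documented above concrete, here is a minimal sketch of a user-defined post-iteration callback passed to xgb.train through its callbacks argument. It assumes the agaricus demo data that ships with the R package; my_logger is a purely illustrative name, not part of the package.

library(xgboost)
data(agaricus.train, package = "xgboost")
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)

# a hypothetical post-iteration callback that reports the current iteration number
my_logger <- function() {
  callback <- function(env = parent.frame()) {
    message("finished iteration ", env$iteration)
  }
  attr(callback, 'call') <- match.call()
  attr(callback, 'name') <- 'my_logger'
  callback
}

bst <- xgb.train(params = list(objective = "binary:logistic", max_depth = 2),
                 data = dtrain, nrounds = 3,
                 callbacks = list(my_logger()))
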
#
# Callbacks -------------------------------------------------------------------
#
#' Callback closure for printing the result of evaluation
#'
#' @param period results will be printed every \code{period} iterations
#'
#' @details
#' The callback function prints the result of evaluation at every \code{period} iterations.
#' The initial and the last iteration's evaluations are always printed.
#'
#' Callback function expects the following values to be set in its calling frame:
#' \code{bst_evaluation} (also \code{bst_evaluation_err} when available),
#' \code{iteration},
#' \code{begin_iteration},
#' \code{end_iteration}.
#'
#' @seealso
#' \code{\link{callbacks}}
#'
#' @export
cb.print.evaluation <- function(period=1) {
callback <- function(env = parent.frame()) {
if (length(env$bst_evaluation) == 0 ||
period == 0 ||
NVL(env$rank, 0) != 0 )
return()
i <- env$iteration
if ((i-1) %% period == 0 ||
i == env$begin_iteration ||
i == env$end_iteration) {
msg <- format.eval.string(i, env$bst_evaluation, env$bst_evaluation_err)
cat(msg, '\n')
}
}
attr(callback, 'call') <- match.call()
attr(callback, 'name') <- 'cb.print.evaluation'
callback
}
#' Callback closure for logging the evaluation history
#'
#' @details
#' This callback function appends the current iteration evaluation results \code{bst_evaluation}
#' available in the calling parent frame to the \code{evaluation_log} list in a calling frame.
#'
#' The finalizer callback (called with \code{finalize = TRUE} in the end) converts
#' the \code{evaluation_log} list into a final data.table.
#'
#' The iteration evaluation result \code{bst_evaluation} must be a named numeric vector.
#'
#' Note: in the column names of the final data.table, the dash '-' character is replaced with
#' the underscore '_' in order to make the column names more like regular R identifiers.
#'
#' Callback function expects the following values to be set in its calling frame:
#' \code{evaluation_log},
#' \code{bst_evaluation},
#' \code{iteration}.
#'
#' @seealso
#' \code{\link{callbacks}}
#'
#' @export
cb.evaluation.log <- function() {
mnames <- NULL
init <- function(env) {
if (!is.list(env$evaluation_log))
stop("'evaluation_log' has to be a list")
mnames <<- names(env$bst_evaluation)
if (is.null(mnames) || any(mnames == ""))
stop("bst_evaluation must have non-empty names")
mnames <<- gsub('-', '_', names(env$bst_evaluation))
if(!is.null(env$bst_evaluation_err))
mnames <<- c(paste0(mnames, '_mean'), paste0(mnames, '_std'))
}
finalizer <- function(env) {
env$evaluation_log <- as.data.table(t(simplify2array(env$evaluation_log)))
setnames(env$evaluation_log, c('iter', mnames))
if(!is.null(env$bst_evaluation_err)) {
# rearrange col order from _mean,_mean,...,_std,_std,...
# to be _mean,_std,_mean,_std,...
len <- length(mnames)
means <- mnames[1:(len/2)]
stds <- mnames[(len/2 + 1):len]
cnames <- numeric(len)
cnames[c(TRUE, FALSE)] <- means
cnames[c(FALSE, TRUE)] <- stds
env$evaluation_log <- env$evaluation_log[, c('iter', cnames), with=FALSE]
}
}
callback <- function(env = parent.frame(), finalize = FALSE) {
if (is.null(mnames))
init(env)
if (finalize)
return(finalizer(env))
ev <- env$bst_evaluation
if(!is.null(env$bst_evaluation_err))
ev <- c(ev, env$bst_evaluation_err)
env$evaluation_log <- c(env$evaluation_log,
list(c(iter = env$iteration, ev)))
}
attr(callback, 'call') <- match.call()
attr(callback, 'name') <- 'cb.evaluation.log'
callback
}
#' Callback closure for resetting the booster's parameters at each iteration.
#'
#' @param new_params a list where each element corresponds to a parameter that needs to be reset.
#' Each element's value must be either a vector of values of length \code{nrounds}
#' to be set at each iteration,
#' or a function of two parameters \code{learning_rates(iteration, nrounds)}
#' which returns a new parameter value by using the current iteration number
#' and the total number of boosting rounds.
#'
#' @details
#' This is a "pre-iteration" callback function used to reset booster's parameters
#' at the beginning of each iteration.
#'
#' Note that when training is resumed from some previous model, and a function is used to
#' reset a parameter value, the \code{nrounds} argument in this function would be the
#' number of boosting rounds in the current training.
#'
#' Callback function expects the following values to be set in its calling frame:
#' \code{bst} or \code{bst_folds},
#' \code{iteration},
#' \code{begin_iteration},
#' \code{end_iteration}.
#'
#' @seealso
#' \code{\link{callbacks}}
#'
#' @export
cb.reset.parameters <- function(new_params) {
if (typeof(new_params) != "list")
stop("'new_params' must be a list")
pnames <- gsub("\\.", "_", names(new_params))
nrounds <- NULL
# run some checks in the beginning
init <- function(env) {
nrounds <<- env$end_iteration - env$begin_iteration + 1
if (is.null(env$bst) && is.null(env$bst_folds))
stop("Parent frame has neither 'bst' nor 'bst_folds'")
# Some parameters are not allowed to be changed,
# since changing them would simply wreak havoc
not_allowed <- pnames %in%
c('num_class', 'num_output_group', 'size_leaf_vector', 'updater_seq')
if (any(not_allowed))
stop('Parameters ', paste(pnames[not_allowed]), " cannot be changed during boosting.")
for (n in pnames) {
p <- new_params[[n]]
if (is.function(p)) {
if (length(formals(p)) != 2)
stop("Parameter '", n, "' is a function but not of two arguments")
} else if (is.numeric(p) || is.character(p)) {
if (length(p) != nrounds)
stop("Length of '", n, "' has to be equal to 'nrounds'")
} else {
stop("Parameter '", n, "' is not a function or a vector")
}
}
}
callback <- function(env = parent.frame()) {
if (is.null(nrounds))
init(env)
i <- env$iteration
pars <- lapply(new_params, function(p) {
if (is.function(p))
return(p(i, nrounds))
p[i]
})
if (!is.null(env$bst)) {
xgb.parameters(env$bst$handle) <- pars
} else {
for (fd in env$bst_folds)
xgb.parameters(fd$bst$handle) <- pars
}
}
attr(callback, 'is_pre_iteration') <- TRUE
attr(callback, 'call') <- match.call()
attr(callback, 'name') <- 'cb.reset.parameters'
callback
}
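
As a brief sketch of the parameter-resetting behaviour described above (reusing the dtrain object from the earlier sketch), eta can be decayed each round with a function of (iteration, nrounds):

bst <- xgb.train(params = list(objective = "binary:logistic", eta = 0.3),
                 data = dtrain, nrounds = 10,
                 callbacks = list(cb.reset.parameters(
                   list(eta = function(i, n) 0.3 * 0.95 ^ (i - 1)))))
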
#' Callback closure to activate the early stopping.
#'
#' @param stopping_rounds The number of rounds with no improvement in
#' the evaluation metric in order to stop the training.
#' @param maximize whether to maximize the evaluation metric
#' @param metric_name the name of an evaluation column to use as a criteria for early
#' stopping. If not set, the last column would be used.
#' Let's say the test data in \code{watchlist} was labelled as \code{dtest},
#' and one wants to use the AUC in test data for early stopping regardless of where
#' it is in the \code{watchlist}, then one of the following would need to be set:
#' \code{metric_name='dtest-auc'} or \code{metric_name='dtest_auc'}.
#' All dash '-' characters in metric names are considered equivalent to '_'.
#' @param verbose whether to print the early stopping information.
#'
#' @details
#' This callback function determines the condition for early stopping
#' by setting the \code{stop_condition = TRUE} flag in its calling frame.
#'
#' The following additional fields are assigned to the model's R object:
#' \itemize{
#' \item \code{best_score} the evaluation score at the best iteration
#' \item \code{best_iteration} at which boosting iteration the best score has occurred (1-based index)
#' \item \code{best_ntreelimit} to use with the \code{ntreelimit} parameter in \code{predict}.
#' It differs from \code{best_iteration} in multiclass or random forest settings.
#' }
#'
#' The same values are also stored as xgb-attributes:
#' \itemize{
#' \item \code{best_iteration} is stored as a 0-based iteration index (for interoperability of binary models)
#' \item \code{best_msg} message string is also stored.
#' }
#'
#' At least one data element is required in the evaluation watchlist for early stopping to work.
#'
#' Callback function expects the following values to be set in its calling frame:
#' \code{stop_condition},
#' \code{bst_evaluation},
#' \code{rank},
#' \code{bst} (or \code{bst_folds} and \code{basket}),
#' \code{iteration},
#' \code{begin_iteration},
#' \code{end_iteration},
#' \code{num_parallel_tree}.
#'
#' @seealso
#' \code{\link{callbacks}},
#' \code{\link{xgb.attr}}
#'
#' @export
cb.early.stop <- function(stopping_rounds, maximize=FALSE,
metric_name=NULL, verbose=TRUE) {
# state variables
best_iteration <- -1
best_ntreelimit <- -1
best_score <- Inf
best_msg <- NULL
metric_idx <- 1
init <- function(env) {
if (length(env$bst_evaluation) == 0)
stop("For early stopping, watchlist must have at least one element")
eval_names <- gsub('-', '_', names(env$bst_evaluation))
if (!is.null(metric_name)) {
metric_idx <<- which(gsub('-', '_', metric_name) == eval_names)
if (length(metric_idx) == 0)
stop("'metric_name' for early stopping is not one of the following:\n",
paste(eval_names, collapse=' '), '\n')
}
if (is.null(metric_name) &&
length(env$bst_evaluation) > 1) {
metric_idx <<- length(eval_names)
if (verbose)
cat('Multiple eval metrics are present. Will use ',
eval_names[metric_idx], ' for early stopping.\n', sep = '')
}
metric_name <<- eval_names[metric_idx]
# maximize is usually NULL when not set in xgb.train and built-in metrics
if (is.null(maximize))
maximize <<- ifelse(grepl('(_auc|_map|_ndcg)', metric_name), TRUE, FALSE)
if (verbose && NVL(env$rank, 0) == 0)
cat("Will train until ", metric_name, " hasn't improved in ",
stopping_rounds, " rounds.\n\n", sep = '')
best_iteration <<- 1
if (maximize) best_score <<- -Inf
env$stop_condition <- FALSE
if (!is.null(env$bst)) {
if (class(env$bst) != 'xgb.Booster')
stop("'bst' in the parent frame must be an 'xgb.Booster'")
if (!is.null(best_score <- xgb.attr(env$bst$handle, 'best_score'))) {
best_score <<- as.numeric(best_score)
best_iteration <<- as.numeric(xgb.attr(env$bst$handle, 'best_iteration')) + 1
best_msg <<- as.numeric(xgb.attr(env$bst$handle, 'best_msg'))
} else {
xgb.attributes(env$bst$handle) <- list(best_iteration = best_iteration - 1,
best_score = best_score)
}
} else if (is.null(env$bst_folds) || is.null(env$basket)) {
stop("Parent frame has neither 'bst' nor ('bst_folds' and 'basket')")
}
}
finalizer <- function(env) {
if (!is.null(env$bst)) {
attr_best_score = as.numeric(xgb.attr(env$bst$handle, 'best_score'))
if (best_score != attr_best_score)
stop("Inconsistent 'best_score' values between the closure state: ", best_score,
" and the xgb.attr: ", attr_best_score)
env$bst$best_iteration = best_iteration
env$bst$best_ntreelimit = best_ntreelimit
env$bst$best_score = best_score
} else {
env$basket$best_iteration <- best_iteration
env$basket$best_ntreelimit <- best_ntreelimit
}
}
callback <- function(env = parent.frame(), finalize = FALSE) {
if (best_iteration < 0)
init(env)
if (finalize)
return(finalizer(env))
i <- env$iteration
score = env$bst_evaluation[metric_idx]
if (( maximize && score > best_score) ||
(!maximize && score < best_score)) {
best_msg <<- format.eval.string(i, env$bst_evaluation, env$bst_evaluation_err)
best_score <<- score
best_iteration <<- i
best_ntreelimit <<- best_iteration * env$num_parallel_tree
# save the property to attributes, so they will occur in checkpoint
if (!is.null(env$bst)) {
xgb.attributes(env$bst) <- list(
best_iteration = best_iteration - 1, # convert to 0-based index
best_score = best_score,
best_msg = best_msg,
best_ntreelimit = best_ntreelimit)
}
} else if (i - best_iteration >= stopping_rounds) {
env$stop_condition <- TRUE
env$end_iteration <- i
if (verbose && NVL(env$rank, 0) == 0)
cat("Stopping. Best iteration:\n", best_msg, "\n\n", sep = '')
}
}
attr(callback, 'call') <- match.call()
attr(callback, 'name') <- 'cb.early.stop'
callback
}
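
A short sketch of early stopping against a held-out set named dtest in the watchlist, following the dash/underscore metric_name convention documented above (agaricus.test is the demo test set shipped with the package; dtrain is from the earlier sketch):

data(agaricus.test, package = "xgboost")
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
bst <- xgb.train(params = list(objective = "binary:logistic", eval_metric = "auc"),
                 data = dtrain, nrounds = 100,
                 watchlist = list(dtrain = dtrain, dtest = dtest),
                 callbacks = list(cb.early.stop(stopping_rounds = 10,
                                                maximize = TRUE,
                                                metric_name = "dtest-auc")))
bst$best_iteration  # filled in by the finalizer documented above
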
#' Callback closure for saving a model file.
#'
#' @param save_period save the model to disk after every
#' \code{save_period} iterations; 0 means save the model at the end.
#' @param save_name the name or path for the saved model file.
#' It can contain a \code{\link[base]{sprintf}} formatting specifier
#' to include the integer iteration number in the file name.
#' E.g., with \code{save_name} = 'xgboost_%04d.model',
#' the file saved at iteration 50 would be named "xgboost_0050.model".
#'
#' @details
#' This callback function allows saving an xgb-model file, either periodically after every \code{save_period} iterations or at the end.
#'
#' Callback function expects the following values to be set in its calling frame:
#' \code{bst},
#' \code{iteration},
#' \code{begin_iteration},
#' \code{end_iteration}.
#'
#' @seealso
#' \code{\link{callbacks}}
#'
#' @export
cb.save.model <- function(save_period = 0, save_name = "xgboost.model") {
if (save_period < 0)
stop("'save_period' cannot be negative")
callback <- function(env = parent.frame()) {
if (is.null(env$bst))
stop("'save_model' callback requires the 'bst' booster object in its calling frame")
if ((save_period > 0 && (env$iteration - env$begin_iteration) %% save_period == 0) ||
(save_period == 0 && env$iteration == env$end_iteration))
xgb.save(env$bst, sprintf(save_name, env$iteration))
}
attr(callback, 'call') <- match.call()
attr(callback, 'name') <- 'cb.save.model'
callback
}
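
A one-line usage sketch of the periodic checkpointing described above, with the sprintf-style file name from the documentation (reusing dtrain from the earlier sketch):

bst <- xgb.train(params = list(objective = "binary:logistic"),
                 data = dtrain, nrounds = 20,
                 callbacks = list(cb.save.model(save_period = 5,
                                                save_name = "xgboost_%04d.model")))
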
#' Callback closure for returning cross-validation based predictions.
#'
#' @param save_models a flag for whether to save the folds' models.
#'
#' @details
#' This callback function saves predictions for all of the test folds,
#' and also allows saving the folds' models.
#'
#' It is a "finalizer" callback and it uses early stopping information whenever it is available,
#' thus it must be run after the early stopping callback if the early stopping is used.
#'
#' Callback function expects the following values to be set in its calling frame:
#' \code{bst_folds},
#' \code{basket},
#' \code{data},
#' \code{end_iteration},
#' \code{num_parallel_tree},
#' \code{num_class}.
#'
#' @return
#' Predictions are returned inside of the \code{pred} element, which is either a vector or a matrix,
#' depending on the number of prediction outputs per data row. The order of predictions corresponds
#' to the order of rows in the original dataset. Note that when a custom \code{folds} list is
#' provided in \code{xgb.cv}, the predictions would only be returned properly when this list is a
#' non-overlapping list of k sets of indices, as in a standard k-fold CV. The predictions would not be
#' meaningful when user-provided folds have overlapping indices as in, e.g., random sampling splits.
#' When some of the indices in the training dataset are not included into user-provided \code{folds},
#' their prediction value would be \code{NA}.
#'
#' @seealso
#' \code{\link{callbacks}}
#'
#' @export
cb.cv.predict <- function(save_models = FALSE) {
finalizer <- function(env) {
if (is.null(env$basket) || is.null(env$bst_folds))
stop("'cb.cv.predict' callback requires 'basket' and 'bst_folds' lists in its calling frame")
N <- nrow(env$data)
pred <-
if (env$num_class > 1) {
matrix(NA_real_, N, env$num_class)
} else {
rep(NA_real_, N)
}
ntreelimit <- NVL(env$basket$best_ntreelimit,
env$end_iteration * env$num_parallel_tree)
for (fd in env$bst_folds) {
pr <- predict(fd$bst, fd$watchlist[[2]], ntreelimit = ntreelimit, reshape = TRUE)
if (is.matrix(pred)) {
pred[fd$index,] <- pr
} else {
pred[fd$index] <- pr
}
}
env$basket$pred <- pred
if (save_models) {
env$basket$models <- lapply(env$bst_folds, function(fd) {
xgb.attr(fd$bst, 'niter') <- env$end_iteration - 1
xgb.Booster.check(xgb.handleToBooster(fd$bst), saveraw = TRUE)
})
}
}
callback <- function(env = parent.frame(), finalize = FALSE) {
if (finalize)
return(finalizer(env))
}
attr(callback, 'call') <- match.call()
attr(callback, 'name') <- 'cb.cv.predict'
callback
}
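# Illustrative usage sketch (not part of the package code): cb.cv.predict is
# normally engaged via the 'prediction' argument of xgb.cv; the data and
# parameters below follow the agaricus example used elsewhere in the package.
# data(agaricus.train, package = 'xgboost')
# dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
# cv <- xgb.cv(data = dtrain, nrounds = 5, nfold = 3, max_depth = 2, eta = 1,
#              objective = "binary:logistic", prediction = TRUE)
# str(cv$pred)   # per-row CV predictions collected by this callback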
#
# Internal utility functions for callbacks ------------------------------------
#
# Format the evaluation metric string
format.eval.string <- function(iter, eval_res, eval_err=NULL) {
if (length(eval_res) == 0)
stop('no evaluation results')
enames <- names(eval_res)
if (is.null(enames))
stop('evaluation results must have names')
iter <- sprintf('[%d]\t', iter)
if (!is.null(eval_err)) {
if (length(eval_res) != length(eval_err))
stop('eval_res & eval_err lengths mismatch')
res <- paste0(sprintf("%s:%f+%f", enames, eval_res, eval_err), collapse='\t')
} else {
res <- paste0(sprintf("%s:%f", enames, eval_res), collapse='\t')
}
return(paste0(iter, res))
}
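# Example of the produced format (values are illustrative):
# format.eval.string(3, c('train-auc' = 0.98, 'test-auc' = 0.95))
# ## "[3]\ttrain-auc:0.980000\ttest-auc:0.950000"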
# Extract callback names from the list of callbacks
callback.names <- function(cb_list) {
unlist(lapply(cb_list, function(x) attr(x, 'name')))
}
# Extract callback calls from the list of callbacks
callback.calls <- function(cb_list) {
unlist(lapply(cb_list, function(x) attr(x, 'call')))
}
# Add a callback cb to the list and make sure that
# cb.early.stop and cb.cv.predict are at the end of the list
# with cb.cv.predict being the last (when present)
add.cb <- function(cb_list, cb) {
cb_list <- c(cb_list, cb)
names(cb_list) <- callback.names(cb_list)
if ('cb.early.stop' %in% names(cb_list)) {
cb_list <- c(cb_list, cb_list['cb.early.stop'])
# this removes only the first one
cb_list['cb.early.stop'] <- NULL
}
if ('cb.cv.predict' %in% names(cb_list)) {
cb_list <- c(cb_list, cb_list['cb.cv.predict'])
cb_list['cb.cv.predict'] <- NULL
}
cb_list
}
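# Illustrative example of the reordering behaviour, assuming the callback
# constructors defined in this file:
# cbs <- add.cb(list(), cb.cv.predict())
# cbs <- add.cb(cbs, cb.print.evaluation())
# cbs <- add.cb(cbs, cb.early.stop(10))
# names(cbs)  # "cb.print.evaluation" "cb.early.stop" "cb.cv.predict"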
# Sort callbacks list into categories
categorize.callbacks <- function(cb_list) {
list(
pre_iter = Filter(function(x) {
pre <- attr(x, 'is_pre_iteration')
!is.null(pre) && pre
}, cb_list),
post_iter = Filter(function(x) {
pre <- attr(x, 'is_pre_iteration')
is.null(pre) || !pre
}, cb_list),
finalize = Filter(function(x) {
'finalize' %in% names(formals(x))
}, cb_list)
)
}
# Check whether all callback functions with names given by 'query_names' are present in the 'cb_list'.
has.callbacks <- function(cb_list, query_names) {
if (length(cb_list) < length(query_names))
return(FALSE)
if (!is.list(cb_list) ||
any(sapply(cb_list, class) != 'function')) {
stop('`cb_list` must be a list of callback functions')
}
cb_names <- callback.names(cb_list)
if (!is.character(cb_names) ||
length(cb_names) != length(cb_list) ||
any(cb_names == "")) {
stop('All callbacks in the `cb_list` must have a non-empty `name` attribute')
}
if (!is.character(query_names) ||
length(query_names) == 0 ||
any(query_names == "")) {
stop('query_names must be a non-empty vector of non-empty character names')
}
return(all(query_names %in% cb_names))
}



@@ -1,329 +0,0 @@
#
# This file is for the low-level reusable utility functions
# that are not supposed to be visible to a user.
#
#
# General helper utilities ----------------------------------------------------
#
# SQL-style NVL shortcut.
NVL <- function(x, val) {
if (is.null(x))
return(val)
if (is.vector(x)) {
x[is.na(x)] <- val
return(x)
}
if (typeof(x) == 'closure')
return(x)
stop('unsupported type of x for NVL')
}
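# Behaviour sketch:
# NVL(NULL, 0)          # -> 0
# NVL(c(1, NA, 3), 0)   # -> c(1, 0, 3)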
#
# Low-level functions for boosting --------------------------------------------
#
# Merges booster params with whatever is provided in ...
# plus runs some checks
check.booster.params <- function(params, ...) {
if (typeof(params) != "list")
stop("params must be a list")
# in R interface, allow for '.' instead of '_' in parameter names
names(params) <- gsub("\\.", "_", names(params))
# merge parameters from the params and the dots-expansion
dot_params <- list(...)
names(dot_params) <- gsub("\\.", "_", names(dot_params))
if (length(intersect(names(params),
names(dot_params))) > 0)
stop("Same parameters in 'params' and in the call are not allowed. Please check your 'params' list.")
params <- c(params, dot_params)
# providing a parameter multiple times only makes sense for 'eval_metric'
name_freqs <- table(names(params))
multi_names <- setdiff(names(name_freqs[name_freqs > 1]), 'eval_metric')
if (length(multi_names) > 0) {
warning("The following parameters were provided multiple times:\n\t",
paste(multi_names, collapse=', '), "\n Only the last value for each of them will be used.\n")
# While xgboost itself would choose the last value for a multi-parameter,
# will do some clean-up here b/c multi-parameters could be used further in R code, and R would
# pick the 1st (not the last) value when multiple elements with the same name are present in a list.
for (n in multi_names) {
del_idx <- which(n == names(params))
del_idx <- del_idx[-length(del_idx)]
params[del_idx] <- NULL
}
}
# for multiclass, expect num_class to be set
if (typeof(params[['objective']]) == "character" &&
substr(NVL(params[['objective']], 'x'), 1, 6) == 'multi:') {
if (as.numeric(NVL(params[['num_class']], 0)) < 2)
stop("'num_class' > 1 parameter must be set for multiclass classification")
}
return(params)
}
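# Illustrative example: dots are merged into 'params' and '.' is converted to '_':
# check.booster.params(list(max.depth = 3), eta = 0.1)
# ## list(max_depth = 3, eta = 0.1)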
# Performs some checks related to custom objective function.
# WARNING: has side-effects and can modify 'params' and 'obj' in its calling frame
check.custom.obj <- function(env = parent.frame()) {
if (!is.null(env$params[['objective']]) && !is.null(env$obj))
stop("Setting objectives in 'params' and 'obj' at the same time is not allowed")
if (!is.null(env$obj) && typeof(env$obj) != 'closure')
stop("'obj' must be a function")
# handle the case when custom objective function was provided through params
if (!is.null(env$params[['objective']]) &&
typeof(env$params$objective) == 'closure') {
env$obj <- env$params$objective
p <- env$params
p$objective <- NULL
env$params <- p
}
}
# Performs some checks related to custom evaluation function.
# WARNING: has side-effects and can modify 'params' and 'feval' in its calling frame
check.custom.eval <- function(env = parent.frame()) {
if (!is.null(env$params[['eval_metric']]) && !is.null(env$feval))
stop("Setting evaluation metrics in 'params' and 'feval' at the same time is not allowed")
if (!is.null(env$feval) && typeof(env$feval) != 'closure')
stop("'feval' must be a function")
if (!is.null(env$feval) && is.null(env$maximize))
stop("Please set 'maximize' to indicate whether the metric needs to be maximized or not")
# handle a situation when custom eval function was provided through params
if (!is.null(env$params[['eval_metric']]) &&
typeof(env$params$eval_metric) == 'closure') {
env$feval <- env$params$eval_metric
p <- env$params
p[ which(names(p) == 'eval_metric') ] <- NULL
env$params <- p
}
}
# Update booster with dtrain for an iteration
xgb.iter.update <- function(booster, dtrain, iter, obj = NULL) {
if (class(booster) != "xgb.Booster.handle") {
stop("first argument type must be xgb.Booster.handle")
}
if (class(dtrain) != "xgb.DMatrix") {
stop("second argument type must be xgb.DMatrix")
}
if (is.null(obj)) {
.Call("XGBoosterUpdateOneIter_R", booster, as.integer(iter), dtrain,
PACKAGE = "xgboost")
} else {
pred <- predict(booster, dtrain)
gpair <- obj(pred, dtrain)
.Call("XGBoosterBoostOneIter_R", booster, dtrain, gpair$grad, gpair$hess, PACKAGE = "xgboost")
}
return(TRUE)
}
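# A sketch of a custom objective of the kind expected above (illustrative):
# it must return a list with 'grad' and 'hess' vectors given predictions and dtrain.
# logregobj <- function(preds, dtrain) {
#   labels <- getinfo(dtrain, "label")
#   preds <- 1 / (1 + exp(-preds))          # transform margin into probability
#   list(grad = preds - labels, hess = preds * (1 - preds))
# }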
# Evaluate one iteration.
# Returns a named vector of evaluation metrics
# with the names in a 'datasetname-metricname' format.
xgb.iter.eval <- function(booster, watchlist, iter, feval = NULL) {
if (class(booster) != "xgb.Booster.handle")
stop("first argument type must be xgb.Booster.handle")
if (length(watchlist) == 0)
return(NULL)
evnames <- names(watchlist)
if (is.null(feval)) {
msg <- .Call("XGBoosterEvalOneIter_R", booster, as.integer(iter), watchlist,
as.list(evnames), PACKAGE = "xgboost")
msg <- stri_split_regex(msg, '(\\s+|:|\\s+)')[[1]][-1]
res <- as.numeric(msg[c(FALSE,TRUE)]) # even indices are the values
names(res) <- msg[c(TRUE,FALSE)] # odds are the names
} else {
res <- sapply(seq_along(watchlist), function(j) {
w <- watchlist[[j]]
preds <- predict(booster, w) # predict using all trees
eval_res <- feval(preds, w)
out <- eval_res$value
names(out) <- paste0(evnames[j], "-", eval_res$metric)
out
})
}
return(res)
}
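# A sketch of a custom evaluation function of the kind expected above
# (illustrative): it must return list(metric = <name>, value = <number>).
# evalerror <- function(preds, dtrain) {
#   labels <- getinfo(dtrain, "label")
#   err <- mean(as.numeric(preds > 0.5) != labels)
#   list(metric = "error", value = err)
# }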
#
# Helper functions for cross validation ---------------------------------------
#
# Generates random (stratified if needed) CV folds
generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
# cannot do it for rank
if (exists('objective', where=params) &&
is.character(params$objective) &&
strtrim(params$objective, 5) == 'rank:') {
stop("\n\tAutomatic generation of CV-folds is not implemented for ranking!\n",
"\tConsider providing pre-computed CV-folds through the 'folds=' parameter.\n")
}
# shuffle
rnd_idx <- sample(1:nrows)
if (stratified &&
length(label) == length(rnd_idx)) {
y <- label[rnd_idx]
# WARNING: some heuristic logic is employed to identify classification setting!
# - For classification, need to convert y labels to factor before making the folds,
# and then do stratification by factor levels.
# - For regression, leave y numeric and do stratification by quantiles.
if (exists('objective', where=params) &&
is.character(params$objective)) {
# If 'objective' provided in params, assume that y is a classification label
# unless objective is reg:linear
if (params$objective != 'reg:linear')
y <- factor(y)
} else {
# If no 'objective' given in params, it means that user either wants to use
# the default 'reg:linear' objective or has provided a custom obj function.
# Here, assume classification setting when y has 5 or less unique values:
if (length(unique(y)) <= 5)
y <- factor(y)
}
folds <- xgb.createFolds(y, nfold)
} else {
# make simple non-stratified folds
kstep <- length(rnd_idx) %/% nfold
folds <- list()
for (i in 1:(nfold - 1)) {
folds[[i]] <- rnd_idx[1:kstep]
rnd_idx <- rnd_idx[-(1:kstep)]
}
folds[[nfold]] <- rnd_idx
}
return(folds)
}
# Creates CV folds stratified by the values of y.
# It was borrowed from caret::createFolds and simplified
# by always returning an unnamed list of fold indices.
xgb.createFolds <- function(y, k = 10)
{
if (is.numeric(y)) {
## Group the numeric data based on their magnitudes
## and sample within those groups.
## When the number of samples is low, we may have
## issues further slicing the numeric data into
## groups. The number of groups will depend on the
## ratio of the number of folds to the sample size.
## At most, we will use quantiles. If the sample
## is too small, we just do regular unstratified
## CV
cuts <- floor(length(y) / k)
if (cuts < 2) cuts <- 2
if (cuts > 5) cuts <- 5
y <- cut(y,
unique(stats::quantile(y, probs = seq(0, 1, length = cuts))),
include.lowest = TRUE)
}
if (k < length(y)) {
## reset levels so that the possible levels and
## the levels in the vector are the same
y <- factor(as.character(y))
numInClass <- table(y)
foldVector <- vector(mode = "integer", length(y))
## For each class, balance the fold allocation as far
## as possible, then resample the remainder.
## The final assignment of folds is also randomized.
for (i in 1:length(numInClass)) {
## create a vector of integers from 1:k as many times as possible without
## going over the number of samples in the class. Note that if the number
## of samples in a class is less than k, nothing is produced here.
seqVector <- rep(1:k, numInClass[i] %/% k)
## add enough random integers to get length(seqVector) == numInClass[i]
if (numInClass[i] %% k > 0) seqVector <- c(seqVector, sample(1:k, numInClass[i] %% k))
## shuffle the integers for fold assignment and assign to this class's data
foldVector[which(y == dimnames(numInClass)$y[i])] <- sample(seqVector)
}
} else {
foldVector <- seq(along = y)
}
out <- split(seq(along = y), foldVector)
names(out) <- NULL
out
}
#
# Deprecation notice utilities ------------------------------------------------
#
#' Deprecation notices.
#'
#' At this time, some of the parameter names were changed in order to make the code style more uniform.
#' The deprecated parameters would be removed in the next release.
#'
#' To see all the current deprecated and new parameters, check the \code{xgboost:::depr_par_lut} table.
#'
#' A deprecation warning is shown when any of the deprecated parameters is used in a call.
#' An additional warning is shown when there was a partial match to a deprecated parameter
#' (as R is able to partially match parameter names).
#'
#' @name xgboost-deprecated
NULL
# Lookup table for the deprecated parameters bookkeeping
depr_par_lut <- matrix(c(
'print.every.n', 'print_every_n',
'early.stop.round', 'early_stopping_rounds',
'training.data', 'data',
'with.stats', 'with_stats',
'numberOfClusters', 'n_clusters',
'features.keep', 'features_keep',
'plot.height','plot_height',
'plot.width','plot_width',
'dummy', 'DUMMY'
), ncol=2, byrow = TRUE)
colnames(depr_par_lut) <- c('old', 'new')
# Checks the dot-parameters for deprecated names
# (including partial matching), gives a deprecation warning,
# and sets new parameters to the old parameters' values within its parent frame.
# WARNING: has side-effects
check.deprecation <- function(..., env = parent.frame()) {
pars <- list(...)
# exact and partial matches
all_match <- pmatch(names(pars), depr_par_lut[,1])
# indices of matched pars' names
idx_pars <- which(!is.na(all_match))
if (length(idx_pars) == 0) return()
# indices of matched LUT rows
idx_lut <- all_match[idx_pars]
# which of idx_lut were the exact matches?
ex_match <- depr_par_lut[idx_lut,1] %in% names(pars)
for (i in seq_along(idx_pars)) {
pars_par <- names(pars)[idx_pars[i]]
old_par <- depr_par_lut[idx_lut[i], 1]
new_par <- depr_par_lut[idx_lut[i], 2]
if (!ex_match[i]) {
warning("'", pars_par, "' was partially matched to '", old_par,"'")
}
.Deprecated(new_par, old=old_par, package = 'xgboost')
if (new_par != 'NULL') {
eval(parse(text = paste(new_par, '<-', pars[[pars_par]])), envir = env)
}
}
}
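# Illustrative example (hypothetical wrapper): the deprecated name is
# translated and assigned in the caller's frame.
# f <- function(...) { check.deprecation(...); print_every_n }
# f(print.every.n = 5)   # gives a deprecation warning, then returns 5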


@@ -1,486 +0,0 @@
# Construct a Booster from cachelist
# internal utility function
xgb.Booster <- function(params = list(), cachelist = list(), modelfile = NULL) {
if (typeof(cachelist) != "list" ||
any(sapply(cachelist, class) != 'xgb.DMatrix')) {
stop("xgb.Booster only accepts list of DMatrix as cachelist")
}
handle <- .Call("XGBoosterCreate_R", cachelist, PACKAGE = "xgboost")
if (!is.null(modelfile)) {
if (typeof(modelfile) == "character") {
.Call("XGBoosterLoadModel_R", handle, modelfile, PACKAGE = "xgboost")
} else if (typeof(modelfile) == "raw") {
.Call("XGBoosterLoadModelFromRaw_R", handle, modelfile, PACKAGE = "xgboost")
} else if (class(modelfile) == "xgb.Booster") {
modelfile <- xgb.Booster.check(modelfile, saveraw=TRUE)
.Call("XGBoosterLoadModelFromRaw_R", handle, modelfile$raw, PACKAGE = "xgboost")
} else {
stop("modelfile must be either character filename, or raw booster dump, or xgb.Booster object")
}
}
class(handle) <- "xgb.Booster.handle"
if (length(params) > 0) {
xgb.parameters(handle) <- params
}
return(handle)
}
# Convert xgb.Booster.handle to xgb.Booster
# internal utility function
xgb.handleToBooster <- function(handle, raw = NULL) {
bst <- list(handle = handle, raw = raw)
class(bst) <- "xgb.Booster"
return(bst)
}
# Return a verified to be valid handle out of either xgb.Booster.handle or xgb.Booster
# internal utility function
xgb.get.handle <- function(object) {
handle <- switch(class(object)[1],
xgb.Booster = object$handle,
xgb.Booster.handle = object,
stop("argument must be of either xgb.Booster or xgb.Booster.handle class")
)
if (is.null(handle) || .Call("XGCheckNullPtr_R", handle, PACKAGE="xgboost")) {
stop("invalid xgb.Booster.handle")
}
handle
}
# Check whether an xgb.Booster object is complete
# internal utility function
xgb.Booster.check <- function(bst, saveraw = TRUE) {
if (class(bst) != "xgb.Booster")
stop("argument type must be xgb.Booster")
isnull <- is.null(bst$handle)
if (!isnull) {
isnull <- .Call("XGCheckNullPtr_R", bst$handle, PACKAGE="xgboost")
}
if (isnull) {
bst$handle <- xgb.Booster(modelfile = bst$raw)
} else {
if (is.null(bst$raw) && saveraw)
bst$raw <- xgb.save.raw(bst$handle)
}
return(bst)
}
#' Predict method for eXtreme Gradient Boosting model
#'
#' Predicted values based on either xgboost model or model handle object.
#'
#' @param object Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}
#' @param newdata takes \code{matrix}, \code{dgCMatrix}, local data file or \code{xgb.DMatrix}.
#' @param missing Missing is only used when input is dense matrix. Pick a float value that represents
#' missing values in data (e.g., sometimes 0 or some other extreme value is used).
#' @param outputmargin whether the prediction should be returned in the form of the original untransformed
#' sum of predictions from boosting iterations' results. E.g., setting \code{outputmargin=TRUE} for
#' logistic regression would result in predictions for log-odds instead of probabilities.
#' @param ntreelimit limit the number of model's trees or boosting iterations used in prediction (see Details).
#' It will use all the trees by default (\code{NULL} value).
#' @param predleaf whether to predict leaf indices instead.
#' @param reshape whether to reshape the vector of predictions to a matrix form when there are several
#' prediction outputs per case. This option has no effect when \code{predleaf = TRUE}.
#' @param ... Parameters passed to \code{predict.xgb.Booster}
#'
#' @details
#' Note that \code{ntreelimit} is not necessarily equal to the number of boosting iterations
#' and it is not necessarily equal to the number of trees in a model.
#' E.g., in a random forest-like model, \code{ntreelimit} would limit the number of trees.
#' But for multiclass classification, there are multiple trees per iteration,
#' but \code{ntreelimit} limits the number of boosting iterations.
#'
#' Also note that \code{ntreelimit} would currently do nothing for predictions from gblinear,
#' since gblinear doesn't keep its boosting history.
#'
#' One possible practical application of the \code{predleaf} option is to use the model
#' as a generator of new features which capture non-linearity and interactions,
#' e.g., as implemented in \code{\link{xgb.create.features}}.
#'
#' @return
#' For regression or binary classification, it returns a vector of length \code{nrows(newdata)}.
#' For multiclass classification, either a \code{num_class * nrows(newdata)} vector or
#' a \code{(nrows(newdata), num_class)} dimension matrix is returned, depending on
#' the \code{reshape} value.
#'
#' When \code{predleaf = TRUE}, the output is a matrix object with the
#' number of columns corresponding to the number of trees.
#'
#' @seealso
#' \code{\link{xgb.train}}.
#'
#' @examples
#' ## binary classification:
#'
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' train <- agaricus.train
#' test <- agaricus.test
#'
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' # use all trees by default
#' pred <- predict(bst, test$data)
#' # use only the 1st tree
#' pred <- predict(bst, test$data, ntreelimit = 1)
#'
#'
#' ## multiclass classification in iris dataset:
#'
#' lb <- as.numeric(iris$Species) - 1
#' num_class <- 3
#' set.seed(11)
#' bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
#' max_depth = 4, eta = 0.5, nthread = 2, nrounds = 10, subsample = 0.5,
#' objective = "multi:softprob", num_class = num_class)
#' # predict for softmax returns num_class probability numbers per case:
#' pred <- predict(bst, as.matrix(iris[, -5]))
#' str(pred)
#' # reshape it to a num_class-columns matrix
#' pred <- matrix(pred, ncol=num_class, byrow=TRUE)
#' # convert the probabilities to softmax labels
#' pred_labels <- max.col(pred) - 1
#' # the following should result in the same error as seen in the last iteration
#' sum(pred_labels != lb)/length(lb)
#'
#' # compare that to the predictions from softmax:
#' set.seed(11)
#' bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
#' max_depth = 4, eta = 0.5, nthread = 2, nrounds = 10, subsample = 0.5,
#' objective = "multi:softmax", num_class = num_class)
#' pred <- predict(bst, as.matrix(iris[, -5]))
#' str(pred)
#' all.equal(pred, pred_labels)
#' # prediction from using only 5 iterations should result
#' # in the same error as seen in iteration 5:
#' pred5 <- predict(bst, as.matrix(iris[, -5]), ntreelimit=5)
#' sum(pred5 != lb)/length(lb)
#'
#'
#' ## random forest-like model of 25 trees for binary classification:
#'
#' set.seed(11)
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 5,
#' nthread = 2, nrounds = 1, objective = "binary:logistic",
#' num_parallel_tree = 25, subsample = 0.6, colsample_bytree = 0.1)
#' # Inspect the prediction error vs number of trees:
#' lb <- test$label
#' dtest <- xgb.DMatrix(test$data, label=lb)
#' err <- sapply(1:25, function(n) {
#' pred <- predict(bst, dtest, ntreelimit=n)
#' sum((pred > 0.5) != lb)/length(lb)
#' })
#' plot(err, type='l', ylim=c(0,0.1), xlab='#trees')
#'
#' @rdname predict.xgb.Booster
#' @export
predict.xgb.Booster <- function(object, newdata, missing = NA,
outputmargin = FALSE, ntreelimit = NULL, predleaf = FALSE, reshape = FALSE, ...) {
object <- xgb.Booster.check(object, saveraw = FALSE)
if (class(newdata) != "xgb.DMatrix")
newdata <- xgb.DMatrix(newdata, missing = missing)
if (is.null(ntreelimit))
ntreelimit <- NVL(object$best_ntreelimit, 0)
if (ntreelimit < 0)
stop("ntreelimit cannot be negative")
option <- 0L + 1L * as.logical(outputmargin) + 2L * as.logical(predleaf)
ret <- .Call("XGBoosterPredict_R", object$handle, newdata, option[1],
as.integer(ntreelimit), PACKAGE = "xgboost")
if (length(ret) %% nrow(newdata) != 0)
stop("prediction length ", length(ret)," is not multiple of nrows(newdata) ", nrow(newdata))
npred_per_case <- length(ret) / nrow(newdata)
if (predleaf){
len <- nrow(newdata)
ret <- if (length(ret) == len) {
matrix(ret, ncol = 1)
} else {
t(matrix(ret, ncol = len))
}
} else if (reshape && npred_per_case > 1) {
ret <- matrix(ret, ncol = length(ret) / nrow(newdata), byrow = TRUE)
}
return(ret)
}
#' @rdname predict.xgb.Booster
#' @export
predict.xgb.Booster.handle <- function(object, ...) {
bst <- xgb.handleToBooster(object)
ret <- predict(bst, ...)
return(ret)
}
#' Accessors for serializable attributes of a model.
#'
#' These methods allow to manipulate the key-value attribute strings of an xgboost model.
#'
#' @param object Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}.
#' @param name a non-empty character string specifying which attribute is to be accessed.
#' @param value a value of an attribute for \code{xgb.attr<-}; for \code{xgb.attributes<-}
#' it's a list (or an object coercible to a list) with the names of attributes to set
#' and the elements corresponding to attribute values.
#' Non-character values are converted to character.
#' When attribute value is not a scalar, only the first index is used.
#' Use \code{NULL} to remove an attribute.
#'
#' @details
#' The primary purpose of xgboost model attributes is to store some meta-data about the model.
#' Note that they are a separate concept from the object attributes in R.
#' Specifically, they refer to key-value strings that can be attached to an xgboost model,
#' stored together with the model's binary representation, and accessed later
#' (from R or any other interface).
#' In contrast, any R-attribute assigned to an R-object of \code{xgb.Booster} class
#' would not be saved by \code{xgb.save} because an xgboost model is an external memory object
#' and its serialization is handled externally.
#' Also, setting an attribute that has the same name as one of xgboost's parameters wouldn't
#' change the value of that parameter for a model.
#' Use \code{\link{xgb.parameters<-}} to set or change model parameters.
#'
#' The attribute setters would usually work more efficiently for \code{xgb.Booster.handle}
#' than for \code{xgb.Booster}, since only a handle (pointer) needs to be copied.
#' That would only matter if attributes need to be set many times.
#' Note, however, that when feeding a handle of an \code{xgb.Booster} object to the attribute setters,
#' the raw model cache of an \code{xgb.Booster} object would not be automatically updated,
#' and it would be user's responsibility to call \code{xgb.save.raw} to update it.
#'
#' The \code{xgb.attributes<-} setter either updates the existing or adds one or several attributes,
#' but it doesn't delete the other existing attributes.
#'
#' @return
#' \code{xgb.attr} returns either a string value of an attribute
#' or \code{NULL} if an attribute wasn't stored in a model.
#'
#' \code{xgb.attributes} returns a list of all attributes stored in a model
#' or \code{NULL} if a model has no stored attributes.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#'
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#'
#' xgb.attr(bst, "my_attribute") <- "my attribute value"
#' print(xgb.attr(bst, "my_attribute"))
#' xgb.attributes(bst) <- list(a = 123, b = "abc")
#'
#' xgb.save(bst, 'xgb.model')
#' bst1 <- xgb.load('xgb.model')
#' print(xgb.attr(bst1, "my_attribute"))
#' print(xgb.attributes(bst1))
#'
#' # deletion:
#' xgb.attr(bst1, "my_attribute") <- NULL
#' print(xgb.attributes(bst1))
#' xgb.attributes(bst1) <- list(a = NULL, b = NULL)
#' print(xgb.attributes(bst1))
#'
#' @rdname xgb.attr
#' @export
xgb.attr <- function(object, name) {
if (is.null(name) || nchar(as.character(name[1])) == 0) stop("invalid attribute name")
handle <- xgb.get.handle(object)
.Call("XGBoosterGetAttr_R", handle, as.character(name[1]), PACKAGE="xgboost")
}
#' @rdname xgb.attr
#' @export
`xgb.attr<-` <- function(object, name, value) {
if (is.null(name) || nchar(as.character(name[1])) == 0) stop("invalid attribute name")
handle <- xgb.get.handle(object)
if (!is.null(value)) {
# Coerce the elements to be scalar strings.
# Q: should we warn user about non-scalar elements?
value <- as.character(value[1])
}
.Call("XGBoosterSetAttr_R", handle, as.character(name[1]), value, PACKAGE="xgboost")
if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
object$raw <- xgb.save.raw(object$handle)
}
object
}
#' @rdname xgb.attr
#' @export
xgb.attributes <- function(object) {
handle <- xgb.get.handle(object)
attr_names <- .Call("XGBoosterGetAttrNames_R", handle, PACKAGE="xgboost")
if (is.null(attr_names)) return(NULL)
res <- lapply(attr_names, function(x) {
.Call("XGBoosterGetAttr_R", handle, x, PACKAGE="xgboost")
})
names(res) <- attr_names
res
}
#' @rdname xgb.attr
#' @export
`xgb.attributes<-` <- function(object, value) {
a <- as.list(value)
if (is.null(names(a)) || any(nchar(names(a)) == 0)) {
stop("attribute names cannot be empty strings")
}
# Coerce the elements to be scalar strings.
# Q: should we warn a user about non-scalar elements?
a <- lapply(a, function(x) {
if (is.null(x)) return(NULL)
as.character(x[1])
})
handle <- xgb.get.handle(object)
for (i in seq_along(a)) {
.Call("XGBoosterSetAttr_R", handle, names(a[i]), a[[i]], PACKAGE="xgboost")
}
if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
object$raw <- xgb.save.raw(object$handle)
}
object
}
#' Accessors for model parameters.
#'
#' Only the setter for xgboost parameters is currently implemented.
#'
#' @param object Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}.
#' @param value a list (or an object coercible to a list) with the names of parameters to set
#' and the elements corresponding to parameter values.
#'
#' @details
#' Note that the setter would usually work more efficiently for \code{xgb.Booster.handle}
#' than for \code{xgb.Booster}, since only a handle needs to be copied.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#'
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#'
#' xgb.parameters(bst) <- list(eta = 0.1)
#'
#' @rdname xgb.parameters
#' @export
`xgb.parameters<-` <- function(object, value) {
if (length(value) == 0) return(object)
p <- as.list(value)
if (is.null(names(p)) || any(nchar(names(p)) == 0)) {
stop("parameter names cannot be empty strings")
}
names(p) <- gsub("\\.", "_", names(p))
p <- lapply(p, function(x) as.character(x)[1])
handle <- xgb.get.handle(object)
for (i in seq_along(p)) {
.Call("XGBoosterSetParam_R", handle, names(p[i]), p[[i]], PACKAGE = "xgboost")
}
if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
object$raw <- xgb.save.raw(object$handle)
}
object
}
# Extract # of trees in a model
# TODO: either add a getter to C-interface, or simply set an 'ntree' attribute after each iteration
# internal utility function
xgb.ntree <- function(bst) {
length(grep('^booster', xgb.dump(bst)))
}
#' Print xgb.Booster
#'
#' Print information about xgb.Booster.
#'
#' @param x an xgb.Booster object
#' @param verbose whether to print detailed data (e.g., attribute values)
#' @param ... not currently used
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' attr(bst, 'myattr') <- 'memo'
#'
#' print(bst)
#' print(bst, verbose=TRUE)
#'
#' @method print xgb.Booster
#' @export
print.xgb.Booster <- function(x, verbose=FALSE, ...) {
cat('##### xgb.Booster\n')
if (is.null(x$handle) || .Call("XGCheckNullPtr_R", x$handle, PACKAGE="xgboost")) {
cat("handle is invalid\n")
return(x)
}
cat('raw: ')
if (!is.null(x$raw)) {
cat(format(object.size(x$raw), units="auto"), '\n')
} else {
cat('NULL\n')
}
if (!is.null(x$call)) {
cat('call:\n ')
print(x$call)
}
if (!is.null(x$params)) {
cat('params (as set within xgb.train):\n')
cat( ' ',
paste(names(x$params),
paste0('"', unlist(x$params), '"'),
sep=' = ', collapse=', '), '\n', sep='')
}
# TODO: need an interface to access all the xgboosts parameters
attrs <- xgb.attributes(x)
if (length(attrs) > 0) {
cat('xgb.attributes:\n')
if (verbose) {
cat( paste(paste0(' ',names(attrs)),
paste0('"', unlist(attrs), '"'),
sep=' = ', collapse='\n'), '\n', sep='')
} else {
cat(' ', paste(names(attrs), collapse=', '), '\n', sep='')
}
}
if (!is.null(x$callbacks) && length(x$callbacks) > 0) {
cat('callbacks:\n')
lapply(callback.calls(x$callbacks), function(x) {
cat(' ')
print(x)
})
}
cat('niter: ', x$niter, '\n', sep='')
# TODO: uncomment when faster xgb.ntree is implemented
#cat('ntree: ', xgb.ntree(x), '\n', sep='')
for (n in setdiff(names(x), c('handle', 'raw', 'call', 'params', 'callbacks','evaluation_log','niter'))) {
if (is.atomic(x[[n]])) {
cat(n, ': ', x[[n]], '\n', sep='')
} else {
cat(n, ':\n\t', sep='')
print(x[[n]])
}
}
if (!is.null(x$evaluation_log)) {
cat('evaluation_log:\n')
print(x$evaluation_log, row.names = FALSE, topn = 2)
}
invisible(x)
}


@@ -1,367 +0,0 @@
#' Construct xgb.DMatrix object
#'
#' Construct an xgb.DMatrix object from a dense matrix, a sparse matrix,
#' or a local file (that was created previously by saving an \code{xgb.DMatrix}).
#'
#' @param data a \code{matrix} object, a \code{dgCMatrix} object or a character representing a filename
#' @param info a list of information of the xgb.DMatrix object
#' @param missing Missing is only used when input is a dense matrix; pick a float
#' value that represents missing values. Sometimes, 0 or another extreme value is used to represent missing values.
#
#' @param ... other information to pass to \code{info}.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
#' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
#' dtrain <- xgb.DMatrix('xgb.DMatrix.data')
#' @export
xgb.DMatrix <- function(data, info = list(), missing = NA, ...) {
cnames <- NULL
if (typeof(data) == "character") {
handle <- .Call("XGDMatrixCreateFromFile_R", data, as.integer(FALSE),
PACKAGE = "xgboost")
} else if (is.matrix(data)) {
handle <- .Call("XGDMatrixCreateFromMat_R", data, missing,
PACKAGE = "xgboost")
cnames <- colnames(data)
} else if (class(data) == "dgCMatrix") {
handle <- .Call("XGDMatrixCreateFromCSC_R", data@p, data@i, data@x,
PACKAGE = "xgboost")
cnames <- colnames(data)
} else {
stop(paste("xgb.DMatrix: does not support to construct from ",
typeof(data)))
}
dmat <- handle
attributes(dmat) <- list(.Dimnames = list(NULL, cnames), class = "xgb.DMatrix")
#dmat <- list(handle = handle, colnames = cnames)
#attr(dmat, 'class') <- "xgb.DMatrix"
info <- append(info, list(...))
if (length(info) == 0)
return(dmat)
for (i in 1:length(info)) {
p <- info[i]
setinfo(dmat, names(p), p[[1]])
}
return(dmat)
}
# get dmatrix from data, label
# internal helper method
xgb.get.DMatrix <- function(data, label = NULL, missing = NA, weight = NULL) {
inClass <- class(data)
if ("dgCMatrix" %in% inClass || "matrix" %in% inClass ) {
if (is.null(label)) {
stop("xgboost: need label when data is a matrix")
}
dtrain <- xgb.DMatrix(data, label = label, missing = missing)
if (!is.null(weight)){
setinfo(dtrain, "weight", weight)
}
} else {
if (!is.null(label)) {
warning("xgboost: label will be ignored.")
}
if (inClass == "character") {
dtrain <- xgb.DMatrix(data)
} else if (inClass == "xgb.DMatrix") {
dtrain <- data
} else if (inClass == "data.frame") {
stop("xgboost only support numerical matrix input,
use 'data.matrix' to transform the data.")
} else {
stop("xgboost: Invalid input of data")
}
}
return (dtrain)
}
#' Dimensions of xgb.DMatrix
#'
#' Returns a vector of numbers of rows and of columns in an \code{xgb.DMatrix}.
#' @param x Object of class \code{xgb.DMatrix}
#'
#' @details
#' Note: since \code{nrow} and \code{ncol} internally use \code{dim}, they can also
#' be directly used with an \code{xgb.DMatrix} object.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
#'
#' stopifnot(nrow(dtrain) == nrow(train$data))
#' stopifnot(ncol(dtrain) == ncol(train$data))
#' stopifnot(all(dim(dtrain) == dim(train$data)))
#'
#' @export
dim.xgb.DMatrix <- function(x) {
c(.Call("XGDMatrixNumRow_R", x, PACKAGE="xgboost"),
.Call("XGDMatrixNumCol_R", x, PACKAGE="xgboost"))
}
#' Handling of column names of \code{xgb.DMatrix}
#'
#' Only column names are supported for \code{xgb.DMatrix}, thus setting of
#' row names would have no effect and the returned row names would be NULL.
#'
#' @param x object of class \code{xgb.DMatrix}
#' @param value a list of two elements: the first one is ignored
#' and the second one is column names
#'
#' @details
#' Generic \code{dimnames} methods are used by \code{colnames}.
#' Since row names are irrelevant, it is recommended to use \code{colnames} directly.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
#' dimnames(dtrain)
#' colnames(dtrain)
#' colnames(dtrain) <- make.names(1:ncol(train$data))
#' print(dtrain, verbose=TRUE)
#'
#' @rdname dimnames.xgb.DMatrix
#' @export
dimnames.xgb.DMatrix <- function(x) {
attr(x, '.Dimnames')
}
#' @rdname dimnames.xgb.DMatrix
#' @export
`dimnames<-.xgb.DMatrix` <- function(x, value) {
if (!is.list(value) || length(value) != 2L)
stop("invalid 'dimnames' given: must be a list of two elements")
if (!is.null(value[[1L]]))
stop("xgb.DMatrix does not have rownames")
if (is.null(value[[2]])) {
attr(x, '.Dimnames') <- NULL
return(x)
}
if (ncol(x) != length(value[[2]]))
stop("can't assign ", length(value[[2]]), " colnames to a ",
ncol(x), " column xgb.DMatrix")
attr(x, '.Dimnames') <- value
x
}
#' Get information of an xgb.DMatrix object
#'
#' Get information of an xgb.DMatrix object
#' @param object Object of class \code{xgb.DMatrix}
#' @param name the name of the information field to get (see details)
#' @param ... other parameters
#'
#' @details
#' The \code{name} field can be one of the following:
#'
#' \itemize{
#' \item \code{label}: the label that XGBoost learns from;
#' \item \code{weight}: weights used to rescale each instance;
#' \item \code{base_margin}: the base margin is the base prediction that XGBoost will boost from;
#' \item \code{nrow}: number of rows of the \code{xgb.DMatrix}.
#' }
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
#'
#' labels <- getinfo(dtrain, 'label')
#' setinfo(dtrain, 'label', 1-labels)
#'
#' labels2 <- getinfo(dtrain, 'label')
#' stopifnot(all(labels2 == 1-labels))
#' @rdname getinfo
#' @export
getinfo <- function(object, ...) UseMethod("getinfo")
#' @rdname getinfo
#' @export
getinfo.xgb.DMatrix <- function(object, name, ...) {
if (typeof(name) != "character" ||
length(name) != 1 ||
!name %in% c('label', 'weight', 'base_margin', 'nrow')) {
stop("getinfo: name must one of the following\n",
" 'label', 'weight', 'base_margin', 'nrow'")
}
if (name != "nrow"){
ret <- .Call("XGDMatrixGetInfo_R", object, name, PACKAGE = "xgboost")
} else {
ret <- nrow(object)
}
if (length(ret) == 0) return(NULL)
return(ret)
}
#' Set information of an xgb.DMatrix object
#'
#' Set information of an xgb.DMatrix object
#'
#' @param object Object of class "xgb.DMatrix"
#' @param name the name of the field to set
#' @param info the specific field of information to set
#' @param ... other parameters
#'
#' @details
#' The \code{name} field can be one of the following:
#'
#' \itemize{
#' \item \code{label}: the label that XGBoost learns from;
#' \item \code{weight}: weights used to rescale each instance;
#' \item \code{base_margin}: the base margin is the base prediction that XGBoost will boost from;
#' \item \code{group}.
#' }
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
#'
#' labels <- getinfo(dtrain, 'label')
#' setinfo(dtrain, 'label', 1-labels)
#' labels2 <- getinfo(dtrain, 'label')
#' stopifnot(all.equal(labels2, 1-labels))
#' @rdname setinfo
#' @export
setinfo <- function(object, ...) UseMethod("setinfo")
#' @rdname setinfo
#' @export
setinfo.xgb.DMatrix <- function(object, name, info, ...) {
if (name == "label") {
if (length(info) != nrow(object))
stop("The length of labels must equal to the number of rows in the input data")
.Call("XGDMatrixSetInfo_R", object, name, as.numeric(info),
PACKAGE = "xgboost")
return(TRUE)
}
if (name == "weight") {
if (length(info) != nrow(object))
stop("The length of weights must equal to the number of rows in the input data")
.Call("XGDMatrixSetInfo_R", object, name, as.numeric(info),
PACKAGE = "xgboost")
return(TRUE)
}
if (name == "base_margin") {
# if (length(info)!=nrow(object))
# stop("The length of base margin must equal to the number of rows in the input data")
.Call("XGDMatrixSetInfo_R", object, name, as.numeric(info),
PACKAGE = "xgboost")
return(TRUE)
}
if (name == "group") {
if (sum(info) != nrow(object))
stop("The sum of groups must equal to the number of rows in the input data")
.Call("XGDMatrixSetInfo_R", object, name, as.integer(info),
PACKAGE = "xgboost")
return(TRUE)
}
stop(paste("setinfo: unknown info name", name))
return(FALSE)
}
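# Illustrative sketch (not part of the package code) of setting 'group' sizes
# for a ranking task, assuming a hypothetical 'dtrain' with 8 rows belonging
# to query groups of sizes 3, 3 and 2:
# setinfo(dtrain, 'group', c(3, 3, 2))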
#' Get a new DMatrix containing the specified rows of
#' the original xgb.DMatrix object
#'
#' Get a new DMatrix containing the specified rows of
#' the original xgb.DMatrix object
#'
#' @param object Object of class "xgb.DMatrix"
#' @param idxset an integer vector of indices of the rows needed
#' @param colset currently not used (column subsetting is not available)
#' @param ... other parameters (currently not used)
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
#'
#' dsub <- slice(dtrain, 1:42)
#' labels1 <- getinfo(dsub, 'label')
#' dsub <- dtrain[1:42, ]
#' labels2 <- getinfo(dsub, 'label')
#' all.equal(labels1, labels2)
#'
#' @rdname slice.xgb.DMatrix
#' @export
slice <- function(object, ...) UseMethod("slice")
#' @rdname slice.xgb.DMatrix
#' @export
slice.xgb.DMatrix <- function(object, idxset, ...) {
if (class(object) != "xgb.DMatrix") {
stop("slice: first argument dtrain must be xgb.DMatrix")
}
ret <- .Call("XGDMatrixSliceDMatrix_R", object, idxset, PACKAGE = "xgboost")
attr_list <- attributes(object)
nr <- nrow(object)
len <- sapply(attr_list, length)
ind <- which(len == nr)
if (length(ind) > 0) {
nms <- names(attr_list)[ind]
for (i in 1:length(ind)) {
attr(ret, nms[i]) <- attr(object, nms[i])[idxset]
}
}
return(structure(ret, class = "xgb.DMatrix"))
}
#' @rdname slice.xgb.DMatrix
#' @export
`[.xgb.DMatrix` <- function(object, idxset, colset=NULL) {
slice(object, idxset)
}
#' Print xgb.DMatrix
#'
#' Print information about xgb.DMatrix.
#' Currently it displays dimensions and presence of info-fields and colnames.
#'
#' @param x an xgb.DMatrix object
#' @param verbose whether to print colnames (when present)
#' @param ... not currently used
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
#'
#' dtrain
#' print(dtrain, verbose=TRUE)
#'
#' @method print xgb.DMatrix
#' @export
print.xgb.DMatrix <- function(x, verbose=FALSE, ...) {
cat('xgb.DMatrix dim:', nrow(x), 'x', ncol(x), ' info: ')
infos <- c()
if(length(getinfo(x, 'label')) > 0) infos <- 'label'
if(length(getinfo(x, 'weight')) > 0) infos <- c(infos, 'weight')
if(length(getinfo(x, 'base_margin')) > 0) infos <- c(infos, 'base_margin')
if (length(infos) == 0) infos <- 'NA'
cat(infos)
cnames <- colnames(x)
cat(' colnames:')
if (verbose & !is.null(cnames)) {
cat("\n'")
cat(cnames, sep="','")
cat("'")
} else {
if (is.null(cnames)) cat(' no')
else cat(' yes')
}
cat("\n")
invisible(x)
}


@@ -1,23 +0,0 @@
#' Save xgb.DMatrix object to binary file
#'
#' Save xgb.DMatrix object to binary file
#'
#' @param dmatrix the \code{xgb.DMatrix} object
#' @param fname the name of the file to write.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
#' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
#' dtrain <- xgb.DMatrix('xgb.DMatrix.data')
#' @export
xgb.DMatrix.save <- function(dmatrix, fname) {
if (typeof(fname) != "character")
stop("fname must be character")
if (class(dmatrix) != "xgb.DMatrix")
stop("the input data must be xgb.DMatrix")
.Call("XGDMatrixSaveBinary_R", dmatrix, fname, 0L, PACKAGE = "xgboost")
return(TRUE)
}


@@ -1,84 +0,0 @@
#' Create new features from a previously learned model
#'
#' May improve the learning by adding new features to the training data based on the decision trees from a previously learned model.
#'
#' @param model decision tree boosting model learned on the original data
#' @param data original data (usually provided as a \code{dgCMatrix} matrix)
#' @param ... currently not used
#'
#' @return \code{dgCMatrix} matrix including both the original data and the new features.
#'
#' @details
#' This function is inspired by paragraph 3.1 of the paper:
#'
#' \strong{Practical Lessons from Predicting Clicks on Ads at Facebook}
#'
#' \emph{(Xinran He, Junfeng Pan, Ou Jin, Tianbing Xu, Bo Liu, Tao Xu, Yan, xin Shi, Antoine Atallah, Ralf Herbrich, Stuart Bowers,
#' Joaquin Quinonero Candela)}
#'
#' International Workshop on Data Mining for Online Advertising (ADKDD) - August 24, 2014
#'
#' \url{https://research.facebook.com/publications/758569837499391/practical-lessons-from-predicting-clicks-on-ads-at-facebook/}.
#'
#' Extract explaining the method:
#'
#' "We found that boosted decision trees are a powerful and very
#' convenient way to implement non-linear and tuple transformations
#' of the kind we just described. We treat each individual
#' tree as a categorical feature that takes as value the
#' index of the leaf an instance ends up falling in. We use
#' 1-of-K coding of this type of features.
#'
#' For example, consider the boosted tree model in Figure 1 with 2 subtrees,
#' where the first subtree has 3 leafs and the second 2 leafs. If an
#' instance ends up in leaf 2 in the first subtree and leaf 1 in
#' second subtree, the overall input to the linear classifier will
#' be the binary vector \code{[0, 1, 0, 1, 0]}, where the first 3 entries
#' correspond to the leaves of the first subtree and last 2 to
#' those of the second subtree.
#'
#' [...]
#'
#' We can understand boosted decision tree
#' based transformation as a supervised feature encoding that
#' converts a real-valued vector into a compact binary-valued
#' vector. A traversal from root node to a leaf node represents
#' a rule on certain features."
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label)
#' dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label)
#'
#' param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic')
#' nround = 4
#'
#' bst = xgb.train(params = param, data = dtrain, nrounds = nround, nthread = 2)
#'
#' # Model accuracy without new features
#' accuracy.before <- sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)
#'
#' # Convert previous features to one hot encoding
#' new.features.train <- xgb.create.features(model = bst, agaricus.train$data)
#' new.features.test <- xgb.create.features(model = bst, agaricus.test$data)
#'
#' # learning with new features
#' new.dtrain <- xgb.DMatrix(data = new.features.train, label = agaricus.train$label)
#' new.dtest <- xgb.DMatrix(data = new.features.test, label = agaricus.test$label)
#' watchlist <- list(train = new.dtrain)
#' bst <- xgb.train(params = param, data = new.dtrain, nrounds = nround, nthread = 2)
#'
#' # Model accuracy with new features
#' accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)
#'
#' # Here the accuracy was already good and is now perfect.
#' cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now", accuracy.after, "!\n"))
#'
#' @export
xgb.create.features <- function(model, data, ...){
check.deprecation(...)
pred_with_leaf <- predict(model, data, predleaf = TRUE)
cols <- lapply(as.data.frame(pred_with_leaf), factor)
cBind(data, sparse.model.matrix( ~ . -1, cols))
}


@@ -1,302 +0,0 @@
#' Cross Validation
#'
#' The cross-validation function of xgboost
#'
#' @param params the list of parameters. Commonly used ones are:
#' \itemize{
#' \item \code{objective} objective function, common ones are
#' \itemize{
#' \item \code{reg:linear} linear regression
#' \item \code{binary:logistic} logistic regression for classification
#' }
#' \item \code{eta} step size of each boosting step
#' \item \code{max_depth} maximum depth of the tree
#' \item \code{nthread} number of threads used in training; if not set, all threads are used
#' }
#'
#' See \code{\link{xgb.train}} for further details.
#' See also demo/ for walkthrough example in R.
#' @param data takes an \code{xgb.DMatrix} or \code{Matrix} as the input.
#' @param nrounds the max number of iterations
#' @param nfold the original dataset is randomly partitioned into \code{nfold} equal size subsamples.
#' @param label vector of response values. Should be provided only when data is an R matrix (i.e., not an \code{xgb.DMatrix}).
#' @param missing is only used when input is a dense matrix. By default it is set to NA, which means
#' that NA values should be considered as 'missing' by the algorithm.
#' Sometimes, 0 or other extreme value might be used to represent missing values.
#' @param prediction A logical value indicating whether to return the test fold predictions
#' from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callback.
#' @param showsd \code{boolean}, whether to show standard deviation of cross validation
#' @param metrics list of evaluation metrics to be used in cross validation;
#' when it is not specified, the evaluation metric is chosen according to the objective function.
#' Possible options are:
#' \itemize{
#' \item \code{error} binary classification error rate
#' \item \code{rmse} Root mean square error
#' \item \code{logloss} negative log-likelihood function
#' \item \code{auc} Area under curve
#' \item \code{merror} Exact matching error, used to evaluate multi-class classification
#' }
#' @param obj customized objective function. Returns gradient and second order
#' gradient with given prediction and dtrain.
#' @param feval customized evaluation function. Returns
#' \code{list(metric='metric-name', value='metric-value')} with given
#' prediction and dtrain.
#' @param stratified a \code{boolean} indicating whether sampling of folds should be stratified
#' by the values of outcome labels.
#' @param folds \code{list} provides a possibility to use a list of pre-defined CV folds
#' (each element must be a vector of test fold's indices). When folds are supplied,
#' the \code{nfold} and \code{stratified} parameters are ignored.
#' @param verbose \code{boolean}, print the statistics during the process
#' @param print_every_n Print each n-th iteration evaluation messages when \code{verbose>0}.
#' Default is 1 which means all messages are printed. This parameter is passed to the
#' \code{\link{cb.print.evaluation}} callback.
#' @param early_stopping_rounds If \code{NULL}, the early stopping function is not triggered.
#' If set to an integer \code{k}, training with a validation set will stop if the performance
#' doesn't improve for \code{k} rounds.
#' Setting this parameter engages the \code{\link{cb.early.stop}} callback.
#' @param maximize If \code{feval} and \code{early_stopping_rounds} are set,
#' then this parameter must be set as well.
#' When it is \code{TRUE}, it means the larger the evaluation score the better.
#' This parameter is passed to the \code{\link{cb.early.stop}} callback.
#' @param callbacks a list of callback functions to perform various tasks during boosting.
#' See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
#' parameters' values. User can provide either existing or their own callback methods in order
#' to customize the training process.
#' @param ... other parameters to pass to \code{params}.
#'
#' @details
#' The original sample is randomly partitioned into \code{nfold} equal size subsamples.
#'
#' Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model, and the remaining \code{nfold - 1} subsamples are used as training data.
#'
#' The cross-validation process is then repeated \code{nrounds} times, with each of the \code{nfold} subsamples used exactly once as the validation data.
#'
#' All observations are used for both training and validation.
#'
#' Adapted from \url{http://en.wikipedia.org/wiki/Cross-validation_\%28statistics\%29#k-fold_cross-validation}
#'
#' @return
#' An object of class \code{xgb.cv.synchronous} with the following elements:
#' \itemize{
#' \item \code{call} a function call.
#' \item \code{params} parameters that were passed to the xgboost library. Note that it does not
#' capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
#' \item \code{callbacks} callback functions that were either automatically assigned or
#' explicitly passed.
#' \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
#' first column corresponding to iteration number and the rest corresponding to the
#' CV-based evaluation means and standard deviations for the training and test CV-sets.
#' It is created by the \code{\link{cb.evaluation.log}} callback.
#' \item \code{niter} number of boosting iterations.
#' \item \code{folds} the list of CV folds' indices - either those passed through the \code{folds}
#' parameter or randomly generated.
#' \item \code{best_iteration} iteration number with the best evaluation metric value
#' (only available with early stopping).
#' \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
#' which could further be used in \code{predict} method
#' (only available with early stopping).
#' \item \code{pred} CV prediction values available when \code{prediction} is set.
#' It is either vector or matrix (see \code{\link{cb.cv.predict}}).
#' \item \code{models} a list of the CV folds' models. It is only available with the explicit
#' setting of the \code{cb.cv.predict(save_models = TRUE)} callback.
#' }
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
#' cv <- xgb.cv(data = dtrain, nrounds = 3, nthread = 2, nfold = 5, metrics = list("rmse","auc"),
#' max_depth = 3, eta = 1, objective = "binary:logistic")
#' print(cv)
#' print(cv, verbose=TRUE)
#'
#' @export
xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing = NA,
prediction = FALSE, showsd = TRUE, metrics=list(),
obj = NULL, feval = NULL, stratified = TRUE, folds = NULL,
verbose = TRUE, print_every_n=1L,
early_stopping_rounds = NULL, maximize = NULL, callbacks = list(), ...) {
check.deprecation(...)
params <- check.booster.params(params, ...)
# TODO: should we deprecate the redundant 'metrics' parameter?
for (m in metrics)
params <- c(params, list("eval_metric" = m))
check.custom.obj()
check.custom.eval()
#if (is.null(params[['eval_metric']]) && is.null(feval))
# stop("Either 'eval_metric' or 'feval' must be provided for CV")
# Labels: take them from the DMatrix when provided, otherwise from the 'label' argument
labels <- label
if (class(data) == 'xgb.DMatrix')
labels <- getinfo(data, 'label')
if (is.null(labels))
stop("Labels must be provided for CV either through xgb.DMatrix, or through 'label=' when 'data' is matrix")
# CV folds
if(!is.null(folds)) {
if(class(folds) != "list" || length(folds) < 2)
stop("'folds' must be a list with 2 or more elements that are vectors of indices for each CV-fold")
nfold <- length(folds)
} else {
if (nfold <= 1)
stop("'nfold' must be > 1")
folds <- generate.cv.folds(nfold, nrow(data), stratified, label, params)
}
# Potential TODO: sequential CV
#if (strategy == 'sequential')
# stop('Sequential CV strategy is not yet implemented')
# verbosity & evaluation printing callback:
params <- c(params, list(silent = 1))
print_every_n <- max( as.integer(print_every_n), 1L)
if (!has.callbacks(callbacks, 'cb.print.evaluation') && verbose) {
callbacks <- add.cb(callbacks, cb.print.evaluation(print_every_n))
}
# evaluation log callback: always is on in CV
evaluation_log <- list()
if (!has.callbacks(callbacks, 'cb.evaluation.log')) {
callbacks <- add.cb(callbacks, cb.evaluation.log())
}
# Early stopping callback
stop_condition <- FALSE
if (!is.null(early_stopping_rounds) &&
!has.callbacks(callbacks, 'cb.early.stop')) {
callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds,
maximize=maximize, verbose=verbose))
}
# CV-predictions callback
if (prediction &&
!has.callbacks(callbacks, 'cb.cv.predict')) {
callbacks <- add.cb(callbacks, cb.cv.predict(save_models=FALSE))
}
# Sort the callbacks into categories
cb <- categorize.callbacks(callbacks)
# create the booster-folds
dall <- xgb.get.DMatrix(data, label, missing)
bst_folds <- lapply(1:length(folds), function(k) {
dtest <- slice(dall, folds[[k]])
dtrain <- slice(dall, unlist(folds[-k]))
bst <- xgb.Booster(params, list(dtrain, dtest))
list(dtrain=dtrain, bst=bst, watchlist=list(train=dtrain, test=dtest), index=folds[[k]])
})
# a "basket" to collect some results from callbacks
basket <- list()
# extract parameters that can affect the relationship b/w #trees and #iterations
num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1)
num_parallel_tree <- max(as.numeric(NVL(params[['num_parallel_tree']], 1)), 1)
# those are fixed for CV (no training continuation)
begin_iteration <- 1
end_iteration <- nrounds
# synchronous CV boosting: run CV folds' models within each iteration
for (iteration in begin_iteration:end_iteration) {
for (f in cb$pre_iter) f()
msg <- lapply(bst_folds, function(fd) {
xgb.iter.update(fd$bst, fd$dtrain, iteration - 1, obj)
xgb.iter.eval(fd$bst, fd$watchlist, iteration - 1, feval)
})
msg <- simplify2array(msg)
bst_evaluation <- rowMeans(msg)
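# per-metric standard deviation across folds, via the identity var = E[x^2] - (E[x])^2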
bst_evaluation_err <- sqrt(rowMeans(msg^2) - bst_evaluation^2)
for (f in cb$post_iter) f()
if (stop_condition) break
}
for (f in cb$finalize) f(finalize=TRUE)
# the CV result
ret <- list(
call = match.call(),
params = params,
callbacks = callbacks,
evaluation_log = evaluation_log,
niter = end_iteration,
folds = folds
)
ret <- c(ret, basket)
class(ret) <- 'xgb.cv.synchronous'
invisible(ret)
}
#' Print xgb.cv result
#'
#' Prints formatted results of \code{xgb.cv}.
#'
#' @param x an \code{xgb.cv.synchronous} object
#' @param verbose whether to print detailed data
#' @param ... passed to \code{print.data.table}
#'
#' @details
#' When not verbose, only the evaluation results are printed,
#' including the best iteration (when available).
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#' cv <- xgb.cv(data = train$data, label = train$label, nfold = 5, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' print(cv)
#' print(cv, verbose=TRUE)
#'
#' @rdname print.xgb.cv
#' @method print xgb.cv.synchronous
#' @export
print.xgb.cv.synchronous <- function(x, verbose=FALSE, ...) {
cat('##### xgb.cv ', length(x$folds), '-folds\n', sep='')
if (verbose) {
if (!is.null(x$call)) {
cat('call:\n ')
print(x$call)
}
if (!is.null(x$params)) {
cat('params (as set within xgb.cv):\n')
cat( ' ',
paste(names(x$params),
paste0('"', unlist(x$params), '"'),
sep=' = ', collapse=', '), '\n', sep='')
}
if (!is.null(x$callbacks) && length(x$callbacks) > 0) {
cat('callbacks:\n')
lapply(callback.calls(x$callbacks), function(x) {
cat(' ')
print(x)
})
}
for (n in c('niter', 'best_iteration', 'best_ntreelimit')) {
if (is.null(x[[n]]))
next
cat(n, ': ', x[[n]], '\n', sep='')
}
if (!is.null(x$pred)) {
cat('pred:\n')
str(x$pred)
}
}
if (verbose)
cat('evaluation_log:\n')
print(x$evaluation_log, row.names = FALSE, ...)
if (!is.null(x$best_iteration)) {
cat('Best iteration:\n')
print(x$evaluation_log[x$best_iteration], row.names = FALSE, ...)
}
invisible(x)
}


@@ -1,59 +0,0 @@
#' Save xgboost model to text file
#'
#' Save an xgboost model to a text file. The dump can be parsed later.
#'
#' @param model the model object.
#' @param fname the name of the text file where to save the model text dump. If not provided or set to \code{NULL} the function will return the model as a \code{character} vector.
#' @param fmap feature map file representing the type of feature.
#' Detailed description could be found at
#' \url{https://github.com/dmlc/xgboost/wiki/Binary-Classification#dump-model}.
#' See demo/ for a walkthrough example in R, and
#' \url{https://github.com/dmlc/xgboost/blob/master/demo/data/featmap.txt}
#' for an example of the format.
#' @param with_stats whether to dump statistics of the splits.
#' When this option is on, the model dump comes with two additional statistics:
#' gain is the approximate loss function gain we get in each split;
#' cover is the sum of the second order gradient in each node.
#' @param ... currently not used
#'
#' @return
#' If \code{fname} is not provided or set to \code{NULL}, the function will return the model dump as a \code{character} vector. Otherwise it will write the dump to \code{fname} and return \code{TRUE}.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' # save the model in file 'xgb.model.dump'
#' xgb.dump(bst, 'xgb.model.dump', with_stats = TRUE)
#'
#' # print the model without saving it to a file
#' print(xgb.dump(bst))
#' @export
xgb.dump <- function(model = NULL, fname = NULL, fmap = "", with_stats=FALSE, ...) {
check.deprecation(...)
if (class(model) != "xgb.Booster")
stop("model: argument must be of type xgb.Booster")
if (!(class(fname) %in% c("character", "NULL") && length(fname) <= 1))
stop("fname: argument must be of type character (when provided)")
if (!(class(fmap) %in% c("character", "NULL") && length(fmap) <= 1))
stop("fmap: argument must be of type character (when provided)")
model <- xgb.Booster.check(model)
model_dump <- .Call("XGBoosterDumpModel_R", model$handle, fmap, as.integer(with_stats), PACKAGE = "xgboost")
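# tidy the raw dump: strip tab indentation (only when returning it to R), split it into lines, and drop blank lines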
if (is.null(fname))
model_dump <- stri_replace_all_regex(model_dump, '\t', '')
model_dump <- unlist(stri_split_regex(model_dump, '\n'))
model_dump <- grep('^\\s*$', model_dump, invert = TRUE, value = TRUE)
if (is.null(fname)) {
return(model_dump)
} else {
writeLines(model_dump, fname)
return(TRUE)
}
}


@@ -1,135 +0,0 @@
# ggplot backend for the xgboost plotting facilities
#' @rdname xgb.plot.importance
#' @export
xgb.ggplot.importance <- function(importance_matrix = NULL, top_n = NULL, measure = NULL,
rel_to_first = FALSE, n_clusters = c(1:10), ...) {
importance_matrix <- xgb.plot.importance(importance_matrix, top_n = top_n, measure = measure,
rel_to_first = rel_to_first, plot = FALSE, ...)
if (!requireNamespace("ggplot2", quietly = TRUE)) {
stop("ggplot2 package is required", call. = FALSE)
}
if (!requireNamespace("Ckmeans.1d.dp", quietly = TRUE)) {
stop("Ckmeans.1d.dp package is required", call. = FALSE)
}
clusters <- suppressWarnings(
Ckmeans.1d.dp::Ckmeans.1d.dp(importance_matrix$Importance, n_clusters)
)
importance_matrix[, Cluster := as.character(clusters$cluster)]
plot <-
ggplot2::ggplot(importance_matrix,
ggplot2::aes(x = factor(Feature, levels = rev(Feature)), y = Importance, width = 0.05),
environment = environment()) +
ggplot2::geom_bar(ggplot2::aes(fill = Cluster), stat = "identity", position = "identity") +
ggplot2::coord_flip() +
ggplot2::xlab("Features") +
ggplot2::ggtitle("Feature importance") +
ggplot2::theme(plot.title = ggplot2::element_text(lineheight = .9, face = "bold"),
panel.grid.major.y = ggplot2::element_blank())
return(plot)
}
#' @rdname xgb.plot.deepness
#' @export
xgb.ggplot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med.depth", "med.weight")) {
if (!requireNamespace("ggplot2", quietly = TRUE))
stop("ggplot2 package is required for plotting the graph deepness.", call. = FALSE)
which <- match.arg(which)
dt_depths <- xgb.plot.deepness(model = model, plot = FALSE)
dt_summaries <- dt_depths[, .(.N, Cover = mean(Cover)), Depth]
setkey(dt_summaries, 'Depth')
if (which == "2x1") {
p1 <-
ggplot2::ggplot(dt_summaries) +
ggplot2::geom_bar(ggplot2::aes(x = Depth, y = N), stat = "Identity") +
ggplot2::xlab("") +
ggplot2::ylab("Number of leafs") +
ggplot2::ggtitle("Model complexity") +
ggplot2::theme(
plot.title = ggplot2::element_text(lineheight = 0.9, face = "bold"),
panel.grid.major.y = ggplot2::element_blank(),
axis.ticks = ggplot2::element_blank(),
axis.text.x = ggplot2::element_blank()
)
p2 <-
ggplot2::ggplot(dt_summaries) +
ggplot2::geom_bar(ggplot2::aes(x = Depth, y = Cover), stat = "Identity") +
ggplot2::xlab("Leaf depth") +
ggplot2::ylab("Weighted cover")
multiplot(p1, p2, cols = 1)
return(invisible(list(p1, p2)))
} else if (which == "max.depth") {
p <-
ggplot2::ggplot(dt_depths[, max(Depth), Tree]) +
ggplot2::geom_jitter(ggplot2::aes(x = Tree, y = V1),
height = 0.15, alpha=0.4, size=3, stroke=0) +
ggplot2::xlab("tree #") +
ggplot2::ylab("Max tree leaf depth")
return(p)
} else if (which == "med.depth") {
p <-
ggplot2::ggplot(dt_depths[, median(as.numeric(Depth)), Tree]) +
ggplot2::geom_jitter(ggplot2::aes(x = Tree, y = V1),
height = 0.15, alpha=0.4, size=3, stroke=0) +
ggplot2::xlab("tree #") +
ggplot2::ylab("Median tree leaf depth")
return(p)
} else if (which == "med.weight") {
p <-
ggplot2::ggplot(dt_depths[, median(abs(Weight)), Tree]) +
ggplot2::geom_point(ggplot2::aes(x = Tree, y = V1),
alpha=0.4, size=3, stroke=0) +
ggplot2::xlab("tree #") +
ggplot2::ylab("Median absolute leaf weight")
return(p)
}
}
# Plot multiple ggplot graph aligned by rows and columns.
# ... the plots
# cols number of columns
# internal utility function
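# Example (sketch): multiplot(p1, p2, cols = 2) lays two ggplot objects out side by side.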
multiplot <- function(..., cols = 1) {
plots <- list(...)
num_plots = length(plots)
layout <- matrix(seq(1, cols * ceiling(num_plots / cols)),
ncol = cols, nrow = ceiling(num_plots / cols))
if (num_plots == 1) {
print(plots[[1]])
} else {
grid::grid.newpage()
grid::pushViewport(grid::viewport(layout = grid::grid.layout(nrow(layout), ncol(layout))))
for (i in 1:num_plots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.table(which(layout == i, arr.ind = TRUE))
print(
plots[[i]], vp = grid::viewport(
layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col
)
)
}
}
}
globalVariables(c(
"Cluster", "ggplot", "aes", "geom_bar", "coord_flip", "xlab", "ylab", "ggtitle", "theme",
"element_blank", "element_text"
))


@@ -1,106 +0,0 @@
#' Show importance of features in a model
#'
#' Create a \code{data.table} of the most important features of a model.
#'
#' @param feature_names names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.
#' @param model generated by the \code{xgb.train} function.
#' @param data the dataset used for the training step. Will be used with the \code{label} parameter for co-occurrence computation. More information in the \code{Details} section. This parameter is optional.
#' @param label the label vector used for the training step. Will be used with the \code{data} parameter for co-occurrence computation. More information in the \code{Details} section. This parameter is optional.
#' @param target a function which returns \code{TRUE} or \code{1} when an observation should be counted as a co-occurrence, and \code{FALSE} or \code{0} otherwise. A default function is provided for computing co-occurrences in binary classification. The \code{target} function should take only one parameter. This parameter will be used to provide each important feature vector after the split condition has been applied, so these vectors will contain only 0s and 1s, whatever the information was before. More information in the \code{Details} section. This parameter is optional.
#'
#' @return A \code{data.table} of the features used in the model with their average gain (and their weight for boosted tree model) in the model.
#'
#' @details
#' This function is for both linear and tree models.
#'
#' \code{data.table} is returned by the function.
#' The columns are :
#' \itemize{
#' \item \code{Features} name of the features as provided in \code{feature_names} or already present in the model dump;
#' \item \code{Gain} contribution of each feature to the model. For boosted tree models, the gain of each feature in each tree is taken into account, then averaged per feature to give a view of the entire model. A higher percentage means a more important feature for predicting the \code{label} used for training (only available for tree models);
#' \item \code{Cover} metric of the number of observations related to this feature (only available for tree models);
#' \item \code{Weight} percentage representing the relative number of times a feature has been used in the trees.
#' }
#'
#' If you don't provide \code{feature_names}, the index of the features will be used instead.
#'
#' Because the index is extracted from the model dump (made on the C++ side), it starts at 0 (as usual in C++) instead of 1 (as usual in R).
#'
#' Co-occurrence count
#' ------------------
#'
#' The gain gives an indication of how important a feature is in making a branch of a decision tree purer. However, with this information alone, you can't know whether this feature has to be present or absent to get a specific classification. In the example code, you may wonder if odor=none should be \code{TRUE} to not eat a mushroom.
#'
#' Co-occurrence computation is here to help in understanding this relation between a predictor and a specific class. It counts how many observations are returned as \code{TRUE} by the \code{target} function (see parameters). When you execute the example below, there are only 92 cases over the 3140 observations of the training dataset where a mushroom has no odor and can be eaten safely.
#'
#' If you need to remember only one thing: unless you want to leave us early, don't eat a mushroom which has no odor :-)
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#'
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
#'
#' xgb.importance(colnames(agaricus.train$data), model = bst)
#'
#' # Same thing with co-occurrence computation this time
#' xgb.importance(colnames(agaricus.train$data), model = bst, data = agaricus.train$data, label = agaricus.train$label)
#'
#' @export
xgb.importance <- function(feature_names = NULL, model = NULL, data = NULL, label = NULL, target = function(x) ( (x + label) == 2)){
if (!class(feature_names) %in% c("character", "NULL")) {
stop("feature_names: Has to be a vector of character or NULL if the model already contains feature name. Look at this function documentation to see where to get feature names.")
}
if (class(model) != "xgb.Booster") {
stop("model: Has to be an object of class xgb.Booster model generaged by the xgb.train function.")
}
if((is.null(data) & !is.null(label)) | (!is.null(data) & is.null(label))) {
stop("data/label: Provide the two arguments if you want co-occurence computation or none of them if you are not interested but not one of them only.")
}
if(class(label) == "numeric"){
if(sum(label == 0) / length(label) > 0.5) label <- as(label, "sparseVector")
}
treeDump <- function(feature_names, text, keepDetail){
if(keepDetail) groupBy <- c("Feature", "Split", "MissingNo") else groupBy <- "Feature"
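# aggregate Gain, Cover and split counts per feature (or per feature/split when keepDetail is on),
# then normalize each to a fraction of the total and sort by decreasing Gain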
xgb.model.dt.tree(feature_names = feature_names, text = text)[,"MissingNo" := Missing == No ][Feature != "Leaf",.(Gain = sum(Quality), Cover = sum(Cover), Frequency = .N), by = groupBy, with = T][,`:=`(Gain = Gain / sum(Gain), Cover = Cover / sum(Cover), Frequency = Frequency / sum(Frequency))][order(Gain, decreasing = T)]
}
linearDump <- function(feature_names, text){
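# a gblinear dump lists the coefficients after a "weight:" marker, one value per line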
weights <- which(text == "weight:") %>% {a =. + 1; text[a:length(text)]} %>% as.numeric
if(is.null(feature_names)) feature_names <- seq(to = length(weights))
data.table(Feature = feature_names, Weight = weights)
}
model.text.dump <- xgb.dump(model = model, with_stats = T)
if(model.text.dump[2] == "bias:"){
result <- model.text.dump %>% linearDump(feature_names, .)
if(!is.null(data) | !is.null(label)) warning("data/label: these parameters should only be provided with decision tree based models.")
} else {
result <- treeDump(feature_names, text = model.text.dump, keepDetail = !is.null(data))
# Co-occurence computation
if(!is.null(data) & !is.null(label) & nrow(result) > 0) {
# Take care of missing column
a <- data[, result[MissingNo == T,Feature], drop=FALSE] != 0
# Bind the two Matrix and reorder columns
c <- data[, result[MissingNo == F,Feature], drop=FALSE] %>% cBind(a,.) %>% .[,result[,Feature]]
rm(a)
# Apply split
d <- data[, result[,Feature], drop=FALSE] < as.numeric(result[,Split])
apply(c & d, 2, . %>% target %>% sum) -> vec
result <- result[, "RealCover" := as.numeric(vec), with = F][, "RealCover %" := RealCover / sum(label)][,MissingNo := NULL]
}
}
result
}
# Avoid error messages during CRAN check.
# The reason is that these variables are never declared
# They are mainly column names inferred by Data.table...
globalVariables(c(".", ".N", "Gain", "Frequency", "Feature", "Split", "No", "Missing", "MissingNo", "RealCover"))


@@ -1,31 +0,0 @@
#' Load xgboost model from binary file
#'
#' Load xgboost model from the binary model file
#'
#' @param modelfile the name of the binary model file, or a raw vector holding a previously saved model.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
#' xgb.save(bst, 'xgb.model')
#' bst <- xgb.load('xgb.model')
#' pred <- predict(bst, test$data)
#' @export
xgb.load <- function(modelfile) {
if (is.null(modelfile))
stop("xgb.load: modelfile cannot be NULL")
handle <- xgb.Booster(modelfile = modelfile)
# re-use modelfile if it is raw so we do not need to serialize
if (typeof(modelfile) == "raw") {
bst <- xgb.handleToBooster(handle, modelfile)
} else {
bst <- xgb.handleToBooster(handle, NULL)
}
bst <- xgb.Booster.check(bst, saveraw = TRUE)
return(bst)
}


@@ -1,122 +0,0 @@
#' Parse a boosted tree model text dump
#'
#' Parse a boosted tree model text dump into a \code{data.table} structure.
#'
#' @param feature_names character vector of feature names. If the model already
#' contains feature names, this argument should be \code{NULL} (default value)
#' @param model object of class \code{xgb.Booster}
#' @param text \code{character} vector previously generated by the \code{xgb.dump}
#' function (where parameter \code{with_stats = TRUE} should have been set).
#' @param n_first_tree limit the parsing to the first \code{n} trees.
#' If set to \code{NULL}, all trees of the model are parsed.
#'
#' @return
#' A \code{data.table} with detailed information about model trees' nodes.
#'
#' The columns of the \code{data.table} are:
#'
#' \itemize{
#' \item \code{Tree}: ID of a tree in a model
#' \item \code{Node}: ID of a node in a tree
#' \item \code{ID}: unique identifier of a node in a model
#' \item \code{Feature}: for a branch node, it's a feature id or name (when available);
#' for a leaf node, it simply labels it as \code{'Leaf'}
#' \item \code{Split}: location of the split for a branch node (split condition is always "less than")
#' \item \code{Yes}: ID of the next node when the split condition is met
#' \item \code{No}: ID of the next node when the split condition is not met
#' \item \code{Missing}: ID of the next node when branch value is missing
#' \item \code{Quality}: either the split gain (change in loss) or the leaf value
#' \item \code{Cover}: metric related to the number of observations either seen by a split
#' or collected by a leaf during training.
#' }
#'
#' @examples
#' # Basic use:
#'
#' data(agaricus.train, package='xgboost')
#'
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
#'
#' (dt <- xgb.model.dt.tree(colnames(agaricus.train$data), bst))
#'
#'
#' # How to match feature names of splits that are following a current 'Yes' branch:
#'
#' merge(dt, dt[, .(ID, Y.Feature=Feature)], by.x='Yes', by.y='ID', all.x=TRUE)[order(Tree,Node)]
#'
#' @export
xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
n_first_tree = NULL){
if (!class(feature_names) %in% c("character", "NULL")) {
stop("feature_names: Has to be a vector of character\n",
" or NULL if the model dump already contains feature names.\n",
" Look at this function documentation to see where to get feature names.")
}
if (class(model) != "xgb.Booster" & class(text) != "character") {
stop("Either 'model' has to be an object of class xgb.Booster\n",
" or 'text' has to be a character vector with the result of xgb.dump\n",
" (or NULL if the model was provided).")
}
if (!class(n_first_tree) %in% c("numeric", "NULL") | length(n_first_tree) > 1) {
stop("n_first_tree: Has to be a numeric vector of size 1.")
}
if(is.null(text)){
text <- xgb.dump(model = model, with_stats = T)
}
position <- which(!is.na(stri_match_first_regex(text, "booster")))
add.tree.id <- function(x, i) paste(i, x, sep = "-")
anynumber_regex <- "[-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?"
td <- data.table(t=text)
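# flag the 'booster' header lines and turn them into 0-based tree ids via a cumulative sum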
td[position, Tree := 1L]
td[, Tree := cumsum(ifelse(is.na(Tree), 0L, Tree)) - 1L]
n_first_tree <- min(max(td$Tree), n_first_tree)
td <- td[Tree <= n_first_tree & !grepl('^booster', t)]
td[, Node := stri_match_first_regex(t, "(\\d+):")[,2] %>% as.numeric ]
td[, ID := add.tree.id(Node, Tree)]
td[, isLeaf := !is.na(stri_match_first_regex(t, "leaf"))]
# parse branch lines
td[isLeaf==FALSE, c("Feature", "Split", "Yes", "No", "Missing", "Quality", "Cover") := {
rx <- paste0("f(\\d+)<(", anynumber_regex, ")\\] yes=(\\d+),no=(\\d+),missing=(\\d+),",
"gain=(", anynumber_regex, "),cover=(", anynumber_regex, ")")
# skip some indices with spurious capture groups from anynumber_regex
xtr <- stri_match_first_regex(t, rx)[, c(2,3,5,6,7,8,10)]
xtr[, 3:5] <- add.tree.id(xtr[, 3:5], Tree)
lapply(1:ncol(xtr), function(i) xtr[,i])
}]
# assign feature_names when available
td[isLeaf==FALSE & !is.null(feature_names),
Feature := feature_names[as.numeric(Feature) + 1] ]
# parse leaf lines
td[isLeaf==TRUE, c("Feature", "Quality", "Cover") := {
rx <- paste0("leaf=(", anynumber_regex, "),cover=(", anynumber_regex, ")")
xtr <- stri_match_first_regex(t, rx)[, c(2,4)]
c("Leaf", lapply(1:ncol(xtr), function(i) xtr[,i]))
}]
# convert some columns to numeric
numeric_cols <- c("Quality", "Cover")
td[, (numeric_cols) := lapply(.SD, as.numeric), .SDcols=numeric_cols]
td[, t := NULL]
td[, isLeaf := NULL]
td[order(Tree, Node)]
}
# Avoid error messages during CRAN check.
# The reason is that these variables are never declared
# They are mainly column names inferred by Data.table...
globalVariables(c("Tree", "Node", "ID", "Feature", "t", "isLeaf",".SD", ".SDcols"))


@@ -1,149 +0,0 @@
#' Plot model trees deepness
#'
#' Visualizes distributions related to the depth of tree leaves.
#' \code{xgb.plot.deepness} uses base R graphics, while \code{xgb.ggplot.deepness} uses the ggplot backend.
#'
#' @param model either an \code{xgb.Booster} model generated by the \code{xgb.train} function
#' or a data.table result of the \code{xgb.model.dt.tree} function.
#' @param plot (base R barplot) whether a barplot should be produced.
#' If FALSE, only a data.table is returned.
#' @param which which distribution to plot (see details).
#' @param ... other parameters passed to \code{barplot} or \code{plot}.
#'
#' @details
#'
#' When \code{which="2x1"}, two distributions with respect to the leaf depth
#' are plotted on top of each other:
#' \itemize{
#' \item the distribution of the number of leaves in a tree model at a certain depth;
#' \item the distribution of the average weighted number of observations ("cover")
#' ending up in leaves at a certain depth.
#' }
#' Those could be helpful in determining sensible ranges of the \code{max_depth}
#' and \code{min_child_weight} parameters.
#'
#' When \code{which="max.depth"} or \code{which="med.depth"}, plots of either maximum or median depth
#' per tree with respect to tree number are created. And \code{which="med.weight"} allows to see how
#' a tree's median absolute leaf weight changes through the iterations.
#'
#' This function was inspired by the blog post
#' \url{http://aysent.github.io/2015/11/08/random-forest-leaf-visualization.html}.
#'
#' @return
#'
#' Other than producing plots (when \code{plot=TRUE}), the \code{xgb.plot.deepness} function
#' silently returns a processed data.table where each row corresponds to a terminal leaf in a tree model,
#' and contains information about the leaf's depth, cover, and weight (which is used in calculating predictions).
#'
#' The \code{xgb.ggplot.deepness} silently returns either a list of two ggplot graphs when \code{which="2x1"}
#' or a single ggplot graph for the other \code{which} options.
#'
#' @seealso
#'
#' \code{\link{xgb.train}}, \code{\link{xgb.model.dt.tree}}.
#'
#' @examples
#'
#' data(agaricus.train, package='xgboost')
#'
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 15,
#' eta = 0.1, nthread = 2, nrounds = 50, objective = "binary:logistic",
#' subsample = 0.5, min_child_weight = 2)
#'
#' xgb.plot.deepness(bst)
#' xgb.ggplot.deepness(bst)
#'
#' xgb.plot.deepness(bst, which='max.depth', pch=16, col=rgb(0,0,1,0.3), cex=2)
#'
#' xgb.plot.deepness(bst, which='med.weight', pch=16, col=rgb(0,0,1,0.3), cex=2)
#'
#' @rdname xgb.plot.deepness
#' @export
xgb.plot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med.depth", "med.weight"),
plot = TRUE, ...) {
if (!(class(model) == "xgb.Booster" || is.data.table(model)))
stop("model: Has to be either an xgb.Booster model generaged by the xgb.train function\n",
"or a data.table result of the xgb.importance function")
if (!requireNamespace("igraph", quietly = TRUE))
stop("igraph package is required for plotting the graph deepness.", call. = FALSE)
which <- match.arg(which)
dt_tree <- model
if (class(model) == "xgb.Booster")
dt_tree <- xgb.model.dt.tree(model = model)
if (!all(c("Feature", "Tree", "ID", "Yes", "No", "Cover") %in% colnames(dt_tree)))
stop("Model tree columns are not as expected!\n",
" Note that this function works only for tree models.")
dt_depths <- merge(get.leaf.depth(dt_tree), dt_tree[, .(ID, Cover, Weight=Quality)], by = "ID")
setkeyv(dt_depths, c("Tree", "ID"))
# count by depth levels, and also calculate average cover at a depth
dt_summaries <- dt_depths[, .(.N, Cover = mean(Cover)), Depth]
setkey(dt_summaries, "Depth")
if (plot) {
if (which == "2x1") {
op <- par(no.readonly = TRUE)
par(mfrow=c(2,1),
oma = c(3,1,3,1) + 0.1,
mar = c(1,4,1,0) + 0.1)
dt_summaries[, barplot(N, border=NA, ylab = 'Number of leafs', ...)]
dt_summaries[, barplot(Cover, border=NA, ylab = "Weighted cover", names.arg=Depth, ...)]
title("Model complexity", xlab = "Leaf depth", outer = TRUE, line = 1)
par(op)
} else if (which == "max.depth") {
dt_depths[, max(Depth), Tree][
, plot(jitter(V1, amount = 0.1) ~ Tree, ylab = 'Max tree leaf depth', xlab = "tree #", ...)]
} else if (which == "med.depth") {
dt_depths[, median(as.numeric(Depth)), Tree][
, plot(jitter(V1, amount = 0.1) ~ Tree, ylab = 'Median tree leaf depth', xlab = "tree #", ...)]
} else if (which == "med.weight") {
dt_depths[, median(abs(Weight)), Tree][
, plot(V1 ~ Tree, ylab = 'Median absolute leaf weight', xlab = "tree #", ...)]
}
}
invisible(dt_depths)
}
# Extract path depths from root to leaf
# from data.table containing the nodes and edges of the trees.
# internal utility function
get.leaf.depth <- function(dt_tree) {
# extract tree graph's edges
dt_edges <- rbindlist(list(
dt_tree[Feature != "Leaf", .(ID, To=Yes, Tree)],
dt_tree[Feature != "Leaf", .(ID, To=No, Tree)]
))
# whether "To" is a leaf:
dt_edges <-
merge(dt_edges,
dt_tree[Feature == "Leaf", .(ID, Leaf = TRUE)],
all.x = TRUE, by.x = "To", by.y = "ID")
dt_edges[is.na(Leaf), Leaf := FALSE]
dt_edges[, {
graph <- igraph::graph_from_data_frame(.SD[,.(ID, To)])
# min(ID) in a tree is a root node
paths_tmp <- igraph::shortest_paths(graph, from = min(ID), to = To[Leaf == TRUE])
# list of paths to each leaf in a tree
paths <- lapply(paths_tmp$vpath, names)
# combine into a resulting path lengths table for a tree
data.table(Depth = sapply(paths, length), ID = To[Leaf == TRUE])
}, by = Tree]
}
# Avoid error messages during CRAN check.
# The reason is that these variables are never declared
# They are mainly column names inferred by Data.table...
globalVariables(
c(
".N", "N", "Depth", "Quality", "Cover", "Tree", "ID", "Yes", "No", "Feature"
)
)


@@ -1,125 +0,0 @@
#' Plot feature importance as a bar graph
#'
#' Represents previously calculated feature importance as a bar graph.
#' \code{xgb.plot.importance} uses base R graphics, while \code{xgb.ggplot.importance} uses the ggplot backend.
#'
#' @param importance_matrix a \code{data.table} returned by \code{\link{xgb.importance}}.
#' @param top_n maximal number of top features to include into the plot.
#' @param measure the name of importance measure to plot.
#' When \code{NULL}, 'Gain' would be used for trees and 'Weight' would be used for gblinear.
#' @param rel_to_first whether importance values should be represented as relative to the highest ranked feature.
#' See Details.
#' @param left_margin (base R barplot) allows adjusting the left margin size to fit feature names.
#' When it is NULL, the existing \code{par('mar')} is used.
#' @param cex (base R barplot) passed as \code{cex.names} parameter to \code{barplot}.
#' @param plot (base R barplot) whether a barplot should be produced.
#' If FALSE, only a data.table is returned.
#' @param n_clusters (ggplot only) a \code{numeric} vector containing the min and the max range
#' of the possible number of clusters of bars.
#' @param ... other parameters passed to \code{barplot} (except horiz, border, cex.names, names.arg, and las).
#'
#' @details
#' The graph represents each feature as a horizontal bar of length proportional to the importance of a feature.
#' Features are shown ranked in a decreasing importance order.
#' It works for importances from both \code{gblinear} and \code{gbtree} models.
#'
#' When \code{rel_to_first = FALSE}, the values would be plotted as they were in \code{importance_matrix}.
#' For gbtree model, that would mean being normalized to the total of 1
#' ("what is feature's importance contribution relative to the whole model?").
#' For linear models, \code{rel_to_first = FALSE} would show actual values of the coefficients.
#' Setting \code{rel_to_first = TRUE} allows one to see the picture from the perspective of
#' "what is feature's importance contribution relative to the most important feature?"
#'
#' The ggplot-backend method also performs 1-D clustering of the importance values,
#' with bar colors corresponding to different clusters that have somewhat similar importance values.
#'
#' @return
#' The \code{xgb.plot.importance} function creates a \code{barplot} (when \code{plot=TRUE})
#' and silently returns a processed data.table with \code{top_n} features sorted by importance.
#'
#' The \code{xgb.ggplot.importance} function returns a ggplot graph which could be customized afterwards.
#' E.g., to change the title of the graph, add \code{+ ggtitle("A GRAPH NAME")} to the result.
#'
#' @seealso
#' \code{\link[graphics]{barplot}}.
#'
#' @examples
#' data(agaricus.train)
#'
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 3,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#'
#' importance_matrix <- xgb.importance(colnames(agaricus.train$data), model = bst)
#'
#' xgb.plot.importance(importance_matrix, rel_to_first = TRUE, xlab = "Relative importance")
#'
#' (gg <- xgb.ggplot.importance(importance_matrix, measure = "Frequency", rel_to_first = TRUE))
#' gg + ggplot2::ylab("Frequency")
#'
#' @rdname xgb.plot.importance
#' @export
xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure = NULL,
rel_to_first = FALSE, left_margin = 10, cex = NULL, plot = TRUE, ...) {
check.deprecation(...)
if (!"data.table" %in% class(importance_matrix)) {
stop("importance_matrix: Should be a data.table.")
}
imp_names <- colnames(importance_matrix)
if (is.null(measure)) {
if (all(c("Feature", "Gain") %in% imp_names)) {
measure <- "Gain"
} else if (all(c("Feature", "Weight") %in% imp_names)) {
measure <- "Weight"
} else {
stop("Importance matrix column names are not as expected!")
}
} else {
if (!measure %in% imp_names)
stop("Invalid `measure`")
if (!"Feature" %in% imp_names)
stop("Importance matrix column names are not as expected!")
}
# also aggregate, just in case the values were not yet summed up by feature
importance_matrix <- importance_matrix[, Importance := sum(get(measure)), by = Feature]
# make sure it's ordered
importance_matrix <- importance_matrix[order(-abs(Importance))]
if (!is.null(top_n)) {
top_n <- min(top_n, nrow(importance_matrix))
importance_matrix <- head(importance_matrix, top_n)
}
if (rel_to_first) {
importance_matrix[, Importance := Importance/max(abs(Importance))]
}
if (is.null(cex)) {
cex <- 2.5/log2(1 + nrow(importance_matrix))
}
if (plot) {
op <- par(no.readonly = TRUE)
mar <- op$mar
if (!is.null(left_margin))
mar[2] <- left_margin
par(mar = mar)
# reverse the order of rows to have the highest ranked at the top
importance_matrix[nrow(importance_matrix):1,
barplot(Importance, horiz=TRUE, border=NA, cex.names=cex,
names.arg=Feature, las=1, ...)]
grid(NULL, NA)
# redraw over the grid
importance_matrix[nrow(importance_matrix):1,
barplot(Importance, horiz=TRUE, border=NA, add=TRUE)]
par(op)
}
invisible(importance_matrix)
}
# Avoid error messages during CRAN check.
# The reason is that these variables are never declared
# They are mainly column names inferred by Data.table...
globalVariables(c("Feature", "Importance"))


@@ -1,108 +0,0 @@
#' Project all trees on one tree and plot it
#'
#' Visualization of the ensemble of trees as a single collective unit.
#'
#' @param model generated by the \code{xgb.train} function.
#' @param feature_names names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.
#' @param features_keep number of features to keep in each position of the multi trees.
#' @param plot_width width in pixels of the graph to produce
#' @param plot_height height in pixels of the graph to produce
#' @param ... currently not used
#'
#' @return A \code{DiagrammeR} rendering of the projected tree.
#'
#' @details
#'
#' This function tries to capture the complexity of gradient boosted tree ensemble
#' in a cohesive way.
#'
#' The goal is to improve the interpretability of a model generally seen as a black box.
#' The function is dedicated to boosting applied to decision trees only.
#'
#' The purpose is to move from an ensemble of trees to a single tree only.
#'
#' It takes advantage of the fact that the shape of a binary tree is only defined by
#' its depth (therefore, in a boosting model, all trees have the same shape).
#'
#' Moreover, the trees tend to reuse the same features.
#'
#' The function projects each tree onto one, and keeps, for each position, the
#' first \code{features_keep} features (based on the Gain per feature measure).
#'
#' This function is inspired by this blog post:
#' \url{https://wellecks.wordpress.com/2015/02/21/peering-into-the-black-box-visualizing-lambdamart/}
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#'
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 15,
#' eta = 1, nthread = 2, nrounds = 30, objective = "binary:logistic",
#' min_child_weight = 50)
#'
#' p <- xgb.plot.multi.trees(model = bst, feature_names = colnames(agaricus.train$data), features_keep = 3)
#' print(p)
#'
#' @export
xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5, plot_width = NULL, plot_height = NULL, ...){
check.deprecation(...)
tree.matrix <- xgb.model.dt.tree(feature_names = feature_names, model = model)
# first number of the path represents the tree, then the following numbers are related to the path to follow
# root init
root.nodes <- tree.matrix[stri_detect_regex(ID, "\\d+-0"), ID]
tree.matrix[ID %in% root.nodes, abs.node.position:=root.nodes]
precedent.nodes <- root.nodes
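# walk down the trees level by level: each child gets its parent's position key plus "_0" (yes branch)
# or "_1" (no branch), so that nodes at the same position can be aligned across trees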
while(tree.matrix[,sum(is.na(abs.node.position))] > 0) {
yes.row.nodes <- tree.matrix[abs.node.position %in% precedent.nodes & !is.na(Yes)]
no.row.nodes <- tree.matrix[abs.node.position %in% precedent.nodes & !is.na(No)]
yes.nodes.abs.pos <- yes.row.nodes[, abs.node.position] %>% paste0("_0")
no.nodes.abs.pos <- no.row.nodes[, abs.node.position] %>% paste0("_1")
tree.matrix[ID %in% yes.row.nodes[, Yes], abs.node.position := yes.nodes.abs.pos]
tree.matrix[ID %in% no.row.nodes[, No], abs.node.position := no.nodes.abs.pos]
precedent.nodes <- c(yes.nodes.abs.pos, no.nodes.abs.pos)
}
tree.matrix[!is.na(Yes),Yes:= paste0(abs.node.position, "_0")]
tree.matrix[!is.na(No),No:= paste0(abs.node.position, "_1")]
remove.tree <- . %>% stri_replace_first_regex(pattern = "^\\d+-", replacement = "")
tree.matrix[,`:=`(abs.node.position=remove.tree(abs.node.position), Yes=remove.tree(Yes), No=remove.tree(No))]
nodes.dt <- tree.matrix[,.(Quality = sum(Quality)),by = .(abs.node.position, Feature)][,.(Text =paste0(Feature[1:min(length(Feature), features_keep)], " (", Quality[1:min(length(Quality), features_keep)], ")") %>% paste0(collapse = "\n")), by=abs.node.position]
edges.dt <- tree.matrix[Feature != "Leaf",.(abs.node.position, Yes)] %>% list(tree.matrix[Feature != "Leaf",.(abs.node.position, No)]) %>% rbindlist() %>% setnames(c("From", "To")) %>% .[,.N,.(From, To)] %>% .[,N:=NULL]
nodes <- DiagrammeR::create_nodes(nodes = nodes.dt[,abs.node.position],
label = nodes.dt[,Text],
style = "filled",
color = "DimGray",
fillcolor= "Beige",
shape = "oval",
fontname = "Helvetica"
)
edges <- DiagrammeR::create_edges(from = edges.dt[,From],
to = edges.dt[,To],
color = "DimGray",
arrowsize = "1.5",
arrowhead = "vee",
fontname = "Helvetica",
rel = "leading_to")
graph <- DiagrammeR::create_graph(nodes_df = nodes,
edges_df = edges,
graph_attrs = "rankdir = LR")
DiagrammeR::render_graph(graph, width = plot_width, height = plot_height)
}
globalVariables(
c(
".N", "N", "From", "To", "Text", "Feature", "no.nodes.abs.pos", "ID", "Yes", "No", "Tree", "yes.nodes.abs.pos", "abs.node.position"
)
)


@@ -1,81 +0,0 @@
#' Plot a boosted tree model
#'
#' Read a tree model text dump and plot the model.
#'
#' @param feature_names names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.
#' @param model generated by the \code{xgb.train} function. Passing the model directly avoids the creation of a dump file.
#' @param n_first_tree limit the plot to the first n trees. If \code{NULL}, all trees of the model are plotted. Performance can be low for huge models.
#' @param plot_width the width of the diagram in pixels.
#' @param plot_height the height of the diagram in pixels.
#' @param ... currently not used.
#'
#' @return A \code{DiagrammeR} of the model.
#'
#' @details
#'
#' The content of each node is organised as follows:
#'
#' \itemize{
#' \item \code{feature} value;
#' \item \code{cover}: the sum of the second order gradient of the training data classified to the leaf. If it is square loss, this simply corresponds to the number of instances in that branch. The deeper a node is in the tree, the lower this metric will be;
#' \item \code{gain}: metric of the importance of the node in the model.
#' }
#'
#' The function uses the \href{http://www.graphviz.org/}{GraphViz} library for that purpose.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#'
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
#'
#' xgb.plot.tree(feature_names = colnames(agaricus.train$data), model = bst)
#'
#' @export
xgb.plot.tree <- function(feature_names = NULL, model = NULL, n_first_tree = NULL, plot_width = NULL, plot_height = NULL, ...){
check.deprecation(...)
if (class(model) != "xgb.Booster") {
stop("model: Has to be an object of class xgb.Booster model generaged by the xgb.train function.")
}
if (!requireNamespace("DiagrammeR", quietly = TRUE)) {
stop("DiagrammeR package is required for xgb.plot.tree", call. = FALSE)
}
allTrees <- xgb.model.dt.tree(feature_names = feature_names, model = model, n_first_tree = n_first_tree)
allTrees[, label:= paste0(Feature, "\nCover: ", Cover, "\nGain: ", Quality)]
allTrees[, shape:= "rectangle"][Feature == "Leaf", shape:= "oval"]
allTrees[, filledcolor:= "Beige"][Feature == "Leaf", filledcolor:= "Khaki"]
# rev is used to put the first tree on top.
nodes <- DiagrammeR::create_nodes(nodes = allTrees[,ID] %>% rev,
label = allTrees[,label] %>% rev,
style = "filled",
color = "DimGray",
fillcolor= allTrees[,filledcolor] %>% rev,
shape = allTrees[,shape] %>% rev,
data = allTrees[,Feature] %>% rev,
fontname = "Helvetica"
)
edges <- DiagrammeR::create_edges(from = allTrees[Feature != "Leaf", c(ID)] %>% rep(2),
to = allTrees[Feature != "Leaf", c(Yes, No)],
label = allTrees[Feature != "Leaf", paste("<",Split)] %>% c(rep("",nrow(allTrees[Feature != "Leaf"]))),
color = "DimGray",
arrowsize = "1.5",
arrowhead = "vee",
fontname = "Helvetica",
rel = "leading_to")
graph <- DiagrammeR::create_graph(nodes_df = nodes,
edges_df = edges,
graph_attrs = "rankdir = LR")
DiagrammeR::render_graph(graph, width = plot_width, height = plot_height)
}
# Avoid error messages during CRAN check.
# The reason is that these variables are never declared
# They are mainly column names inferred by Data.table...
globalVariables(c("Feature", "ID", "Cover", "Quality", "Split", "Yes", "No", ".", "shape", "filledcolor", "label"))


@@ -1,27 +0,0 @@
#' Save xgboost model to binary file
#'
#' Save an xgboost model object (as produced by \code{xgboost} or \code{xgb.train}) to a binary file.
#'
#' @param model the model object.
#' @param fname the name of the file to write.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
#' xgb.save(bst, 'xgb.model')
#' bst <- xgb.load('xgb.model')
#' pred <- predict(bst, test$data)
#' @export
xgb.save <- function(model, fname) {
if (typeof(fname) != "character")
stop("fname must be character")
if (class(model) != "xgb.Booster")
stop("the input must be xgb.Booster. Use xgb.DMatrix.save to save xgb.DMatrix object.")
.Call("XGBoosterSaveModel_R", model$handle, fname, PACKAGE = "xgboost")
return(TRUE)
}


@@ -1,23 +0,0 @@
#' Save xgboost model to R's raw vector
#'
#' Save an xgboost model (as produced by \code{xgboost} or \code{xgb.train}) to R's raw vector. The model can be loaded back from the raw vector with \code{xgb.load}.
#'
#' @param model the model object.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
#' raw <- xgb.save.raw(bst)
#' bst <- xgb.load(raw)
#' pred <- predict(bst, test$data)
#'
#' @export
xgb.save.raw <- function(model) {
model <- xgb.get.handle(model)
.Call("XGBoosterModelToRaw_R", model, PACKAGE = "xgboost")
}


@@ -1,334 +0,0 @@
#' eXtreme Gradient Boosting Training
#'
#' \code{xgb.train} is an advanced interface for training an xgboost model. The \code{xgboost} function provides a simpler interface.
#'
#' @param params the list of parameters.
#' The complete list of parameters is available at \url{http://xgboost.readthedocs.io/en/latest/parameter.html}.
#' Below is a shorter summary:
#'
#' 1. General Parameters
#'
#' \itemize{
#' \item \code{booster} which booster to use, can be \code{gbtree} or \code{gblinear}. Default: \code{gbtree}
#' \item \code{silent} 0 means printing running messages, 1 means silent mode. Default: 0
#' }
#'
#' 2. Booster Parameters
#'
#' 2.1. Parameter for Tree Booster
#'
#' \itemize{
#' \item \code{eta} controls the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. A lower value for \code{eta} implies a larger value for \code{nrounds}: a low \code{eta} value means a model more robust to overfitting but slower to compute. Default: 0.3
#' \item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. The larger, the more conservative the algorithm will be.
#' \item \code{max_depth} maximum depth of a tree. Default: 6
#' \item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
#' \item \code{subsample} subsample ratio of the training instances. Setting it to 0.5 means that xgboost randomly collects half of the data instances to grow trees, and this will prevent overfitting. It makes computation shorter (because there is less data to analyse). It is advised to use this parameter with \code{eta} and to increase \code{nround}. Default: 1
#' \item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
#' \item \code{num_parallel_tree} Experimental parameter. Number of trees to grow per round. Useful for testing Random Forest through Xgboost: set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1} accordingly. Default: 1
#' }
#'
#' 2.2. Parameter for Linear Booster
#'
#' \itemize{
#' \item \code{lambda} L2 regularization term on weights. Default: 0
#' \item \code{lambda_bias} L2 regularization term on bias. Default: 0
#' \item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
#' }
#'
#' 3. Task Parameters
#'
#' \itemize{
#' \item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
#' \itemize{
#' \item \code{reg:linear} linear regression (Default).
#' \item \code{reg:logistic} logistic regression.
#' \item \code{binary:logistic} logistic regression for binary classification. Output probability.
#' \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
#' \item \code{num_class} set the number of classes. To use only with multiclass objectives.
#' \item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}.
#' \item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped into an ndata * nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
#' \item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss.
#' }
#' \item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
#' \item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: a metric will be assigned according to the objective (rmse for regression, error for classification, mean average precision for ranking). The list is provided in the details section.
#' }
#'
#' @param data input dataset. \code{xgb.train} takes only an \code{xgb.DMatrix} as the input.
#' \code{xgboost}, in addition, also accepts \code{matrix}, \code{dgCMatrix}, or local data file.
#' @param nrounds the max number of iterations
#' @param watchlist what information should be printed when \code{verbose=1} or
#' \code{verbose=2}. Watchlist is used to specify validation set monitoring
#' during training. For example, a user can specify
#' watchlist=list(validation1=mat1, validation2=mat2) to watch
#' the performance of each round's model on mat1 and mat2.
#'
#' @param obj customized objective function. Returns gradient and second order
#' gradient with given prediction and dtrain.
#' @param feval customized evaluation function. Returns
#' \code{list(metric='metric-name', value='metric-value')} with given
#' prediction and dtrain.
#' @param verbose If 0, xgboost will stay silent. If 1, xgboost will print
#' information about performance. If 2, xgboost will print some additional information.
#' Setting \code{verbose > 0} automatically engages the \code{\link{cb.evaluation.log}} and
#' \code{\link{cb.print.evaluation}} callback functions.
#' @param print_every_n Print each n-th iteration evaluation messages when \code{verbose>0}.
#' Default is 1 which means all messages are printed. This parameter is passed to the
#' \code{\link{cb.print.evaluation}} callback.
#' @param early_stopping_rounds If \code{NULL}, the early stopping function is not triggered.
#' If set to an integer \code{k}, training with a validation set will stop if the performance
#' doesn't improve for \code{k} rounds.
#' Setting this parameter engages the \code{\link{cb.early.stop}} callback.
#' @param maximize If \code{feval} and \code{early_stopping_rounds} are set,
#' then this parameter must be set as well.
#' When it is \code{TRUE}, it means the larger the evaluation score the better.
#' This parameter is passed to the \code{\link{cb.early.stop}} callback.
#' @param save_period when it is non-NULL, model is saved to disk after every \code{save_period} rounds,
#' 0 means save at the end. The saving is handled by the \code{\link{cb.save.model}} callback.
#' @param save_name the name or path for periodically saved model file.
#' @param xgb_model a previously built model to continue the training from.
#' Could be either an object of class \code{xgb.Booster}, or its raw data, or the name of a
#' file with a previously saved model.
#' @param callbacks a list of callback functions to perform various tasks during boosting.
#' See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
#' parameters' values. User can provide either existing or their own callback methods in order
#' to customize the training process.
#' @param ... other parameters to pass to \code{params}.
#' @param label vector of response values. Should not be provided when data is
#' a local data file name or an \code{xgb.DMatrix}.
#' @param missing by default is set to NA, which means that NA values should be considered as 'missing'
#' by the algorithm. Sometimes, 0 or other extreme value might be used to represent missing values.
#' This parameter is only used when input is a dense matrix.
#' @param weight a vector indicating the weight for each row of the input.
#'
#' @details
#' These are the training functions for \code{xgboost}.
#'
#' The \code{xgb.train} interface supports advanced features such as \code{watchlist},
#' customized objective and evaluation metric functions, therefore it is more flexible
#' than the \code{\link{xgboost}} interface.
#'
#' Parallelization is automatically enabled if \code{OpenMP} is present.
#' Number of threads can also be manually specified via \code{nthread} parameter.
#'
#' The evaluation metric is chosen automatically by Xgboost (according to the objective)
#' when the \code{eval_metric} parameter is not provided.
#' The user may set one or several \code{eval_metric} parameters (a short sketch of this follows the list below).
#' Note that when using a customized metric, only this single metric can be used.
#' The following is the list of built-in metrics for which Xgboost provides optimized implementation:
#' \itemize{
#' \item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
#' \item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
#' \item \code{mlogloss} multiclass logloss. \url{https://www.kaggle.com/wiki/MultiClassLogLoss}
#' \item \code{error} Binary classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
#' By default, it uses the 0.5 threshold for predicted values to define negative and positive instances.
#' Different threshold (e.g., 0.) could be specified as "error@0."
#' \item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
#' \item \code{auc} Area under the curve. \url{http://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_curve} for ranking evaluation.
#' \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{http://en.wikipedia.org/wiki/NDCG}
#' }
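#'
#' As a sketch (using the parameter names documented above), several built-in metrics can be
#' requested at once by repeating the \code{eval_metric} entry in the parameter list, e.g.
#' \code{list(objective = "binary:logistic", eval_metric = "auc", eval_metric = "logloss")}.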
#'
#' The following callbacks are automatically created when certain parameters are set:
#' \itemize{
#' \item \code{cb.print.evaluation} is turned on when \code{verbose > 0};
#' and the \code{print_every_n} parameter is passed to it.
#' \item \code{cb.evaluation.log} is on when \code{verbose > 0} and \code{watchlist} is present.
#' \item \code{cb.early.stop}: when \code{early_stopping_rounds} is set.
#' \item \code{cb.save.model}: when \code{save_period > 0} is set.
#' }
#'
#' @return
#' An object of class \code{xgb.Booster} with the following elements:
#' \itemize{
#' \item \code{handle} a handle (pointer) to the xgboost model in memory.
#' \item \code{raw} a cached memory dump of the xgboost model saved as R's \code{raw} type.
#' \item \code{niter} number of boosting iterations.
#' \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
#' first column corresponding to iteration number and the rest corresponding to evaluation
#' metrics' values. It is created by the \code{\link{cb.evaluation.log}} callback.
#' \item \code{call} a function call.
#' \item \code{params} parameters that were passed to the xgboost library. Note that it does not
#' capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
#' \item \code{callbacks} callback functions that were either automatically assigned or
#' explicitly passed.
#' \item \code{best_iteration} iteration number with the best evaluation metric value
#' (only available with early stopping).
#' \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
#' which could further be used in \code{predict} method
#' (only available with early stopping).
#' \item \code{best_score} the best evaluation metric value during early stopping.
#' (only available with early stopping).
#' }
#'
#' @seealso
#' \code{\link{callbacks}},
#' \code{\link{predict.xgb.Booster}},
#' \code{\link{xgb.cv}}
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#'
#' dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
#' dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
#' watchlist <- list(eval = dtest, train = dtrain)
#'
#' ## A simple xgb.train example:
#' param <- list(max_depth = 2, eta = 1, silent = 1,
#' objective = "binary:logistic", eval_metric = "auc")
#' bst <- xgb.train(param, dtrain, nthread = 2, nrounds = 2, watchlist)
#'
#' ## An xgb.train example where custom objective and evaluation metric are used:
#' logregobj <- function(preds, dtrain) {
#' labels <- getinfo(dtrain, "label")
#' preds <- 1/(1 + exp(-preds))
#' grad <- preds - labels
#' hess <- preds * (1 - preds)
#' return(list(grad = grad, hess = hess))
#' }
#' evalerror <- function(preds, dtrain) {
#' labels <- getinfo(dtrain, "label")
#' err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
#' return(list(metric = "error", value = err))
#' }
#' bst <- xgb.train(param, dtrain, nthread = 2, nrounds = 2, watchlist)
#'
#' ## An xgb.train example of using variable learning rates at each iteration:
#' my_etas <- list(eta = c(0.5, 0.1))
#' bst <- xgb.train(param, dtrain, nthread = 2, nrounds = 2, watchlist,
#' callbacks = list(cb.reset.parameters(my_etas)))
#'
#' ## Explicit use of the cb.evaluation.log callback allows to run
#' ## xgb.train silently but still store the evaluation results:
#' bst <- xgb.train(param, dtrain, nthread = 2, nrounds = 2, watchlist,
#' verbose = 0, callbacks = list(cb.evaluation.log()))
#' print(bst$evaluation_log)
#'
#' ## An 'xgboost' interface example:
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
#' max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
#' objective = "binary:logistic")
#' pred <- predict(bst, agaricus.test$data)
#'
#' @rdname xgb.train
#' @export
xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
obj = NULL, feval = NULL, verbose = 1, print_every_n=1L,
early_stopping_rounds = NULL, maximize = NULL,
save_period = NULL, save_name = "xgboost.model",
xgb_model = NULL, callbacks = list(), ...) {
check.deprecation(...)
params <- check.booster.params(params, ...)
check.custom.obj()
check.custom.eval()
# data & watchlist checks
dtrain <- data
if (class(dtrain) != "xgb.DMatrix")
stop("second argument dtrain must be xgb.DMatrix")
if (length(watchlist) > 0) {
if (typeof(watchlist) != "list" ||
!all(sapply(watchlist, class) == "xgb.DMatrix"))
stop("watchlist must be a list of xgb.DMatrix elements")
evnames <- names(watchlist)
if (is.null(evnames) || any(evnames == ""))
stop("each element of the watchlist must have a name tag")
}
# evaluation printing callback
params <- c(params, list(silent = ifelse(verbose > 1, 0, 1)))
print_every_n <- max( as.integer(print_every_n), 1L)
if (!has.callbacks(callbacks, 'cb.print.evaluation') && verbose) {
callbacks <- add.cb(callbacks, cb.print.evaluation(print_every_n))
}
# evaluation log callback: it is automatically enabled only when verbose > 0
evaluation_log <- list()
if (verbose > 0 &&
!has.callbacks(callbacks, 'cb.evaluation.log') &&
length(watchlist) > 0) {
callbacks <- add.cb(callbacks, cb.evaluation.log())
}
# Model saving callback
if (!is.null(save_period) &&
!has.callbacks(callbacks, 'cb.save.model')) {
callbacks <- add.cb(callbacks, cb.save.model(save_period, save_name))
}
# Early stopping callback
stop_condition <- FALSE
if (!is.null(early_stopping_rounds) &&
!has.callbacks(callbacks, 'cb.early.stop')) {
callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds,
maximize=maximize, verbose=verbose))
}
# Sort the callbacks into categories
cb <- categorize.callbacks(callbacks)
# Construct a booster (either a new one or load from xgb_model)
handle <- xgb.Booster(params, append(watchlist, dtrain), xgb_model)
bst <- xgb.handleToBooster(handle)
# extract parameters that can affect the relationship b/w #trees and #iterations
num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1)
num_parallel_tree <- max(as.numeric(NVL(params[['num_parallel_tree']], 1)), 1)
# When the 'xgb_model' was set, find out how many boosting iterations it has
niter_skip <- 0
if (!is.null(xgb_model)) {
niter_skip <- as.numeric(xgb.attr(bst, 'niter')) + 1
if (length(niter_skip) == 0) {
niter_skip <- xgb.ntree(bst) %/% (num_parallel_tree * num_class)
}
}
# TODO: distributed code
rank <- 0
begin_iteration <- niter_skip + 1
end_iteration <- niter_skip + nrounds
# the main loop for boosting iterations
for (iteration in begin_iteration:end_iteration) {
for (f in cb$pre_iter) f()
xgb.iter.update(bst$handle, dtrain, iteration - 1, obj)
bst_evaluation <- numeric(0)
if (length(watchlist) > 0)
bst_evaluation <- xgb.iter.eval(bst$handle, watchlist, iteration - 1, feval)
xgb.attr(bst$handle, 'niter') <- iteration - 1
for (f in cb$post_iter) f()
if (stop_condition) break
}
for (f in cb$finalize) f(finalize=TRUE)
bst <- xgb.Booster.check(bst, saveraw = TRUE)
# store the total number of boosting iterations
bst$niter = end_iteration
# store the evaluation results
if (length(evaluation_log) > 0 &&
nrow(evaluation_log) > 0) {
# include the previous compatible history when available
    if (inherits(xgb_model, 'xgb.Booster') &&
        !is.null(xgb_model$evaluation_log) &&
        isTRUE(all.equal(colnames(evaluation_log),
                         colnames(xgb_model$evaluation_log)))) {
evaluation_log <- rbindlist(list(xgb_model$evaluation_log, evaluation_log))
}
bst$evaluation_log <- evaluation_log
}
bst$call <- match.call()
bst$params <- params
bst$callbacks <- callbacks
return(bst)
}

View File

@@ -1,104 +0,0 @@
# Simple interface for training an xgboost model.
# Its documentation is combined with xgb.train.
#
#' @rdname xgb.train
#' @export
xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL,
params = list(), nrounds,
verbose = 1, print_every_n = 1L,
early_stopping_rounds = NULL, maximize = NULL,
save_period = 0, save_name = "xgboost.model",
xgb_model = NULL, callbacks = list(), ...) {
dtrain <- xgb.get.DMatrix(data, label, missing, weight)
watchlist <- list()
if (verbose > 0)
watchlist$train = dtrain
bst <- xgb.train(params, dtrain, nrounds, watchlist, verbose = verbose, print_every_n=print_every_n,
early_stopping_rounds = early_stopping_rounds, maximize = maximize,
save_period = save_period, save_name = save_name,
xgb_model = xgb_model, callbacks = callbacks, ...)
return(bst)
}
#' Training part from Mushroom Data Set
#'
#' This data set is originally from the Mushroom data set,
#' UCI Machine Learning Repository.
#'
#' This data set includes the following fields:
#'
#' \itemize{
#' \item \code{label} the label for each record
#' \item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns.
#' }
#'
#' @references
#' https://archive.ics.uci.edu/ml/datasets/Mushroom
#'
#' Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
#' [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
#' School of Information and Computer Science.
#'
#' @docType data
#' @keywords datasets
#' @name agaricus.train
#' @usage data(agaricus.train)
#' @format A list containing a label vector, and a dgCMatrix object with 6513
#' rows and 127 variables
NULL
#' Test part from Mushroom Data Set
#'
#' This data set is originally from the Mushroom data set,
#' UCI Machine Learning Repository.
#'
#' This data set includes the following fields:
#'
#' \itemize{
#' \item \code{label} the label for each record
#' \item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns.
#' }
#'
#' @references
#' https://archive.ics.uci.edu/ml/datasets/Mushroom
#'
#' Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
#' [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
#' School of Information and Computer Science.
#'
#' @docType data
#' @keywords datasets
#' @name agaricus.test
#' @usage data(agaricus.test)
#' @format A list containing a label vector, and a dgCMatrix object with 1611
#' rows and 126 variables
NULL
# Various imports
#' @importClassesFrom Matrix dgCMatrix dgeMatrix
#' @importFrom Matrix cBind
#' @importFrom Matrix colSums
#' @importFrom Matrix sparse.model.matrix
#' @importFrom Matrix sparseVector
#' @importFrom data.table data.table
#' @importFrom data.table as.data.table
#' @importFrom data.table :=
#' @importFrom data.table rbindlist
#' @importFrom data.table setkey
#' @importFrom data.table setkeyv
#' @importFrom data.table setnames
#' @importFrom magrittr %>%
#' @importFrom stringi stri_detect_regex
#' @importFrom stringi stri_match_first_regex
#' @importFrom stringi stri_replace_first_regex
#' @importFrom stringi stri_replace_all_regex
#' @importFrom stringi stri_split_regex
#' @importFrom utils object.size str tail
#' @importFrom stats predict
#'
#' @import methods
#' @useDynLib xgboost
NULL

View File

@@ -1,73 +0,0 @@
XGBoost R Package for Scalable GBM
==================================
[![CRAN Status Badge](http://www.r-pkg.org/badges/version/xgboost)](http://cran.r-project.org/web/packages/xgboost)
[![CRAN Downloads](http://cranlogs.r-pkg.org/badges/xgboost)](http://cran.rstudio.com/web/packages/xgboost/index.html)
[![Documentation Status](https://readthedocs.org/projects/xgboost/badge/?version=latest)](http://xgboost.readthedocs.org/en/latest/R-package/index.html)
Resources
---------
* [XGBoost R Package Online Documentation](http://xgboost.readthedocs.org/en/latest/R-package/index.html)
- Check this out for detailed documents, examples and tutorials.
Installation
------------
We are [on CRAN](https://cran.r-project.org/web/packages/xgboost/index.html) now. For the stable/pre-compiled version (for Windows and OS X), please install from CRAN:
```r
install.packages('xgboost')
```
You can also install from our weekly updated drat repo:
```r
install.packages("drat", repos="https://cran.rstudio.com")
drat:::addRepo("dmlc")
install.packages("xgboost", repos="http://dmlc.ml/drat/", type="source")
```
***Important*** Due to the use of git submodules, `install_github` is no longer supported for installing the
latest version of the R package.
For the most up-to-date version, please build and install from the GitHub source as described below.
Windows users will need to install [RTools](http://cran.r-project.org/bin/windows/Rtools/) first. They also need to download [MinGW-W64](http://iweb.dl.sourceforge.net/project/mingw-w64/Toolchains%20targetting%20Win32/Personal%20Builds/mingw-builds/installer/mingw-w64-install.exe) and select the x86_64 architecture during installation.
Run the following command to add MinGW to the PATH on Windows if it is not already there.
```cmd
PATH %PATH%;C:\Program Files\mingw-w64\x86_64-5.3.0-posix-seh-rt_v4-rev0\mingw64\bin
```
To compile xgboost, clone and build it at the root of your drive (so that the `C:/xgboost/` path used below is valid) by running the following bash script.
```bash
git clone --recursive https://github.com/dmlc/xgboost
cd xgboost
git submodule init
git submodule update
alias make='mingw32-make'
cd dmlc-core
make -j4
cd ../rabit
make lib/librabit_empty.a -j4
cd ..
cp make/mingw64.mk config.mk
make -j4
```
Then run the following R script to install the xgboost R package from the repository's root directory.
```r
install.packages('devtools') # if not installed
setwd('C:/xgboost/')
library(devtools)
install('R-package')
```
For more detailed installation instructions, please see [here](http://xgboost.readthedocs.org/en/latest/build.html#r-package-installation).
Examples
--------
* Please visit [walk through example](demo).
* See also the [example scripts](../demo/kaggle-higgs) for Kaggle Higgs Challenge, including [speedtest script](../demo/kaggle-higgs/speedtest.R) on this dataset and the one related to [Otto challenge](../demo/kaggle-otto), including a [RMarkdown documentation](../demo/kaggle-otto/understandingXGBoostModel.Rmd).

Binary file not shown.

Binary file not shown.

View File

@@ -1,11 +0,0 @@
basic_walkthrough Basic feature walkthrough
caret_wrapper Use xgboost to train in caret library
custom_objective Customize loss function and evaluation metric
boost_from_prediction Boosting from existing prediction
predict_first_ntree Predicting using first n trees
generalized_linear_model Generalized Linear Model
cross_validation Cross validation
create_sparse_matrix Create Sparse Matrix
predict_leaf_indices Predicting the corresponding leaves
early_stopping Early Stop in training
poisson_regression Poisson Regression on count data

View File

@@ -1,19 +0,0 @@
XGBoost R Feature Walkthrough
====
* [Basic walkthrough of wrappers](basic_walkthrough.R)
* [Train a xgboost model from caret library](caret_wrapper.R)
* [Customize loss function and evaluation metric](custom_objective.R)
* [Boosting from existing prediction](boost_from_prediction.R)
* [Predicting using first n trees](predict_first_ntree.R)
* [Generalized Linear Model](generalized_linear_model.R)
* [Cross validation](cross_validation.R)
* [Create a sparse matrix from a dense one](create_sparse_matrix.R)
Benchmarks
====
* [Starter script for Kaggle Higgs Boson](../../demo/kaggle-higgs)
Notes
====
* Contributions of examples and benchmarks are more than welcome!
* If you would like to share how you use xgboost to solve your problem, send a pull request :)

View File

@@ -1,111 +0,0 @@
require(xgboost)
require(methods)
# we load in the agaricus dataset
# In this example, we are aiming to predict whether a mushroom is edible
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
# the loaded data are stored in a sparse matrix, and the label is a numeric vector in {0,1}
class(train$label)
class(train$data)
#-------------Basic Training using XGBoost-----------------
# this is the basic usage of xgboost: you can put a matrix in the data field
# note: we are putting in a sparse matrix here, xgboost naturally handles sparse input
# use a sparse matrix when your features are sparse (e.g. when you are using one-hot encoded vectors)
print("Training xgboost with sparseMatrix")
bst <- xgboost(data = train$data, label = train$label, max_depth = 2, eta = 1, nrounds = 2,
nthread = 2, objective = "binary:logistic")
# alternatively, you can put in dense matrix, i.e. basic R-matrix
print("Training xgboost with Matrix")
bst <- xgboost(data = as.matrix(train$data), label = train$label, max_depth = 2, eta = 1, nrounds = 2,
nthread = 2, objective = "binary:logistic")
# you can also put in an xgb.DMatrix object, which stores label, data and other metadata needed for advanced features
print("Training xgboost with xgb.DMatrix")
dtrain <- xgb.DMatrix(data = train$data, label = train$label)
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, nthread = 2,
objective = "binary:logistic")
# Verbose = 0,1,2
print("Train xgboost with verbose 0, no message")
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2,
nthread = 2, objective = "binary:logistic", verbose = 0)
print("Train xgboost with verbose 1, print evaluation metric")
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2,
nthread = 2, objective = "binary:logistic", verbose = 1)
print("Train xgboost with verbose 2, also print information about tree")
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2,
nthread = 2, objective = "binary:logistic", verbose = 2)
# you can also specify data as file path to a LibSVM format input
# since we do not have this file with us, the following line is just for illustration
# bst <- xgboost(data = 'agaricus.train.svm', max_depth = 2, eta = 1, nrounds = 2,objective = "binary:logistic")
#--------------------basic prediction using xgboost--------------
# you can do prediction using the following line
# you can put in Matrix, sparseMatrix, or xgb.DMatrix
pred <- predict(bst, test$data)
err <- mean(as.numeric(pred > 0.5) != test$label)
print(paste("test-error=", err))
#-------------------save and load models-------------------------
# save model to binary local file
xgb.save(bst, "xgboost.model")
# load binary model to R
bst2 <- xgb.load("xgboost.model")
pred2 <- predict(bst2, test$data)
# pred2 should be identical to pred
print(paste("sum(abs(pred2-pred))=", sum(abs(pred2-pred))))
# save model to R's raw vector
raw = xgb.save.raw(bst)
# load binary model to R
bst3 <- xgb.load(raw)
pred3 <- predict(bst3, test$data)
# pred3 should be identical to pred
print(paste("sum(abs(pred3-pred))=", sum(abs(pred3-pred))))
#----------------Advanced features --------------
# to use advanced features, we need to put data in xgb.DMatrix
dtrain <- xgb.DMatrix(data = train$data, label=train$label)
dtest <- xgb.DMatrix(data = test$data, label=test$label)
#---------------Using watchlist----------------
# watchlist is a list of xgb.DMatrix, each of them is tagged with name
watchlist <- list(train=dtrain, test=dtest)
# to train with watchlist, use xgb.train, which contains more advanced features
# watchlist allows us to monitor the evaluation result on all data in the list
print("Train xgboost using xgb.train with watchlist")
bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nrounds=2, watchlist=watchlist,
nthread = 2, objective = "binary:logistic")
# we can change evaluation metrics, or use multiple evaluation metrics
print("train xgboost using xgb.train with watchlist, watch logloss and error")
bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nrounds=2, watchlist=watchlist,
eval_metric = "error", eval_metric = "logloss",
nthread = 2, objective = "binary:logistic")
# xgb.DMatrix can also be saved using xgb.DMatrix.save
xgb.DMatrix.save(dtrain, "dtrain.buffer")
# to load it in, simply call xgb.DMatrix
dtrain2 <- xgb.DMatrix("dtrain.buffer")
bst <- xgb.train(data=dtrain2, max_depth=2, eta=1, nrounds=2, watchlist=watchlist,
nthread = 2, objective = "binary:logistic")
# information can be extracted from xgb.DMatrix using getinfo
label = getinfo(dtest, "label")
pred <- predict(bst, dtest)
err <- as.numeric(sum(as.integer(pred > 0.5) != label))/length(label)
print(paste("test-error=", err))
# You can dump the tree you learned using xgb.dump into a text file
xgb.dump(bst, "dump.raw.txt", with_stats = T)
# Finally, you can check which features are the most important.
print("Most important features (look at column Gain):")
imp_matrix <- xgb.importance(feature_names = colnames(train$data), model = bst)
print(imp_matrix)
# Feature importance bar plot by gain
print("Feature importance Plot : ")
print(xgb.plot.importance(importance_matrix = imp_matrix))

View File

@@ -1,26 +0,0 @@
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
watchlist <- list(eval = dtest, train = dtrain)
###
# advanced: start from an initial base prediction
#
print('start running example to start from an initial prediction')
# train xgboost for 1 round
param <- list(max_depth=2, eta=1, nthread = 2, silent=1, objective='binary:logistic')
bst <- xgb.train(param, dtrain, 1, watchlist)
# Note: we need the margin value instead of the transformed prediction when setting base_margin
# predicting with outputmargin=TRUE always gives the margin values before the logistic transformation
ptrain <- predict(bst, dtrain, outputmargin=TRUE)
ptest <- predict(bst, dtest, outputmargin=TRUE)
# set the base_margin property of dtrain and dtest
# base margin is the base prediction we will boost from
setinfo(dtrain, "base_margin", ptrain)
setinfo(dtest, "base_margin", ptest)
print('this is the result of boosting from the initial prediction')
bst <- xgb.train(params = param, data = dtrain, nrounds = 1, watchlist = watchlist)

View File

@@ -1,35 +0,0 @@
# install development version of caret library that contains xgboost models
devtools::install_github("topepo/caret/pkg/caret")
require(caret)
require(xgboost)
require(data.table)
require(vcd)
require(e1071)
# Load Arthritis dataset in memory.
data(Arthritis)
# Create a copy of the dataset with the data.table package (data.table is 100% compliant with R data frames, but its syntax is a lot more consistent and its performance is really good).
df <- data.table(Arthritis, keep.rownames = F)
# Let's add some new categorical features to see if they help. Of course these features are highly correlated with the Age feature. Usually that's not a good thing in ML, but tree algorithms (including boosted trees) are able to select the best features, even when they are highly correlated.
# For the first feature we create groups of age by rounding the real age. Note that we transform it to a factor (categorical data) so the algorithm treats the groups as independent values.
df[,AgeDiscret:= as.factor(round(Age/10,0))]
# Here is an even stronger simplification of the real age, with an arbitrary split at 30 years old. I chose this value arbitrarily. We will see later whether simplifying the information based on arbitrary values is a good strategy (you probably already have an idea of how well it will work!).
df[,AgeCat:= as.factor(ifelse(Age > 30, "Old", "Young"))]
# We remove ID as there is nothing to learn from this feature (it will just add some noise as the dataset is small).
df[,ID:=NULL]
#-------------Basic Training using XGBoost in caret Library-----------------
# Set up control parameters for caret::train
# Here we use 10-fold cross-validation, repeated twice, with random search for tuning hyper-parameters.
fitControl <- trainControl(method = "repeatedcv", number = 10, repeats = 2, search = "random")
# train a xgbTree model using caret::train
model <- train(factor(Improved)~., data = df, method = "xgbTree", trControl = fitControl)
# Instead of trees for our boosters, you can also fit a linear regression or a logistic regression model using xgbLinear
# model <- train(factor(Improved)~., data = df, method = "xgbLinear", trControl = fitControl)
# See model results
print(model)

View File

@@ -1,89 +0,0 @@
require(xgboost)
require(Matrix)
require(data.table)
if (!require(vcd)) {
install.packages('vcd') #Available in Cran. Used for its dataset with categorical values.
require(vcd)
}
# According to its documentation, XGBoost works only with numbers.
# Sometimes the dataset we have to work with contains categorical data.
# A categorical variable is one which has a fixed number of values. For example, if for each observation a variable called "Colour" can only take "red", "blue" or "green" as its value, it is a categorical variable.
#
# In R, a categorical variable is called a factor.
# Type ?factor in the console for more information.
#
# In this demo we will see how to transform a dense data frame with categorical variables to a sparse matrix before analyzing it in XGBoost.
# The method we are going to use is usually called "one-hot encoding".
#load Arthritis dataset in memory.
data(Arthritis)
# create a copy of the dataset with the data.table package (data.table is 100% compliant with R data frames, but its syntax is a lot more consistent and its performance is really good).
df <- data.table(Arthritis, keep.rownames = F)
# Let's have a look to the data.table
cat("Print the dataset\n")
print(df)
# Two columns have factor type, one has ordinal type (an ordinal variable is a categorical variable whose values can be ordered, here: None > Some > Marked).
cat("Structure of the dataset\n")
str(df)
# Let's add some new categorical features to see if they help. Of course these features are highly correlated with the Age feature. Usually that's not a good thing in ML, but tree algorithms (including boosted trees) are able to select the best features, even when they are highly correlated.
# For the first feature we create groups of age by rounding the real age. Note that we transform it to a factor (categorical data) so the algorithm treats the groups as independent values.
df[,AgeDiscret:= as.factor(round(Age/10,0))]
# Here is an even stronger simplification of the real age, with an arbitrary split at 30 years old. I chose this value arbitrarily. We will see later whether simplifying the information based on arbitrary values is a good strategy (you probably already have an idea of how well it will work!).
df[,AgeCat:= as.factor(ifelse(Age > 30, "Old", "Young"))]
# We remove ID as there is nothing to learn from this feature (it will just add some noise as the dataset is small).
df[,ID:=NULL]
# List the different values for the column Treatment: Placebo, Treated.
cat("Values of the categorical feature Treatment\n")
print(levels(df[,Treatment]))
# Next step, we will transform the categorical data to dummy variables.
# This method is also called one hot encoding.
# The purpose is to transform each value of each categorical feature in one binary feature.
#
# For example, the column Treatment will be replaced by two columns, Placebo and Treated. Each of them will be binary. An observation which had the value Placebo in column Treatment before the transformation will have, after the transformation, the value 1 in the new column Placebo and the value 0 in the new column Treated.
#
# The formula Improved~.-1 used below means: transform all categorical features except column Improved into binary values.
# Column Improved is excluded because it will be our output column, the one we want to predict.
sparse_matrix = sparse.model.matrix(Improved~.-1, data = df)
cat("Encoding of the sparse Matrix\n")
print(sparse_matrix)
# Create the output vector (not sparse)
# 1. Set, for all rows, field in Y column to 0;
# 2. set Y to 1 when Improved == Marked;
# 3. Return Y column
output_vector = df[,Y:=0][Improved == "Marked",Y:=1][,Y]
# Following is the same process as other demo
cat("Learning...\n")
bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 9,
eta = 1, nthread = 2, nrounds = 10, objective = "binary:logistic")
importance <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst)
print(importance)
# According to the importance matrix above, the most important feature in this dataset for predicting whether the treatment will work is Age. The second most important feature is whether or not a placebo was received. Sex is third. Then come our generated features (AgeDiscret). We can see that their contribution is very low (Gain column).
# Do these results make sense?
# Let's check some Chi2 between each of these features and the outcome.
print(chisq.test(df$Age, df$Y))
# The Pearson chi-squared statistic between Age and the illness disappearing is about 35.
print(chisq.test(df$AgeDiscret, df$Y))
# Our first simplification of Age (AgeDiscret) gives a chi-squared statistic of about 8.
print(chisq.test(df$AgeCat, df$Y))
# The arbitrary split I made between young and old at 30 years gives a low chi-squared statistic of about 2. It's a result we might expect: maybe in my mind being over 30 means being old (I am 32 and starting to feel old, which may explain that), but for the illness we are studying, the vulnerable age is not the same. Don't let your "gut" lower the quality of your model. In "data science", there is science :-)
# As you can see, in general, destroying information by simplifying it won't improve your model; the chi-squared tests just demonstrate that. But in more complex cases, creating a new feature from an existing one which makes the link with the outcome more obvious may help the algorithm and improve the model. The case studied here is not complex enough to show that; check the Kaggle forums for some challenging datasets.
# However, it is almost always worse when you add arbitrary rules.
# Moreover, you can notice that even though we added some useless new features highly correlated with other features, the boosted tree algorithm was still able to choose the best one, which in this case is Age. A linear model may not be as robust in this scenario.

View File

@@ -1,51 +0,0 @@
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
nround <- 2
param <- list(max_depth=2, eta=1, silent=1, nthread=2, objective='binary:logistic')
cat('running cross validation\n')
# do cross validation, this will print result out as
# [iteration] metric_name:mean_value+std_value
# std_value is standard deviation of the metric
xgb.cv(param, dtrain, nround, nfold=5, metrics='error')
cat('running cross validation, disable standard deviation display\n')
# do cross validation, this will print result out as
# [iteration] metric_name:mean_value+std_value
# std_value is standard deviation of the metric
xgb.cv(param, dtrain, nround, nfold=5,
metrics='error', showsd = FALSE)
###
# you can also do cross validation with a customized loss function
# See custom_objective.R
##
print('running cross validation with customized loss function')
logregobj <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
preds <- 1/(1 + exp(-preds))
grad <- preds - labels
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
}
evalerror <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
return(list(metric = "error", value = err))
}
param <- list(max_depth=2, eta=1, silent=1,
objective = logregobj, eval_metric = evalerror)
# train with customized objective
xgb.cv(params = param, data = dtrain, nrounds = nround, nfold = 5)
# do cross validation with prediction values for each fold
res <- xgb.cv(params = param, data = dtrain, nrounds = nround, nfold = 5, prediction = TRUE)
res$evaluation_log
length(res$pred)

View File

@@ -1,65 +0,0 @@
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
# note: for customized objective function, we leave objective as default
# note: what we are getting is margin value in prediction
# you must know what you are doing
watchlist <- list(eval = dtest, train = dtrain)
num_round <- 2
# user-defined objective function: given predictions, return the gradient and second order gradient
# this is the log-likelihood loss
logregobj <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
preds <- 1/(1 + exp(-preds))
grad <- preds - labels
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
}
# user-defined evaluation function, returns a pair (metric_name, result)
# NOTE: when you use a customized loss function, the default prediction value is the margin,
# which may make the built-in evaluation metrics not work properly
# for example, with logistic loss the prediction is the score before the logistic transformation,
# while the built-in evaluation error assumes input after the logistic transformation
# Keep this in mind when you use the customization; you may need to write a customized evaluation function
evalerror <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
return(list(metric = "error", value = err))
}
param <- list(max_depth=2, eta=1, nthread = 2, silent=1,
objective=logregobj, eval_metric=evalerror)
print ('start training with user customized objective')
# training with customized objective, we can also do step by step training
# simply look at xgboost.py's implementation of train
bst <- xgb.train(param, dtrain, num_round, watchlist)
#
# there can be cases where you want additional information
# to be considered besides the properties of the DMatrix you can get via getinfo
# you can set additional information as attributes of the DMatrix
# here we set the 'label' attribute of dtrain to the label; label is just used as an example, it can be anything
attr(dtrain, 'label') <- getinfo(dtrain, 'label')
# this is new customized objective, where you can access things you set
# same thing applies to customized evaluation function
logregobjattr <- function(preds, dtrain) {
# now you can access the attribute in customized function
labels <- attr(dtrain, 'label')
preds <- 1/(1 + exp(-preds))
grad <- preds - labels
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
}
param <- list(max_depth=2, eta=1, nthread = 2, silent=1,
objective=logregobjattr, eval_metric=evalerror)
print ('start training with user customized objective, with additional attributes in DMatrix')
# training with customized objective, we can also do step by step training
# simply look at xgboost.py's implementation of train
bst <- xgb.train(param, dtrain, num_round, watchlist)

View File

@@ -1,40 +0,0 @@
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
# note: for customized objective function, we leave objective as default
# note: what we are getting is margin value in prediction
# you must know what you are doing
param <- list(max_depth=2, eta=1, nthread = 2, silent=1)
watchlist <- list(eval = dtest)
num_round <- 20
# user-defined objective function: given predictions, return the gradient and second order gradient
# this is the log-likelihood loss
logregobj <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
preds <- 1/(1 + exp(-preds))
grad <- preds - labels
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
}
# user-defined evaluation function, returns a pair (metric_name, result)
# NOTE: when you use a customized loss function, the default prediction value is the margin,
# which may make the built-in evaluation metrics not work properly
# for example, with logistic loss the prediction is the score before the logistic transformation,
# while the built-in evaluation error assumes input after the logistic transformation
# Keep this in mind when you use the customization; you may need to write a customized evaluation function
evalerror <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
return(list(metric = "error", value = err))
}
print('start training with early stopping setting')
bst <- xgb.train(param, dtrain, num_round, watchlist,
                 objective = logregobj, eval_metric = evalerror, maximize = FALSE,
                 early_stopping_rounds = 3)
bst <- xgb.cv(param, dtrain, num_round, nfold = 5,
objective = logregobj, eval_metric = evalerror,
maximize = FALSE, early_stopping_rounds = 3)

View File

@@ -1,34 +0,0 @@
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
##
# this script demonstrates how to fit a generalized linear model in xgboost
# basically, we are using a linear model, instead of trees, for our boosters
# you can fit either a linear regression or a logistic regression model
##
# change booster to gblinear, so that we are fitting a linear model
# alpha is the L1 regularizer
# lambda is the L2 regularizer
# you can also set lambda_bias which is L2 regularizer on the bias term
param <- list(objective = "binary:logistic", booster = "gblinear",
nthread = 2, alpha = 0.0001, lambda = 1)
# normally, you do not need to set eta (step size)
# XGBoost uses a parallel coordinate descent algorithm (shotgun);
# parallelization can affect convergence in certain cases,
# so setting eta to a smaller value, e.g. 0.5, can make the optimization more stable
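# for illustration only (a hypothetical alternative, not used below), such a setting could look like:
# param <- list(objective = "binary:logistic", booster = "gblinear",
#               nthread = 2, alpha = 0.0001, lambda = 1, eta = 0.5)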
##
# the rest of settings are the same
##
watchlist <- list(eval = dtest, train = dtrain)
num_round <- 2
bst <- xgb.train(param, dtrain, num_round, watchlist)
ypred <- predict(bst, dtest)
labels <- getinfo(dtest, 'label')
cat('error of preds=', mean(as.numeric(ypred>0.5)!=labels),'\n')

View File

@@ -1,7 +0,0 @@
# Poisson regression on count data: predict the number of carburetors (carb,
# column 11 of mtcars) from the other variables.
require(xgboost)
data(mtcars)
head(mtcars)
bst <- xgboost(data = as.matrix(mtcars[, -11]), label = mtcars[, 11],
               objective = 'count:poisson', nrounds = 5)
pred <- predict(bst, as.matrix(mtcars[, -11]))
# root mean squared error of the in-sample predictions
sqrt(mean((pred - mtcars[, 11])^2))

View File

@@ -1,23 +0,0 @@
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic')
watchlist <- list(eval = dtest, train = dtrain)
nround = 2
# training the model for two rounds
bst = xgb.train(param, dtrain, nround, nthread = 2, watchlist)
cat('start testing prediction from first n trees\n')
labels <- getinfo(dtest,'label')
### predict using first 1 tree
ypred1 = predict(bst, dtest, ntreelimit=1)
# by default, we predict using all the trees
ypred2 = predict(bst, dtest)
cat('error of ypred1=', mean(as.numeric(ypred1>0.5)!=labels),'\n')
cat('error of ypred2=', mean(as.numeric(ypred2>0.5)!=labels),'\n')

View File

@@ -1,52 +0,0 @@
require(xgboost)
require(data.table)
require(Matrix)
set.seed(1982)
# load in the agaricus dataset
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label)
param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic')
nround = 4
# training the model for four rounds
bst = xgb.train(params = param, data = dtrain, nrounds = nround, nthread = 2)
# Model accuracy without new features
accuracy.before <- sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)
# predict the leaf index of each tree; by default all the trees are used
pred_with_leaf = predict(bst, dtest, predleaf = TRUE)
head(pred_with_leaf)
create.new.tree.features <- function(model, original.features){
pred_with_leaf <- predict(model, original.features, predleaf = TRUE)
cols <- list()
  for(i in 1:ncol(pred_with_leaf)){   # one column of leaf predictions per tree
    # max is not the real max but it's not important for the purpose of adding features
    leaf.id <- sort(unique(pred_with_leaf[,i]))
    cols[[i]] <- factor(x = pred_with_leaf[,i], levels = leaf.id)
}
cBind(original.features, sparse.model.matrix( ~ . -1, as.data.frame(cols)))
}
# Convert previous features to one hot encoding
new.features.train <- create.new.tree.features(bst, agaricus.train$data)
new.features.test <- create.new.tree.features(bst, agaricus.test$data)
# learning with new features
new.dtrain <- xgb.DMatrix(data = new.features.train, label = agaricus.train$label)
new.dtest <- xgb.DMatrix(data = new.features.test, label = agaricus.test$label)
watchlist <- list(train = new.dtrain)
bst <- xgb.train(params = param, data = new.dtrain, nrounds = nround, nthread = 2)
# Model accuracy with new features
accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)
# Here the accuracy was already good and is now perfect.
cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now", accuracy.after, "!\n"))

View File

@@ -1,12 +0,0 @@
# running all scripts in demo folder
demo(basic_walkthrough)
demo(custom_objective)
demo(boost_from_prediction)
demo(predict_first_ntree)
demo(generalized_linear_model)
demo(cross_validation)
demo(create_sparse_matrix)
demo(predict_leaf_indices)
demo(early_stopping)
demo(poisson_regression)
demo(caret_wrapper)

View File

@@ -1,32 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgboost.R
\docType{data}
\name{agaricus.test}
\alias{agaricus.test}
\title{Test part from Mushroom Data Set}
\format{A list containing a label vector, and a dgCMatrix object with 1611
rows and 126 variables}
\usage{
data(agaricus.test)
}
\description{
This data set is originally from the Mushroom data set,
UCI Machine Learning Repository.
}
\details{
This data set includes the following fields:
\itemize{
\item \code{label} the label for each record
\item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns.
}
}
\references{
https://archive.ics.uci.edu/ml/datasets/Mushroom
Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
School of Information and Computer Science.
}
\keyword{datasets}

View File

@@ -1,32 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgboost.R
\docType{data}
\name{agaricus.train}
\alias{agaricus.train}
\title{Training part from Mushroom Data Set}
\format{A list containing a label vector, and a dgCMatrix object with 6513
rows and 127 variables}
\usage{
data(agaricus.train)
}
\description{
This data set is originally from the Mushroom data set,
UCI Machine Learning Repository.
}
\details{
This data set includes the following fields:
\itemize{
\item \code{label} the label for each record
\item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns.
}
}
\references{
https://archive.ics.uci.edu/ml/datasets/Mushroom
Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
School of Information and Computer Science.
}
\keyword{datasets}

View File

@@ -1,38 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callbacks.R
\name{callbacks}
\alias{callbacks}
\title{Callback closures for booster training.}
\description{
These are used to perform various service tasks either during boosting iterations or at the end.
This approach helps to modularize many of these tasks without bloating the main training methods,
and it offers additional flexibility.
}
\details{
By default, a callback function is run after each boosting iteration.
An R-attribute \code{is_pre_iteration} could be set for a callback to define a pre-iteration function.
When a callback function has a \code{finalize} parameter, its finalizer part will also be run after
the boosting is completed.
WARNING: side-effects!!! Be aware that these callback functions access and modify things in
the environment from which they are called, which is a fairly uncommon thing to do in R.
To write a custom callback closure, make sure you first understand the main concepts of R environments.
Check either the R documentation on \code{\link[base]{environment}} or the
\href{http://adv-r.had.co.nz/Environments.html}{Environments chapter} from the "Advanced R"
book by Hadley Wickham. Further, the best option is to read the code of some of the existing callbacks -
choose ones that do something similar to what you want to achieve. Also, you would need to get familiar
with the objects available inside of the \code{xgb.train} and \code{xgb.cv} internal environments;
a minimal custom-callback sketch is given after this help page.
}
\seealso{
\code{\link{cb.print.evaluation}},
\code{\link{cb.evaluation.log}},
\code{\link{cb.reset.parameters}},
\code{\link{cb.early.stop}},
\code{\link{cb.save.model}},
\code{\link{cb.cv.predict}},
\code{\link{xgb.train}},
\code{\link{xgb.cv}}
}
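A minimal custom-callback sketch (an illustration added here, not part of the package; the function name `cb.print.iter` is hypothetical). It relies on the behaviour visible in the `xgb.train` source above: post-iteration callbacks are invoked without arguments from inside the training loop, so `parent.frame()` exposes variables such as `iteration` and `end_iteration`.
```r
# a hypothetical post-iteration callback that prints the iteration counter
cb.print.iter <- function() {
  callback <- function(env = parent.frame()) {
    cat(sprintf("finished iteration %d of %d\n", env$iteration, env$end_iteration))
  }
  # the 'name' attribute mirrors how the built-in callbacks label themselves (assumption)
  attr(callback, 'name') <- 'cb.print.iter'
  callback
}

# usage sketch, with param and dtrain as in the demo scripts above:
# bst <- xgb.train(param, dtrain, nrounds = 2, callbacks = list(cb.print.iter()))
```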

View File

@@ -1,43 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callbacks.R
\name{cb.cv.predict}
\alias{cb.cv.predict}
\title{Callback closure for returning cross-validation based predictions.}
\usage{
cb.cv.predict(save_models = FALSE)
}
\arguments{
\item{save_models}{a flag for whether to save the folds' models.}
}
\value{
Predictions are returned inside of the \code{pred} element, which is either a vector or a matrix,
depending on the number of prediction outputs per data row. The order of predictions corresponds
to the order of rows in the original dataset. Note that when a custom \code{folds} list is
provided in \code{xgb.cv}, the predictions would only be returned properly when this list is a
non-overlapping list of k sets of indices, as in a standard k-fold CV. The predictions would not be
meaningful when user-provided folds have overlapping indices as in, e.g., random sampling splits.
When some of the indices in the training dataset are not included into user-provided \code{folds},
their prediction value would be \code{NA}.
}
\description{
Callback closure for returning cross-validation based predictions.
}
\details{
This callback function saves predictions for all of the test folds,
and also allows saving the folds' models. A short usage sketch is shown after this help page.
It is a "finalizer" callback and it uses early stopping information whenever it is available,
thus it must be run after the early stopping callback if the early stopping is used.
Callback function expects the following values to be set in its calling frame:
\code{bst_folds},
\code{basket},
\code{data},
\code{end_iteration},
\code{num_parallel_tree},
\code{num_class}.
}
\seealso{
\code{\link{callbacks}}
}
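A short usage sketch (an illustration, not part of the generated documentation). It uses the agaricus data as in the other examples; the `pred` element is documented above, and `models` is assumed to be the element holding the saved fold models when `save_models = TRUE`.
```r
data(agaricus.train, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
param <- list(max_depth = 2, eta = 1, silent = 1, objective = "binary:logistic")
cv <- xgb.cv(params = param, data = dtrain, nrounds = 2, nfold = 5,
             callbacks = list(cb.cv.predict(save_models = TRUE)))
str(cv$pred)      # out-of-fold predictions, in the row order of dtrain
length(cv$models) # one booster per fold
```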

View File

@@ -1,63 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callbacks.R
\name{cb.early.stop}
\alias{cb.early.stop}
\title{Callback closure to activate the early stopping.}
\usage{
cb.early.stop(stopping_rounds, maximize = FALSE, metric_name = NULL,
verbose = TRUE)
}
\arguments{
\item{stopping_rounds}{The number of rounds with no improvement in
the evaluation metric in order to stop the training.}
\item{maximize}{whether to maximize the evaluation metric}
\item{metric_name}{the name of an evaluation column to use as a criterion for early
stopping. If not set, the last column would be used.
Let's say the test data in \code{watchlist} was labelled as \code{dtest},
and one wants to use the AUC in test data for early stopping regardless of where
it is in the \code{watchlist}, then one of the following would need to be set:
\code{metric_name='dtest-auc'} or \code{metric_name='dtest_auc'}.
All dash '-' characters in metric names are considered equivalent to '_'.}
\item{verbose}{whether to print the early stopping information.}
}
\description{
Callback closure to activate the early stopping.
}
\details{
This callback function determines the condition for early stopping
by setting the \code{stop_condition = TRUE} flag in its calling frame.
The following additional fields are assigned to the model's R object:
\itemize{
\item \code{best_score} the evaluation score at the best iteration
\item \code{best_iteration} at which boosting iteration the best score has occurred (1-based index)
\item \code{best_ntreelimit} to use with the \code{ntreelimit} parameter in \code{predict}.
It differs from \code{best_iteration} in multiclass or random forest settings.
}
The same values are also stored as xgb-attributes:
\itemize{
\item \code{best_iteration} is stored as a 0-based iteration index (for interoperability of binary models)
\item \code{best_msg} message string is also stored.
}
At least one data element is required in the evaluation watchlist for early stopping to work.
Callback function expects the following values to be set in its calling frame:
\code{stop_condition},
\code{bst_evaluation},
\code{rank},
\code{bst} (or \code{bst_folds} and \code{basket}),
\code{iteration},
\code{begin_iteration},
\code{end_iteration},
\code{num_parallel_tree}.
}
\seealso{
\code{\link{callbacks}},
\code{\link{xgb.attr}}
}

View File

@@ -1,32 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callbacks.R
\name{cb.evaluation.log}
\alias{cb.evaluation.log}
\title{Callback closure for logging the evaluation history}
\usage{
cb.evaluation.log()
}
\description{
Callback closure for logging the evaluation history
}
\details{
This callback function appends the current iteration evaluation results \code{bst_evaluation}
available in the calling parent frame to the \code{evaluation_log} list in a calling frame.
The finalizer callback (called with \code{finalize = TRUE} in the end) converts
the \code{evaluation_log} list into a final data.table.
The iteration evaluation result \code{bst_evaluation} must be a named numeric vector.
Note: in the column names of the final data.table, the dash '-' character is replaced with
the underscore '_' in order to make the column names more like regular R identifiers.
Callback function expects the following values to be set in its calling frame:
\code{evaluation_log},
\code{bst_evaluation},
\code{iteration}.
}
\seealso{
\code{\link{callbacks}}
}

View File

@@ -1,28 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callbacks.R
\name{cb.print.evaluation}
\alias{cb.print.evaluation}
\title{Callback closure for printing the result of evaluation}
\usage{
cb.print.evaluation(period = 1)
}
\arguments{
\item{period}{results would be printed every \code{period} iterations}
}
\description{
Callback closure for printing the result of evaluation
}
\details{
The callback function prints the result of evaluation at every \code{period} iterations.
The initial and the last iteration's evaluations are always printed.
Callback function expects the following values to be set in its calling frame:
\code{bst_evaluation} (also \code{bst_evaluation_err} when available),
\code{iteration},
\code{begin_iteration},
\code{end_iteration}.
}
\seealso{
\code{\link{callbacks}}
}

View File

@@ -1,37 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callbacks.R
\name{cb.reset.parameters}
\alias{cb.reset.parameters}
\title{Callback closure for resetting the booster's parameters at each iteration.}
\usage{
cb.reset.parameters(new_params)
}
\arguments{
\item{new_params}{a list where each element corresponds to a parameter that needs to be reset.
Each element's value must be either a vector of values of length \code{nrounds}
to be set at each iteration,
or a function of two parameters \code{learning_rates(iteration, nrounds)}
which returns a new parameter value by using the current iteration number
and the total number of boosting rounds.}
}
\description{
Callback closure for resetting the booster's parameters at each iteration.
}
\details{
This is a "pre-iteration" callback function used to reset booster's parameters
at the beginning of each iteration.
Note that when training is resumed from some previous model, and a function is used to
reset a parameter value, the \code{nrounds} argument in this function would be the
number of boosting rounds in the current training. A short sketch of the functional
form is shown after this help page.
Callback function expects the following values to be set in its calling frame:
\code{bst} or \code{bst_folds},
\code{iteration},
\code{begin_iteration},
\code{end_iteration}.
}
\seealso{
\code{\link{callbacks}}
}
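A short sketch of the functional form of `new_params` (illustrative; `param` and `dtrain` are assumed to be defined as in the other examples). Here `eta` decays linearly from 0.5 at the first iteration towards 0.05 at the last:
```r
my_eta <- function(iteration, nrounds) 0.5 * (1 - (iteration - 1) / nrounds)
bst <- xgb.train(param, dtrain, nrounds = 10,
                 callbacks = list(cb.reset.parameters(list(eta = my_eta))))
```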

View File

@@ -1,34 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callbacks.R
\name{cb.save.model}
\alias{cb.save.model}
\title{Callback closure for saving a model file.}
\usage{
cb.save.model(save_period = 0, save_name = "xgboost.model")
}
\arguments{
\item{save_period}{save the model to disk after every
\code{save_period} iterations; 0 means save the model at the end.}
\item{save_name}{the name or path for the saved model file.
It can contain a \code{\link[base]{sprintf}} formatting specifier
to include the integer iteration number in the file name.
E.g., with \code{save_name} = 'xgboost_%04d.model',
the file saved at iteration 50 would be named "xgboost_0050.model".}
}
\description{
Callback closure for saving a model file.
}
\details{
This callback function allows saving an xgb-model file, either periodically every \code{save_period} iterations or at the end of training. A brief usage sketch is shown after this help page.
Callback function expects the following values to be set in its calling frame:
\code{bst},
\code{iteration},
\code{begin_iteration},
\code{end_iteration}.
}
\seealso{
\code{\link{callbacks}}
}
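A brief usage sketch (illustrative; `param` and `dtrain` are assumed to be defined as in the other examples): save a model snapshot every 5 iterations, with the iteration number embedded in the file name.
```r
bst <- xgb.train(param, dtrain, nrounds = 10,
                 callbacks = list(cb.save.model(save_period = 5,
                                                save_name = "xgboost_%04d.model")))
```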

View File

@@ -1,29 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.DMatrix.R
\name{dim.xgb.DMatrix}
\alias{dim.xgb.DMatrix}
\title{Dimensions of xgb.DMatrix}
\usage{
\method{dim}{xgb.DMatrix}(x)
}
\arguments{
\item{x}{Object of class \code{xgb.DMatrix}}
}
\description{
Returns a vector of numbers of rows and of columns in an \code{xgb.DMatrix}.
}
\details{
Note: since \code{nrow} and \code{ncol} internally use \code{dim}, they can also
be directly used with an \code{xgb.DMatrix} object.
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
stopifnot(nrow(dtrain) == nrow(train$data))
stopifnot(ncol(dtrain) == ncol(train$data))
stopifnot(all(dim(dtrain) == dim(train$data)))
}

View File

@@ -1,36 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.DMatrix.R
\name{dimnames.xgb.DMatrix}
\alias{dimnames.xgb.DMatrix}
\alias{dimnames<-.xgb.DMatrix}
\title{Handling of column names of \code{xgb.DMatrix}}
\usage{
\method{dimnames}{xgb.DMatrix}(x)
\method{dimnames}{xgb.DMatrix}(x) <- value
}
\arguments{
\item{x}{object of class \code{xgb.DMatrix}}
\item{value}{a list of two elements: the first one is ignored
and the second one is column names}
}
\description{
Only column names are supported for \code{xgb.DMatrix}; setting row names
would have no effect, and the returned row names would be NULL.
}
\details{
Generic \code{dimnames} methods are used by \code{colnames}.
Since row names are irrelevant, it is recommended to use \code{colnames} directly.
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
dimnames(dtrain)
colnames(dtrain)
colnames(dtrain) <- make.names(1:ncol(train$data))
print(dtrain, verbose=TRUE)
}

View File

@@ -1,43 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.DMatrix.R
\name{getinfo}
\alias{getinfo}
\alias{getinfo.xgb.DMatrix}
\title{Get information of an xgb.DMatrix object}
\usage{
getinfo(object, ...)
\method{getinfo}{xgb.DMatrix}(object, name, ...)
}
\arguments{
\item{object}{Object of class \code{xgb.DMatrix}}
\item{...}{other parameters}
\item{name}{the name of the information field to get (see details)}
}
\description{
Get information of an xgb.DMatrix object
}
\details{
The \code{name} field can be one of the following:
\itemize{
\item \code{label}: the label XGBoost learns from;
\item \code{weight}: per-record weights used to rescale each record's contribution;
\item \code{base_margin}: the base margin is the base prediction XGBoost will boost from;
\item \code{nrow}: number of rows of the \code{xgb.DMatrix}.
}
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
labels <- getinfo(dtrain, 'label')
setinfo(dtrain, 'label', 1-labels)
labels2 <- getinfo(dtrain, 'label')
stopifnot(all(labels2 == 1-labels))
}

View File

@@ -1,129 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.Booster.R
\name{predict.xgb.Booster}
\alias{predict.xgb.Booster}
\alias{predict.xgb.Booster.handle}
\title{Predict method for eXtreme Gradient Boosting model}
\usage{
\method{predict}{xgb.Booster}(object, newdata, missing = NA,
outputmargin = FALSE, ntreelimit = NULL, predleaf = FALSE,
reshape = FALSE, ...)
\method{predict}{xgb.Booster.handle}(object, ...)
}
\arguments{
\item{object}{Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}}
\item{newdata}{takes \code{matrix}, \code{dgCMatrix}, local data file or \code{xgb.DMatrix}.}
\item{missing}{Missing is only used when input is dense matrix. Pick a float value that represents
missing values in data (e.g., sometimes 0 or some other extreme value is used).}
\item{outputmargin}{whether the prediction should be returned in the form of the original untransformed
sum of predictions from boosting iterations' results. E.g., setting \code{outputmargin=TRUE} for
logistic regression would result in predictions for log-odds instead of probabilities.}
\item{ntreelimit}{limit the number of model's trees or boosting iterations used in prediction (see Details).
It will use all the trees by default (\code{NULL} value).}
\item{predleaf}{whether predict leaf index instead.}
\item{reshape}{whether to reshape the vector of predictions to a matrix form when there are several
prediction outputs per case. This option has no effect when \code{predleaf = TRUE}.}
\item{...}{Parameters passed to \code{predict.xgb.Booster}}
}
\value{
For regression or binary classification, it returns a vector of length \code{nrows(newdata)}.
For multiclass classification, either a \code{num_class * nrows(newdata)} vector or
a \code{(nrows(newdata), num_class)} dimension matrix is returned, depending on
the \code{reshape} value.
When \code{predleaf = TRUE}, the output is a matrix object with the
number of columns corresponding to the number of trees.
}
\description{
Predicted values based on either xgboost model or model handle object.
}
\details{
Note that \code{ntreelimit} is not necessarily equal to the number of boosting iterations
and it is not necessarily equal to the number of trees in a model.
E.g., in a random forest-like model, \code{ntreelimit} would limit the number of trees.
But for multiclass classification, there are multiple trees per iteration,
but \code{ntreelimit} limits the number of boosting iterations.
Also note that \code{ntreelimit} would currently do nothing for predictions from gblinear,
since gblinear doesn't keep its boosting history.
One possible practical application of the \code{predleaf} option is to use the model
as a generator of new features which capture non-linearity and interactions,
e.g., as implemented in \code{\link{xgb.create.features}}.
}
\examples{
## binary classification:
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
# use all trees by default
pred <- predict(bst, test$data)
# use only the 1st tree
pred <- predict(bst, test$data, ntreelimit = 1)
## multiclass classification in iris dataset:
lb <- as.numeric(iris$Species) - 1
num_class <- 3
set.seed(11)
bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
max_depth = 4, eta = 0.5, nthread = 2, nrounds = 10, subsample = 0.5,
objective = "multi:softprob", num_class = num_class)
# predict for softmax returns num_class probability numbers per case:
pred <- predict(bst, as.matrix(iris[, -5]))
str(pred)
# reshape it to a num_class-columns matrix
pred <- matrix(pred, ncol=num_class, byrow=TRUE)
# convert the probabilities to softmax labels
pred_labels <- max.col(pred) - 1
# the following should result in the same error as seen in the last iteration
sum(pred_labels != lb)/length(lb)
# compare that to the predictions from softmax:
set.seed(11)
bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
max_depth = 4, eta = 0.5, nthread = 2, nrounds = 10, subsample = 0.5,
objective = "multi:softmax", num_class = num_class)
pred <- predict(bst, as.matrix(iris[, -5]))
str(pred)
all.equal(pred, pred_labels)
# prediction from using only 5 iterations should result
# in the same error as seen in iteration 5:
pred5 <- predict(bst, as.matrix(iris[, -5]), ntreelimit=5)
sum(pred5 != lb)/length(lb)
## random forest-like model of 25 trees for binary classification:
set.seed(11)
bst <- xgboost(data = train$data, label = train$label, max_depth = 5,
nthread = 2, nrounds = 1, objective = "binary:logistic",
num_parallel_tree = 25, subsample = 0.6, colsample_bytree = 0.1)
# Inspect the prediction error vs number of trees:
lb <- test$label
dtest <- xgb.DMatrix(test$data, label=lb)
err <- sapply(1:25, function(n) {
pred <- predict(bst, dtest, ntreelimit=n)
sum((pred > 0.5) != lb)/length(lb)
})
plot(err, type='l', ylim=c(0,0.1), xlab='#trees')
}
\seealso{
\code{\link{xgb.train}}.
}

View File

@@ -1,30 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.Booster.R
\name{print.xgb.Booster}
\alias{print.xgb.Booster}
\title{Print xgb.Booster}
\usage{
\method{print}{xgb.Booster}(x, verbose = FALSE, ...)
}
\arguments{
\item{x}{an xgb.Booster object}
\item{verbose}{whether to print detailed data (e.g., attribute values)}
\item{...}{not currently used}
}
\description{
Print information about xgb.Booster.
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
attr(bst, 'myattr') <- 'memo'
print(bst)
print(bst, verbose=TRUE)
}


@@ -1,29 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.DMatrix.R
\name{print.xgb.DMatrix}
\alias{print.xgb.DMatrix}
\title{Print xgb.DMatrix}
\usage{
\method{print}{xgb.DMatrix}(x, verbose = FALSE, ...)
}
\arguments{
\item{x}{an xgb.DMatrix object}
\item{verbose}{whether to print colnames (when present)}
\item{...}{not currently used}
}
\description{
Print information about xgb.DMatrix.
Currently it displays dimensions and presence of info-fields and colnames.
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
dtrain
print(dtrain, verbose=TRUE)
}


@@ -1,32 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.cv.R
\name{print.xgb.cv.synchronous}
\alias{print.xgb.cv.synchronous}
\title{Print xgb.cv result}
\usage{
\method{print}{xgb.cv.synchronous}(x, verbose = FALSE, ...)
}
\arguments{
\item{x}{an \code{xgb.cv.synchronous} object}
\item{verbose}{whether to print detailed data}
\item{...}{passed to \code{data.table.print}}
}
\description{
Prints formatted results of \code{xgb.cv}.
}
\details{
When not verbose, it would only print the evaluation results,
including the best iteration (when available).
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train
cv <- xgb.cv(data = train$data, label = train$label, nfold = 5, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
print(cv)
print(cv, verbose=TRUE)
}


@@ -1,44 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.DMatrix.R
\name{setinfo}
\alias{setinfo}
\alias{setinfo.xgb.DMatrix}
\title{Set information of an xgb.DMatrix object}
\usage{
setinfo(object, ...)
\method{setinfo}{xgb.DMatrix}(object, name, info, ...)
}
\arguments{
\item{object}{Object of class "xgb.DMatrix"}
\item{...}{other parameters}
\item{name}{the name of the field to set}
\item{info}{the specific field of information to set}
}
\description{
Set information of an xgb.DMatrix object
}
\details{
The \code{name} field can be one of the following:
\itemize{
\item \code{label}: the label Xgboost learns from;
\item \code{weight}: to do a weight rescale;
\item \code{base_margin}: base margin is the base prediction Xgboost will boost from;
\item \code{group}.
}
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
labels <- getinfo(dtrain, 'label')
setinfo(dtrain, 'label', 1-labels)
labels2 <- getinfo(dtrain, 'label')
stopifnot(all.equal(labels2, 1-labels))
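# A hedged sketch (not part of the original example): per-observation weights
# can be set in the same way (here simply unit weights, for illustration only)
setinfo(dtrain, 'weight', rep(1, length(labels)))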
}


@@ -1,41 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.DMatrix.R
\name{slice}
\alias{[.xgb.DMatrix}
\alias{slice}
\alias{slice.xgb.DMatrix}
\title{Get a new DMatrix containing the specified rows of
original xgb.DMatrix object}
\usage{
slice(object, ...)
\method{slice}{xgb.DMatrix}(object, idxset, ...)
\method{[}{xgb.DMatrix}(object, idxset, colset = NULL)
}
\arguments{
\item{object}{Object of class "xgb.DMatrix"}
\item{...}{other parameters (currently not used)}
\item{idxset}{an integer vector of indices of rows needed}
\item{colset}{currently not used (column subsetting is not available)}
}
\description{
Get a new DMatrix containing the specified rows of
original xgb.DMatrix object
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
dsub <- slice(dtrain, 1:42)
labels1 <- getinfo(dsub, 'label')
dsub <- dtrain[1:42, ]
labels2 <- getinfo(dsub, 'label')
all.equal(labels1, labels2)
}


@@ -1,30 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.DMatrix.R
\name{xgb.DMatrix}
\alias{xgb.DMatrix}
\title{Construct xgb.DMatrix object}
\usage{
xgb.DMatrix(data, info = list(), missing = NA, ...)
}
\arguments{
\item{data}{a \code{matrix} object, a \code{dgCMatrix} object or a character representing a filename}
\item{info}{a list of information of the xgb.DMatrix object}
\item{missing}{a float value to represent missing values in the data; only used when the input is a dense
matrix. Sometimes a dataset uses 0 or another extreme value to represent missing values.}
\item{...}{other information to pass to \code{info}.}
}
\description{
Construct an xgb.DMatrix object from a dense matrix, a sparse matrix,
or local file (that was created previously by saving an \code{xgb.DMatrix}).
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
dtrain <- xgb.DMatrix('xgb.DMatrix.data')
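# A hedged sketch (not part of the original example): constructing from a dense
# matrix while declaring 0 as the missing-value code (for illustration only)
dense <- as.matrix(train$data[1:100, ])
dtrain_dense <- xgb.DMatrix(dense, label = train$label[1:100], missing = 0)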
}


@@ -1,24 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.DMatrix.save.R
\name{xgb.DMatrix.save}
\alias{xgb.DMatrix.save}
\title{Save xgb.DMatrix object to binary file}
\usage{
xgb.DMatrix.save(dmatrix, fname)
}
\arguments{
\item{dmatrix}{the \code{xgb.DMatrix} object}
\item{fname}{the name of the file to write.}
}
\description{
Save xgb.DMatrix object to binary file
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
dtrain <- xgb.DMatrix('xgb.DMatrix.data')
}


@@ -1,86 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.Booster.R
\name{xgb.attr}
\alias{xgb.attr}
\alias{xgb.attr<-}
\alias{xgb.attributes}
\alias{xgb.attributes<-}
\title{Accessors for serializable attributes of a model.}
\usage{
xgb.attr(object, name)
xgb.attr(object, name) <- value
xgb.attributes(object)
xgb.attributes(object) <- value
}
\arguments{
\item{object}{Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}.}
\item{name}{a non-empty character string specifying which attribute is to be accessed.}
\item{value}{a value of an attribute for \code{xgb.attr<-}; for \code{xgb.attributes<-}
it's a list (or an object coercible to a list) with the names of attributes to set
and the elements corresponding to attribute values.
Non-character values are converted to character.
When attribute value is not a scalar, only the first index is used.
Use \code{NULL} to remove an attribute.}
}
\value{
\code{xgb.attr} returns either a string value of an attribute
or \code{NULL} if an attribute wasn't stored in a model.
\code{xgb.attributes} returns a list of all attributes stored in a model
or \code{NULL} if a model has no stored attributes.
}
\description{
These methods allow manipulation of the key-value attribute strings of an xgboost model.
}
\details{
The primary purpose of xgboost model attributes is to store some meta-data about the model.
Note that they are a separate concept from the object attributes in R.
Specifically, they refer to key-value strings that can be attached to an xgboost model,
stored together with the model's binary representation, and accessed later
(from R or any other interface).
In contrast, any R-attribute assigned to an R-object of \code{xgb.Booster} class
would not be saved by \code{xgb.save} because an xgboost model is an external memory object
and its serialization is handled externally.
Also, setting an attribute that has the same name as one of xgboost's parameters wouldn't
change the value of that parameter for a model.
Use \code{\link{xgb.parameters<-}} to set or change model parameters.
The attribute setters would usually work more efficiently for \code{xgb.Booster.handle}
than for \code{xgb.Booster}, since only a handle (pointer) would need to be copied.
That would only matter if attributes need to be set many times.
Note, however, that when feeding a handle of an \code{xgb.Booster} object to the attribute setters,
the raw model cache of an \code{xgb.Booster} object would not be automatically updated,
and it would be user's responsibility to call \code{xgb.save.raw} to update it.
The \code{xgb.attributes<-} setter either updates existing attributes or adds one or several new attributes,
but it doesn't delete the other existing attributes.
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
xgb.attr(bst, "my_attribute") <- "my attribute value"
print(xgb.attr(bst, "my_attribute"))
xgb.attributes(bst) <- list(a = 123, b = "abc")
xgb.save(bst, 'xgb.model')
bst1 <- xgb.load('xgb.model')
print(xgb.attr(bst1, "my_attribute"))
print(xgb.attributes(bst1))
# deletion:
xgb.attr(bst1, "my_attribute") <- NULL
print(xgb.attributes(bst1))
xgb.attributes(bst1) <- list(a = NULL, b = NULL)
print(xgb.attributes(bst1))
}


@@ -1,90 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.create.features.R
\name{xgb.create.features}
\alias{xgb.create.features}
\title{Create new features from a previously learned model}
\usage{
xgb.create.features(model, data, ...)
}
\arguments{
\item{model}{decision tree boosting model learned on the original data}
\item{data}{original data (usually provided as a \code{dgCMatrix} matrix)}
\item{...}{currently not used}
}
\value{
\code{dgCMatrix} matrix including both the original data and the new features.
}
\description{
May improve the learning by adding new features to the training data based on the decision trees from a previously learned model.
}
\details{
This function was inspired by Section 3.1 of the paper:
\strong{Practical Lessons from Predicting Clicks on Ads at Facebook}
\emph{(Xinran He, Junfeng Pan, Ou Jin, Tianbing Xu, Bo Liu, Tao Xu, Yanxin Shi, Antoine Atallah, Ralf Herbrich, Stuart Bowers,
Joaquin Quinonero Candela)}
International Workshop on Data Mining for Online Advertising (ADKDD) - August 24, 2014
\url{https://research.facebook.com/publications/758569837499391/practical-lessons-from-predicting-clicks-on-ads-at-facebook/}.
Extract explaining the method:
"We found that boosted decision trees are a powerful and very
convenient way to implement non-linear and tuple transformations
of the kind we just described. We treat each individual
tree as a categorical feature that takes as value the
index of the leaf an instance ends up falling in. We use
1-of-K coding of this type of features.
For example, consider the boosted tree model in Figure 1 with 2 subtrees,
where the first subtree has 3 leafs and the second 2 leafs. If an
instance ends up in leaf 2 in the first subtree and leaf 1 in
second subtree, the overall input to the linear classifier will
be the binary vector \code{[0, 1, 0, 1, 0]}, where the first 3 entries
correspond to the leaves of the first subtree and last 2 to
those of the second subtree.
[...]
We can understand boosted decision tree
based transformation as a supervised feature encoding that
converts a real-valued vector into a compact binary-valued
vector. A traversal from root node to a leaf node represents
a rule on certain features."
}
\examples{
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label)
param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic')
nround = 4
bst = xgb.train(params = param, data = dtrain, nrounds = nround, nthread = 2)
# Model accuracy without new features
accuracy.before <- sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)
# Convert previous features to one hot encoding
new.features.train <- xgb.create.features(model = bst, agaricus.train$data)
new.features.test <- xgb.create.features(model = bst, agaricus.test$data)
# learning with new features
new.dtrain <- xgb.DMatrix(data = new.features.train, label = agaricus.train$label)
new.dtest <- xgb.DMatrix(data = new.features.test, label = agaricus.test$label)
watchlist <- list(train = new.dtrain)
bst <- xgb.train(params = param, data = new.dtrain, nrounds = nround, nthread = 2)
# Model accuracy with new features
accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)
# Here the accuracy was already good and is now perfect.
cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now", accuracy.after, "!\\n"))
}


@@ -1,143 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.cv.R
\name{xgb.cv}
\alias{xgb.cv}
\title{Cross Validation}
\usage{
xgb.cv(params = list(), data, nrounds, nfold, label = NULL, missing = NA,
prediction = FALSE, showsd = TRUE, metrics = list(), obj = NULL,
feval = NULL, stratified = TRUE, folds = NULL, verbose = TRUE,
print_every_n = 1L, early_stopping_rounds = NULL, maximize = NULL,
callbacks = list(), ...)
}
\arguments{
\item{params}{the list of parameters. Commonly used ones are:
\itemize{
\item \code{objective} objective function, common ones are
\itemize{
\item \code{reg:linear} linear regression
\item \code{binary:logistic} logistic regression for classification
}
\item \code{eta} step size of each boosting step
\item \code{max_depth} maximum depth of the tree
\item \code{nthread} number of thread used in training, if not set, all threads are used
}
See \code{\link{xgb.train}} for further details.
See also demo/ for walkthrough example in R.}
\item{data}{takes an \code{xgb.DMatrix} or \code{Matrix} as the input.}
\item{nrounds}{the max number of iterations}
\item{nfold}{the original dataset is randomly partitioned into \code{nfold} equal size subsamples.}
\item{label}{vector of response values. Should be provided only when data is not an \code{xgb.DMatrix}.}
\item{missing}{is only used when input is a dense matrix. By default is set to NA, which means
that NA values should be considered as 'missing' by the algorithm.
Sometimes, 0 or other extreme value might be used to represent missing values.}
\item{prediction}{A logical value indicating whether to return the test fold predictions
from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callback.}
\item{showsd}{\code{boolean}, whether to show standard deviation of cross validation}
\item{metrics}{list of evaluation metrics to be used in cross validation,
when it is not specified, the evaluation metric is chosen according to objective function.
Possible options are:
\itemize{
\item \code{error} binary classification error rate
\item \code{rmse} Root mean square error
\item \code{logloss} negative log-likelihood function
\item \code{auc} Area under curve
\item \code{merror} Exact matching error, used to evaluate multi-class classification
}}
\item{obj}{customized objective function. Returns gradient and second order
gradient with given prediction and dtrain.}
\item{feval}{customized evaluation function. Returns
\code{list(metric='metric-name', value='metric-value')} with given
prediction and dtrain.}
\item{stratified}{a \code{boolean} indicating whether sampling of folds should be stratified
by the values of outcome labels.}
\item{folds}{\code{list} provides a possibility to use a list of pre-defined CV folds
(each element must be a vector of test fold's indices). When folds are supplied,
the \code{nfold} and \code{stratified} parameters are ignored.}
\item{verbose}{\code{boolean}, print the statistics during the process}
\item{print_every_n}{Print each n-th iteration evaluation messages when \code{verbose>0}.
Default is 1 which means all messages are printed. This parameter is passed to the
\code{\link{cb.print.evaluation}} callback.}
\item{early_stopping_rounds}{If \code{NULL}, the early stopping function is not triggered.
If set to an integer \code{k}, training with a validation set will stop if the performance
doesn't improve for \code{k} rounds.
Setting this parameter engages the \code{\link{cb.early.stop}} callback.}
\item{maximize}{If \code{feval} and \code{early_stopping_rounds} are set,
then this parameter must be set as well.
When it is \code{TRUE}, it means the larger the evaluation score the better.
This parameter is passed to the \code{\link{cb.early.stop}} callback.}
\item{callbacks}{a list of callback functions to perform various task during boosting.
See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
parameters' values. User can provide either existing or their own callback methods in order
to customize the training process.}
\item{...}{other parameters to pass to \code{params}.}
}
\value{
An object of class \code{xgb.cv.synchronous} with the following elements:
\itemize{
\item \code{call} a function call.
\item \code{params} parameters that were passed to the xgboost library. Note that it does not
capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
\item \code{callbacks} callback functions that were either automatically assigned or
explicitly passed.
\item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
first column corresponding to iteration number and the rest corresponding to the
CV-based evaluation means and standard deviations for the training and test CV-sets.
It is created by the \code{\link{cb.evaluation.log}} callback.
\item \code{niter} number of boosting iterations.
\item \code{folds} the list of CV folds' indices - either those passed through the \code{folds}
parameter or randomly generated.
\item \code{best_iteration} iteration number with the best evaluation metric value
(only available with early stopping).
\item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
which could further be used in \code{predict} method
(only available with early stopping).
\item \code{pred} CV prediction values available when \code{prediction} is set.
It is either vector or matrix (see \code{\link{cb.cv.predict}}).
\item \code{models} a list of the CV folds' models. It is only available with the explicit
setting of the \code{cb.cv.predict(save_models = TRUE)} callback.
}
}
\description{
The cross validation function of xgboost
}
\details{
The original sample is randomly partitioned into \code{nfold} equal size subsamples.
Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model, and the remaining \code{nfold - 1} subsamples are used as training data.
The cross-validation process is then repeated \code{nrounds} times, with each of the \code{nfold} subsamples used exactly once as the validation data.
All observations are used for both training and validation.
Adapted from \url{http://en.wikipedia.org/wiki/Cross-validation_\%28statistics\%29#k-fold_cross-validation}
}
\examples{
data(agaricus.train, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
cv <- xgb.cv(data = dtrain, nrounds = 3, nthread = 2, nfold = 5, metrics = list("rmse","auc"),
max_depth = 3, eta = 1, objective = "binary:logistic")
print(cv)
print(cv, verbose=TRUE)
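# A hedged sketch (not part of the original example): cross validation with early stopping
cv2 <- xgb.cv(data = dtrain, nrounds = 20, nthread = 2, nfold = 5,
              max_depth = 3, eta = 1, objective = "binary:logistic",
              early_stopping_rounds = 3)
print(cv2$best_iteration)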
}


@@ -1,47 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.dump.R
\name{xgb.dump}
\alias{xgb.dump}
\title{Save xgboost model to text file}
\usage{
xgb.dump(model = NULL, fname = NULL, fmap = "", with_stats = FALSE, ...)
}
\arguments{
\item{model}{the model object.}
\item{fname}{the name of the text file where to save the model text dump. If not provided or set to \code{NULL} the function will return the model as a \code{character} vector.}
\item{fmap}{feature map file representing the type of feature.
Detailed description could be found at
\url{https://github.com/dmlc/xgboost/wiki/Binary-Classification#dump-model}.
See demo/ for walkthrough example in R, and
\url{https://github.com/dmlc/xgboost/blob/master/demo/data/featmap.txt}
for an example of the format.}
\item{with_stats}{whether to dump some additional statistics about the splits.
When this option is on, the model dump comes with two additional statistics:
gain is the approximate loss function gain we get in each split;
cover is the sum of second order gradient in each node.}
\item{...}{currently not used}
}
\value{
if fname is not provided or set to \code{NULL} the function will return the model as a \code{character} vector. Otherwise it will return \code{TRUE}.
}
\description{
Save an xgboost model to a text file that can be parsed later.
}
\examples{
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
# save the model in file 'xgb.model.dump'
xgb.dump(bst, 'xgb.model.dump', with_stats = TRUE)
# print the model without saving it to a file
print(xgb.dump(bst))
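# A hedged sketch (not part of the original example): capture the dump as a
# character vector and count how many tree headers it contains
dump_lines <- xgb.dump(bst, with_stats = TRUE)
sum(grepl("^booster", dump_lines))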
}


@@ -1,64 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.importance.R
\name{xgb.importance}
\alias{xgb.importance}
\title{Show importance of features in a model}
\usage{
xgb.importance(feature_names = NULL, model = NULL, data = NULL,
label = NULL, target = function(x) ((x + label) == 2))
}
\arguments{
\item{feature_names}{names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.}
\item{model}{generated by the \code{xgb.train} function.}
\item{data}{the dataset used for the training step. Will be used with \code{label} parameter for co-occurence computation. More information in \code{Detail} part. This parameter is optional.}
\item{label}{the label vector used for the training step. Will be used with \code{data} parameter for co-occurrence computation. More information in \code{Detail} part. This parameter is optional.}
\item{target}{a function which returns \code{TRUE} or \code{1} when an observation should be counted as a co-occurrence and \code{FALSE} or \code{0} otherwise. A default function is provided for computing co-occurrences in a binary classification. The \code{target} function should have only one parameter. This parameter will be used to provide each important feature vector after having applied the split condition, therefore these vectors will contain only 0s and 1s, regardless of the original information. More information in \code{Detail} part. This parameter is optional.}
}
\value{
A \code{data.table} of the features used in the model with their average gain (and their weight for boosted tree model) in the model.
}
\description{
Create a \code{data.table} of the most important features of a model.
}
\details{
This function is for both linear and tree models.
\code{data.table} is returned by the function.
The columns are:
\itemize{
\item \code{Features} name of the features as provided in \code{feature_names} or already present in the model dump;
\item \code{Gain} contribution of each feature to the model. For boosted tree models, the gain of each feature in each tree is taken into account and then averaged per feature to give a view of the entire model. A higher percentage means a more important feature for predicting the \code{label} used for the training (only available for tree models);
\item \code{Cover} metric of the number of observations related to this feature (only available for tree models);
\item \code{Weight} percentage representing the relative number of times a feature has been used in trees.
}
If you don't provide \code{feature_names}, indices of the features will be used instead.
Because the indices are extracted from the model dump (made on the C++ side), they start at 0 (as usual in C++) instead of 1 (as usual in R).
Co-occurrence count
-------------------
The gain gives an indication of how important a feature is in making a branch of a decision tree more pure. However, with this information alone, you can't know whether this feature has to be present or not to get a specific classification. In the example code, you may wonder whether odor=none should be \code{TRUE} in order not to eat a mushroom.
Co-occurrence computation is here to help in understanding this relation between a predictor and a specific class. It counts how many observations are returned as \code{TRUE} by the \code{target} function (see parameters). When you execute the example below, there are only 92 cases over the 3140 observations of the train dataset where a mushroom has no odor and can be eaten safely.
If you need to remember one thing only: unless you want to leave us early, don't eat a mushroom which has no odor :-)
}
\examples{
data(agaricus.train, package='xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
xgb.importance(colnames(agaricus.train$data), model = bst)
# Same thing with co-occurrence computation this time
xgb.importance(colnames(agaricus.train$data), model = bst, data = agaricus.train$data, label = agaricus.train$label)
}


@@ -1,26 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.load.R
\name{xgb.load}
\alias{xgb.load}
\title{Load xgboost model from binary file}
\usage{
xgb.load(modelfile)
}
\arguments{
\item{modelfile}{the name of the binary file.}
}
\description{
Load xgboost model from the binary model file
}
\examples{
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
xgb.save(bst, 'xgb.model')
bst <- xgb.load('xgb.model')
pred <- predict(bst, test$data)
}


@@ -1,61 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.model.dt.tree.R
\name{xgb.model.dt.tree}
\alias{xgb.model.dt.tree}
\title{Parse a boosted tree model text dump}
\usage{
xgb.model.dt.tree(feature_names = NULL, model = NULL, text = NULL,
n_first_tree = NULL)
}
\arguments{
\item{feature_names}{character vector of feature names. If the model already
contains feature names, this argument should be \code{NULL} (default value)}
\item{model}{object of class \code{xgb.Booster}}
\item{text}{\code{character} vector previously generated by the \code{xgb.dump}
function (where parameter \code{with_stats = TRUE} should have been set).}
\item{n_first_tree}{limit the parsing to the \code{n} first trees.
If set to \code{NULL}, all trees of the model are parsed.}
}
\value{
A \code{data.table} with detailed information about model trees' nodes.
The columns of the \code{data.table} are:
\itemize{
\item \code{Tree}: ID of a tree in a model
\item \code{Node}: ID of a node in a tree
\item \code{ID}: unique identifier of a node in a model
\item \code{Feature}: for a branch node, it's a feature id or name (when available);
for a leaf node, it simply labels it as \code{'Leaf'}
\item \code{Split}: location of the split for a branch node (split condition is always "less than")
\item \code{Yes}: ID of the next node when the split condition is met
\item \code{No}: ID of the next node when the split condition is not met
\item \code{Missing}: ID of the next node when branch value is missing
\item \code{Quality}: either the split gain (change in loss) or the leaf value
\item \code{Cover}: metric related to the number of observations either seen by a split
or collected by a leaf during training.
}
}
\description{
Parse a boosted tree model text dump into a \code{data.table} structure.
}
\examples{
# Basic use:
data(agaricus.train, package='xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
(dt <- xgb.model.dt.tree(colnames(agaricus.train$data), bst))
# How to match feature names of splits that are following a current 'Yes' branch:
merge(dt, dt[, .(ID, Y.Feature=Feature)], by.x='Yes', by.y='ID', all.x=TRUE)[order(Tree,Node)]
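# A hedged sketch (not part of the original example): parse only the first tree
xgb.model.dt.tree(colnames(agaricus.train$data), bst, n_first_tree = 1)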
}


@@ -1,32 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.Booster.R
\name{xgb.parameters<-}
\alias{xgb.parameters<-}
\title{Accessors for model parameters.}
\usage{
xgb.parameters(object) <- value
}
\arguments{
\item{object}{Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}.}
\item{value}{a list (or an object coercible to a list) with the names of parameters to set
and the elements corresponding to parameter values.}
}
\description{
Only the setter for xgboost parameters is currently implemented.
}
\details{
Note that the setter would usually work more efficiently for \code{xgb.Booster.handle}
than for \code{xgb.Booster}, since only a handle would need to be copied.
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
xgb.parameters(bst) <- list(eta = 0.1)
}


@@ -1,74 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.ggplot.R, R/xgb.plot.deepness.R
\name{xgb.ggplot.deepness}
\alias{xgb.ggplot.deepness}
\alias{xgb.plot.deepness}
\title{Plot model trees deepness}
\usage{
xgb.ggplot.deepness(model = NULL, which = c("2x1", "max.depth", "med.depth",
"med.weight"))
xgb.plot.deepness(model = NULL, which = c("2x1", "max.depth", "med.depth",
"med.weight"), plot = TRUE, ...)
}
\arguments{
\item{model}{either an \code{xgb.Booster} model generated by the \code{xgb.train} function
or a data.table result of the \code{xgb.model.dt.tree} function.}
\item{which}{which distribution to plot (see details).}
\item{plot}{(base R barplot) whether a barplot should be produced.
If FALSE, only a data.table is returned.}
\item{...}{other parameters passed to \code{barplot} or \code{plot}.}
}
\value{
Other than producing plots (when \code{plot=TRUE}), the \code{xgb.plot.deepness} function
silently returns a processed data.table where each row corresponds to a terminal leaf in a tree model,
and contains information about the leaf's depth, cover, and weight (which is used in calculating predictions).
The \code{xgb.ggplot.deepness} silently returns either a list of two ggplot graphs when \code{which="2x1"}
or a single ggplot graph for the other \code{which} options.
}
\description{
Visualizes distributions related to the depth of tree leaves.
\code{xgb.plot.deepness} uses base R graphics, while \code{xgb.ggplot.deepness} uses the ggplot backend.
}
\details{
When \code{which="2x1"}, two distributions with respect to the leaf depth
are plotted on top of each other:
\itemize{
\item the distribution of the number of leaves in a tree model at a certain depth;
\item the distribution of the average weighted number of observations ("cover")
ending up in leaves at a certain depth.
}
Those could be helpful in determining sensible ranges of the \code{max_depth}
and \code{min_child_weight} parameters.
When \code{which="max.depth"} or \code{which="med.depth"}, plots of either maximum or median depth
per tree with respect to tree number are created. And \code{which="med.weight"} allows one to see how
a tree's median absolute leaf weight changes through the iterations.
This function was inspired by the blog post
\url{http://aysent.github.io/2015/11/08/random-forest-leaf-visualization.html}.
}
\examples{
data(agaricus.train, package='xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 15,
eta = 0.1, nthread = 2, nrounds = 50, objective = "binary:logistic",
subsample = 0.5, min_child_weight = 2)
xgb.plot.deepness(bst)
xgb.ggplot.deepness(bst)
xgb.plot.deepness(bst, which='max.depth', pch=16, col=rgb(0,0,1,0.3), cex=2)
xgb.plot.deepness(bst, which='med.weight', pch=16, col=rgb(0,0,1,0.3), cex=2)
}
\seealso{
\code{\link{xgb.train}}, \code{\link{xgb.model.dt.tree}}.
}


@@ -1,82 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.ggplot.R, R/xgb.plot.importance.R
\name{xgb.ggplot.importance}
\alias{xgb.ggplot.importance}
\alias{xgb.plot.importance}
\title{Plot feature importance as a bar graph}
\usage{
xgb.ggplot.importance(importance_matrix = NULL, top_n = NULL,
measure = NULL, rel_to_first = FALSE, n_clusters = c(1:10), ...)
xgb.plot.importance(importance_matrix = NULL, top_n = NULL,
measure = NULL, rel_to_first = FALSE, left_margin = 10, cex = NULL,
plot = TRUE, ...)
}
\arguments{
\item{importance_matrix}{a \code{data.table} returned by \code{\link{xgb.importance}}.}
\item{top_n}{maximal number of top features to include into the plot.}
\item{measure}{the name of importance measure to plot.
When \code{NULL}, 'Gain' would be used for trees and 'Weight' would be used for gblinear.}
\item{rel_to_first}{whether importance values should be represented as relative to the highest ranked feature.
See Details.}
\item{n_clusters}{(ggplot only) a \code{numeric} vector containing the min and the max range
of the possible number of clusters of bars.}
\item{...}{other parameters passed to \code{barplot} (except horiz, border, cex.names, names.arg, and las).}
\item{left_margin}{(base R barplot) allows adjusting the left margin size to fit feature names.
When it is NULL, the existing \code{par('mar')} is used.}
\item{cex}{(base R barplot) passed as \code{cex.names} parameter to \code{barplot}.}
\item{plot}{(base R barplot) whether a barplot should be produced.
If FALSE, only a data.table is returned.}
}
\value{
The \code{xgb.plot.importance} function creates a \code{barplot} (when \code{plot=TRUE})
and silently returns a processed data.table with \code{n_top} features sorted by importance.
The \code{xgb.ggplot.importance} function returns a ggplot graph which could be customized afterwards.
E.g., to change the title of the graph, add \code{+ ggtitle("A GRAPH NAME")} to the result.
}
\description{
Represents previously calculated feature importance as a bar graph.
\code{xgb.plot.importance} uses base R graphics, while \code{xgb.ggplot.importance} uses the ggplot backend.
}
\details{
The graph represents each feature as a horizontal bar of length proportional to the importance of a feature.
Features are shown ranked in a decreasing importance order.
It works for importances from both \code{gblinear} and \code{gbtree} models.
When \code{rel_to_first = FALSE}, the values would be plotted as they were in \code{importance_matrix}.
For gbtree model, that would mean being normalized to the total of 1
("what is feature's importance contribution relative to the whole model?").
For linear models, \code{rel_to_first = FALSE} would show actual values of the coefficients.
Setting \code{rel_to_first = TRUE} allows one to see the picture from the perspective of
"what is feature's importance contribution relative to the most important feature?"
The ggplot-backend method also performs 1-D clustering of the importance values,
with bar colors corresponding to different clusters that have somewhat similar importance values.
}
\examples{
data(agaricus.train)
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 3,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
importance_matrix <- xgb.importance(colnames(agaricus.train$data), model = bst)
xgb.plot.importance(importance_matrix, rel_to_first = TRUE, xlab = "Relative importance")
(gg <- xgb.ggplot.importance(importance_matrix, measure = "Frequency", rel_to_first = TRUE))
gg + ggplot2::ylab("Frequency")
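# A hedged sketch (not part of the original example): restrict the plot to the top features
xgb.plot.importance(importance_matrix, top_n = 5)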
}
\seealso{
\code{\link[graphics]{barplot}}.
}


@@ -1,60 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.plot.multi.trees.R
\name{xgb.plot.multi.trees}
\alias{xgb.plot.multi.trees}
\title{Project all trees on one tree and plot it}
\usage{
xgb.plot.multi.trees(model, feature_names = NULL, features_keep = 5,
plot_width = NULL, plot_height = NULL, ...)
}
\arguments{
\item{model}{dump generated by the \code{xgb.train} function.}
\item{feature_names}{names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.}
\item{features_keep}{number of features to keep in each position of the multi trees.}
\item{plot_width}{width in pixels of the graph to produce}
\item{plot_height}{height in pixels of the graph to produce}
\item{...}{currently not used}
}
\value{
A \code{DiagrammeR} graph object of the projected tree ensemble.
}
\description{
Visualization of the ensemble of trees as a single collective unit.
}
\details{
This function tries to capture the complexity of a gradient boosted tree ensemble
in a cohesive way.
The goal is to improve the interpretability of a model generally seen as a black box.
The function is dedicated to boosting applied to decision trees only.
The purpose is to move from an ensemble of trees to a single tree only.
It takes advantage of the fact that the shape of a binary tree is only defined by
its depth (therefore, in a boosting model, all trees have the same shape).
Moreover, the trees tend to reuse the same features.
The function projects each tree onto one, keeping for each position the first
\code{features_keep} features (based on the Gain per feature measure).
This function is inspired by this blog post:
\url{https://wellecks.wordpress.com/2015/02/21/peering-into-the-black-box-visualizing-lambdamart/}
}
\examples{
data(agaricus.train, package='xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 15,
eta = 1, nthread = 2, nrounds = 30, objective = "binary:logistic",
min_child_weight = 50)
p <- xgb.plot.multi.trees(model = bst, feature_names = colnames(agaricus.train$data), features_keep = 3)
print(p)
}


@@ -1,49 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.plot.tree.R
\name{xgb.plot.tree}
\alias{xgb.plot.tree}
\title{Plot a boosted tree model}
\usage{
xgb.plot.tree(feature_names = NULL, model = NULL, n_first_tree = NULL,
plot_width = NULL, plot_height = NULL, ...)
}
\arguments{
\item{feature_names}{names of each feature as a \code{character} vector. Can be extracted from a sparse matrix (see example). If model dump already contains feature names, this argument should be \code{NULL}.}
\item{model}{generated by the \code{xgb.train} function. Providing the model avoids the creation of a dump file.}
\item{n_first_tree}{limit the plot to the n first trees. If \code{NULL}, all trees of the model are plotted. Performance can be low for huge models.}
\item{plot_width}{the width of the diagram in pixels.}
\item{plot_height}{the height of the diagram in pixels.}
\item{...}{currently not used.}
}
\value{
A \code{DiagrammeR} of the model.
}
\description{
Read a tree model text dump and plot the model.
}
\details{
The content of each node is organised as follows:
\itemize{
\item \code{feature} value;
\item \code{cover}: the sum of second order gradient of training data classified to the leaf; if it is square loss, this simply corresponds to the number of instances in that branch. The deeper in the tree a node is, the lower this metric will be;
\item \code{gain}: a metric of the importance of the node in the model.
}
The function uses the \href{http://www.graphviz.org/}{GraphViz} library for that purpose.
}
\examples{
data(agaricus.train, package='xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
xgb.plot.tree(feature_names = colnames(agaricus.train$data), model = bst)
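# A hedged sketch (not part of the original example): limit the plot to the first tree
xgb.plot.tree(feature_names = colnames(agaricus.train$data), model = bst, n_first_tree = 1)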
}


@@ -1,28 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.save.R
\name{xgb.save}
\alias{xgb.save}
\title{Save xgboost model to binary file}
\usage{
xgb.save(model, fname)
}
\arguments{
\item{model}{the model object.}
\item{fname}{the name of the file to write.}
}
\description{
Save an xgboost model produced by \code{xgboost} or \code{xgb.train} to a binary file.
}
\examples{
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
xgb.save(bst, 'xgb.model')
bst <- xgb.load('xgb.model')
pred <- predict(bst, test$data)
}


@@ -1,28 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.save.raw.R
\name{xgb.save.raw}
\alias{xgb.save.raw}
\title{Save xgboost model to R's raw vector;
the model can be loaded back from the raw vector with \code{xgb.load}}
\usage{
xgb.save.raw(model)
}
\arguments{
\item{model}{the model object.}
}
\description{
Save an xgboost model produced by \code{xgboost} or \code{xgb.train} to R's raw vector.
}
\examples{
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
raw <- xgb.save.raw(bst)
bst <- xgb.load(raw)
pred <- predict(bst, test$data)
}


@@ -1,245 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.train.R, R/xgboost.R
\name{xgb.train}
\alias{xgb.train}
\alias{xgboost}
\title{eXtreme Gradient Boosting Training}
\usage{
xgb.train(params = list(), data, nrounds, watchlist = list(), obj = NULL,
feval = NULL, verbose = 1, print_every_n = 1L,
early_stopping_rounds = NULL, maximize = NULL, save_period = NULL,
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(), ...)
xgboost(data = NULL, label = NULL, missing = NA, weight = NULL,
params = list(), nrounds, verbose = 1, print_every_n = 1L,
early_stopping_rounds = NULL, maximize = NULL, save_period = 0,
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(), ...)
}
\arguments{
\item{params}{the list of parameters.
The complete list of parameters is available at \url{http://xgboost.readthedocs.io/en/latest/parameter.html}.
Below is a shorter summary:
1. General Parameters
\itemize{
\item \code{booster} which booster to use, can be \code{gbtree} or \code{gblinear}. Default: \code{gbtree}
\item \code{silent} 0 means printing running messages, 1 means silent mode. Default: 0
}
2. Booster Parameters
2.1. Parameter for Tree Booster
\itemize{
\item \code{eta} controls the learning rate: it scales the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. A lower value for \code{eta} implies a larger value for \code{nrounds}: a low \code{eta} value means the model is more robust to overfitting but slower to compute. Default: 0.3
\item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. The larger, the more conservative the algorithm will be.
\item \code{max_depth} maximum depth of a tree. Default: 6
\item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
\item \code{subsample} subsample ratio of the training instances. Setting it to 0.5 means that xgboost randomly collects half of the data instances to grow trees, and this will prevent overfitting. It makes computation shorter (because there is less data to analyse). It is advised to use this parameter with \code{eta} and to increase \code{nrounds}. Default: 1
\item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
\item \code{num_parallel_tree} Experimental parameter. Number of trees to grow per round. Useful for testing Random Forest through Xgboost: set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{nrounds = 1} accordingly. Default: 1
}
2.2. Parameter for Linear Booster
\itemize{
\item \code{lambda} L2 regularization term on weights. Default: 0
\item \code{lambda_bias} L2 regularization term on bias. Default: 0
\item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
}
3. Task Parameters
\itemize{
\item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
\itemize{
\item \code{reg:linear} linear regression (Default).
\item \code{reg:logistic} logistic regression.
\item \code{binary:logistic} logistic regression for binary classification. Output probability.
\item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
\item \code{num_class} set the number of classes. To use only with multiclass objectives.
\item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}.
\item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
\item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss.
}
\item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
\item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: the metric will be assigned according to the objective (rmse for regression, error for classification, and mean average precision for ranking). A list is provided in the details section.
}}
\item{data}{input dataset. \code{xgb.train} takes only an \code{xgb.DMatrix} as the input.
\code{xgboost}, in addition, also accepts \code{matrix}, \code{dgCMatrix}, or local data file.}
\item{nrounds}{the max number of iterations}
\item{watchlist}{what information should be printed when \code{verbose=1} or
\code{verbose=2}. Watchlist is used to specify validation set monitoring
during training. For example user can specify
watchlist=list(validation1=mat1, validation2=mat2) to watch
the performance of each round's model on mat1 and mat2}
\item{obj}{customized objective function. Returns gradient and second order
gradient with given prediction and dtrain.}
\item{feval}{custimized evaluation function. Returns
\code{list(metric='metric-name', value='metric-value')} with given
prediction and dtrain.}
\item{verbose}{If 0, xgboost will stay silent. If 1, xgboost will print
information of performance. If 2, xgboost will print some additional information.
Setting \code{verbose > 0} automatically engages the \code{\link{cb.evaluation.log}} and
\code{\link{cb.print.evaluation}} callback functions.}
\item{print_every_n}{Print each n-th iteration evaluation messages when \code{verbose>0}.
Default is 1 which means all messages are printed. This parameter is passed to the
\code{\link{cb.print.evaluation}} callback.}
\item{early_stopping_rounds}{If \code{NULL}, the early stopping function is not triggered.
If set to an integer \code{k}, training with a validation set will stop if the performance
doesn't improve for \code{k} rounds.
Setting this parameter engages the \code{\link{cb.early.stop}} callback.}
\item{maximize}{If \code{feval} and \code{early_stopping_rounds} are set,
then this parameter must be set as well.
When it is \code{TRUE}, it means the larger the evaluation score the better.
This parameter is passed to the \code{\link{cb.early.stop}} callback.}
\item{save_period}{when it is non-NULL, model is saved to disk after every \code{save_period} rounds,
0 means save at the end. The saving is handled by the \code{\link{cb.save.model}} callback.}
\item{save_name}{the name or path for periodically saved model file.}
\item{xgb_model}{a previously built model to continue the training from.
Could be either an object of class \code{xgb.Booster}, or its raw data, or the name of a
file with a previously saved model.}
\item{callbacks}{a list of callback functions to perform various task during boosting.
See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
parameters' values. User can provide either existing or their own callback methods in order
to customize the training process.}
\item{...}{other parameters to pass to \code{params}.}
\item{label}{vector of response values. Should not be provided when data is
a local data file name or an \code{xgb.DMatrix}.}
\item{missing}{by default is set to NA, which means that NA values should be considered as 'missing'
by the algorithm. Sometimes, 0 or other extreme value might be used to represent missing values.
This parameter is only used when input is a dense matrix.}
\item{weight}{a vector indicating the weight for each row of the input.}
}
\value{
An object of class \code{xgb.Booster} with the following elements:
\itemize{
\item \code{handle} a handle (pointer) to the xgboost model in memory.
\item \code{raw} a cached memory dump of the xgboost model saved as R's \code{raw} type.
\item \code{niter} number of boosting iterations.
\item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
first column corresponding to iteration number and the rest corresponding to evaluation
metrics' values. It is created by the \code{\link{cb.evaluation.log}} callback.
\item \code{call} a function call.
\item \code{params} parameters that were passed to the xgboost library. Note that it does not
capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
\item \code{callbacks} callback functions that were either automatically assigned or
explicitly passed.
\item \code{best_iteration} iteration number with the best evaluation metric value
(only available with early stopping).
\item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
which could further be used in \code{predict} method
(only available with early stopping).
\item \code{best_score} the best evaluation metric value
(only available with early stopping).
}
}
\description{
\code{xgb.train} is an advanced interface for training an xgboost model. The \code{xgboost} function provides a simpler interface.
}
\details{
These are the training functions for \code{xgboost}.
The \code{xgb.train} interface supports advanced features such as \code{watchlist},
customized objective and evaluation metric functions, therefore it is more flexible
than the \code{\link{xgboost}} interface.
Parallelization is automatically enabled if \code{OpenMP} is present.
Number of threads can also be manually specified via \code{nthread} parameter.
The evaluation metric is chosen automatically by Xgboost (according to the objective)
when the \code{eval_metric} parameter is not provided.
User may set one or several \code{eval_metric} parameters.
Note that when using a customized metric, only this single metric can be used.
The following is the list of built-in metrics for which Xgboost provides an optimized implementation:
\itemize{
\item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
\item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
\item \code{mlogloss} multiclass logloss. \url{https://www.kaggle.com/wiki/MultiClassLogLoss}
\item \code{error} Binary classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
By default, it uses the 0.5 threshold for predicted values to define negative and positive instances.
Different threshold (e.g., 0.) could be specified as "error@0."
\item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
\item \code{auc} Area under the curve. \url{http://en.wikipedia.org/wiki/Receiver_operating_characteristic#'Area_under_curve} for ranking evaluation.
\item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{http://en.wikipedia.org/wiki/NDCG}
}
The following callbacks are automatically created when certain parameters are set:
\itemize{
\item \code{cb.print.evaluation} is turned on when \code{verbose > 0};
and the \code{print_every_n} parameter is passed to it.
\item \code{cb.evaluation.log} is on when \code{verbose > 0} and \code{watchlist} is present.
\item \code{cb.early.stop}: when \code{early_stopping_rounds} is set.
\item \code{cb.save.model}: when \code{save_period > 0} is set.
}
}
\examples{
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
watchlist <- list(eval = dtest, train = dtrain)
## A simple xgb.train example:
param <- list(max_depth = 2, eta = 1, silent = 1,
objective = "binary:logistic", eval_metric = "auc")
bst <- xgb.train(param, dtrain, nthread = 2, nrounds = 2, watchlist)
## An xgb.train example where custom objective and evaluation metric are used:
logregobj <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
preds <- 1/(1 + exp(-preds))
grad <- preds - labels
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
}
evalerror <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
return(list(metric = "error", value = err))
}
bst <- xgb.train(list(max_depth = 2, eta = 1, silent = 1), dtrain, nthread = 2,
                 nrounds = 2, watchlist, obj = logregobj, feval = evalerror)
## An xgb.train example of using variable learning rates at each iteration:
my_etas <- list(eta = c(0.5, 0.1))
bst <- xgb.train(param, dtrain, nthread = 2, nrounds = 2, watchlist,
callbacks = list(cb.reset.parameters(my_etas)))
## Explicit use of the cb.evaluation.log callback allows to run
## xgb.train silently but still store the evaluation results:
bst <- xgb.train(param, dtrain, nthread = 2, nrounds = 2, watchlist,
verbose = 0, callbacks = list(cb.evaluation.log()))
print(bst$evaluation_log)
## An 'xgboost' interface example:
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
objective = "binary:logistic")
pred <- predict(bst, agaricus.test$data)
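## A hedged sketch (not part of the original example): several evaluation metrics
## can be requested by repeating eval_metric in the parameter list
param2 <- list(max_depth = 2, eta = 1, silent = 1, objective = "binary:logistic",
               eval_metric = "error", eval_metric = "logloss")
bst <- xgb.train(param2, dtrain, nthread = 2, nrounds = 2, watchlist)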
}
\seealso{
\code{\link{callbacks}},
\code{\link{predict.xgb.Booster}},
\code{\link{xgb.cv}}
}


@@ -1,17 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{xgboost-deprecated}
\alias{xgboost-deprecated}
\title{Deprecation notices.}
\description{
At this time, some of the parameter names were changed in order to make the code style more uniform.
The deprecated parameters would be removed in the next release.
}
\details{
To see all the current deprecated and new parameters, check the \code{xgboost:::depr_par_lut} table.
A deprecation warning is shown when any of the deprecated parameters is used in a call.
An additional warning is shown when there was a partial match to a deprecated parameter
(as R is able to partially match parameter names).
}


@@ -1,18 +0,0 @@
# package root
PKGROOT=../../
ENABLE_STD_THREAD=1
# _*_ mode: Makefile; _*_
CXX_STD = CXX11
XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\
-DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) -DDMLC_DISABLE_STDIN=1\
-DDMLC_LOG_CUSTOMIZE=1 -DXGBOOST_CUSTOMIZE_LOGGER=1\
-DRABIT_CUSTOMIZE_MSG_ -DRABIT_STRICT_CXX98_
PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
PKG_CXXFLAGS= $(SHLIB_OPENMP_CFLAGS) $(SHLIB_PTHREAD_FLAGS)
PKG_LIBS = $(SHLIB_OPENMP_CFLAGS) $(SHLIB_PTHREAD_FLAGS)
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o\
$(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\
$(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o


@@ -1,32 +0,0 @@
# package root
PKGROOT=./
ENABLE_STD_THREAD=0
# _*_ mode: Makefile; _*_
# This file is only used for windows compilation from github
# It will be replaced by Makevars in CRAN version
.PHONY: all xgblib
all: $(SHLIB)
$(SHLIB): xgblib
xgblib:
cp -r ../../src .
cp -r ../../rabit .
cp -r ../../dmlc-core .
cp -r ../../include .
cp -r ../../amalgamation .
CXX_STD = CXX11
XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\
-DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) -DDMLC_DISABLE_STDIN=1\
-DDMLC_LOG_CUSTOMIZE=1 -DXGBOOST_CUSTOMIZE_LOGGER=1\
-DRABIT_CUSTOMIZE_MSG_ -DRABIT_STRICT_CXX98_
PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
PKG_CXXFLAGS= $(SHLIB_OPENMP_CFLAGS) $(SHLIB_PTHREAD_FLAGS)
PKG_LIBS = $(SHLIB_OPENMP_CFLAGS) $(SHLIB_PTHREAD_FLAGS)
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o\
$(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\
$(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o
$(OBJECTS) : xgblib
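A minimal install sketch matching the comment at the top of this Makevars.win, under the assumption that the repository was cloned recursively (so that dmlc-core and rabit are present for the copy rules above) and that the devtools package is available; this is a hypothetical workflow, not the project's documented build procedure:
## run from the root of a recursive checkout of the xgboost repository
install.packages("devtools")
devtools::install("R-package")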


@@ -1,419 +0,0 @@
// Copyright (c) 2014 by Contributors
#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <xgboost/c_api.h>
#include <vector>
#include <string>
#include <utility>
#include <cstring>
#include <cstdio>
#include <sstream>
#include "./xgboost_R.h"
/*!
* \brief macro to annotate begin of api
*/
#define R_API_BEGIN() \
GetRNGstate(); \
try {
/*!
* \brief macro to annotate end of api
*/
#define R_API_END() \
} catch(dmlc::Error& e) { \
PutRNGstate(); \
error(e.what()); \
} \
PutRNGstate();
/*!
* \brief macro to check the call.
*/
#define CHECK_CALL(x) \
if ((x) != 0) { \
error(XGBGetLastError()); \
}
using namespace dmlc;
SEXP XGCheckNullPtr_R(SEXP handle) {
return ScalarLogical(R_ExternalPtrAddr(handle) == NULL);
}
void _DMatrixFinalizer(SEXP ext) {
R_API_BEGIN();
if (R_ExternalPtrAddr(ext) == NULL) return;
CHECK_CALL(XGDMatrixFree(R_ExternalPtrAddr(ext)));
R_ClearExternalPtr(ext);
R_API_END();
}
SEXP XGDMatrixCreateFromFile_R(SEXP fname, SEXP silent) {
SEXP ret;
R_API_BEGIN();
DMatrixHandle handle;
CHECK_CALL(XGDMatrixCreateFromFile(CHAR(asChar(fname)), asInteger(silent), &handle));
ret = PROTECT(R_MakeExternalPtr(handle, R_NilValue, R_NilValue));
R_RegisterCFinalizerEx(ret, _DMatrixFinalizer, TRUE);
UNPROTECT(1);
R_API_END();
return ret;
}
SEXP XGDMatrixCreateFromMat_R(SEXP mat,
SEXP missing) {
SEXP ret;
R_API_BEGIN();
SEXP dim = getAttrib(mat, R_DimSymbol);
size_t nrow = static_cast<size_t>(INTEGER(dim)[0]);
size_t ncol = static_cast<size_t>(INTEGER(dim)[1]);
double *din = REAL(mat);
std::vector<float> data(nrow * ncol);
#pragma omp parallel for schedule(static)
for (omp_ulong i = 0; i < nrow; ++i) {
for (size_t j = 0; j < ncol; ++j) {
data[i * ncol +j] = din[i + nrow * j];
}
}
DMatrixHandle handle;
CHECK_CALL(XGDMatrixCreateFromMat(BeginPtr(data), nrow, ncol, asReal(missing), &handle));
ret = PROTECT(R_MakeExternalPtr(handle, R_NilValue, R_NilValue));
R_RegisterCFinalizerEx(ret, _DMatrixFinalizer, TRUE);
UNPROTECT(1);
R_API_END();
return ret;
}
SEXP XGDMatrixCreateFromCSC_R(SEXP indptr,
SEXP indices,
SEXP data) {
SEXP ret;
R_API_BEGIN();
const int *p_indptr = INTEGER(indptr);
const int *p_indices = INTEGER(indices);
const double *p_data = REAL(data);
int nindptr = length(indptr);
int ndata = length(data);
std::vector<bst_ulong> col_ptr_(nindptr);
std::vector<unsigned> indices_(ndata);
std::vector<float> data_(ndata);
for (int i = 0; i < nindptr; ++i) {
col_ptr_[i] = static_cast<bst_ulong>(p_indptr[i]);
}
#pragma omp parallel for schedule(static)
for (int i = 0; i < ndata; ++i) {
indices_[i] = static_cast<unsigned>(p_indices[i]);
data_[i] = static_cast<float>(p_data[i]);
}
DMatrixHandle handle;
CHECK_CALL(XGDMatrixCreateFromCSC(BeginPtr(col_ptr_), BeginPtr(indices_),
BeginPtr(data_), nindptr, ndata,
&handle));
ret = PROTECT(R_MakeExternalPtr(handle, R_NilValue, R_NilValue));
R_RegisterCFinalizerEx(ret, _DMatrixFinalizer, TRUE);
UNPROTECT(1);
R_API_END();
return ret;
}
SEXP XGDMatrixSliceDMatrix_R(SEXP handle, SEXP idxset) {
SEXP ret;
R_API_BEGIN();
int len = length(idxset);
std::vector<int> idxvec(len);
for (int i = 0; i < len; ++i) {
idxvec[i] = INTEGER(idxset)[i] - 1;
}
DMatrixHandle res;
CHECK_CALL(XGDMatrixSliceDMatrix(R_ExternalPtrAddr(handle),
BeginPtr(idxvec), len,
&res));
ret = PROTECT(R_MakeExternalPtr(res, R_NilValue, R_NilValue));
R_RegisterCFinalizerEx(ret, _DMatrixFinalizer, TRUE);
UNPROTECT(1);
R_API_END();
return ret;
}
SEXP XGDMatrixSaveBinary_R(SEXP handle, SEXP fname, SEXP silent) {
R_API_BEGIN();
CHECK_CALL(XGDMatrixSaveBinary(R_ExternalPtrAddr(handle),
CHAR(asChar(fname)),
asInteger(silent)));
R_API_END();
return R_NilValue;
}
SEXP XGDMatrixSetInfo_R(SEXP handle, SEXP field, SEXP array) {
R_API_BEGIN();
int len = length(array);
const char *name = CHAR(asChar(field));
if (!strcmp("group", name)) {
std::vector<unsigned> vec(len);
#pragma omp parallel for schedule(static)
for (int i = 0; i < len; ++i) {
vec[i] = static_cast<unsigned>(INTEGER(array)[i]);
}
CHECK_CALL(XGDMatrixSetGroup(R_ExternalPtrAddr(handle), BeginPtr(vec), len));
} else {
std::vector<float> vec(len);
#pragma omp parallel for schedule(static)
for (int i = 0; i < len; ++i) {
vec[i] = REAL(array)[i];
}
CHECK_CALL(XGDMatrixSetFloatInfo(R_ExternalPtrAddr(handle),
CHAR(asChar(field)),
BeginPtr(vec), len));
}
R_API_END();
return R_NilValue;
}
SEXP XGDMatrixGetInfo_R(SEXP handle, SEXP field) {
SEXP ret;
R_API_BEGIN();
bst_ulong olen;
const float *res;
CHECK_CALL(XGDMatrixGetFloatInfo(R_ExternalPtrAddr(handle),
CHAR(asChar(field)),
&olen,
&res));
ret = PROTECT(allocVector(REALSXP, olen));
for (size_t i = 0; i < olen; ++i) {
REAL(ret)[i] = res[i];
}
UNPROTECT(1);
R_API_END();
return ret;
}
SEXP XGDMatrixNumRow_R(SEXP handle) {
bst_ulong nrow;
R_API_BEGIN();
CHECK_CALL(XGDMatrixNumRow(R_ExternalPtrAddr(handle), &nrow));
R_API_END();
return ScalarInteger(static_cast<int>(nrow));
}
SEXP XGDMatrixNumCol_R(SEXP handle) {
bst_ulong ncol;
R_API_BEGIN();
CHECK_CALL(XGDMatrixNumCol(R_ExternalPtrAddr(handle), &ncol));
R_API_END();
return ScalarInteger(static_cast<int>(ncol));
}
// functions related to booster
void _BoosterFinalizer(SEXP ext) {
if (R_ExternalPtrAddr(ext) == NULL) return;
CHECK_CALL(XGBoosterFree(R_ExternalPtrAddr(ext)));
R_ClearExternalPtr(ext);
}
SEXP XGBoosterCreate_R(SEXP dmats) {
SEXP ret;
R_API_BEGIN();
int len = length(dmats);
std::vector<void*> dvec;
for (int i = 0; i < len; ++i) {
dvec.push_back(R_ExternalPtrAddr(VECTOR_ELT(dmats, i)));
}
BoosterHandle handle;
CHECK_CALL(XGBoosterCreate(BeginPtr(dvec), dvec.size(), &handle));
ret = PROTECT(R_MakeExternalPtr(handle, R_NilValue, R_NilValue));
R_RegisterCFinalizerEx(ret, _BoosterFinalizer, TRUE);
UNPROTECT(1);
R_API_END();
return ret;
}
SEXP XGBoosterSetParam_R(SEXP handle, SEXP name, SEXP val) {
R_API_BEGIN();
CHECK_CALL(XGBoosterSetParam(R_ExternalPtrAddr(handle),
CHAR(asChar(name)),
CHAR(asChar(val))));
R_API_END();
return R_NilValue;
}
SEXP XGBoosterUpdateOneIter_R(SEXP handle, SEXP iter, SEXP dtrain) {
R_API_BEGIN();
CHECK_CALL(XGBoosterUpdateOneIter(R_ExternalPtrAddr(handle),
asInteger(iter),
R_ExternalPtrAddr(dtrain)));
R_API_END();
return R_NilValue;
}
SEXP XGBoosterBoostOneIter_R(SEXP handle, SEXP dtrain, SEXP grad, SEXP hess) {
R_API_BEGIN();
CHECK_EQ(length(grad), length(hess))
<< "gradient and hess must have same length";
int len = length(grad);
std::vector<float> tgrad(len), thess(len);
#pragma omp parallel for schedule(static)
for (int j = 0; j < len; ++j) {
tgrad[j] = REAL(grad)[j];
thess[j] = REAL(hess)[j];
}
CHECK_CALL(XGBoosterBoostOneIter(R_ExternalPtrAddr(handle),
R_ExternalPtrAddr(dtrain),
BeginPtr(tgrad), BeginPtr(thess),
len));
R_API_END();
return R_NilValue;
}
SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evnames) {
const char *ret;
R_API_BEGIN();
CHECK_EQ(length(dmats), length(evnames))
<< "dmats and evnams must have same length";
int len = length(dmats);
std::vector<void*> vec_dmats;
std::vector<std::string> vec_names;
std::vector<const char*> vec_sptr;
for (int i = 0; i < len; ++i) {
vec_dmats.push_back(R_ExternalPtrAddr(VECTOR_ELT(dmats, i)));
vec_names.push_back(std::string(CHAR(asChar(VECTOR_ELT(evnames, i)))));
}
for (int i = 0; i < len; ++i) {
vec_sptr.push_back(vec_names[i].c_str());
}
CHECK_CALL(XGBoosterEvalOneIter(R_ExternalPtrAddr(handle),
asInteger(iter),
BeginPtr(vec_dmats),
BeginPtr(vec_sptr),
len, &ret));
R_API_END();
return mkString(ret);
}
SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask, SEXP ntree_limit) {
SEXP ret;
R_API_BEGIN();
bst_ulong olen;
const float *res;
CHECK_CALL(XGBoosterPredict(R_ExternalPtrAddr(handle),
R_ExternalPtrAddr(dmat),
asInteger(option_mask),
asInteger(ntree_limit),
&olen, &res));
ret = PROTECT(allocVector(REALSXP, olen));
for (size_t i = 0; i < olen; ++i) {
REAL(ret)[i] = res[i];
}
UNPROTECT(1);
R_API_END();
return ret;
}
SEXP XGBoosterLoadModel_R(SEXP handle, SEXP fname) {
R_API_BEGIN();
CHECK_CALL(XGBoosterLoadModel(R_ExternalPtrAddr(handle), CHAR(asChar(fname))));
R_API_END();
return R_NilValue;
}
SEXP XGBoosterSaveModel_R(SEXP handle, SEXP fname) {
R_API_BEGIN();
CHECK_CALL(XGBoosterSaveModel(R_ExternalPtrAddr(handle), CHAR(asChar(fname))));
R_API_END();
return R_NilValue;
}
SEXP XGBoosterLoadModelFromRaw_R(SEXP handle, SEXP raw) {
R_API_BEGIN();
CHECK_CALL(XGBoosterLoadModelFromBuffer(R_ExternalPtrAddr(handle),
RAW(raw),
length(raw)));
R_API_END();
return R_NilValue;
}
SEXP XGBoosterModelToRaw_R(SEXP handle) {
SEXP ret;
R_API_BEGIN();
bst_ulong olen;
const char *raw;
CHECK_CALL(XGBoosterGetModelRaw(R_ExternalPtrAddr(handle), &olen, &raw));
ret = PROTECT(allocVector(RAWSXP, olen));
if (olen != 0) {
memcpy(RAW(ret), raw, olen);
}
UNPROTECT(1);
R_API_END();
return ret;
}
SEXP XGBoosterDumpModel_R(SEXP handle, SEXP fmap, SEXP with_stats) {
SEXP out;
R_API_BEGIN();
bst_ulong olen;
const char **res;
CHECK_CALL(XGBoosterDumpModel(R_ExternalPtrAddr(handle),
CHAR(asChar(fmap)),
asInteger(with_stats),
&olen, &res));
out = PROTECT(allocVector(STRSXP, olen));
for (size_t i = 0; i < olen; ++i) {
std::stringstream stream;
stream << "booster[" << i <<"]\n" << res[i];
SET_STRING_ELT(out, i, mkChar(stream.str().c_str()));
}
UNPROTECT(1);
R_API_END();
return out;
}
SEXP XGBoosterGetAttr_R(SEXP handle, SEXP name) {
SEXP out;
R_API_BEGIN();
int success;
const char *val;
CHECK_CALL(XGBoosterGetAttr(R_ExternalPtrAddr(handle),
CHAR(asChar(name)),
&val,
&success));
if (success) {
out = PROTECT(allocVector(STRSXP, 1));
SET_STRING_ELT(out, 0, mkChar(val));
} else {
out = PROTECT(R_NilValue);
}
UNPROTECT(1);
R_API_END();
return out;
}
SEXP XGBoosterSetAttr_R(SEXP handle, SEXP name, SEXP val) {
R_API_BEGIN();
const char *v = isNull(val) ? nullptr : CHAR(asChar(val));
CHECK_CALL(XGBoosterSetAttr(R_ExternalPtrAddr(handle),
CHAR(asChar(name)), v));
R_API_END();
return R_NilValue;
}
SEXP XGBoosterGetAttrNames_R(SEXP handle) {
SEXP out;
R_API_BEGIN();
bst_ulong len;
const char **res;
CHECK_CALL(XGBoosterGetAttrNames(R_ExternalPtrAddr(handle),
&len, &res));
if (len > 0) {
out = PROTECT(allocVector(STRSXP, len));
for (size_t i = 0; i < len; ++i) {
SET_STRING_ELT(out, i, mkChar(res[i]));
}
} else {
out = PROTECT(R_NilValue);
}
UNPROTECT(1);
R_API_END();
return out;
}


@@ -1,212 +0,0 @@
/*!
* Copyright 2014 (c) by Contributors
* \file xgboost_wrapper_R.h
* \author Tianqi Chen
* \brief R wrapper of xgboost
*/
#ifndef XGBOOST_R_H_ // NOLINT(*)
#define XGBOOST_R_H_ // NOLINT(*)
#include <Rinternals.h>
#include <R_ext/Random.h>
#include <Rmath.h>
#include <xgboost/c_api.h>
/*!
* \brief check whether a handle is NULL
* \param handle
* \return whether it is null ptr
*/
XGB_DLL SEXP XGCheckNullPtr_R(SEXP handle);
/*!
* \brief load a data matrix
* \param fname name of the file to load
* \param silent whether to print messages
* \return a loaded data matrix
*/
XGB_DLL SEXP XGDMatrixCreateFromFile_R(SEXP fname, SEXP silent);
/*!
* \brief create matrix content from dense matrix
* This assumes the matrix is stored in column major format
* \param mat R matrix object
* \param missing the value that represents a missing value
* \return created dmatrix
*/
XGB_DLL SEXP XGDMatrixCreateFromMat_R(SEXP mat,
SEXP missing);
/*!
* \brief create a matrix content from CSC format
* \param indptr pointer to column headers
* \param indices row indices
* \param data content of the data
* \return created dmatrix
*/
XGB_DLL SEXP XGDMatrixCreateFromCSC_R(SEXP indptr,
SEXP indices,
SEXP data);
/*!
* \brief create a new dmatrix from sliced content of existing matrix
* \param handle instance of data matrix to be sliced
* \param idxset index set
* \return a sliced new matrix
*/
XGB_DLL SEXP XGDMatrixSliceDMatrix_R(SEXP handle, SEXP idxset);
/*!
* \brief save a data matrix into a binary file
* \param handle an instance of data matrix
* \param fname file name
* \param silent whether to print statistics when saving
* \return R_NilValue
*/
XGB_DLL SEXP XGDMatrixSaveBinary_R(SEXP handle, SEXP fname, SEXP silent);
/*!
* \brief set information to dmatrix
* \param handle an instance of data matrix
* \param field field name, can be label, weight
* \param array pointer to float vector
* \return R_NilValue
*/
XGB_DLL SEXP XGDMatrixSetInfo_R(SEXP handle, SEXP field, SEXP array);
/*!
* \brief get info vector from matrix
* \param handle an instance of data matrix
* \param field field name
* \return info vector
*/
XGB_DLL SEXP XGDMatrixGetInfo_R(SEXP handle, SEXP field);
/*!
* \brief return number of rows
* \param handle an instance of data matrix
*/
XGB_DLL SEXP XGDMatrixNumRow_R(SEXP handle);
/*!
* \brief return number of columns
* \param handle an instance of data matrix
*/
XGB_DLL SEXP XGDMatrixNumCol_R(SEXP handle);
/*!
* \brief create xgboost learner
* \param dmats a list of dmatrix handles that will be cached
*/
XGB_DLL SEXP XGBoosterCreate_R(SEXP dmats);
/*!
* \brief set parameters
* \param handle handle
* \param name parameter name
* \param val value of parameter
* \return R_NilValue
*/
XGB_DLL SEXP XGBoosterSetParam_R(SEXP handle, SEXP name, SEXP val);
/*!
* \brief update the model in one round using dtrain
* \param handle handle
* \param iter current iteration rounds
* \param dtrain training data
* \return R_NilValue
*/
XGB_DLL SEXP XGBoosterUpdateOneIter_R(SEXP ext, SEXP iter, SEXP dtrain);
/*!
* \brief update the model by directly specifying the gradient and second order gradient;
* this can be used in place of UpdateOneIter to support a customized loss function
* \param handle handle
* \param dtrain training data
* \param grad gradient statistics
* \param hess second order gradient statistics
* \return R_NilValue
*/
XGB_DLL SEXP XGBoosterBoostOneIter_R(SEXP handle, SEXP dtrain, SEXP grad, SEXP hess);
/*!
* \brief get evaluation statistics for xgboost
* \param handle handle
* \param iter current iteration rounds
* \param dmats list of handles to dmatrices
* \param evnames names of the evaluation data sets
* \return the string containing evaluation stats
*/
XGB_DLL SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evnames);
/*!
* \brief make prediction based on dmat
* \param handle handle
* \param dmat data matrix
* \param option_mask output_margin:1 predict_leaf:2
* \param ntree_limit limit number of trees used in prediction
*/
XGB_DLL SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask, SEXP ntree_limit);
/*!
* \brief load model from existing file
* \param handle handle
* \param fname file name
* \return R_NilValue
*/
XGB_DLL SEXP XGBoosterLoadModel_R(SEXP handle, SEXP fname);
/*!
* \brief save model into existing file
* \param handle handle
* \param fname file name
* \return R_NilValue
*/
XGB_DLL SEXP XGBoosterSaveModel_R(SEXP handle, SEXP fname);
/*!
* \brief load model from raw array
* \param handle handle
* \return R_NilValue
*/
XGB_DLL SEXP XGBoosterLoadModelFromRaw_R(SEXP handle, SEXP raw);
/*!
* \brief save model into R's raw array
* \param handle handle
* \return raw array
*/
XGB_DLL SEXP XGBoosterModelToRaw_R(SEXP handle);
/*!
* \brief dump model into a string
* \param handle handle
* \param fmap name of the feature map file; can be an empty string
* \param with_stats whether dump statistics of splits
*/
XGB_DLL SEXP XGBoosterDumpModel_R(SEXP handle, SEXP fmap, SEXP with_stats);
/*!
* \brief get learner attribute value
* \param handle handle
* \param name attribute name
* \return character containing attribute value
*/
XGB_DLL SEXP XGBoosterGetAttr_R(SEXP handle, SEXP name);
/*!
* \brief set learner attribute value
* \param handle handle
* \param name attribute name
* \param val attribute value; NULL value would delete an attribute
* \return R_NilValue
*/
XGB_DLL SEXP XGBoosterSetAttr_R(SEXP handle, SEXP name, SEXP val);
/*!
* \brief get the names of learner attributes
* \return string vector containing attribute names
*/
XGB_DLL SEXP XGBoosterGetAttrNames_R(SEXP handle);
#endif  // XGBOOST_R_H_ // NOLINT(*)
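These entry points are normally reached through the package's exported functions (xgb.DMatrix, xgb.train, and so on). Purely as an illustration of the interface declared above, a minimal sketch of a direct call, assuming the installed shared library exposes these symbols by name:
library(xgboost)
x <- matrix(c(1, 0, NA, 2, 3, 1), nrow = 2, byrow = TRUE)  # NA marks a missing value
h <- .Call("XGDMatrixCreateFromMat_R", x, NA_real_, PACKAGE = "xgboost")
.Call("XGDMatrixNumRow_R", h, PACKAGE = "xgboost")  # 2
.Call("XGDMatrixNumCol_R", h, PACKAGE = "xgboost")  # 3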


@@ -1,26 +0,0 @@
// Copyright (c) 2014 by Contributors
#include <stdio.h>
#include <stdarg.h>
#include <Rinternals.h>
// implements error handling
void XGBoostAssert_R(int exp, const char *fmt, ...) {
char buf[1024];
if (exp == 0) {
va_list args;
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
error("AssertError:%s\n", buf);
}
}
void XGBoostCheck_R(int exp, const char *fmt, ...) {
char buf[1024];
if (exp == 0) {
va_list args;
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
error("%s\n", buf);
}
}


@@ -1,65 +0,0 @@
// Copyright (c) 2015 by Contributors
// This file contains the customization implementations of R module
// to change behavior of libxgboost
#include <xgboost/logging.h>
#include "src/common/random.h"
#include "./xgboost_R.h"
// redirect the messages to R's console.
namespace dmlc {
void CustomLogMessage::Log(const std::string& msg) {
Rprintf("%s\n", msg.c_str());
}
} // namespace dmlc
// implements rabit error handling.
extern "C" {
void XGBoostAssert_R(int exp, const char *fmt, ...);
void XGBoostCheck_R(int exp, const char *fmt, ...);
}
namespace rabit {
namespace utils {
extern "C" {
void (*Printf)(const char *fmt, ...) = Rprintf;
void (*Assert)(int exp, const char *fmt, ...) = XGBoostAssert_R;
void (*Check)(int exp, const char *fmt, ...) = XGBoostCheck_R;
void (*Error)(const char *fmt, ...) = error;
}
}
}
namespace xgboost {
ConsoleLogger::~ConsoleLogger() {
dmlc::CustomLogMessage::Log(log_stream_.str());
}
TrackerLogger::~TrackerLogger() {
dmlc::CustomLogMessage::Log(log_stream_.str());
}
} // namespace xgboost
namespace xgboost {
namespace common {
// redirect the math functions.
bool CheckNAN(double v) {
return ISNAN(v);
}
double LogGamma(double v) {
return lgammafn(v);
}
// customize random engine.
void CustomGlobalRandomEngine::seed(CustomGlobalRandomEngine::result_type val) {
// ignore the seed
}
// use R's PRNG in place of the default random engine
CustomGlobalRandomEngine::result_type
CustomGlobalRandomEngine::operator()() {
return static_cast<result_type>(
std::floor(unif_rand() * CustomGlobalRandomEngine::max()));
}
} // namespace common
} // namespace xgboost
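Since the custom engine above draws from R's PRNG via unif_rand() and ignores its own seed, randomness in the R package is governed by set.seed(). A small reproducibility sketch (assuming the agaricus demo data shipped with the package, and that training is otherwise deterministic for a fixed seed):
library(xgboost)
data(agaricus.train, package = "xgboost")
set.seed(11)
b1 <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
              nrounds = 2, subsample = 0.5, objective = "binary:logistic", verbose = 0)
set.seed(11)
b2 <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
              nrounds = 2, subsample = 0.5, objective = "binary:logistic", verbose = 0)
identical(b1$raw, b2$raw)  # expected TRUE: both runs see the same R PRNG state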


@@ -1,4 +0,0 @@
library(testthat)
library(xgboost)
test_check("xgboost")


@@ -1,182 +0,0 @@
require(xgboost)
context("basic functions")
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
set.seed(1994)
test_that("train and predict binary classification", {
nrounds = 2
expect_output(
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = nrounds, objective = "binary:logistic")
, "train-error")
expect_equal(class(bst), "xgb.Booster")
expect_equal(bst$niter, nrounds)
expect_false(is.null(bst$evaluation_log))
expect_equal(nrow(bst$evaluation_log), nrounds)
expect_lt(bst$evaluation_log[, min(train_error)], 0.03)
pred <- predict(bst, test$data)
expect_length(pred, 1611)
pred1 <- predict(bst, train$data, ntreelimit = 1)
expect_length(pred1, 6513)
err_pred1 <- sum((pred1 > 0.5) != train$label)/length(train$label)
err_log <- bst$evaluation_log[1, train_error]
expect_lt(abs(err_pred1 - err_log), 10e-6)
})
test_that("train and predict softprob", {
lb <- as.numeric(iris$Species) - 1
set.seed(11)
expect_output(
bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
max_depth = 3, eta = 0.5, nthread = 2, nrounds = 5,
objective = "multi:softprob", num_class=3)
, "train-merror")
expect_false(is.null(bst$evaluation_log))
expect_lt(bst$evaluation_log[, min(train_merror)], 0.025)
expect_equal(bst$niter * 3, xgb.ntree(bst))
pred <- predict(bst, as.matrix(iris[, -5]))
expect_length(pred, nrow(iris) * 3)
# row sums add up to total probability of 1:
expect_equal(rowSums(matrix(pred, ncol=3, byrow=TRUE)), rep(1, nrow(iris)), tolerance = 1e-7)
# manually calculate error at the last iteration:
mpred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE)
expect_equal(as.numeric(t(mpred)), pred)
pred_labels <- max.col(mpred) - 1
err <- sum(pred_labels != lb)/length(lb)
expect_equal(bst$evaluation_log[5, train_merror], err, tolerance = 5e-6)
# manually calculate error at the 1st iteration:
mpred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE, ntreelimit = 1)
pred_labels <- max.col(mpred) - 1
err <- sum(pred_labels != lb)/length(lb)
expect_equal(bst$evaluation_log[1, train_merror], err, tolerance = 5e-6)
})
test_that("train and predict softmax", {
lb <- as.numeric(iris$Species) - 1
set.seed(11)
expect_output(
bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
max_depth = 3, eta = 0.5, nthread = 2, nrounds = 5,
objective = "multi:softmax", num_class=3)
, "train-merror")
expect_false(is.null(bst$evaluation_log))
expect_lt(bst$evaluation_log[, min(train_merror)], 0.025)
expect_equal(bst$niter * 3, xgb.ntree(bst))
pred <- predict(bst, as.matrix(iris[, -5]))
expect_length(pred, nrow(iris))
err <- sum(pred != lb)/length(lb)
expect_equal(bst$evaluation_log[5, train_merror], err, tolerance = 5e-6)
})
test_that("train and predict RF", {
set.seed(11)
lb <- train$label
# single iteration
bst <- xgboost(data = train$data, label = lb, max_depth = 5,
nthread = 2, nrounds = 1, objective = "binary:logistic",
num_parallel_tree = 20, subsample = 0.6, colsample_bytree = 0.1)
expect_equal(bst$niter, 1)
expect_equal(xgb.ntree(bst), 20)
pred <- predict(bst, train$data)
pred_err <- sum((pred > 0.5) != lb)/length(lb)
expect_lt(abs(bst$evaluation_log[1, train_error] - pred_err), 10e-6)
#expect_lt(pred_err, 0.03)
pred <- predict(bst, train$data, ntreelimit = 20)
pred_err_20 <- sum((pred > 0.5) != lb)/length(lb)
expect_equal(pred_err_20, pred_err)
#pred <- predict(bst, train$data, ntreelimit = 1)
#pred_err_1 <- sum((pred > 0.5) != lb)/length(lb)
#expect_lt(pred_err, pred_err_1)
#expect_lt(pred_err, 0.08)
})
test_that("train and predict RF with softprob", {
lb <- as.numeric(iris$Species) - 1
nrounds <- 15
set.seed(11)
bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
max_depth = 3, eta = 0.9, nthread = 2, nrounds = nrounds,
objective = "multi:softprob", num_class=3,
num_parallel_tree = 4, subsample = 0.5, colsample_bytree = 0.5)
expect_equal(bst$niter, 15)
expect_equal(xgb.ntree(bst), 15*3*4)
# predict for all iterations:
pred <- predict(bst, as.matrix(iris[, -5]), reshape=TRUE)
expect_equal(dim(pred), c(nrow(iris), 3))
pred_labels <- max.col(pred) - 1
err <- sum(pred_labels != lb)/length(lb)
expect_equal(bst$evaluation_log[nrounds, train_merror], err, tolerance = 5e-6)
# predict for 7 iterations and adjust for 4 parallel trees per iteration
pred <- predict(bst, as.matrix(iris[, -5]), reshape=TRUE, ntreelimit = 7 * 4)
err <- sum((max.col(pred) - 1) != lb)/length(lb)
expect_equal(bst$evaluation_log[7, train_merror], err, tolerance = 5e-6)
})
test_that("use of multiple eval metrics works", {
expect_output(
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic",
eval_metric = 'error', eval_metric = 'auc', eval_metric = "logloss")
, "train-error.*train-auc.*train-logloss")
expect_false(is.null(bst$evaluation_log))
expect_equal(dim(bst$evaluation_log), c(2, 4))
expect_equal(colnames(bst$evaluation_log), c("iter", "train_error", "train_auc", "train_logloss"))
})
test_that("training continuation works", {
dtrain <- xgb.DMatrix(train$data, label = train$label)
watchlist = list(train=dtrain)
param <- list(objective = "binary:logistic", max_depth = 2, eta = 1, nthread = 2)
# for the reference, use 4 iterations at once:
set.seed(11)
bst <- xgb.train(param, dtrain, nrounds = 4, watchlist)
# first two iterations:
set.seed(11)
bst1 <- xgb.train(param, dtrain, nrounds = 2, watchlist)
# continue for two more:
bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, xgb_model = bst1)
expect_equal(bst$raw, bst2$raw)
expect_false(is.null(bst2$evaluation_log))
expect_equal(dim(bst2$evaluation_log), c(4, 2))
expect_equal(bst2$evaluation_log, bst$evaluation_log)
# test continuing from raw model data
bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, xgb_model = bst1$raw)
expect_equal(bst$raw, bst2$raw)
expect_equal(dim(bst2$evaluation_log), c(2, 2))
# test continuing from a model in file
xgb.save(bst1, "xgboost.model")
bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, xgb_model = "xgboost.model")
expect_equal(bst$raw, bst2$raw)
expect_equal(dim(bst2$evaluation_log), c(2, 2))
})
test_that("xgb.cv works", {
set.seed(11)
cv <- xgb.cv(data = train$data, label = train$label, max_depth = 2, nfold = 5,
eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic",
verbose=TRUE)
expect_is(cv, 'xgb.cv.synchronous')
expect_false(is.null(cv$evaluation_log))
expect_lt(cv$evaluation_log[, min(test_error_mean)], 0.03)
expect_lt(cv$evaluation_log[, min(test_error_std)], 0.004)
expect_equal(cv$niter, 2)
expect_false(is.null(cv$folds) && is.list(cv$folds))
expect_length(cv$folds, 5)
expect_false(is.null(cv$params) && is.list(cv$params))
expect_false(is.null(cv$callbacks))
expect_false(is.null(cv$call))
})


@@ -1,295 +0,0 @@
# More specific testing of callbacks
require(xgboost)
require(data.table)
context("callbacks")
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
# add some label noise for early stopping tests
add.noise <- function(label, frac) {
inoise <- sample(length(label), length(label) * frac)
label[inoise] <- !label[inoise]
label
}
set.seed(11)
ltrain <- add.noise(train$label, 0.2)
ltest <- add.noise(test$label, 0.2)
dtrain <- xgb.DMatrix(train$data, label = ltrain)
dtest <- xgb.DMatrix(test$data, label = ltest)
watchlist = list(train=dtrain, test=dtest)
err <- function(label, pr) sum((pr > 0.5) != label)/length(label)
param <- list(objective = "binary:logistic", max_depth = 2, nthread = 2)
test_that("cb.print.evaluation works as expected", {
bst_evaluation <- c('train-auc'=0.9, 'test-auc'=0.8)
bst_evaluation_err <- NULL
begin_iteration <- 1
end_iteration <- 7
f0 <- cb.print.evaluation(period=0)
f1 <- cb.print.evaluation(period=1)
f5 <- cb.print.evaluation(period=5)
expect_false(is.null(attr(f1, 'call')))
expect_equal(attr(f1, 'name'), 'cb.print.evaluation')
iteration <- 1
expect_silent(f0())
expect_output(f1(), "\\[1\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
expect_output(f5(), "\\[1\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
expect_null(f1())
iteration <- 2
expect_output(f1(), "\\[2\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
expect_silent(f5())
iteration <- 7
expect_output(f1(), "\\[7\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
expect_output(f5(), "\\[7\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
bst_evaluation_err <- c('train-auc'=0.1, 'test-auc'=0.2)
expect_output(f1(), "\\[7\\]\ttrain-auc:0.900000\\+0.100000\ttest-auc:0.800000\\+0.200000")
})
test_that("cb.evaluation.log works as expected", {
bst_evaluation <- c('train-auc'=0.9, 'test-auc'=0.8)
bst_evaluation_err <- NULL
evaluation_log <- list()
f <- cb.evaluation.log()
expect_false(is.null(attr(f, 'call')))
expect_equal(attr(f, 'name'), 'cb.evaluation.log')
iteration <- 1
expect_silent(f())
expect_equal(evaluation_log,
list(c(iter=1, bst_evaluation)))
iteration <- 2
expect_silent(f())
expect_equal(evaluation_log,
list(c(iter=1, bst_evaluation), c(iter=2, bst_evaluation)))
expect_silent(f(finalize = TRUE))
expect_equal(evaluation_log,
data.table(iter=1:2, train_auc=c(0.9,0.9), test_auc=c(0.8,0.8)))
bst_evaluation_err <- c('train-auc'=0.1, 'test-auc'=0.2)
evaluation_log <- list()
f <- cb.evaluation.log()
iteration <- 1
expect_silent(f())
expect_equal(evaluation_log,
list(c(iter=1, c(bst_evaluation, bst_evaluation_err))))
iteration <- 2
expect_silent(f())
expect_equal(evaluation_log,
list(c(iter=1, c(bst_evaluation, bst_evaluation_err)),
c(iter=2, c(bst_evaluation, bst_evaluation_err))))
expect_silent(f(finalize = TRUE))
expect_equal(evaluation_log,
data.table(iter=1:2,
train_auc_mean=c(0.9,0.9), train_auc_std=c(0.1,0.1),
test_auc_mean=c(0.8,0.8), test_auc_std=c(0.2,0.2)))
})
param <- list(objective = "binary:logistic", max_depth = 4, nthread = 2)
test_that("cb.reset.parameters works as expected", {
# fixed eta
set.seed(111)
bst0 <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 0.9)
expect_false(is.null(bst0$evaluation_log))
expect_false(is.null(bst0$evaluation_log$train_error))
# same eta but re-set as a vector parameter in the callback
set.seed(111)
my_par <- list(eta = c(0.9, 0.9))
bst1 <- xgb.train(param, dtrain, nrounds = 2, watchlist,
callbacks = list(cb.reset.parameters(my_par)))
expect_false(is.null(bst1$evaluation_log$train_error))
expect_equal(bst0$evaluation_log$train_error,
bst1$evaluation_log$train_error)
# same eta but re-set via a function in the callback
set.seed(111)
my_par <- list(eta = function(itr, itr_end) 0.9)
bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist,
callbacks = list(cb.reset.parameters(my_par)))
expect_false(is.null(bst2$evaluation_log$train_error))
expect_equal(bst0$evaluation_log$train_error,
bst2$evaluation_log$train_error)
# different eta re-set as a vector parameter in the callback
set.seed(111)
my_par <- list(eta = c(0.6, 0.5))
bst3 <- xgb.train(param, dtrain, nrounds = 2, watchlist,
callbacks = list(cb.reset.parameters(my_par)))
expect_false(is.null(bst3$evaluation_log$train_error))
expect_false(all(bst0$evaluation_log$train_error == bst3$evaluation_log$train_error))
# resetting multiple parameters at the same time runs with no error
my_par <- list(eta = c(1., 0.5), gamma = c(1, 2), max_depth = c(4, 8))
expect_error(
bst4 <- xgb.train(param, dtrain, nrounds = 2, watchlist,
callbacks = list(cb.reset.parameters(my_par)))
, NA) # NA = no error
# expect no learning with 0 learning rate
my_par <- list(eta = c(0., 0.))
bstX <- xgb.train(param, dtrain, nrounds = 2, watchlist,
callbacks = list(cb.reset.parameters(my_par)))
expect_false(is.null(bstX$evaluation_log$train_error))
er <- unique(bstX$evaluation_log$train_error)
expect_length(er, 1)
expect_gt(er, 0.4)
})
test_that("cb.save.model works as expected", {
files <- c('xgboost_01.model', 'xgboost_02.model', 'xgboost.model')
for (f in files) if (file.exists(f)) file.remove(f)
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 1,
save_period = 1, save_name = "xgboost_%02d.model")
expect_true(file.exists('xgboost_01.model'))
expect_true(file.exists('xgboost_02.model'))
b1 <- xgb.load('xgboost_01.model')
expect_equal(xgb.ntree(b1), 1)
b2 <- xgb.load('xgboost_02.model')
expect_equal(xgb.ntree(b2), 2)
expect_equal(bst$raw, b2$raw)
# save_period = 0 saves the last iteration's model
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 1, save_period = 0)
expect_true(file.exists('xgboost.model'))
b2 <- xgb.load('xgboost.model')
expect_equal(bst$raw, b2$raw)
for (f in files) if (file.exists(f)) file.remove(f)
})
test_that("can store evaluation_log without printing", {
expect_silent(
bst <- xgb.train(param, dtrain, nrounds = 10, watchlist, eta = 1,
verbose = 0, callbacks = list(cb.evaluation.log()))
)
expect_false(is.null(bst$evaluation_log))
expect_false(is.null(bst$evaluation_log$train_error))
expect_lt(bst$evaluation_log[, min(train_error)], 0.2)
})
test_that("early stopping xgb.train works", {
set.seed(11)
expect_output(
bst <- xgb.train(param, dtrain, nrounds = 20, watchlist, eta = 0.3,
early_stopping_rounds = 3, maximize = FALSE)
, "Stopping. Best iteration")
expect_false(is.null(bst$best_iteration))
expect_lt(bst$best_iteration, 19)
expect_equal(bst$best_iteration, bst$best_ntreelimit)
pred <- predict(bst, dtest)
expect_equal(length(pred), 1611)
err_pred <- err(ltest, pred)
err_log <- bst$evaluation_log[bst$best_iteration, test_error]
expect_equal(err_log, err_pred, tolerance = 5e-6)
})
test_that("early stopping using a specific metric works", {
set.seed(11)
expect_output(
bst <- xgb.train(param, dtrain, nrounds = 20, watchlist, eta = 0.6,
eval_metric="logloss", eval_metric="auc",
callbacks = list(cb.early.stop(stopping_rounds = 3, maximize = FALSE,
metric_name = 'test_logloss')))
, "Stopping. Best iteration")
expect_false(is.null(bst$best_iteration))
expect_lt(bst$best_iteration, 19)
expect_equal(bst$best_iteration, bst$best_ntreelimit)
pred <- predict(bst, dtest, ntreelimit = bst$best_ntreelimit)
expect_equal(length(pred), 1611)
logloss_pred <- sum(-ltest * log(pred) - (1 - ltest) * log(1 - pred)) / length(ltest)
logloss_log <- bst$evaluation_log[bst$best_iteration, test_logloss]
expect_equal(logloss_log, logloss_pred, tolerance = 5e-6)
})
test_that("early stopping xgb.cv works", {
set.seed(11)
expect_output(
cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.3, nrounds = 20,
early_stopping_rounds = 3, maximize = FALSE)
, "Stopping. Best iteration")
expect_false(is.null(cv$best_iteration))
expect_lt(cv$best_iteration, 19)
expect_equal(cv$best_iteration, cv$best_ntreelimit)
# the best error is min error:
expect_true(cv$evaluation_log[, test_error_mean[cv$best_iteration] == min(test_error_mean)])
})
test_that("prediction in xgb.cv works", {
set.seed(11)
nrounds = 4
cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.5, nrounds = nrounds, prediction = TRUE)
expect_false(is.null(cv$evaluation_log))
expect_false(is.null(cv$pred))
expect_length(cv$pred, nrow(train$data))
err_pred <- mean( sapply(cv$folds, function(f) mean(err(ltrain[f], cv$pred[f]))) )
err_log <- cv$evaluation_log[nrounds, test_error_mean]
expect_equal(err_pred, err_log, tolerance = 1e-6)
# save CV models
set.seed(11)
cvx <- xgb.cv(param, dtrain, nfold = 5, eta = 0.5, nrounds = nrounds, prediction = TRUE,
callbacks = list(cb.cv.predict(save_models = TRUE)))
expect_equal(cv$evaluation_log, cvx$evaluation_log)
expect_length(cvx$models, 5)
expect_true(all(sapply(cvx$models, class) == 'xgb.Booster'))
})
test_that("prediction in early-stopping xgb.cv works", {
set.seed(1)
expect_output(
cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.1, nrounds = 20,
early_stopping_rounds = 5, maximize = FALSE, prediction = TRUE)
, "Stopping. Best iteration")
expect_false(is.null(cv$best_iteration))
expect_lt(cv$best_iteration, 19)
expect_false(is.null(cv$evaluation_log))
expect_false(is.null(cv$pred))
expect_length(cv$pred, nrow(train$data))
err_pred <- mean( sapply(cv$folds, function(f) mean(err(ltrain[f], cv$pred[f]))) )
err_log <- cv$evaluation_log[cv$best_iteration, test_error_mean]
expect_equal(err_pred, err_log, tolerance = 1e-6)
err_log_last <- cv$evaluation_log[cv$niter, test_error_mean]
expect_gt(abs(err_pred - err_log_last), 1e-4)
})
test_that("prediction in xgb.cv for softprob works", {
lb <- as.numeric(iris$Species) - 1
set.seed(11)
expect_warning(
cv <- xgb.cv(data = as.matrix(iris[, -5]), label = lb, nfold = 4,
eta = 0.5, nrounds = 5, max_depth = 3, nthread = 2,
subsample = 0.8, gamma = 2,
prediction = TRUE, objective = "multi:softprob", num_class = 3)
, NA)
expect_false(is.null(cv$pred))
expect_equal(dim(cv$pred), c(nrow(iris), 3))
expect_lt(diff(range(rowSums(cv$pred))), 1e-6)
})


@@ -1,62 +0,0 @@
context('Test models with custom objective')
require(xgboost)
set.seed(1994)
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
watchlist <- list(eval = dtest, train = dtrain)
logregobj <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
preds <- 1 / (1 + exp(-preds))
grad <- preds - labels
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
}
evalerror <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
err <- as.numeric(sum(labels != (preds > 0))) / length(labels)
return(list(metric = "error", value = err))
}
param <- list(max_depth=2, eta=1, nthread = 2,
objective=logregobj, eval_metric=evalerror)
num_round <- 2
test_that("custom objective works", {
bst <- xgb.train(param, dtrain, num_round, watchlist)
expect_equal(class(bst), "xgb.Booster")
expect_equal(length(bst$raw), 1094)
expect_false(is.null(bst$evaluation_log))
expect_false(is.null(bst$evaluation_log$eval_error))
expect_lt(bst$evaluation_log[num_round, eval_error], 0.03)
})
test_that("custom objective in CV works", {
cv <- xgb.cv(param, dtrain, num_round, nfold=10, verbose=FALSE)
expect_false(is.null(cv$evaluation_log))
expect_equal(dim(cv$evaluation_log), c(2, 5))
expect_lt(cv$evaluation_log[num_round, test_error_mean], 0.03)
})
test_that("custom objective using DMatrix attr works", {
attr(dtrain, 'label') <- getinfo(dtrain, 'label')
logregobjattr <- function(preds, dtrain) {
labels <- attr(dtrain, 'label')
preds <- 1 / (1 + exp(-preds))
grad <- preds - labels
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
}
param$objective = logregobjattr
bst <- xgb.train(param, dtrain, num_round, watchlist)
expect_equal(class(bst), "xgb.Booster")
expect_equal(length(bst$raw), 1094)
})


@@ -1,67 +0,0 @@
require(xgboost)
context("testing xgb.DMatrix functionality")
data(agaricus.test, package='xgboost')
test_data <- agaricus.test$data[1:100,]
test_label <- agaricus.test$label[1:100]
test_that("xgb.DMatrix: basic construction, saving, loading", {
# from sparse matrix
dtest1 <- xgb.DMatrix(test_data, label=test_label)
# from dense matrix
dtest2 <- xgb.DMatrix(as.matrix(test_data), label=test_label)
expect_equal(getinfo(dtest1, 'label'), getinfo(dtest2, 'label'))
# save to a local file
tmp_file <- tempfile('xgb.DMatrix_')
expect_true(xgb.DMatrix.save(dtest1, tmp_file))
# read from a local file
dtest3 <- xgb.DMatrix(tmp_file)
unlink(tmp_file)
expect_equal(getinfo(dtest1, 'label'), getinfo(dtest3, 'label'))
})
test_that("xgb.DMatrix: getinfo & setinfo", {
dtest <- xgb.DMatrix(test_data)
expect_true(setinfo(dtest, 'label', test_label))
labels <- getinfo(dtest, 'label')
expect_equal(test_label, getinfo(dtest, 'label'))
expect_true(length(getinfo(dtest, 'weight')) == 0)
expect_true(length(getinfo(dtest, 'base_margin')) == 0)
expect_true(setinfo(dtest, 'weight', test_label))
expect_true(setinfo(dtest, 'base_margin', test_label))
expect_true(setinfo(dtest, 'group', c(50,50)))
expect_error(setinfo(dtest, 'group', test_label))
# providing character values will give a warning
expect_warning( setinfo(dtest, 'weight', rep('a', nrow(test_data))) )
# any other label should error
expect_error(setinfo(dtest, 'asdf', test_label))
})
test_that("xgb.DMatrix: slice, dim", {
dtest <- xgb.DMatrix(test_data, label=test_label)
expect_equal(dim(dtest), dim(test_data))
dsub1 <- slice(dtest, 1:42)
expect_equal(nrow(dsub1), 42)
expect_equal(ncol(dsub1), ncol(test_data))
dsub2 <- dtest[1:42,]
expect_equal(dim(dtest), dim(test_data))
expect_equal(getinfo(dsub1, 'label'), getinfo(dsub2, 'label'))
})
test_that("xgb.DMatrix: colnames", {
dtest <- xgb.DMatrix(test_data, label=test_label)
expect_equal(colnames(dtest), colnames(test_data))
expect_error( colnames(dtest) <- 'asdf')
new_names <- make.names(1:ncol(test_data))
expect_silent( colnames(dtest) <- new_names)
expect_equal(colnames(dtest), new_names)
expect_silent(colnames(dtest) <- NULL)
expect_null(colnames(dtest))
})


@@ -1,19 +0,0 @@
context('Test generalized linear models')
require(xgboost)
test_that("glm works", {
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
expect_equal(class(dtrain), "xgb.DMatrix")
expect_equal(class(dtest), "xgb.DMatrix")
param <- list(objective = "binary:logistic", booster = "gblinear",
nthread = 2, alpha = 0.0001, lambda = 1)
watchlist <- list(eval = dtest, train = dtrain)
num_round <- 2
bst <- xgb.train(param, dtrain, num_round, watchlist)
ypred <- predict(bst, dtest)
expect_equal(length(getinfo(dtest, 'label')), 1611)
})


@@ -1,136 +0,0 @@
context('Test helper functions')
require(xgboost)
require(data.table)
require(Matrix)
require(vcd)
set.seed(1982)
data(Arthritis)
df <- data.table(Arthritis, keep.rownames = F)
df[,AgeDiscret := as.factor(round(Age / 10,0))]
df[,AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
df[,ID := NULL]
sparse_matrix <- sparse.model.matrix(Improved~.-1, data = df)
label <- df[, ifelse(Improved == "Marked", 1, 0)]
bst.Tree <- xgboost(data = sparse_matrix, label = label, max_depth = 9,
eta = 1, nthread = 2, nrounds = 10, objective = "binary:logistic", booster = "gbtree")
bst.GLM <- xgboost(data = sparse_matrix, label = label,
eta = 1, nthread = 2, nrounds = 10, objective = "binary:logistic", booster = "gblinear")
feature.names <- colnames(sparse_matrix)
test_that("xgb.dump works", {
expect_length(xgb.dump(bst.Tree), 172)
expect_true(xgb.dump(bst.Tree, 'xgb.model.dump', with_stats = T))
expect_true(file.exists('xgb.model.dump'))
expect_gt(file.size('xgb.model.dump'), 8000)
})
test_that("xgb.dump works for gblinear", {
expect_length(xgb.dump(bst.GLM), 14)
# also make sure that it works properly for a sparse model where some coefficients
# are 0 from setting large L1 regularization:
bst.GLM.sp <- xgboost(data = sparse_matrix, label = label, eta = 1, nthread = 2, nrounds = 1,
alpha=2, objective = "binary:logistic", booster = "gblinear")
d.sp <- xgb.dump(bst.GLM.sp)
expect_length(d.sp, 14)
expect_gt(sum(d.sp == "0"), 0)
})
test_that("xgb-attribute functionality", {
val <- "my attribute value"
list.val <- list(my_attr=val, a=123, b='ok')
list.ch <- list.val[order(names(list.val))]
list.ch <- lapply(list.ch, as.character)
# note: iter is 0-index in xgb attributes
list.default <- list(niter = "9")
list.ch <- c(list.ch, list.default)
# proper input:
expect_error(xgb.attr(bst.Tree, NULL))
expect_error(xgb.attr(val, val))
# set & get:
expect_null(xgb.attr(bst.Tree, "asdf"))
expect_equal(xgb.attributes(bst.Tree), list.default)
xgb.attr(bst.Tree, "my_attr") <- val
expect_equal(xgb.attr(bst.Tree, "my_attr"), val)
xgb.attributes(bst.Tree) <- list.val
expect_equal(xgb.attributes(bst.Tree), list.ch)
# serializing:
xgb.save(bst.Tree, 'xgb.model')
bst <- xgb.load('xgb.model')
expect_equal(xgb.attr(bst, "my_attr"), val)
expect_equal(xgb.attributes(bst), list.ch)
# deletion:
xgb.attr(bst, "my_attr") <- NULL
expect_null(xgb.attr(bst, "my_attr"))
expect_equal(xgb.attributes(bst), list.ch[c("a", "b", "niter")])
xgb.attributes(bst) <- list(a=NULL, b=NULL)
expect_equal(xgb.attributes(bst), list.default)
xgb.attributes(bst) <- list(niter=NULL)
expect_null(xgb.attributes(bst))
})
test_that("xgb.model.dt.tree works with and without feature names", {
names.dt.trees <- c("Tree", "Node", "ID", "Feature", "Split", "Yes", "No", "Missing", "Quality", "Cover")
dt.tree <- xgb.model.dt.tree(feature_names = feature.names, model = bst.Tree)
expect_equal(names.dt.trees, names(dt.tree))
expect_equal(dim(dt.tree), c(162, 10))
expect_output(str(xgb.model.dt.tree(model = bst.Tree)), 'Feature.*\\"3\\"')
})
test_that("xgb.importance works with and without feature names", {
importance.Tree <- xgb.importance(feature_names = feature.names, model = bst.Tree)
expect_equal(dim(importance.Tree), c(7, 4))
expect_equal(colnames(importance.Tree), c("Feature", "Gain", "Cover", "Frequency"))
expect_output(str(xgb.importance(model = bst.Tree)), 'Feature.*\\"3\\"')
imp2plot <- xgb.plot.importance(importance_matrix = importance.Tree)
expect_equal(colnames(imp2plot), c("Feature", "Gain", "Cover", "Frequency", "Importance"))
xgb.ggplot.importance(importance_matrix = importance.Tree)
})
test_that("xgb.importance works with GLM model", {
importance.GLM <- xgb.importance(feature_names = feature.names, model = bst.GLM)
expect_equal(dim(importance.GLM), c(10, 2))
expect_equal(colnames(importance.GLM), c("Feature", "Weight"))
xgb.importance(model = bst.GLM)
imp2plot <- xgb.plot.importance(importance.GLM)
expect_equal(colnames(imp2plot), c("Feature", "Weight", "Importance"))
xgb.ggplot.importance(importance.GLM)
})
test_that("xgb.plot.tree works with and without feature names", {
xgb.plot.tree(feature_names = feature.names, model = bst.Tree)
xgb.plot.tree(model = bst.Tree)
})
test_that("xgb.plot.multi.trees works with and without feature names", {
xgb.plot.multi.trees(model = bst.Tree, feature_names = feature.names, features_keep = 3)
xgb.plot.multi.trees(model = bst.Tree, features_keep = 3)
})
test_that("xgb.plot.deepness works", {
d2p <- xgb.plot.deepness(model = bst.Tree)
expect_equal(colnames(d2p), c("ID", "Tree", "Depth", "Cover", "Weight"))
xgb.plot.deepness(model = bst.Tree, which = "med.depth")
xgb.ggplot.deepness(model = bst.Tree)
})
test_that("check.deprecation works", {
ttt <- function(a = NULL, DUMMY=NULL, ...) {
check.deprecation(...)
as.list((environment()))
}
res <- ttt(a = 1, DUMMY = 2, z = 3)
expect_equal(res, list(a = 1, DUMMY = 2))
expect_warning(
res <- ttt(a = 1, dummy = 22, z = 3)
, "\'dummy\' is deprecated")
expect_equal(res, list(a = 1, DUMMY = 22))
expect_warning(
res <- ttt(a = 1, dumm = 22, z = 3)
, "\'dumm\' was partially matched to \'dummy\'")
expect_equal(res, list(a = 1, DUMMY = 22))
})


@@ -1,27 +0,0 @@
context("Code is of high quality and lint free")
test_that("Code Lint", {
skip_on_cran()
skip_on_travis()
skip_if_not_installed("lintr")
my_linters <- list(
absolute_paths_linter=lintr::absolute_paths_linter,
assignment_linter=lintr::assignment_linter,
closed_curly_linter=lintr::closed_curly_linter,
commas_linter=lintr::commas_linter,
# commented_code_linter=lintr::commented_code_linter,
infix_spaces_linter=lintr::infix_spaces_linter,
line_length_linter=lintr::line_length_linter,
no_tab_linter=lintr::no_tab_linter,
object_usage_linter=lintr::object_usage_linter,
# snake_case_linter=lintr::snake_case_linter,
# multiple_dots_linter=lintr::multiple_dots_linter,
object_length_linter=lintr::object_length_linter,
open_curly_linter=lintr::open_curly_linter,
# single_quotes_linter=lintr::single_quotes_linter,
spaces_inside_linter=lintr::spaces_inside_linter,
spaces_left_parentheses_linter=lintr::spaces_left_parentheses_linter,
trailing_blank_lines_linter=lintr::trailing_blank_lines_linter,
trailing_whitespace_linter=lintr::trailing_whitespace_linter
)
# lintr::expect_lint_free(linters=my_linters) # uncomment this if you want to check code quality
})


@@ -1,30 +0,0 @@
context('Test model params and call are exposed to R')
require(xgboost)
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
bst <- xgboost(data = dtrain,
max_depth = 2,
eta = 1,
nrounds = 10,
nthread = 1,
verbose = 0,
objective = "binary:logistic")
test_that("call is exposed to R", {
expect_false(is.null(bst$call))
expect_is(bst$call, "call")
})
test_that("params is exposed to R", {
model_params <- bst$params
expect_is(model_params, "list")
expect_equal(model_params$eta, 1)
expect_equal(model_params$max_depth, 2)
expect_equal(model_params$objective, "binary:logistic")
})

Some files were not shown because too many files have changed in this diff.