Compare commits
1125 Commits
(Commit listing: 1125 commits, from 9ecb7583e9 through 99467f3999. Only the SHA1 column of the Author/SHA1/Date table survived extraction; author, date, and message fields are empty.)
.clang-format
@@ -17,7 +17,7 @@ AllowShortEnumsOnASingleLine: true
 AllowShortBlocksOnASingleLine: Never
 AllowShortCaseLabelsOnASingleLine: false
 AllowShortFunctionsOnASingleLine: All
-AllowShortLambdasOnASingleLine: All
+AllowShortLambdasOnASingleLine: Inline
 AllowShortIfStatementsOnASingleLine: WithoutElse
 AllowShortLoopsOnASingleLine: true
 AlwaysBreakAfterDefinitionReturnType: None
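For context on the change above: `AllowShortLambdasOnASingleLine: Inline` collapses a short lambda onto one line only when it appears inline as a call argument, whereas the previous `All` also collapsed lambdas bound to variables. A minimal sketch (hypothetical code, not from the repository) of how the two settings differ:

    #include <algorithm>
    #include <vector>

    int main() {
      std::vector<int> v{3, 1, 2};
      // Bound to a variable: kept on one line under "All",
      // broken across lines under "Inline".
      auto square = [](int x) { return x * x; };
      // Passed inline as a call argument: kept on one line under both settings.
      std::sort(v.begin(), v.end(), [](int a, int b) { return a < b; });
      return square(v.front());
    }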
.clang-tidy
@@ -1,4 +1,4 @@
-Checks: 'modernize-*,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,-modernize-avoid-c-arrays,-modernize-use-trailing-return-type,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
+Checks: 'modernize-*,-modernize-use-nodiscard,-modernize-concat-nested-namespaces,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,-modernize-avoid-c-arrays,-modernize-use-trailing-return-type,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
 CheckOptions:
   - { key: readability-identifier-naming.ClassCase, value: CamelCase }
   - { key: readability-identifier-naming.StructCase, value: CamelCase }
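The updated `Checks` string additionally disables `modernize-use-nodiscard` and `modernize-concat-nested-namespaces`. As a rough sketch (hypothetical snippet, not repository code), these are the patterns the two checks would otherwise flag:

    #include <cstddef>

    // modernize-use-nodiscard would suggest marking this const getter
    // [[nodiscard]], since silently discarding its return value is
    // almost certainly a bug.
    class Matrix {
     public:
      std::size_t Rows() const { return rows_; }
     private:
      std::size_t rows_{0};
    };

    // modernize-concat-nested-namespaces would suggest the C++17 spelling
    // `namespace xgboost::detail { ... }` for this nested pair.
    namespace xgboost {
    namespace detail {
    inline int Dummy() { return 0; }
    }  // namespace detail
    }  // namespace xgboost

    int main() { return xgboost::detail::Dummy() + static_cast<int>(Matrix{}.Rows()); }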
.gitattributes (vendored; new file, 18 lines)
@@ -0,0 +1,18 @@
+* text=auto
+
+*.c text eol=lf
+*.h text eol=lf
+*.cc text eol=lf
+*.cuh text eol=lf
+*.cu text eol=lf
+*.py text eol=lf
+*.txt text eol=lf
+*.R text eol=lf
+*.scala text eol=lf
+*.java text eol=lf
+
+*.sh text eol=lf
+
+*.rst text eol=lf
+*.md text eol=lf
+*.csv text eol=lf
.github/dependabot.yml (vendored; new file, 35 lines)
@@ -0,0 +1,35 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
+
+version: 2
+updates:
+  - package-ecosystem: "maven"
+    directory: "/jvm-packages"
+    schedule:
+      interval: "monthly"
+  - package-ecosystem: "maven"
+    directory: "/jvm-packages/xgboost4j"
+    schedule:
+      interval: "monthly"
+  - package-ecosystem: "maven"
+    directory: "/jvm-packages/xgboost4j-gpu"
+    schedule:
+      interval: "monthly"
+  - package-ecosystem: "maven"
+    directory: "/jvm-packages/xgboost4j-example"
+    schedule:
+      interval: "monthly"
+  - package-ecosystem: "maven"
+    directory: "/jvm-packages/xgboost4j-spark"
+    schedule:
+      interval: "monthly"
+  - package-ecosystem: "maven"
+    directory: "/jvm-packages/xgboost4j-spark-gpu"
+    schedule:
+      interval: "monthly"
+  - package-ecosystem: "github-actions"
+    directory: /
+    schedule:
+      interval: "monthly"
.github/workflows/freebsd.yml (vendored; new file, 34 lines)
@@ -0,0 +1,34 @@
+name: FreeBSD
+
+on: [push, pull_request]
+
+permissions:
+  contents: read # to fetch code (actions/checkout)
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    timeout-minutes: 20
+    name: A job to run test in FreeBSD
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: 'true'
+      - name: Test in FreeBSD
+        id: test
+        uses: vmactions/freebsd-vm@v1
+        with:
+          usesh: true
+          prepare: |
+            pkg install -y cmake git ninja googletest
+
+          run: |
+            mkdir build
+            cd build
+            cmake .. -GNinja -DGOOGLE_TEST=ON
+            ninja -v
+            ./testxgboost
.github/workflows/i386.yml (vendored; new file, 43 lines)
@@ -0,0 +1,43 @@
+name: XGBoost-i386-test
+
+on: [push, pull_request]
+
+permissions:
+  contents: read # to fetch code (actions/checkout)
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  build-32bit:
+    name: Build 32-bit
+    runs-on: ubuntu-latest
+    services:
+      registry:
+        image: registry:2
+        ports:
+          - 5000:5000
+    steps:
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        with:
+          submodules: 'true'
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3.6.1
+        with:
+          driver-opts: network=host
+      - name: Build and push container
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          file: tests/ci_build/Dockerfile.i386
+          push: true
+          tags: localhost:5000/xgboost/build-32bit:latest
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+      - name: Build XGBoost
+        run: |
+          docker run --rm -v $PWD:/workspace -w /workspace \
+            -e CXXFLAGS='-Wno-error=overloaded-virtual -Wno-error=maybe-uninitialized -Wno-error=redundant-move' \
+            localhost:5000/xgboost/build-32bit:latest \
+            tests/ci_build/build_via_cmake.sh
.github/workflows/jvm_tests.yml (vendored; 67 lines changed)
@@ -5,60 +5,72 @@ on: [push, pull_request]
 permissions:
   contents: read # to fetch code (actions/checkout)
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
 jobs:
   test-with-jvm:
     name: Test JVM on OS ${{ matrix.os }}
     timeout-minutes: 30
     runs-on: ${{ matrix.os }}
     strategy:
       fail-fast: false
       matrix:
-        os: [windows-latest, ubuntu-latest, macos-11]
+        os: [windows-latest, ubuntu-latest, macos-13]
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
         with:
           submodules: 'true'
 
-      - uses: actions/setup-python@v2
+      - uses: actions/setup-java@6a0805fcefea3d4657a47ac4c165951e33482018 # v4.2.2
         with:
-          python-version: '3.8'
-          architecture: 'x64'
+          distribution: 'temurin'
+          java-version: '8'
 
-      - uses: actions/setup-java@v1
+      - uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4
        with:
-          java-version: 1.8
-
-      - name: Install Python packages
-        run: |
-          python -m pip install wheel setuptools
-          python -m pip install awscli
+          miniforge-variant: Mambaforge
+          miniforge-version: latest
+          activate-environment: jvm_tests
+          environment-file: tests/ci_build/conda_env/jvm_tests.yml
+          use-mamba: true
 
       - name: Cache Maven packages
-        uses: actions/cache@v2
+        uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
        with:
          path: ~/.m2
          key: ${{ runner.os }}-m2-${{ hashFiles('./jvm-packages/pom.xml') }}
-          restore-keys: ${{ runner.os }}-m2
+          restore-keys: ${{ runner.os }}-m2-${{ hashFiles('./jvm-packages/pom.xml') }}
 
-      - name: Test XGBoost4J
+      - name: Test XGBoost4J (Core)
        run: |
          cd jvm-packages
          mvn test -B -pl :xgboost4j_2.12
 
+      - name: Test XGBoost4J (Core, Spark, Examples)
+        run: |
+          rm -rfv build/
+          cd jvm-packages
+          mvn -B test
+        if: matrix.os == 'ubuntu-latest' # Distributed training doesn't work on Windows
+
       - name: Extract branch name
         shell: bash
-        run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
+        run: |
+          echo "branch=${GITHUB_REF#refs/heads/}" >> "$GITHUB_OUTPUT"
         id: extract_branch
         if: |
           (github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')) &&
-          matrix.os == 'windows-latest'
+          (matrix.os == 'windows-latest' || matrix.os == 'macos-13')
 
       - name: Publish artifact xgboost4j.dll to S3
         run: |
           cd lib/
           Rename-Item -Path xgboost4j.dll -NewName xgboost4j_${{ github.sha }}.dll
           dir
-          python -m awscli s3 cp xgboost4j_${{ github.sha }}.dll s3://xgboost-nightly-builds/${{ steps.extract_branch.outputs.branch }}/ --acl public-read
+          python -m awscli s3 cp xgboost4j_${{ github.sha }}.dll s3://xgboost-nightly-builds/${{ steps.extract_branch.outputs.branch }}/libxgboost4j/ --acl public-read --region us-west-2
         if: |
           (github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')) &&
           matrix.os == 'windows-latest'
@@ -66,12 +78,23 @@ jobs:
         env:
           AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID_IAM_S3_UPLOADER }}
           AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY_IAM_S3_UPLOADER }}
 
+      - name: Publish artifact libxgboost4j.dylib to S3
+        shell: bash -l {0}
+        run: |
+          cd lib/
+          mv -v libxgboost4j.dylib libxgboost4j_${{ github.sha }}.dylib
+          ls
+          python -m awscli s3 cp libxgboost4j_${{ github.sha }}.dylib s3://xgboost-nightly-builds/${{ steps.extract_branch.outputs.branch }}/libxgboost4j/ --acl public-read --region us-west-2
+        if: |
+          (github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')) &&
+          matrix.os == 'macos-13'
+        env:
+          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID_IAM_S3_UPLOADER }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY_IAM_S3_UPLOADER }}
+
-      - name: Test XGBoost4J-Spark
+      - name: Build and Test XGBoost4J with scala 2.13
        run: |
          rm -rfv build/
          cd jvm-packages
-          mvn -B test
+          mvn -B clean install test -Pdefault,scala-2.13
        if: matrix.os == 'ubuntu-latest' # Distributed training doesn't work on Windows
-        env:
-          RABIT_MOCK: ON
.github/workflows/main.yml (vendored; 166 lines changed)
@@ -9,6 +9,10 @@ on: [push, pull_request]
 permissions:
   contents: read # to fetch code (actions/checkout)
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
 # A workflow run is made up of one or more jobs that can run sequentially or in parallel
 jobs:
   gtest-cpu:
@@ -17,9 +21,9 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [macos-11]
+        os: [macos-12]
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
         with:
           submodules: 'true'
       - name: Install system packages
@@ -29,7 +33,7 @@ jobs:
         run: |
           mkdir build
           cd build
-          cmake .. -DGOOGLE_TEST=ON -DUSE_OPENMP=ON -DUSE_DMLC_GTEST=ON -DPLUGIN_DENSE_PARSER=ON -GNinja
+          cmake .. -DGOOGLE_TEST=ON -DUSE_OPENMP=ON -DUSE_DMLC_GTEST=ON -GNinja -DBUILD_DEPRECATED_CLI=ON -DUSE_SANITIZER=ON -DENABLED_SANITIZERS=address -DCMAKE_BUILD_TYPE=RelWithDebInfo
           ninja -v
       - name: Run gtest binary
         run: |
@@ -45,7 +49,7 @@ jobs:
       matrix:
         os: [ubuntu-latest]
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
        with:
          submodules: 'true'
      - name: Install system packages
@@ -56,40 +60,80 @@ jobs:
       run: |
         mkdir build
         cd build
-        cmake .. -GNinja -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON -DUSE_OPENMP=OFF
+        cmake .. -GNinja -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON -DUSE_OPENMP=OFF -DBUILD_DEPRECATED_CLI=ON
         ninja -v
      - name: Run gtest binary
        run: |
          cd build
          ctest --extra-verbose
 
-  c-api-demo:
-    name: Test installing XGBoost lib + building the C API demo
+  gtest-cpu-sycl:
+    name: Test Google C++ unittest (CPU SYCL)
     runs-on: ${{ matrix.os }}
     strategy:
       fail-fast: false
       matrix:
-        os: ["ubuntu-latest"]
-        python-version: ["3.8"]
+        os: [ubuntu-latest]
+        python-version: ["3.10"]
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
        with:
          submodules: 'true'
      - name: Install system packages
        run: |
          sudo apt-get install -y --no-install-recommends ninja-build
-      - uses: conda-incubator/setup-miniconda@v2
+      - uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4
        with:
-          auto-update-conda: true
-          python-version: ${{ matrix.python-version }}
-          activate-environment: test
+          miniforge-variant: Mambaforge
+          miniforge-version: latest
+          activate-environment: linux_sycl_test
+          environment-file: tests/ci_build/conda_env/linux_sycl_test.yml
+          use-mamba: true
      - name: Display Conda env
        shell: bash -l {0}
        run: |
          conda info
          conda list
-      - name: Build and install XGBoost static library
+      - name: Build and install XGBoost
        shell: bash -l {0}
        run: |
          mkdir build
          cd build
+          cmake .. -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON -DPLUGIN_SYCL=ON -DCMAKE_CXX_COMPILER=g++ -DCMAKE_C_COMPILER=gcc -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX
+          make -j$(nproc)
+      - name: Run gtest binary for SYCL
+        run: |
+          cd build
+          ./testxgboost --gtest_filter=Sycl*
+      - name: Run gtest binary for non SYCL
+        run: |
+          cd build
+          ./testxgboost --gtest_filter=-Sycl*
+
+  c-api-demo:
+    name: Test installing XGBoost lib + building the C API demo
+    runs-on: ${{ matrix.os }}
+    defaults:
+      run:
+        shell: bash -l {0}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: ["ubuntu-latest"]
+        python-version: ["3.10"]
+    steps:
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        with:
+          submodules: 'true'
+      - uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4
+        with:
+          miniforge-variant: Mambaforge
+          miniforge-version: latest
+          activate-environment: cpp_test
+          environment-file: tests/ci_build/conda_env/cpp_test.yml
+          use-mamba: true
+      - name: Display Conda env
+        run: |
+          conda info
+          conda list
+
+      - name: Build and install XGBoost static library
+        run: |
+          mkdir build
+          cd build
@@ -97,7 +141,6 @@ jobs:
           ninja -v install
           cd -
       - name: Build and run C API demo with static
-        shell: bash -l {0}
         run: |
           pushd .
           cd demo/c-api/
@@ -109,15 +152,15 @@ jobs:
           cd ..
           rm -rf ./build
           popd
 
       - name: Build and install XGBoost shared library
-        shell: bash -l {0}
         run: |
           cd build
-          cmake .. -DBUILD_STATIC_LIB=OFF -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX -GNinja
+          cmake .. -DBUILD_STATIC_LIB=OFF -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX -GNinja -DPLUGIN_FEDERATED=ON -DGOOGLE_TEST=ON
           ninja -v install
+          ./testxgboost
           cd -
       - name: Build and run C API demo with shared
-        shell: bash -l {0}
         run: |
           pushd .
           cd demo/c-api/
@@ -130,84 +173,21 @@ jobs:
         ./tests/ci_build/verify_link.sh ./demo/c-api/build/basic/api-demo
         ./tests/ci_build/verify_link.sh ./demo/c-api/build/external-memory/external-memory-demo
 
-  lint:
+  cpp-lint:
     runs-on: ubuntu-latest
     name: Code linting for C++
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
        with:
          submodules: 'true'
-      - uses: actions/setup-python@v2
+      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
       with:
-          python-version: "3.8"
+          python-version: "3.10"
          architecture: 'x64'
      - name: Install Python packages
        run: |
-          python -m pip install wheel setuptools cpplint pylint
+          python -m pip install wheel setuptools cmakelint cpplint pylint
      - name: Run lint
        run: |
-          LINT_LANG=cpp make lint
-
-  doxygen:
-    runs-on: ubuntu-latest
-    name: Generate C/C++ API doc using Doxygen
-    steps:
-      - uses: actions/checkout@v2
-        with:
-          submodules: 'true'
-      - uses: actions/setup-python@v2
-        with:
-          python-version: "3.8"
-          architecture: 'x64'
-      - name: Install system packages
-        run: |
-          sudo apt-get install -y --no-install-recommends doxygen graphviz ninja-build
-          python -m pip install wheel setuptools
-          python -m pip install awscli
-      - name: Run Doxygen
-        run: |
-          mkdir build
-          cd build
-          cmake .. -DBUILD_C_DOC=ON -GNinja
-          ninja -v doc_doxygen
-      - name: Extract branch name
-        shell: bash
-        run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
-        id: extract_branch
-        if: github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')
-      - name: Publish
-        run: |
-          cd build/
-          tar cvjf ${{ steps.extract_branch.outputs.branch }}.tar.bz2 doc_doxygen/
-          python -m awscli s3 cp ./${{ steps.extract_branch.outputs.branch }}.tar.bz2 s3://xgboost-docs/doxygen/ --acl public-read
-        if: github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')
-        env:
-          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID_IAM_S3_UPLOADER }}
-          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY_IAM_S3_UPLOADER }}
-
-  sphinx:
-    runs-on: ubuntu-latest
-    name: Build docs using Sphinx
-    steps:
-      - uses: actions/checkout@v2
-        with:
-          submodules: 'true'
-      - uses: actions/setup-python@v2
-        with:
-          python-version: "3.8"
-          architecture: 'x64'
-      - name: Install system packages
-        run: |
-          sudo apt-get install -y --no-install-recommends graphviz
-          python -m pip install wheel setuptools
-          python -m pip install -r doc/requirements.txt
-      - name: Extract branch name
-        shell: bash
-        run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
-        id: extract_branch
-        if: github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')
-      - name: Run Sphinx
-        run: |
-          make -C doc html
-        env:
-          SPHINX_GIT_BRANCH: ${{ steps.extract_branch.outputs.branch }}
+          python3 tests/ci_build/lint_cpp.py
+          sh ./tests/ci_build/lint_cmake.sh
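One substantive change in main.yml above is that the macOS gtest build now compiles with AddressSanitizer enabled (`-DUSE_SANITIZER=ON -DENABLED_SANITIZERS=address`). A minimal sketch (hypothetical code, unrelated to the repository) of the class of bug this instrumentation catches: a plain optimized build may silently read garbage, while an ASan build aborts with a report naming the faulting access and the allocation site.

    #include <vector>

    int main() {
      std::vector<int> v(8, 0);
      // Off-by-one read past the end of the heap buffer. Compiled with
      // -fsanitize=address (what ENABLED_SANITIZERS=address turns on),
      // the runtime aborts here with a heap-buffer-overflow report.
      return v.data()[8];
    }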
.github/workflows/python_tests.yml (vendored; 344 lines changed)
@@ -5,6 +5,14 @@ on: [push, pull_request]
 permissions:
   contents: read # to fetch code (actions/checkout)
 
+defaults:
+  run:
+    shell: bash -l {0}
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
 jobs:
   python-mypy-lint:
     runs-on: ubuntu-latest
@@ -12,150 +20,128 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-latest]
-        python-version: ["3.8"]
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
        with:
          submodules: 'true'
-      - uses: conda-incubator/setup-miniconda@v2
+      - uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4
       with:
-          auto-update-conda: true
-          python-version: ${{ matrix.python-version }}
+          miniforge-variant: Mambaforge
+          miniforge-version: latest
+          activate-environment: python_lint
+          environment-file: tests/ci_build/conda_env/python_lint.yml
+          use-mamba: true
      - name: Display Conda env
-        shell: bash -l {0}
        run: |
          conda info
          conda list
      - name: Run mypy
-        shell: bash -l {0}
        run: |
          python tests/ci_build/lint_python.py --format=0 --type-check=1 --pylint=0
      - name: Run formatter
-        shell: bash -l {0}
        run: |
          python tests/ci_build/lint_python.py --format=1 --type-check=0 --pylint=0
      - name: Run pylint
-        shell: bash -l {0}
        run: |
          python tests/ci_build/lint_python.py --format=0 --type-check=0 --pylint=1
 
-  python-sdist-test:
+  python-sdist-test-on-Linux:
+    # Mismatched glibcxx version between system and conda forge.
     runs-on: ${{ matrix.os }}
     name: Test installing XGBoost Python source package on ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-latest, macos-11, windows-latest]
-        python-version: ["3.8"]
+        os: [ubuntu-latest]
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       with:
         submodules: 'true'
-      - name: Install osx system dependencies
-        if: matrix.os == 'macos-11'
-        run: |
-          brew install ninja libomp
      - name: Install Ubuntu system dependencies
        if: matrix.os == 'ubuntu-latest'
        run: |
          sudo apt-get install -y --no-install-recommends ninja-build
-      - uses: conda-incubator/setup-miniconda@v2
+      - uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4
       with:
-          auto-update-conda: true
-          python-version: ${{ matrix.python-version }}
-          activate-environment: test
+          miniforge-variant: Mambaforge
+          miniforge-version: latest
+          activate-environment: sdist_test
+          environment-file: tests/ci_build/conda_env/sdist_test.yml
+          use-mamba: true
      - name: Display Conda env
-        shell: bash -l {0}
        run: |
          conda info
          conda list
      - name: Build and install XGBoost
-        shell: bash -l {0}
        run: |
          cd python-package
          python --version
-          python setup.py sdist
+          python -m build --sdist
+          pip install -v ./dist/xgboost-*.tar.gz --config-settings use_openmp=False
          cd ..
          python -c 'import xgboost'
 
+  python-sdist-test:
+    # Use system toolchain instead of conda toolchain for macos and windows.
+    # MacOS has linker error if clang++ from conda-forge is used
+    runs-on: ${{ matrix.os }}
+    name: Test installing XGBoost Python source package on ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [macos-13, windows-latest]
+        python-version: ["3.10"]
+    steps:
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        with:
+          submodules: 'true'
+      - name: Install osx system dependencies
+        if: matrix.os == 'macos-13'
+        run: |
+          brew install ninja libomp
+      - uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4
+        with:
+          auto-update-conda: true
+          python-version: ${{ matrix.python-version }}
+          activate-environment: test
+      - name: Install build
+        run: |
+          conda install -c conda-forge python-build
+      - name: Display Conda env
+        run: |
+          conda info
+          conda list
+      - name: Build and install XGBoost
+        run: |
+          cd python-package
+          python --version
+          python -m build --sdist
+          pip install -v ./dist/xgboost-*.tar.gz
+          cd ..
+          python -c 'import xgboost'
+
-  python-tests-on-win:
-    name: Test XGBoost Python package on ${{ matrix.config.os }}
-    runs-on: ${{ matrix.config.os }}
-    strategy:
-      matrix:
-        config:
-          - {os: windows-latest, python-version: '3.8'}
-
-    steps:
-      - uses: actions/checkout@v2
-        with:
-          submodules: 'true'
-
-      - uses: conda-incubator/setup-miniconda@v2
-        with:
-          auto-update-conda: true
-          python-version: ${{ matrix.config.python-version }}
-          activate-environment: win64_env
-          environment-file: tests/ci_build/conda_env/win64_cpu_test.yml
-
-      - name: Display Conda env
-        shell: bash -l {0}
-        run: |
-          conda info
-          conda list
-
-      - name: Build XGBoost on Windows
-        shell: bash -l {0}
-        run: |
-          mkdir build_msvc
-          cd build_msvc
-          cmake .. -G"Visual Studio 17 2022" -DCMAKE_CONFIGURATION_TYPES="Release" -A x64 -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON
-          cmake --build . --config Release --parallel $(nproc)
-
-      - name: Install Python package
-        shell: bash -l {0}
-        run: |
-          cd python-package
-          python --version
-          python setup.py bdist_wheel --universal
-          pip install ./dist/*.whl
-
-      - name: Test Python package
-        shell: bash -l {0}
-        run: |
-          pytest -s -v ./tests/python
-
  python-tests-on-macos:
    name: Test XGBoost Python package on ${{ matrix.config.os }}
    runs-on: ${{ matrix.config.os }}
-    timeout-minutes: 90
+    timeout-minutes: 60
    strategy:
      matrix:
        config:
-          - {os: macos-11, python-version: "3.8" }
+          - {os: macos-13}
 
    steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       with:
         submodules: 'true'
 
-      - uses: conda-incubator/setup-miniconda@v2
+      - uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4
       with:
         auto-update-conda: true
-          python-version: ${{ matrix.config.python-version }}
-          activate-environment: macos_test
+          miniforge-variant: Mambaforge
+          miniforge-version: latest
+          activate-environment: macos_cpu_test
+          environment-file: tests/ci_build/conda_env/macos_cpu_test.yml
+          use-mamba: true
 
      - name: Display Conda env
        shell: bash -l {0}
        run: |
          conda info
          conda list
 
      - name: Build XGBoost on macos
        shell: bash -l {0}
        run: |
          brew install ninja
 
@@ -164,17 +150,199 @@ jobs:
           # Set prefix, to use OpenMP library from Conda env
           # See https://github.com/dmlc/xgboost/issues/7039#issuecomment-1025038228
           # to learn why we don't use libomp from Homebrew.
-          cmake .. -GNinja -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON -DCMAKE_PREFIX_PATH=$CONDA_PREFIX
+          cmake .. -GNinja -DCMAKE_PREFIX_PATH=$CONDA_PREFIX -DBUILD_DEPRECATED_CLI=ON
           ninja
 
       - name: Install Python package
-        shell: bash -l {0}
         run: |
           cd python-package
           python --version
-          python setup.py install
+          pip install -v .
 
       - name: Test Python package
         run: |
+          pytest -s -v -rxXs --durations=0 ./tests/python
+
+      - name: Test Dask Interface
+        run: |
+          pytest -s -v -rxXs --durations=0 ./tests/test_distributed/test_with_dask
+
+  python-tests-on-win:
+    name: Test XGBoost Python package on ${{ matrix.config.os }}
+    runs-on: ${{ matrix.config.os }}
+    timeout-minutes: 60
+    strategy:
+      matrix:
+        config:
+          - {os: windows-latest, python-version: '3.10'}
+
+    steps:
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        with:
+          submodules: 'true'
+
+      - uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4
+        with:
+          auto-update-conda: true
+          python-version: ${{ matrix.config.python-version }}
+          activate-environment: win64_env
+          environment-file: tests/ci_build/conda_env/win64_cpu_test.yml
+
+      - name: Display Conda env
+        run: |
+          conda info
+          conda list
+
+      - name: Build XGBoost on Windows
+        run: |
+          mkdir build_msvc
+          cd build_msvc
+          cmake .. -G"Visual Studio 17 2022" -DCMAKE_CONFIGURATION_TYPES="Release" -A x64 -DBUILD_DEPRECATED_CLI=ON
+          cmake --build . --config Release --parallel $(nproc)
+
+      - name: Install Python package
+        run: |
+          cd python-package
+          python --version
+          pip wheel -v . --wheel-dir dist/
+          pip install ./dist/*.whl
+
+      - name: Test Python package
+        run: |
+          pytest -s -v -rxXs --durations=0 ./tests/python
+
+  python-tests-on-ubuntu:
+    name: Test XGBoost Python package on ${{ matrix.config.os }}
+    runs-on: ${{ matrix.config.os }}
+    timeout-minutes: 90
+    strategy:
+      matrix:
+        config:
+          - {os: ubuntu-latest, python-version: "3.10"}
+
+    steps:
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        with:
+          submodules: 'true'
+
+      - uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4
+        with:
+          miniforge-variant: Mambaforge
+          miniforge-version: latest
+          activate-environment: linux_cpu_test
+          environment-file: tests/ci_build/conda_env/linux_cpu_test.yml
+          use-mamba: true
+
+      - name: Display Conda env
+        run: |
+          conda info
+          conda list
+
+      - name: Build XGBoost on Ubuntu
+        run: |
+          mkdir build
+          cd build
+          cmake .. -GNinja -DCMAKE_PREFIX_PATH=$CONDA_PREFIX -DBUILD_DEPRECATED_CLI=ON
+          ninja
+
+      - name: Install Python package
+        run: |
+          cd python-package
+          python --version
+          pip install -v .
+
+      - name: Test Python package
+        run: |
+          pytest -s -v -rxXs --durations=0 ./tests/python
+
+      - name: Test Dask Interface
+        run: |
+          pytest -s -v -rxXs --durations=0 ./tests/test_distributed/test_with_dask
+
+      - name: Test PySpark Interface
         shell: bash -l {0}
         run: |
-          pytest -s -v ./tests/python
+          pytest -s -v -rxXs --durations=0 ./tests/test_distributed/test_with_spark
+
+  python-sycl-tests-on-ubuntu:
+    name: Test XGBoost Python package with SYCL on ${{ matrix.config.os }}
+    runs-on: ${{ matrix.config.os }}
+    timeout-minutes: 90
+    strategy:
+      matrix:
+        config:
+          - {os: ubuntu-latest, python-version: "3.10"}
+
+    steps:
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        with:
+          submodules: 'true'
+
+      - uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4
+        with:
+          miniforge-variant: Mambaforge
+          miniforge-version: latest
+          activate-environment: linux_sycl_test
+          environment-file: tests/ci_build/conda_env/linux_sycl_test.yml
+          use-mamba: true
+
+      - name: Display Conda env
+        run: |
+          conda info
+          conda list
+      - name: Build XGBoost on Ubuntu
+        run: |
+          mkdir build
+          cd build
+          cmake .. -DPLUGIN_SYCL=ON -DCMAKE_CXX_COMPILER=g++ -DCMAKE_C_COMPILER=gcc -DCMAKE_PREFIX_PATH=$CONDA_PREFIX
+          make -j$(nproc)
+      - name: Install Python package
+        run: |
+          cd python-package
+          python --version
+          pip install -v .
+      - name: Test Python package
+        run: |
+          pytest -s -v -rxXs --durations=0 ./tests/python-sycl/
+
+
+  python-system-installation-on-ubuntu:
+    name: Test XGBoost Python package System Installation on ${{ matrix.os }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-latest]
+
+    steps:
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        with:
+          submodules: 'true'
+
+      - name: Set up Python 3.10
+        uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
+        with:
+          python-version: "3.10"
+
+      - name: Install ninja
+        run: |
+          sudo apt-get update && sudo apt-get install -y ninja-build
+
+      - name: Build XGBoost on Ubuntu
+        run: |
+          mkdir build
+          cd build
+          cmake .. -GNinja
+          ninja
+
+      - name: Copy lib to system lib
+        run: |
+          cp lib/* "$(python -c 'import sys; print(sys.base_prefix)')/lib"
+
+      - name: Install XGBoost in Virtual Environment
+        run: |
+          cd python-package
+          pip install virtualenv
+          virtualenv venv
+          source venv/bin/activate && \
+            pip install -v . --config-settings use_system_libxgboost=True && \
+            python -c 'import xgboost'
.github/workflows/python_wheels.yml (vendored; 32 lines changed)
@@ -5,6 +5,14 @@ on: [push, pull_request]
 permissions:
   contents: read # to fetch code (actions/checkout)
 
+defaults:
+  run:
+    shell: bash -l {0}
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
 jobs:
   python-wheels:
     name: Build wheel for ${{ matrix.platform_id }}
@@ -12,30 +20,36 @@ jobs:
     strategy:
       matrix:
         include:
-          - os: macos-latest
+          - os: macos-13
             platform_id: macosx_x86_64
-          - os: macos-latest
+          - os: macos-14
             platform_id: macosx_arm64
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
        with:
          submodules: 'true'
-      - name: Setup Python
-        uses: actions/setup-python@v2
+      - name: Set up homebrew
+        uses: Homebrew/actions/setup-homebrew@68fa6aeb1ccb0596d311f2b34ec74ec21ee68e54
+      - name: Install libomp
+        run: brew install libomp
+      - uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4
       with:
-          python-version: "3.8"
+          miniforge-variant: Mambaforge
+          miniforge-version: latest
+          python-version: "3.10"
+          use-mamba: true
      - name: Build wheels
        run: bash tests/ci_build/build_python_wheels.sh ${{ matrix.platform_id }} ${{ github.sha }}
      - name: Extract branch name
-        shell: bash
-        run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
+        run: |
+          echo "branch=${GITHUB_REF#refs/heads/}" >> "$GITHUB_OUTPUT"
        id: extract_branch
        if: github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')
      - name: Upload Python wheel
        if: github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')
        run: |
          python -m pip install awscli
-          python -m awscli s3 cp wheelhouse/*.whl s3://xgboost-nightly-builds/${{ steps.extract_branch.outputs.branch }}/ --acl public-read
+          python -m awscli s3 cp wheelhouse/*.whl s3://xgboost-nightly-builds/${{ steps.extract_branch.outputs.branch }}/ --acl public-read --region us-west-2
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID_IAM_S3_UPLOADER }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY_IAM_S3_UPLOADER }}
25 .github/workflows/r_nold.yml vendored
@@ -1,4 +1,4 @@
# Run R tests with noLD R. Only triggered by a pull request review
# Run expensive R tests with the help of rhub. Only triggered by a pull request review
# See discussion at https://github.com/dmlc/xgboost/pull/6378

name: XGBoost-R-noLD
@@ -7,37 +7,34 @@ on:
  pull_request_review_comment:
    types: [created]

env:
  R_PACKAGES: c('XML', 'igraph', 'data.table', 'ggplot2', 'DiagrammeR', 'Ckmeans.1d.dp', 'vcd', 'testthat', 'lintr', 'knitr', 'rmarkdown', 'e1071', 'cplm', 'devtools', 'float', 'titanic')

permissions:
  contents: read  # to fetch code (actions/checkout)

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  test-R-noLD:
    if: github.event.comment.body == '/gha run r-nold-test' && contains('OWNER,MEMBER,COLLABORATOR', github.event.comment.author_association)
    timeout-minutes: 120
    runs-on: ubuntu-latest
    container: rhub/debian-gcc-devel-nold
    container:
      image: rhub/debian-gcc-devel-nold
    steps:
      - name: Install git and system packages
        shell: bash
        run: |
          apt-get update && apt-get install -y git libcurl4-openssl-dev libssl-dev libssh2-1-dev libgit2-dev libxml2-dev
          apt update && apt install libcurl4-openssl-dev libssl-dev libssh2-1-dev libgit2-dev libglpk-dev libxml2-dev libharfbuzz-dev libfribidi-dev git -y

      - uses: actions/checkout@v2
      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
        with:
          submodules: 'true'

      - name: Install dependencies
        shell: bash
        shell: bash -l {0}
        run: |
          cat > install_libs.R <<EOT
          install.packages(${{ env.R_PACKAGES }},
                           repos = 'http://cloud.r-project.org',
                           dependencies = c('Depends', 'Imports', 'LinkingTo'))
          EOT
          /tmp/R-devel/bin/Rscript install_libs.R
          /tmp/R-devel/bin/Rscript -e "source('./R-package/tests/helper_scripts/install_deps.R')"

      - name: Run R tests
        shell: bash
164 .github/workflows/r_tests.yml vendored
@@ -3,12 +3,15 @@ name: XGBoost-R-Tests
on: [push, pull_request]

env:
  R_PACKAGES: c('XML', 'data.table', 'ggplot2', 'DiagrammeR', 'Ckmeans.1d.dp', 'vcd', 'testthat', 'lintr', 'knitr', 'rmarkdown', 'e1071', 'cplm', 'devtools', 'float', 'titanic')
  GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }}

permissions:
  contents: read  # to fetch code (actions/checkout)

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  lintr:
    runs-on: ${{ matrix.config.os }}
@@ -22,41 +25,32 @@ jobs:
      RSPM: ${{ matrix.config.rspm }}

    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
        with:
          submodules: 'true'

      - uses: r-lib/actions/setup-r@v2
      - uses: r-lib/actions/setup-r@929c772977a3a13c8733b363bf5a2f685c25dd91 # v2.9.0
        with:
          r-version: ${{ matrix.config.r }}

      - name: Cache R packages
        uses: actions/cache@v2
        uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
        with:
          path: ${{ env.R_LIBS_USER }}
          key: ${{ runner.os }}-r-${{ matrix.config.r }}-5-${{ hashFiles('R-package/DESCRIPTION') }}
          restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-5-${{ hashFiles('R-package/DESCRIPTION') }}
          key: ${{ runner.os }}-r-${{ matrix.config.r }}-7-${{ hashFiles('R-package/DESCRIPTION') }}
          restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-7-${{ hashFiles('R-package/DESCRIPTION') }}

      - name: Install dependencies
        shell: Rscript {0}
        run: |
          install.packages(${{ env.R_PACKAGES }},
                           repos = 'http://cloud.r-project.org',
                           dependencies = c('Depends', 'Imports', 'LinkingTo'))
      - name: Install igraph on Windows
        shell: Rscript {0}
        if: matrix.config.os == 'windows-latest'
        run: |
          install.packages('igraph', type='binary')
          source("./R-package/tests/helper_scripts/install_deps.R")

      - name: Run lintr
        run: |
          cd R-package
          R CMD INSTALL .
          # Disable lintr errors for now: https://github.com/dmlc/xgboost/issues/8012
          Rscript tests/helper_scripts/run_lint.R || true
          MAKEFLAGS="-j$(nproc)" R CMD INSTALL R-package/
          Rscript tests/ci_build/lint_r.R $(pwd)

  test-with-R:
  test-Rpkg:
    runs-on: ${{ matrix.config.os }}
    name: Test R on OS ${{ matrix.config.os }}, R ${{ matrix.config.r }}, Compiler ${{ matrix.config.compiler }}, Build ${{ matrix.config.build }}
    strategy:
@@ -64,95 +58,93 @@
      matrix:
        config:
          - {os: windows-latest, r: 'release', compiler: 'mingw', build: 'autotools'}
          - {os: windows-latest, r: 'release', compiler: 'msvc', build: 'cmake'}
          - {os: windows-latest, r: 'release', compiler: 'mingw', build: 'cmake'}
          - {os: ubuntu-latest, r: 'release', compiler: 'none', build: 'cmake'}
    env:
      R_REMOTES_NO_ERRORS_FROM_WARNINGS: true
      RSPM: ${{ matrix.config.rspm }}

    steps:
      - uses: actions/checkout@v2
      - name: Install system dependencies
        run: |
          sudo apt update
          sudo apt install libcurl4-openssl-dev libssl-dev libssh2-1-dev libgit2-dev libglpk-dev libxml2-dev libharfbuzz-dev libfribidi-dev
        if: matrix.config.os == 'ubuntu-latest'
      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
        with:
          submodules: 'true'

      - uses: r-lib/actions/setup-r@v2
      - uses: r-lib/actions/setup-r@929c772977a3a13c8733b363bf5a2f685c25dd91 # v2.9.0
        with:
          r-version: ${{ matrix.config.r }}

      - name: Cache R packages
        uses: actions/cache@v2
        uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
        with:
          path: ${{ env.R_LIBS_USER }}
          key: ${{ runner.os }}-r-${{ matrix.config.r }}-5-${{ hashFiles('R-package/DESCRIPTION') }}
          restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-5-${{ hashFiles('R-package/DESCRIPTION') }}
          key: ${{ runner.os }}-r-${{ matrix.config.r }}-7-${{ hashFiles('R-package/DESCRIPTION') }}
          restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-7-${{ hashFiles('R-package/DESCRIPTION') }}

      - name: Install dependencies
        shell: Rscript {0}
        if: matrix.config.os != 'windows-latest'
        run: |
          install.packages(${{ env.R_PACKAGES }},
                           repos = 'http://cloud.r-project.org',
                           dependencies = c('Depends', 'Imports', 'LinkingTo'))

      - name: Install binary dependencies
        shell: Rscript {0}
        if: matrix.config.os == 'windows-latest'
        run: |
          install.packages(${{ env.R_PACKAGES }},
                           type = 'binary',
                           repos = 'http://cloud.r-project.org',
                           dependencies = c('Depends', 'Imports', 'LinkingTo'))

      - uses: actions/setup-python@v2
      - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
        with:
          python-version: "3.8"
          python-version: "3.10"
          architecture: 'x64'

      - name: Test R
        run: |
          python tests/ci_build/test_r_package.py --compiler='${{ matrix.config.compiler }}' --build-tool='${{ matrix.config.build }}'

  test-R-CRAN:
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        config:
          - {r: 'release'}

    steps:
      - uses: actions/checkout@v2
        with:
          submodules: 'true'

      - uses: r-lib/actions/setup-r@v2
        with:
          r-version: ${{ matrix.config.r }}

      - uses: r-lib/actions/setup-tinytex@v2

      - name: Install system packages
        run: |
          sudo apt-get update && sudo apt-get install libcurl4-openssl-dev libssl-dev libssh2-1-dev libgit2-dev pandoc pandoc-citeproc libglpk-dev

      - name: Cache R packages
        uses: actions/cache@v2
        with:
          path: ${{ env.R_LIBS_USER }}
          key: ${{ runner.os }}-r-${{ matrix.config.r }}-5-${{ hashFiles('R-package/DESCRIPTION') }}
          restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-5-${{ hashFiles('R-package/DESCRIPTION') }}

      - name: Install dependencies
        shell: Rscript {0}
        run: |
          install.packages(${{ env.R_PACKAGES }},
                           repos = 'http://cloud.r-project.org',
                           dependencies = c('Depends', 'Imports', 'LinkingTo'))
          install.packages('igraph', repos = 'http://cloud.r-project.org', dependencies = c('Depends', 'Imports', 'LinkingTo'))
          source("./R-package/tests/helper_scripts/install_deps.R")

      - name: Check R Package
      - name: Test R
        run: |
          # Print stacktrace upon success or failure
          make Rcheck || tests/ci_build/print_r_stacktrace.sh fail
          tests/ci_build/print_r_stacktrace.sh success
          python tests/ci_build/test_r_package.py --compiler='${{ matrix.config.compiler }}' --build-tool="${{ matrix.config.build }}" --task=check
        if: matrix.config.compiler != 'none'

      - name: Test R
        run: |
          python tests/ci_build/test_r_package.py --build-tool="${{ matrix.config.build }}" --task=check
        if: matrix.config.compiler == 'none'

  test-R-on-Debian:
    name: Test R package on Debian
    runs-on: ubuntu-latest
    container:
      image: rhub/debian-gcc-release

    steps:
      - name: Install system dependencies
        run: |
          # Must run before checkout to have the latest git installed.
          # No need to add pandoc, the container has it figured out.
          apt update && apt install libcurl4-openssl-dev libssl-dev libssh2-1-dev libgit2-dev libglpk-dev libxml2-dev libharfbuzz-dev libfribidi-dev git -y

      - name: Trust git cloning project sources
        run: |
          git config --global --add safe.directory "${GITHUB_WORKSPACE}"

      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
        with:
          submodules: 'true'

      - name: Install dependencies
        shell: bash -l {0}
        run: |
          Rscript -e "source('./R-package/tests/helper_scripts/install_deps.R')"

      - name: Test R
        shell: bash -l {0}
        run: |
          python3 tests/ci_build/test_r_package.py --r=/usr/bin/R --build-tool=autotools --task=check

      - uses: dorny/paths-filter@v3
        id: changes
        with:
          filters: |
            r_package:
              - 'R-package/**'

      - name: Run document check
        if: steps.changes.outputs.r_package == 'true'
        run: |
          python3 tests/ci_build/test_r_package.py --r=/usr/bin/R --task=doc
12 .github/workflows/scorecards.yml vendored
@@ -22,26 +22,26 @@ jobs:

    steps:
      - name: "Checkout code"
        uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # tag=v3.0.0
        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
        with:
          persist-credentials: false

      - name: "Run analysis"
        uses: ossf/scorecard-action@865b4092859256271290c77adbd10a43f4779972 # tag=v2.0.3
        uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0
        with:
          results_file: results.sarif
          results_format: sarif

          # Publish the results for public repositories to enable scorecard badges. For more details, see
          # https://github.com/ossf/scorecard-action#publishing-results.
          # For private repositories, `publish_results` will automatically be set to `false`, regardless
          # of the value entered here.
          publish_results: true

      # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
      # format to the repository Actions tab.
      - name: "Upload artifact"
        uses: actions/upload-artifact@6673cd052c4cd6fcf4b4e6e60ea986c889389535 # tag=v3.0.0
        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
        with:
          name: SARIF file
          path: results.sarif
@@ -49,6 +49,6 @@ jobs:

      # Upload the results to GitHub's code scanning dashboard.
      - name: "Upload to code-scanning"
        uses: github/codeql-action/upload-sarif@5f532563584d71fdef14ee64d17bafb34f751ce5 # tag=v1.0.26
        uses: github/codeql-action/upload-sarif@83a02f7883b12e0e4e1a146174f5e2292a01e601 # v2.16.4
        with:
          sarif_file: results.sarif
44 .github/workflows/update_rapids.yml vendored (Normal file)
@@ -0,0 +1,44 @@
name: update-rapids

on:
  workflow_dispatch:
  schedule:
    - cron: "0 20 * * 1"  # Run once weekly

permissions:
  pull-requests: write
  contents: write

defaults:
  run:
    shell: bash -l {0}

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

env:
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}  # To use GitHub CLI

jobs:
  update-rapids:
    name: Check latest RAPIDS
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
        with:
          submodules: 'true'
      - name: Check latest RAPIDS and update conftest.sh
        run: |
          bash tests/buildkite/update-rapids.sh
      - name: Create Pull Request
        uses: peter-evans/create-pull-request@v6
        if: github.ref == 'refs/heads/master'
        with:
          add-paths: |
            tests/buildkite
          branch: create-pull-request/update-rapids
          base: master
          title: "[CI] Update RAPIDS to latest stable"
          commit-message: "[CI] Update RAPIDS to latest stable"
26 .gitignore vendored
@@ -27,12 +27,13 @@
*vali
*sdf
Release
*exe*
*exe
*exp
ipch
*.filters
*.user
*log
rmm_log.txt
Debug
*suo
.Rhistory
@@ -48,6 +49,7 @@ Debug
*.Rproj
./xgboost.mpi
./xgboost.mock
*.bak
#.Rbuildignore
R-package.Rproj
*.cache*
@@ -62,6 +64,7 @@ java/xgboost4j-demo/data/
java/xgboost4j-demo/tmp/
java/xgboost4j-demo/model/
nb-configuration*

# Eclipse
.project
.cproject
@@ -83,6 +86,7 @@ target
*.gcov
*.gcda
*.gcno
*.ubj
build_tests
/tests/cpp/xgboost_test

@@ -96,6 +100,7 @@ metastore_db

# files from R-package source install
**/config.status
R-package/config.h
R-package/src/Makevars
*.lib

@@ -137,5 +142,20 @@ credentials.csv
.metals
.bloop

# hypothesis python tests
.hypothesis
# python tests
demo/**/*.txt
*.dmatrix
.hypothesis
__MACOSX/
model*.json

# R tests
*.htm
*.html
*.libsvm
*.rds
Rplots.pdf
*.zip

# nsys
*.nsys-rep
3 .gitmodules vendored
@@ -2,9 +2,6 @@
  path = dmlc-core
  url = https://github.com/dmlc/dmlc-core
  branch = main
[submodule "cub"]
  path = cub
  url = https://github.com/NVlabs/cub
[submodule "gputreeshap"]
  path = gputreeshap
  url = https://github.com/rapidsai/gputreeshap.git

@@ -12,7 +12,7 @@ submodules:
build:
  os: ubuntu-22.04
  tools:
    python: "3.8"
    python: "3.10"
  apt_packages:
    - graphviz
    - cmake
@@ -32,4 +32,3 @@ formats:
python:
  install:
    - requirements: doc/requirements.txt
      system_packages: true
53 .travis.yml
@@ -1,53 +0,0 @@
sudo: required

dist: bionic

env:
  global:
    - secure: "lqkL5SCM/CBwgVb1GWoOngpojsa0zCSGcvF0O3/45rBT1EpNYtQ4LRJ1+XcHi126vdfGoim/8i7AQhn5eOgmZI8yAPBeoUZ5zSrejD3RUpXr2rXocsvRRP25Z4mIuAGHD9VAHtvTdhBZRVV818W02pYduSzAeaY61q/lU3xmWsE="
    - secure: "mzms6X8uvdhRWxkPBMwx+mDl3d+V1kUpZa7UgjT+dr4rvZMzvKtjKp/O0JZZVogdgZjUZf444B98/7AvWdSkGdkfz2QdmhWmXzNPfNuHtmfCYMdijsgFIGLuD3GviFL/rBiM2vgn32T3QqFiEJiC5StparnnXimPTc9TpXQRq5c="

jobs:
  include:
    - os: linux
      arch: s390x
      env: TASK=s390x_test

# dependent brew packages
# the dependencies from homebrew is installed manually from setup script due to outdated image from travis.
addons:
  homebrew:
    update: false
  apt:
    packages:
      - unzip

before_install:
  - source tests/travis/travis_setup_env.sh

install:
  - source tests/travis/setup.sh

script:
  - tests/travis/run_test.sh

cache:
  directories:
    - ${HOME}/.cache/usr
    - ${HOME}/.cache/pip

before_cache:
  - tests/travis/travis_before_cache.sh

after_failure:
  - tests/travis/travis_after_failure.sh

after_success:
  - tree build
  - bash <(curl -s https://codecov.io/bash) -a '-o src/ src/*.c'

notifications:
  email:
    on_success: change
    on_failure: always
1 CITATION
@@ -15,4 +15,3 @@
  address = {New York, NY, USA},
  keywords = {large-scale machine learning},
}
463 CMakeLists.txt
@@ -1,36 +1,58 @@
cmake_minimum_required(VERSION 3.14 FATAL_ERROR)
project(xgboost LANGUAGES CXX C VERSION 1.7.0)
cmake_minimum_required(VERSION 3.18 FATAL_ERROR)

if(PLUGIN_SYCL)
  string(REPLACE " -isystem ${CONDA_PREFIX}/include" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
endif()

project(xgboost LANGUAGES CXX C VERSION 2.2.0)
include(cmake/Utils.cmake)
list(APPEND CMAKE_MODULE_PATH "${xgboost_SOURCE_DIR}/cmake/modules")
cmake_policy(SET CMP0022 NEW)
cmake_policy(SET CMP0079 NEW)
cmake_policy(SET CMP0076 NEW)
set(CMAKE_POLICY_DEFAULT_CMP0063 NEW)
cmake_policy(SET CMP0063 NEW)

if ((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUAL 3.13))
  cmake_policy(SET CMP0077 NEW)
endif ((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUAL 3.13))
# These policies are already set from 3.18 but we still need to set the policy
# default variables here for lower minimum versions in the submodules
set(CMAKE_POLICY_DEFAULT_CMP0063 NEW)
set(CMAKE_POLICY_DEFAULT_CMP0069 NEW)
set(CMAKE_POLICY_DEFAULT_CMP0076 NEW)
set(CMAKE_POLICY_DEFAULT_CMP0077 NEW)
set(CMAKE_POLICY_DEFAULT_CMP0079 NEW)

message(STATUS "CMake version ${CMAKE_VERSION}")

if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0)
  message(FATAL_ERROR "GCC version must be at least 5.0!")
# Check compiler versions
# Use recent compilers to ensure that std::filesystem is available
if(MSVC)
  if(MSVC_VERSION LESS 1920)
    message(FATAL_ERROR "Need Visual Studio 2019 or newer to build XGBoost")
  endif()
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
  if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "8.1")
    message(FATAL_ERROR "Need GCC 8.1 or newer to build XGBoost")
  endif()
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
  if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "11.0")
    message(FATAL_ERROR "Need Xcode 11.0 (AppleClang 11.0) or newer to build XGBoost")
  endif()
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
  if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "9.0")
    message(FATAL_ERROR "Need Clang 9.0 or newer to build XGBoost")
  endif()
endif()

include(${xgboost_SOURCE_DIR}/cmake/FindPrefetchIntrinsics.cmake)
include(${xgboost_SOURCE_DIR}/cmake/PrefetchIntrinsics.cmake)
find_prefetch_intrinsics()
include(${xgboost_SOURCE_DIR}/cmake/Version.cmake)
write_version()
set_default_configuration_release()

#-- Options
include(CMakeDependentOption)

## User options
option(BUILD_C_DOC "Build documentation for C APIs using Doxygen." OFF)
option(USE_OPENMP "Build with OpenMP support." ON)
option(BUILD_STATIC_LIB "Build static library" OFF)
option(BUILD_DEPRECATED_CLI "Build the deprecated command line interface" OFF)
option(FORCE_SHARED_CRT "Build with dynamic CRT on Windows (/MD)" OFF)
option(RABIT_BUILD_MPI "Build MPI" OFF)
## Bindings
option(JVM_BINDINGS "Build JVM bindings" OFF)
option(R_LIB "Build shared library for R package" OFF)
@@ -45,19 +67,34 @@ option(USE_DMLC_GTEST "Use google tests bundled with dmlc-core submodule" OFF)
option(USE_DEVICE_DEBUG "Generate CUDA device debug info." OFF)
option(USE_NVTX "Build with cuda profiling annotations. Developers only." OFF)
set(NVTX_HEADER_DIR "" CACHE PATH "Path to the stand-alone nvtx header")
option(RABIT_MOCK "Build rabit with mock" OFF)
option(HIDE_CXX_SYMBOLS "Build shared library and hide all C++ symbols" OFF)
option(KEEP_BUILD_ARTIFACTS_IN_BINARY_DIR "Output build artifacts in CMake binary dir" OFF)
## CUDA
option(USE_CUDA "Build with GPU acceleration" OFF)
option(USE_NCCL "Build with NCCL to enable distributed GPU support." OFF)
# This is specifically designed for PyPI binary release and should be disabled for most of the cases.
option(USE_DLOPEN_NCCL "Whether to load nccl dynamically." OFF)
option(BUILD_WITH_SHARED_NCCL "Build with shared NCCL library." OFF)
option(BUILD_WITH_CUDA_CUB "Build with cub in CUDA installation" OFF)
set(GPU_COMPUTE_VER "" CACHE STRING
  "Semicolon separated list of compute versions to be built against, e.g. '35;61'")
## Copied From dmlc
option(USE_HDFS "Build with HDFS support" OFF)
option(USE_AZURE "Build with AZURE support" OFF)
option(USE_S3 "Build with S3 support" OFF)

if(USE_CUDA)
  if(NOT DEFINED CMAKE_CUDA_ARCHITECTURES AND NOT DEFINED ENV{CUDAARCHS})
    set(GPU_COMPUTE_VER "" CACHE STRING
      "Semicolon separated list of compute versions to be built against, e.g. '35;61'")
  else()
    # Clear any cached values from previous runs
    unset(GPU_COMPUTE_VER)
    unset(GPU_COMPUTE_VER CACHE)
  endif()
endif()

# CUDA device LTO was introduced in CMake v3.25 and requires host LTO to also be enabled but can still
# be explicitly disabled allowing for LTO on host only, host and device, or neither, but device-only LTO
# is not a supported configuration
cmake_dependent_option(USE_CUDA_LTO
  "Enable link-time optimization for CUDA device code"
  "${CMAKE_INTERPROCEDURAL_OPTIMIZATION}"
  "CMAKE_VERSION VERSION_GREATER_EQUAL 3.25;USE_CUDA;CMAKE_INTERPROCEDURAL_OPTIMIZATION"
  OFF)
## Sanitizers
option(USE_SANITIZER "Use sanitizer flags" OFF)
option(SANITIZER_PATH "Path to sanitizers.")
@@ -65,100 +102,145 @@ set(ENABLED_SANITIZERS "address" "leak" CACHE STRING
  "Semicolon separated list of sanitizer names. E.g 'address;leak'. Supported sanitizers are
  address, leak, undefined and thread.")
## Plugins
option(PLUGIN_DENSE_PARSER "Build dense parser plugin" OFF)
option(PLUGIN_RMM "Build with RAPIDS Memory Manager (RMM)" OFF)
option(PLUGIN_FEDERATED "Build with Federated Learning" OFF)
## TODO: 1. Add check if DPC++ compiler is used for building
option(PLUGIN_UPDATER_ONEAPI "DPC++ updater" OFF)
option(PLUGIN_SYCL "SYCL plugin" OFF)
option(ADD_PKGCONFIG "Add xgboost.pc into system." ON)

#-- Checks for building XGBoost
if (USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug)))
if(USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug)))
  message(SEND_ERROR "Do not enable `USE_DEBUG_OUTPUT' with release build.")
endif (USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug)))
if (USE_NCCL AND NOT (USE_CUDA))
endif()
if(USE_NCCL AND NOT (USE_CUDA))
  message(SEND_ERROR "`USE_NCCL` must be enabled with `USE_CUDA` flag.")
endif (USE_NCCL AND NOT (USE_CUDA))
if (USE_DEVICE_DEBUG AND NOT (USE_CUDA))
endif()
if(USE_DEVICE_DEBUG AND NOT (USE_CUDA))
  message(SEND_ERROR "`USE_DEVICE_DEBUG` must be enabled with `USE_CUDA` flag.")
endif (USE_DEVICE_DEBUG AND NOT (USE_CUDA))
if (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
endif()
if(BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
  message(SEND_ERROR "Build XGBoost with -DUSE_NCCL=ON to enable BUILD_WITH_SHARED_NCCL.")
endif (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
if (JVM_BINDINGS AND R_LIB)
endif()
if(USE_DLOPEN_NCCL AND (NOT USE_NCCL))
  message(SEND_ERROR "Build XGBoost with -DUSE_NCCL=ON to enable USE_DLOPEN_NCCL.")
endif()
if(USE_DLOPEN_NCCL AND (NOT (CMAKE_SYSTEM_NAME STREQUAL "Linux")))
  message(SEND_ERROR "`USE_DLOPEN_NCCL` supports only Linux at the moment.")
endif()
if(JVM_BINDINGS AND R_LIB)
  message(SEND_ERROR "`R_LIB' is not compatible with `JVM_BINDINGS' as they both have customized configurations.")
endif (JVM_BINDINGS AND R_LIB)
if (R_LIB AND GOOGLE_TEST)
  message(WARNING "Some C++ unittests will fail with `R_LIB` enabled,
  as R package redirects some functions to R runtime implementation.")
endif (R_LIB AND GOOGLE_TEST)
if (USE_AVX)
  message(SEND_ERROR "The option 'USE_AVX' is deprecated as experimental AVX features have been removed from XGBoost.")
endif (USE_AVX)
if (PLUGIN_LZ4)
  message(SEND_ERROR "The option 'PLUGIN_LZ4' is removed from XGBoost.")
endif (PLUGIN_LZ4)
if (PLUGIN_RMM AND NOT (USE_CUDA))
endif()
if(R_LIB AND GOOGLE_TEST)
  message(
    WARNING
    "Some C++ tests will fail with `R_LIB` enabled, as R package redirects some functions to R runtime implementation."
  )
endif()
if(PLUGIN_RMM AND NOT (USE_CUDA))
  message(SEND_ERROR "`PLUGIN_RMM` must be enabled with `USE_CUDA` flag.")
endif (PLUGIN_RMM AND NOT (USE_CUDA))
if (PLUGIN_RMM AND NOT ((CMAKE_CXX_COMPILER_ID STREQUAL "Clang") OR (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")))
endif()
if(PLUGIN_RMM AND NOT ((CMAKE_CXX_COMPILER_ID STREQUAL "Clang") OR (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")))
  message(SEND_ERROR "`PLUGIN_RMM` must be used with GCC or Clang compiler.")
endif (PLUGIN_RMM AND NOT ((CMAKE_CXX_COMPILER_ID STREQUAL "Clang") OR (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")))
if (PLUGIN_RMM AND NOT (CMAKE_SYSTEM_NAME STREQUAL "Linux"))
endif()
if(PLUGIN_RMM AND NOT (CMAKE_SYSTEM_NAME STREQUAL "Linux"))
  message(SEND_ERROR "`PLUGIN_RMM` must be used with Linux.")
endif (PLUGIN_RMM AND NOT (CMAKE_SYSTEM_NAME STREQUAL "Linux"))
if (ENABLE_ALL_WARNINGS)
  if ((NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang") AND (NOT CMAKE_CXX_COMPILER_ID STREQUAL "GNU"))
endif()
if(ENABLE_ALL_WARNINGS)
  if((NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang") AND (NOT CMAKE_CXX_COMPILER_ID STREQUAL "GNU"))
    message(SEND_ERROR "ENABLE_ALL_WARNINGS is only available for Clang and GCC.")
  endif ((NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang") AND (NOT CMAKE_CXX_COMPILER_ID STREQUAL "GNU"))
endif (ENABLE_ALL_WARNINGS)
if (BUILD_STATIC_LIB AND (R_LIB OR JVM_BINDINGS))
  endif()
endif()
if(BUILD_STATIC_LIB AND (R_LIB OR JVM_BINDINGS))
  message(SEND_ERROR "Cannot build a static library libxgboost.a when R or JVM packages are enabled.")
endif (BUILD_STATIC_LIB AND (R_LIB OR JVM_BINDINGS))
if (PLUGIN_RMM AND (NOT BUILD_WITH_CUDA_CUB))
  message(SEND_ERROR "Cannot build with RMM using cub submodule.")
endif (PLUGIN_RMM AND (NOT BUILD_WITH_CUDA_CUB))
if (PLUGIN_FEDERATED)
  if (CMAKE_CROSSCOMPILING)
endif()
if(PLUGIN_FEDERATED)
  if(CMAKE_CROSSCOMPILING)
    message(SEND_ERROR "Cannot cross compile with federated learning support")
  endif ()
  if (BUILD_STATIC_LIB)
  endif()
  if(BUILD_STATIC_LIB)
    message(SEND_ERROR "Cannot build static lib with federated learning support")
  endif ()
  if (R_LIB OR JVM_BINDINGS)
  endif()
  if(R_LIB OR JVM_BINDINGS)
    message(SEND_ERROR "Cannot enable federated learning support when R or JVM packages are enabled.")
  endif ()
  if (WIN32)
  endif()
  if(WIN32)
    message(SEND_ERROR "Federated learning not supported for Windows platform")
  endif ()
endif ()
  endif()
endif()

#-- Removed options
if(USE_AVX)
  message(SEND_ERROR "The option `USE_AVX` is deprecated as experimental AVX features have been removed from XGBoost.")
endif()
if(PLUGIN_LZ4)
  message(SEND_ERROR "The option `PLUGIN_LZ4` is removed from XGBoost.")
endif()
if(RABIT_BUILD_MPI)
  message(SEND_ERROR "The option `RABIT_BUILD_MPI` has been removed from XGBoost.")
endif()
if(USE_S3)
  message(SEND_ERROR "The option `USE_S3` has been removed from XGBoost")
endif()
if(USE_AZURE)
  message(SEND_ERROR "The option `USE_AZURE` has been removed from XGBoost")
endif()
if(USE_HDFS)
  message(SEND_ERROR "The option `USE_HDFS` has been removed from XGBoost")
endif()
if(PLUGIN_DENSE_PARSER)
  message(SEND_ERROR "The option `PLUGIN_DENSE_PARSER` has been removed from XGBoost.")
endif()

#-- Sanitizer
if (USE_SANITIZER)
if(USE_SANITIZER)
  include(cmake/Sanitizer.cmake)
  enable_sanitizers("${ENABLED_SANITIZERS}")
endif (USE_SANITIZER)
endif()

if (USE_CUDA)
if(USE_CUDA)
  set(USE_OPENMP ON CACHE BOOL "CUDA requires OpenMP" FORCE)
  # `export CXX=' is ignored by CMake CUDA.
  set(CMAKE_CUDA_HOST_COMPILER ${CMAKE_CXX_COMPILER})
  message(STATUS "Configured CUDA host compiler: ${CMAKE_CUDA_HOST_COMPILER}")
  if(NOT DEFINED CMAKE_CUDA_HOST_COMPILER AND NOT DEFINED ENV{CUDAHOSTCXX})
    set(CMAKE_CUDA_HOST_COMPILER ${CMAKE_CXX_COMPILER} CACHE FILEPATH
      "The compiler executable to use when compiling host code for CUDA or HIP language files.")
    mark_as_advanced(CMAKE_CUDA_HOST_COMPILER)
    message(STATUS "Configured CUDA host compiler: ${CMAKE_CUDA_HOST_COMPILER}")
  endif()

  if(NOT DEFINED CMAKE_CUDA_RUNTIME_LIBRARY)
    set(CMAKE_CUDA_RUNTIME_LIBRARY Static)
  endif()

  enable_language(CUDA)
  if (${CMAKE_CUDA_COMPILER_VERSION} VERSION_LESS 11.0)
  if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_LESS 11.0)
    message(FATAL_ERROR "CUDA version must be at least 11.0!")
  endif()
  set(GEN_CODE "")
  format_gencode_flags("${GPU_COMPUTE_VER}" GEN_CODE)
  add_subdirectory(${PROJECT_SOURCE_DIR}/gputreeshap)
  if(DEFINED GPU_COMPUTE_VER)
    compute_cmake_cuda_archs("${GPU_COMPUTE_VER}")
  endif()

  if ((${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 11.4) AND (NOT BUILD_WITH_CUDA_CUB))
    set(BUILD_WITH_CUDA_CUB ON)
  endif ()
endif (USE_CUDA)
  find_package(CUDAToolkit REQUIRED)
  find_package(CCCL CONFIG)
  if(NOT CCCL_FOUND)
    message(STATUS "Standalone CCCL not found. Attempting to use CCCL from CUDA Toolkit...")
    find_package(CCCL CONFIG
      HINTS ${CUDAToolkit_LIBRARY_DIR}/cmake)
    if(NOT CCCL_FOUND)
      message(STATUS "Could not locate CCCL from CUDA Toolkit. Using Thrust and CUB from CUDA Toolkit...")
      find_package(libcudacxx CONFIG REQUIRED
        HINTS ${CUDAToolkit_LIBRARY_DIR}/cmake)
      find_package(CUB CONFIG REQUIRED
        HINTS ${CUDAToolkit_LIBRARY_DIR}/cmake)
      find_package(Thrust CONFIG REQUIRED
        HINTS ${CUDAToolkit_LIBRARY_DIR}/cmake)
      thrust_create_target(Thrust HOST CPP DEVICE CUDA)
      add_library(CCCL::CCCL INTERFACE IMPORTED GLOBAL)
      target_link_libraries(CCCL::CCCL INTERFACE libcudacxx::libcudacxx CUB::CUB Thrust)
    endif()
  endif()
endif()

if (FORCE_COLORED_OUTPUT AND (CMAKE_GENERATOR STREQUAL "Ninja") AND
if(FORCE_COLORED_OUTPUT AND (CMAKE_GENERATOR STREQUAL "Ninja") AND
  ((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") OR
  (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")))
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-color=always")
@@ -166,71 +248,99 @@ endif()

find_package(Threads REQUIRED)

if (USE_OPENMP)
  if (APPLE)
    # Require CMake 3.16+ on Mac OSX, as previous versions of CMake had trouble locating
    # OpenMP on Mac. See https://github.com/dmlc/xgboost/pull/5146#issuecomment-568312706
    cmake_minimum_required(VERSION 3.16)
  endif (APPLE)
  find_package(OpenMP REQUIRED)
endif (USE_OPENMP)
#Add for IBM i
if (${CMAKE_SYSTEM_NAME} MATCHES "OS400")
# -- OpenMP
include(cmake/FindOpenMPMacOS.cmake)
if(USE_OPENMP)
  if(APPLE)
    find_openmp_macos()
  else()
    find_package(OpenMP REQUIRED)
  endif()
endif()

# Add for IBM i
if(${CMAKE_SYSTEM_NAME} MATCHES "OS400")
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
  set(CMAKE_CXX_ARCHIVE_CREATE "<CMAKE_AR> -X64 qc <TARGET> <OBJECTS>")
endif()

if (USE_NCCL)
if(USE_NCCL)
  find_package(Nccl REQUIRED)
endif (USE_NCCL)
endif()

if(MSVC)
  if(FORCE_SHARED_CRT)
    message(STATUS "XGBoost: Using dynamically linked MSVC runtime...")
    set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>DLL")
  else()
    message(STATUS "XGBoost: Using statically linked MSVC runtime...")
    set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>")
  endif()
endif()

# dmlc-core
msvc_use_static_runtime()
if (FORCE_SHARED_CRT)
  set(DMLC_FORCE_SHARED_CRT ON)
endif ()
set(DMLC_FORCE_SHARED_CRT ${FORCE_SHARED_CRT})
add_subdirectory(${xgboost_SOURCE_DIR}/dmlc-core)

if (MSVC)
  if (TARGET dmlc_unit_tests)
    target_compile_options(dmlc_unit_tests PRIVATE
      -D_CRT_SECURE_NO_WARNINGS -D_CRT_SECURE_NO_DEPRECATE)
  endif (TARGET dmlc_unit_tests)
endif (MSVC)

# rabit
add_subdirectory(rabit)
if (RABIT_BUILD_MPI)
  find_package(MPI REQUIRED)
endif (RABIT_BUILD_MPI)
if(MSVC)
  if(TARGET dmlc_unit_tests)
    target_compile_options(
      dmlc_unit_tests PRIVATE
      -D_CRT_SECURE_NO_WARNINGS -D_CRT_SECURE_NO_DEPRECATE
    )
  endif()
endif()

# core xgboost
add_subdirectory(${xgboost_SOURCE_DIR}/src)
target_link_libraries(objxgboost PUBLIC dmlc)

# Link -lstdc++fs for GCC 8.x
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS "9.0")
  target_link_libraries(objxgboost PUBLIC stdc++fs)
endif()

# Exports some R specific definitions and objects
if (R_LIB)
if(R_LIB)
  add_subdirectory(${xgboost_SOURCE_DIR}/R-package)
endif (R_LIB)
endif()

# This creates its own shared library `xgboost4j'.
if (JVM_BINDINGS)
if(JVM_BINDINGS)
  add_subdirectory(${xgboost_SOURCE_DIR}/jvm-packages)
endif (JVM_BINDINGS)
endif()

# Plugin
add_subdirectory(${xgboost_SOURCE_DIR}/plugin)

if (PLUGIN_RMM)
if(PLUGIN_RMM)
  find_package(rmm REQUIRED)
endif (PLUGIN_RMM)

  # Patch the rmm targets so they reference the static cudart
  # Remove this patch once RMM stops specifying cudart requirement
  # (since RMM is a header-only library, it should not specify cudart in its CMake config)
  get_target_property(rmm_link_libs rmm::rmm INTERFACE_LINK_LIBRARIES)
  list(REMOVE_ITEM rmm_link_libs CUDA::cudart)
  list(APPEND rmm_link_libs CUDA::cudart_static)
  set_target_properties(rmm::rmm PROPERTIES INTERFACE_LINK_LIBRARIES "${rmm_link_libs}")
  get_target_property(rmm_link_libs rmm::rmm INTERFACE_LINK_LIBRARIES)
endif()

if(PLUGIN_SYCL)
  set(CMAKE_CXX_LINK_EXECUTABLE
    "icpx <FLAGS> <CMAKE_CXX_LINK_FLAGS> -qopenmp <LINK_FLAGS> <OBJECTS> -o <TARGET> <LINK_LIBRARIES>")
  set(CMAKE_CXX_CREATE_SHARED_LIBRARY
    "icpx <CMAKE_SHARED_LIBRARY_CXX_FLAGS> -qopenmp <LANGUAGE_COMPILE_FLAGS> \
    <CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS> <SONAME_FLAG>,<TARGET_SONAME> \
    -o <TARGET> <OBJECTS> <LINK_LIBRARIES>")
endif()

#-- library
if (BUILD_STATIC_LIB)
if(BUILD_STATIC_LIB)
  add_library(xgboost STATIC)
else (BUILD_STATIC_LIB)
else()
  add_library(xgboost SHARED)
endif (BUILD_STATIC_LIB)
endif()
target_link_libraries(xgboost PRIVATE objxgboost)
target_include_directories(xgboost
  INTERFACE
@@ -239,53 +349,73 @@ target_include_directories(xgboost
#-- End shared library

#-- CLI for xgboost
add_executable(runxgboost ${xgboost_SOURCE_DIR}/src/cli_main.cc)
target_link_libraries(runxgboost PRIVATE objxgboost)
target_include_directories(runxgboost
  PRIVATE
  ${xgboost_SOURCE_DIR}/include
  ${xgboost_SOURCE_DIR}/dmlc-core/include
  ${xgboost_SOURCE_DIR}/rabit/include
)
set_target_properties(runxgboost PROPERTIES OUTPUT_NAME xgboost)
if(BUILD_DEPRECATED_CLI)
  add_executable(runxgboost ${xgboost_SOURCE_DIR}/src/cli_main.cc)
  target_link_libraries(runxgboost PRIVATE objxgboost)
  target_include_directories(runxgboost
    PRIVATE
    ${xgboost_SOURCE_DIR}/include
    ${xgboost_SOURCE_DIR}/dmlc-core/include
  )
  set_target_properties(runxgboost PROPERTIES OUTPUT_NAME xgboost)
  xgboost_target_properties(runxgboost)
  xgboost_target_link_libraries(runxgboost)
  xgboost_target_defs(runxgboost)

  if(KEEP_BUILD_ARTIFACTS_IN_BINARY_DIR)
    set_output_directory(runxgboost ${xgboost_BINARY_DIR})
  else()
    set_output_directory(runxgboost ${xgboost_SOURCE_DIR})
  endif()
endif()
#-- End CLI for xgboost

# Common setup for all targets
foreach(target xgboost objxgboost dmlc runxgboost)
foreach(target xgboost objxgboost dmlc)
  xgboost_target_properties(${target})
  xgboost_target_link_libraries(${target})
  xgboost_target_defs(${target})
endforeach()

if (JVM_BINDINGS)
if(JVM_BINDINGS)
  xgboost_target_properties(xgboost4j)
  xgboost_target_link_libraries(xgboost4j)
  xgboost_target_defs(xgboost4j)
endif (JVM_BINDINGS)
endif()

if(USE_OPENMP AND APPLE)
  patch_openmp_path_macos(xgboost libxgboost)
endif()

if(KEEP_BUILD_ARTIFACTS_IN_BINARY_DIR)
  set_output_directory(xgboost ${xgboost_BINARY_DIR}/lib)
else()
  set_output_directory(xgboost ${xgboost_SOURCE_DIR}/lib)
endif()

set_output_directory(runxgboost ${xgboost_SOURCE_DIR})
set_output_directory(xgboost ${xgboost_SOURCE_DIR}/lib)
# Ensure these two targets do not build simultaneously, as they produce outputs with conflicting names
add_dependencies(xgboost runxgboost)
if(BUILD_DEPRECATED_CLI)
  add_dependencies(xgboost runxgboost)
endif()

#-- Installing XGBoost
if (R_LIB)
if(R_LIB)
  include(cmake/RPackageInstallTargetSetup.cmake)
  set_target_properties(xgboost PROPERTIES PREFIX "")
  if (APPLE)
  if(APPLE)
    set_target_properties(xgboost PROPERTIES SUFFIX ".so")
  endif (APPLE)
  endif()
  setup_rpackage_install_target(xgboost "${CMAKE_CURRENT_BINARY_DIR}/R-package-install")
  set(CMAKE_INSTALL_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/dummy_inst")
endif (R_LIB)
if (MINGW)
endif()
if(MINGW)
  set_target_properties(xgboost PROPERTIES PREFIX "")
endif (MINGW)
endif()

if (BUILD_C_DOC)
if(BUILD_C_DOC)
  include(cmake/Doc.cmake)
  run_doxygen()
endif (BUILD_C_DOC)
endif()

include(CPack)

@@ -301,11 +431,19 @@ install(DIRECTORY ${xgboost_SOURCE_DIR}/include/xgboost
# > in any export set.
#
# https://github.com/dmlc/xgboost/issues/6085
if (BUILD_STATIC_LIB)
  set(INSTALL_TARGETS xgboost runxgboost objxgboost dmlc)
else (BUILD_STATIC_LIB)
  set(INSTALL_TARGETS xgboost runxgboost)
endif (BUILD_STATIC_LIB)
if(BUILD_STATIC_LIB)
  if(BUILD_DEPRECATED_CLI)
    set(INSTALL_TARGETS xgboost runxgboost objxgboost dmlc)
  else()
    set(INSTALL_TARGETS xgboost objxgboost dmlc)
  endif()
else()
  if(BUILD_DEPRECATED_CLI)
    set(INSTALL_TARGETS xgboost runxgboost)
  else()
    set(INSTALL_TARGETS xgboost)
  endif()
endif()

install(TARGETS ${INSTALL_TARGETS}
  EXPORT XGBoostTargets
@@ -334,7 +472,7 @@ install(
  DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/xgboost)

#-- Test
if (GOOGLE_TEST)
if(GOOGLE_TEST)
  enable_testing()
  # Unittests.
  add_executable(testxgboost)
@@ -354,25 +492,22 @@ if (GOOGLE_TEST)
    ${xgboost_SOURCE_DIR}/tests/cli/machine.conf.in
    ${xgboost_BINARY_DIR}/tests/cli/machine.conf
    @ONLY)
  add_test(
    NAME TestXGBoostCLI
    COMMAND runxgboost ${xgboost_BINARY_DIR}/tests/cli/machine.conf
    WORKING_DIRECTORY ${xgboost_BINARY_DIR})
  set_tests_properties(TestXGBoostCLI
    PROPERTIES
    PASS_REGULAR_EXPRESSION ".*test-rmse:0.087.*")
endif (GOOGLE_TEST)

# For MSVC: Call msvc_use_static_runtime() once again to completely
# replace /MD with /MT. See https://github.com/dmlc/xgboost/issues/4462
# for issues caused by mixing of /MD and /MT flags
msvc_use_static_runtime()
  if(BUILD_DEPRECATED_CLI)
    add_test(
      NAME TestXGBoostCLI
      COMMAND runxgboost ${xgboost_BINARY_DIR}/tests/cli/machine.conf
      WORKING_DIRECTORY ${xgboost_BINARY_DIR})
    set_tests_properties(TestXGBoostCLI
      PROPERTIES
      PASS_REGULAR_EXPRESSION ".*test-rmse:0.087.*")
  endif()
endif()

# Add xgboost.pc
if (ADD_PKGCONFIG)
if(ADD_PKGCONFIG)
  configure_file(${xgboost_SOURCE_DIR}/cmake/xgboost.pc.in ${xgboost_BINARY_DIR}/xgboost.pc @ONLY)

  install(
    FILES ${xgboost_BINARY_DIR}/xgboost.pc
    DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
endif (ADD_PKGCONFIG)
endif()
@@ -10,8 +10,8 @@ The Project Management Committee(PMC) consists group of active committers that m
  - Tianqi is a Ph.D. student working on large-scale machine learning. He is the creator of the project.
* [Michael Benesty](https://github.com/pommedeterresautee)
  - Michael is a lawyer and data scientist in France. He is the creator of XGBoost interactive analysis module in R.
* [Yuan Tang](https://github.com/terrytangyuan), Akuity
  - Yuan is a founding engineer at Akuity. He contributed mostly in R and Python packages.
* [Yuan Tang](https://github.com/terrytangyuan), Red Hat
  - Yuan is a principal software engineer at Red Hat. He contributed mostly in R and Python packages.
* [Nan Zhu](https://github.com/CodingCat), Uber
  - Nan is a software engineer in Uber. He contributed mostly in JVM packages.
* [Jiaming Yuan](https://github.com/trivialfis)
145 Makefile
@@ -1,145 +0,0 @@
ifndef DMLC_CORE
	DMLC_CORE = dmlc-core
endif

ifndef RABIT
	RABIT = rabit
endif

ROOTDIR = $(CURDIR)

# workarounds for some buggy old make & msys2 versions seen in windows
ifeq (NA, $(shell test ! -d "$(ROOTDIR)" && echo NA ))
	$(warning Attempting to fix non-existing ROOTDIR [$(ROOTDIR)])
	ROOTDIR := $(shell pwd)
	$(warning New ROOTDIR [$(ROOTDIR)] $(shell test -d "$(ROOTDIR)" && echo " is OK" ))
endif
MAKE_OK := $(shell "$(MAKE)" -v 2> /dev/null)
ifndef MAKE_OK
	$(warning Attempting to recover non-functional MAKE [$(MAKE)])
	MAKE := $(shell which make 2> /dev/null)
	MAKE_OK := $(shell "$(MAKE)" -v 2> /dev/null)
endif
$(warning MAKE [$(MAKE)] - $(if $(MAKE_OK),checked OK,PROBLEM))

include $(DMLC_CORE)/make/dmlc.mk

# set compiler defaults for OSX versus *nix
# let people override either
OS := $(shell uname)
ifeq ($(OS), Darwin)
	ifndef CC
		export CC = $(if $(shell which clang), clang, gcc)
	endif
	ifndef CXX
		export CXX = $(if $(shell which clang++), clang++, g++)
	endif
else
	# linux defaults
	ifndef CC
		export CC = gcc
	endif
	ifndef CXX
		export CXX = g++
	endif
endif

export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++14 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS)
CFLAGS += -I$(DMLC_CORE)/include -I$(RABIT)/include -I$(GTEST_PATH)/include

ifeq ($(TEST_COVER), 1)
	CFLAGS += -g -O0 -fprofile-arcs -ftest-coverage
else
	CFLAGS += -O3 -funroll-loops
endif

ifndef LINT_LANG
	LINT_LANG= "all"
endif

# specify tensor path
.PHONY: clean all lint clean_all doxygen rcpplint pypack Rpack Rbuild Rcheck

build/%.o: src/%.cc
	@mkdir -p $(@D)
	$(CXX) $(CFLAGS) -MM -MT build/$*.o $< >build/$*.d
	$(CXX) -c $(CFLAGS) $< -o $@

# This should be equivalent to $(ALL_OBJ) except for build/cli_main.o
amalgamation/xgboost-all0.o: amalgamation/xgboost-all0.cc
	$(CXX) -c $(CFLAGS) $< -o $@

rcpplint:
	python3 dmlc-core/scripts/lint.py xgboost ${LINT_LANG} R-package/src

lint: rcpplint
	python3 dmlc-core/scripts/lint.py --exclude_path python-package/xgboost/dmlc-core \
	  python-package/xgboost/include python-package/xgboost/lib \
	  python-package/xgboost/make python-package/xgboost/rabit \
	  python-package/xgboost/src --pylint-rc ${PWD}/python-package/.pylintrc xgboost \
	  ${LINT_LANG} include src python-package

ifeq ($(TEST_COVER), 1)
cover: check
	@- $(foreach COV_OBJ, $(COVER_OBJ), \
		gcov -pbcul -o $(shell dirname $(COV_OBJ)) $(COV_OBJ) > gcov.log || cat gcov.log; \
	)
endif

clean:
	$(RM) -rf build lib bin *~ */*~ */*/*~ */*/*/*~ */*.o */*/*.o */*/*/*.o #xgboost
	$(RM) -rf build_tests *.gcov tests/cpp/xgboost_test
	if [ -d "R-package/src" ]; then \
		cd R-package/src; \
		$(RM) -rf rabit src include dmlc-core amalgamation *.so *.dll; \
		cd $(ROOTDIR); \
	fi

clean_all: clean
	cd $(DMLC_CORE); "$(MAKE)" clean; cd $(ROOTDIR)
	cd $(RABIT); "$(MAKE)" clean; cd $(ROOTDIR)

# create pip source dist (sdist) pack for PyPI
pippack: clean_all
	cd python-package; python setup.py sdist; mv dist/*.tar.gz ..; cd ..

# Script to make a clean installable R package.
Rpack: clean_all
	rm -rf xgboost xgboost*.tar.gz
	cp -r R-package xgboost
	rm -rf xgboost/src/*.o xgboost/src/*.so xgboost/src/*.dll
	rm -rf xgboost/src/*/*.o
	rm -rf xgboost/demo/*.model xgboost/demo/*.buffer xgboost/demo/*.txt
	rm -rf xgboost/demo/runall.R
	cp -r src xgboost/src/src
	cp -r include xgboost/src/include
	cp -r amalgamation xgboost/src/amalgamation
	mkdir -p xgboost/src/rabit
	cp -r rabit/include xgboost/src/rabit/include
	cp -r rabit/src xgboost/src/rabit/src
	rm -rf xgboost/src/rabit/src/*.o
	mkdir -p xgboost/src/dmlc-core
	cp -r dmlc-core/include xgboost/src/dmlc-core/include
	cp -r dmlc-core/src xgboost/src/dmlc-core/src
	cp ./LICENSE xgboost
	cat R-package/src/Makevars.in|sed '2s/.*/PKGROOT=./' > xgboost/src/Makevars.in
	cat R-package/src/Makevars.win|sed '2s/.*/PKGROOT=./' > xgboost/src/Makevars.win
	rm -f xgboost/src/Makevars.win-e # OSX sed creates this extra file; remove it
	bash R-package/remove_warning_suppression_pragma.sh
	bash xgboost/remove_warning_suppression_pragma.sh
	rm xgboost/remove_warning_suppression_pragma.sh
	rm xgboost/CMakeLists.txt
	rm -rfv xgboost/tests/helper_scripts/

R ?= R

Rbuild: Rpack
	$(R) CMD build xgboost
	rm -rf xgboost

Rcheck: Rbuild
	$(R) CMD check --as-cran xgboost*.tar.gz

-include build/*.d
-include build/*/*.d
424 NEWS.md
@@ -1,8 +1,430 @@
XGBoost Change Log
==================

**Starting from 2.1.0, release note is recorded in the documentation.**

This file records the changes in xgboost library in reverse chronological order.

## 2.0.0 (2023 Aug 16)

We are excited to announce the release of XGBoost 2.0. This note will begin by covering some overall changes and then highlight specific updates to the package.

### Initial work on multi-target trees with vector-leaf outputs
We have been working on vector-leaf tree models for multi-target regression, multi-label classification, and multi-class classification in version 2.0. Previously, XGBoost would build a separate model for each target. However, with this new feature that's still being developed, XGBoost can build one tree for all targets. The feature has multiple benefits and trade-offs compared to the existing approach. It can help prevent overfitting, produce smaller models, and build trees that consider the correlation between targets. In addition, users can combine vector leaf and scalar leaf trees during a training session using a callback. Please note that the feature is still a work in progress, and many parts are not yet available. See #9043 for the current status. Related PRs: (#8538, #8697, #8902, #8884, #8895, #8898, #8612, #8652, #8698, #8908, #8928, #8968, #8616, #8922, #8890, #8872, #8889, #9509) Please note that only the `hist` (default) tree method on CPU can be used for building vector leaf trees at the moment.
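As a rough sketch of what the interface looks like (the `multi_strategy="multi_output_tree"` spelling and the synthetic data below are illustrative assumptions, not part of this note):

```python
import numpy as np
import xgboost as xgb

# Synthetic multi-target regression data: 3 targets per sample.
rng = np.random.default_rng(0)
X = rng.normal(size=(256, 10))
y = np.stack([X[:, 0] + X[:, 1], X[:, 2] ** 2, X[:, 3] - X[:, 4]], axis=1)

# multi_strategy="multi_output_tree" requests one vector-leaf tree covering
# all targets instead of one model per target (CPU `hist` only for now).
reg = xgb.XGBRegressor(tree_method="hist", multi_strategy="multi_output_tree")
reg.fit(X, y)
print(reg.predict(X[:2]).shape)  # (2, 3): one output per target
```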

### New `device` parameter

A new `device` parameter is set to replace the existing `gpu_id`, `gpu_hist`, `gpu_predictor`, `cpu_predictor`, `gpu_coord_descent`, and the PySpark-specific parameter `use_gpu`. Onward, users need only the `device` parameter to select which device to run along with the ordinal of the device. For more information, please see our document page (https://xgboost.readthedocs.io/en/stable/parameter.html#general-parameters). For example, with `device="cuda", tree_method="hist"`, XGBoost will run the `hist` tree method on GPU. (#9363, #8528, #8604, #9354, #9274, #9243, #8896, #9129, #9362, #9402, #9385, #9398, #9390, #9386, #9412, #9507, #9536). The old behavior of ``gpu_hist`` is preserved but deprecated. In addition, the `predictor` parameter is removed.
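A minimal sketch of the parameter in action (the toy data is illustrative only):

```python
import xgboost as xgb
from sklearn.datasets import make_regression

X, y = make_regression(n_samples=1024, n_features=20, random_state=0)
Xy = xgb.DMatrix(X, label=y)

# One knob replaces gpu_id/gpu_hist/gpu_predictor: the device string picks
# the device and, optionally, its ordinal (e.g. "cuda:1" for the second GPU).
params = {"device": "cuda", "tree_method": "hist"}
booster = xgb.train(params, Xy, num_boost_round=10)
```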

### `hist` is now the default tree method
Starting from 2.0, the `hist` tree method will be the default. In previous versions, XGBoost chose `approx` or `exact` depending on the input data and training environment. The new default can help XGBoost train models more efficiently and consistently. (#9320, #9353)

### GPU-based approx tree method
There's initial support for using the `approx` tree method on GPU. The performance of `approx` is not yet well optimized but is feature complete except for the JVM packages. It can be accessed through the use of the parameter combination `device="cuda", tree_method="approx"`. (#9414, #9399, #9478). Please note that the Scala-based Spark interface is not yet supported.

### Optimize and bound the size of the histogram on CPU, to control memory footprint

XGBoost has a new parameter `max_cached_hist_node` for users to limit the CPU cache size for histograms. It can help prevent XGBoost from caching histograms too aggressively. Without the cache, performance is likely to decrease. However, the size of the cache grows exponentially with the depth of the tree. The limit can be crucial when growing deep trees. In most cases, users need not configure this parameter as it does not affect the model's accuracy. (#9455, #9441, #9440, #9427, #9400).
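For the rare deep-tree workload where the histogram cache dominates memory, the limit can be tightened; a hedged sketch (the chosen value is arbitrary):

```python
import xgboost as xgb
from sklearn.datasets import make_regression

X, y = make_regression(n_samples=4096, n_features=32, random_state=0)
Xy = xgb.DMatrix(X, label=y)

# A smaller max_cached_hist_node bounds how many node histograms `hist`
# keeps cached, trading some speed for a flatter memory profile.
params = {"tree_method": "hist", "max_depth": 12, "max_cached_hist_node": 1024}
booster = xgb.train(params, Xy, num_boost_round=10)
```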

Along with the cache limit, XGBoost also reduces the memory usage of the `hist` and `approx` tree methods on distributed systems by cutting the size of the cache by half. (#9433)

### Improved external memory support
There is some exciting development around external memory support in XGBoost. It's still an experimental feature, but the performance has been significantly improved with the default `hist` tree method. We replaced the old file IO logic with memory mapping. In addition to performance, we have reduced CPU memory usage and added extensive documentation. Beginning from 2.0.0, we encourage users to try it with the `hist` tree method when the memory saving by `QuantileDMatrix` is not sufficient. (#9361, #9317, #9282, #9315, #8457)
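The entry point is still the `DataIter` protocol; the sketch below follows the shape of the external-memory tutorial, with two in-memory shards standing in for data loaded from disk:

```python
import os
import numpy as np
import xgboost

class Iterator(xgboost.DataIter):
    """Yields one batch at a time so XGBoost can build an on-disk cache."""

    def __init__(self, shards):
        self._shards = shards  # list of (X, y) pairs
        self._it = 0
        # cache_prefix tells XGBoost where to place the memory-mapped cache.
        super().__init__(cache_prefix=os.path.join(".", "cache"))

    def next(self, input_data):
        if self._it == len(self._shards):
            return 0  # 0 signals the end of one pass over the data
        X, y = self._shards[self._it]
        input_data(data=X, label=y)  # hand the current batch to XGBoost
        self._it += 1
        return 1  # 1 signals that more batches remain

    def reset(self):
        self._it = 0

rng = np.random.default_rng(0)
shards = [(rng.normal(size=(512, 8)), rng.normal(size=512)) for _ in range(2)]
Xy = xgboost.DMatrix(Iterator(shards))  # builds the external-memory cache
booster = xgboost.train({"tree_method": "hist"}, Xy, num_boost_round=10)
```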
|
||||
|
||||
### Learning to rank
|
||||
We created a brand-new implementation for the learning-to-rank task. With the latest version, XGBoost gained a set of new features for ranking task including:
|
||||
|
||||
- A new parameter `lambdarank_pair_method` for choosing the pair construction strategy.
|
||||
- A new parameter `lambdarank_num_pair_per_sample` for controlling the number of samples for each group.
|
||||
- An experimental implementation of unbiased learning-to-rank, which can be accessed using the `lambdarank_unbiased` parameter.
|
||||
- Support for custom gain function with `NDCG` using the `ndcg_exp_gain` parameter.
|
||||
- Deterministic GPU computation for all objectives and metrics.
|
||||
- `NDCG` is now the default objective function.
|
||||
- Improved performance of metrics using caches.
|
||||
- Support scikit-learn utilities for `XGBRanker`.
|
||||
- Extensive documentation on how learning-to-rank works with XGBoost.
|
||||
|
||||
For more information, please see the [tutorial](https://xgboost.readthedocs.io/en/latest/tutorials/learning_to_rank.html). Related PRs: (#8771, #8692, #8783, #8789, #8790, #8859, #8887, #8893, #8906, #8931, #9075, #9015, #9381, #9336, #8822, #9222, #8984, #8785, #8786, #8768)
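Putting a few of these parameters together, a minimal sketch with the scikit-learn ranker (synthetic relevance data):

```python
import numpy as np
import xgboost as xgb

rng = np.random.default_rng(0)
X = rng.random((100, 5))
y = rng.integers(0, 4, size=100)               # graded relevance labels
qid = np.sort(rng.integers(0, 10, size=100))   # query ids, grouped together

ranker = xgb.XGBRanker(
    objective="rank:ndcg",            # NDCG is now the default objective
    lambdarank_pair_method="topk",    # pair construction strategy
    lambdarank_num_pair_per_sample=8,
)
ranker.fit(X, y, qid=qid)
scores = ranker.predict(X)
```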
|
||||
|
||||
### Automatically estimated intercept
|
||||
|
||||
In the previous version, `base_score` was a constant that could be set as a training parameter. In the new version, XGBoost can automatically estimate this parameter based on input labels for optimal accuracy. (#8539, #8498, #8272, #8793, #8607)
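One way to inspect the estimated intercept is through the saved model configuration; the exact JSON path below is an assumption based on the model format:

```python
import json
import numpy as np
import xgboost as xgb

X, y = np.random.rand(256, 4), np.random.rand(256) * 10
booster = xgb.train({"tree_method": "hist"}, xgb.DMatrix(X, label=y), 1)

# When base_score is left unset, it is estimated from the labels and
# stored in the learner's model parameters.
config = json.loads(booster.save_config())
print(config["learner"]["learner_model_param"]["base_score"])
```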
|
||||
|
||||
### Quantile regression
|
||||
The XGBoost algorithm now supports quantile regression, which involves minimizing the quantile loss (also called "pinball loss"). Furthermore, XGBoost allows for training with multiple target quantiles simultaneously with one tree per quantile. (#8775, #8761, #8760, #8758, #8750)
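A minimal sketch of training three quantiles at once (synthetic data):

```python
import numpy as np
import xgboost as xgb

X, y = np.random.rand(256, 4), np.random.rand(256)

# With a vector-valued quantile_alpha, one tree is built per quantile.
params = {
    "objective": "reg:quantileerror",
    "quantile_alpha": np.array([0.1, 0.5, 0.9]),
    "tree_method": "hist",
}
booster = xgb.train(params, xgb.DMatrix(X, label=y), num_boost_round=10)
preds = booster.inplace_predict(X)  # shape (256, 3): one column per quantile
```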
|
||||
|
||||
### L1 and quantile regression now support learning rate
|
||||
Both objectives use adaptive trees due to the lack of proper Hessian values. In the new version, XGBoost can scale the leaf value with the learning rate accordingly. (#8866)
|
||||
|
||||
### Export cut values
|
||||
|
||||
Using the Python or the C package, users can export the quantile values (not to be confused with quantile regression) used for the `hist` tree method. (#9356)
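A small sketch of what the export looks like from Python; the getter name and its CSR-style return value are assumptions based on the 2.0 API:

```python
import numpy as np
import xgboost as xgb

# QuantileDMatrix computes the histogram cuts eagerly.
Xy = xgb.QuantileDMatrix(np.random.rand(256, 4), label=np.random.rand(256))
indptr, values = Xy.get_quantile_cut()
print(values[indptr[0]:indptr[1]])  # cut points for the first feature
```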
|
||||
|
||||
### Column-based split and federated learning
|
||||
We made progress on column-based split for federated learning. In 2.0, the `approx`, `hist`, and vector-leaf `hist` tree methods can work with column-based data split, along with support for vertical federated learning. Work on GPU support is still ongoing; stay tuned. (#8576, #8468, #8442, #8847, #8811, #8985, #8623, #8568, #8828, #8932, #9081, #9102, #9103, #9124, #9120, #9367, #9370, #9343, #9171, #9346, #9270, #9244, #8494, #8434, #8742, #8804, #8710, #8676, #9020, #9002, #9058, #9037, #9018, #9295, #9006, #9300, #8765, #9365, #9060)
|
||||
|
||||
### PySpark
|
||||
Since its initial introduction, the PySpark interface has gained some new features and optimizations in 2.0.
|
||||
|
||||
- GPU-based prediction. (#9292, #9542)
|
||||
- Optimization for data initialization by avoiding the stack operation. (#9088)
|
||||
- Support for predicting feature contributions. (#8633)
|
||||
- Python typing support. (#9156, #9172, #9079, #8375)
|
||||
- `use_gpu` is deprecated. The `device` parameter is preferred.
|
||||
- Update eval_metric validation to support list of strings (#8826)
|
||||
- Improved logs for training (#9449)
|
||||
- Maintenance, including refactoring and document updates (#8324, #8465, #8605, #9202, #9460, #9302, #8385, #8630, #8525, #8496)
|
||||
- Fix for GPU setup. (#9495)
|
||||
|
||||
### Other General New Features
|
||||
Here's a list of new features that don't have a dedicated section but are general to all language bindings.
|
||||
|
||||
- Use array interface for CSC matrix. This helps XGBoost to use a consistent number of threads and align the interface of the CSC matrix with other interfaces. In addition, memory usage is likely to decrease with CSC input thanks to on-the-fly type conversion. (#8672)
|
||||
- CUDA compute 90 is now part of the default build. (#9397)
|
||||
|
||||
### Other General Optimization
|
||||
These optimizations are general to all language bindings. For language-specific optimization, please visit the corresponding sections.
|
||||
|
||||
- Performance for input with `array_interface` on CPU (like `numpy`) is significantly improved. (#9090)
|
||||
- Some optimization with CUDA for data initialization. (#9199, #9209, #9144)
|
||||
- Use the latest thrust policy to prevent synchronizing GPU devices. (#9212)
|
||||
- XGBoost now uses a per-thread CUDA stream, which prevents synchronization with other streams. (#9416, #9396, #9413)
|
||||
|
||||
### Notable breaking change
|
||||
|
||||
Other than the aforementioned change with the `device` parameter, here's a list of breaking changes affecting all packages.
|
||||
|
||||
- Users must specify the format for text input (#9077). However, we suggest using third-party data structures such as `numpy.ndarray` instead of relying on text inputs. See https://github.com/dmlc/xgboost/issues/9472 for more info.
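For example, the format is now stated explicitly in the URI (assuming a local `train.csv` whose first column is the label):

```python
import xgboost as xgb

# Previously the format was guessed from the file extension; now it has
# to be spelled out as a URI parameter.
dtrain = xgb.DMatrix("train.csv?format=csv&label_column=0")
```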
|
||||
|
||||
### Notable bug fixes
|
||||
|
||||
Some noteworthy bug fixes that are not related to specific language bindings are listed in this section.
|
||||
|
||||
- Some language environments use a different thread to perform garbage collection, which breaks the thread-local cache used in XGBoost. XGBoost 2.0 implements a new thread-safe cache using a lightweight lock to replace the thread-local cache. (#8851)
|
||||
- Fix model IO by clearing the prediction cache. (#8904)
|
||||
- `inf` is checked during data construction. (#8911)
|
||||
- Preserve order of saved updaters configuration. Usually, this is not an issue unless the `updater` parameter is used instead of the `tree_method` parameter (#9355)
|
||||
- Fix GPU memory allocation issue with categorical splits. (#9529)
|
||||
- Handle escape sequence like `\t\n` in feature names for JSON model dump. (#9474)
|
||||
- Normalize file path for model IO and text input. This handles short paths on Windows and paths that contain `~` on Unix (#9463). In addition, all path inputs are required to be encoded in UTF-8 (#9448, #9443)
|
||||
- Fix integer overflow on H100. (#9380)
|
||||
- Fix weighted sketching on GPU with categorical features. (#9341)
|
||||
- Fix metric serialization. The bug might cause some of the metrics to be dropped during evaluation. (#9405)
|
||||
- Fix compilation errors on MSVC x86 targets (#8823)
|
||||
- Pick up the dmlc-core fix for the CSV parser. (#8897)
|
||||
|
||||
|
||||
### Documentation
|
||||
Aside from documents for new features, we have made many smaller updates to improve the user experience, from troubleshooting guides to typo fixes.
|
||||
|
||||
- Explain CPU/GPU interop. (#8450)
|
||||
- Guide to troubleshoot NCCL errors. (#8943, #9206)
|
||||
- Add a note for rabit port selection. (#8879)
|
||||
- How to build the docs using conda (#9276)
|
||||
- Explain how to obtain reproducible results on distributed systems. (#8903)
|
||||
|
||||
* Fixes and small updates to document and demonstration scripts. (#8626, #8436, #8995, #8907, #8923, #8926, #9358, #9232, #9201, #9469, #9462, #9458, #8543, #8597, #8401, #8784, #9213, #9098, #9008, #9223, #9333, #9434, #9435, #9415, #8773, #8752, #9291, #9549)
|
||||
|
||||
### Python package
|
||||
* New Features and Improvements
|
||||
- Support primitive types of pyarrow-backed pandas dataframe. (#8653)
|
||||
- Warning messages from XGBoost are now emitted using the Python `warnings` module. (#9387)
|
||||
- Users can now format the values printed near the bars on the `plot_importance` plot; see the sketch after this list. (#8540)
|
||||
- XGBoost has improved half-type support (float16) with pandas, cupy, and cuDF. With GPU input, the handling is through CUDA `__half` type, and no data copy is made. (#8487, #9207, #8481)
|
||||
- Support `Series` and Python primitive types in `inplace_predict` and `QuantileDMatrix` (#8547, #8542)
|
||||
- Support all pandas' nullable integer types. (#8480)
|
||||
- Custom metric with the scikit-learn interface now supports `sample_weight`. (#8706)
|
||||
- Enable installation of the Python package with a system lib in a virtual environment (#9349)
|
||||
- Raise if expected workers are not alive in `xgboost.dask.train` (#9421)
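To illustrate two of the items above (pyarrow-backed pandas input and the importance-plot value formatting), here's a minimal sketch; it assumes pandas 2.x and matplotlib are installed:

```python
import numpy as np
import pandas as pd
import xgboost as xgb

rng = np.random.default_rng(0)
# pyarrow-backed pandas columns can be passed to XGBoost directly.
df = pd.DataFrame({"a": rng.random(100), "b": rng.random(100)}).convert_dtypes(
    dtype_backend="pyarrow"
)
y = df["a"].to_numpy(dtype=float) + rng.normal(scale=0.1, size=100)

reg = xgb.XGBRegressor(n_estimators=8).fit(df, y)

# Format the values printed next to the bars on the importance plot.
xgb.plot_importance(reg, values_format="{v:.2f}")
```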
|
||||
|
||||
* Optimization
|
||||
- Cache transformed data in `QuantileDMatrix` for efficiency. (#8666, #9445)
|
||||
- Take datatable as row-major input. (#8472)
|
||||
- Remove unnecessary conversions between data structures (#8546)
|
||||
|
||||
* Adopt modern Python packaging conventions (PEP 517, PEP 518, PEP 621)
|
||||
- XGBoost adopted the modern Python packaging conventions. The old setup script `setup.py` is now replaced with the new configuration file `pyproject.toml`. Along with this, XGBoost now supports Python 3.11. (#9021, #9112, #9114, #9115) Consult the latest documentation for the updated instructions to build and install XGBoost.
|
||||
|
||||
* Fixes
|
||||
- `DataIter` now accepts only keyword arguments. (#9431)
|
||||
- Fix empty DMatrix with categorical features. (#8739)
|
||||
- Convert ``DaskXGBClassifier.classes_`` to an array (#8452)
|
||||
- Define `best_iteration` only if early stopping is used to be consistent with documented behavior. (#9403)
|
||||
- Make feature validation immutable. (#9388)
|
||||
|
||||
* Breaking changes
|
||||
- As discussed in the new `device` parameter section, the `predictor` parameter is now removed. (#9129)
|
||||
- Remove support for single-string feature info. Feature type and names should be a sequence of strings (#9401)
|
||||
- Remove parameters in the `save_model` call for the scikit-learn interface. (#8963)
|
||||
- Remove `ntree_limit` in the Python package; it was deprecated in previous versions. (#8345)
|
||||
|
||||
* Maintenance including formatting and refactoring along with type hints.
|
||||
- More consistent use of `black` and `isort` for code formatting (#8420, #8748, #8867)
|
||||
- Improved type support. Most of the type changes happen in the PySpark module; here, we list the remaining changes. (#8444, #8617, #9197, #9005)
|
||||
- Set `enable_categorical` to True in predict. (#8592)
|
||||
- Some refactoring and updates for tests (#8395, #8372, #8557, #8379, #8702, #9459, #9316, #8446, #8695, #8409, #8993, #9480)
|
||||
|
||||
* Documentation
|
||||
- Add introduction and notes for the sklearn interface. (#8948)
|
||||
- Demo for using dask for hyper-parameter optimization. (#8891)
|
||||
- Document all supported Python input types. (#8643)
|
||||
- Other documentation updates (#8944, #9304)
|
||||
|
||||
### R package
|
||||
- Use the new data consumption interface for CSR and CSC. This provides better control for the number of threads and improves performance. (#8455, #8673)
|
||||
- Accept multiple evaluation metrics during training. (#8657)
|
||||
- Fix integer inputs with `NA`. (#9522)
|
||||
- Some refactoring for the R package (#8545, #8430, #8614, #8624, #8613, #9457, #8689, #8563, #9461, #8647, #8564, #8565, #8736, #8610, #8609, #8599, #8704, #9456, #9450, #9476, #9477, #9481). Special thanks to @jameslamb.
|
||||
- Document updates (#8886, #9323, #9437, #8998)
|
||||
|
||||
### JVM packages
|
||||
The following are changes specific to the various JVM-based packages.
|
||||
|
||||
- Stop using Rabit in prediction (#9054)
|
||||
- Set feature_names and feature_types in jvm-packages. This is to prepare support for categorical features (#9364)
|
||||
- Scala 2.13 support. (#9099)
|
||||
- Change training stage from `ResultStage` to `ShuffleMapStage` (#9423)
|
||||
- Automatically set the max/min direction for the best score during early stopping. (#9404)
|
||||
* Revised support for `flink` (#9046)
|
||||
|
||||
* Breaking changes
|
||||
- Scala-based tracker is removed. (#9078, #9045)
|
||||
- Change `DeviceQuantileDmatrix` into `QuantileDMatrix` (#8461)
|
||||
|
||||
* Maintenance (#9253, #9166, #9395, #9389, #9224, #9233, #9351, #9479)
|
||||
|
||||
* CI bot PRs
|
||||
We employed GitHub's Dependabot to help us keep the dependencies up-to-date for the JVM packages. With the bot's help, we have cleared up all the dependencies that were lagging behind (#8501, #8507).
|
||||
|
||||
Here's a list of dependency update PRs, including those made by Dependabot (#8456, #8560, #8571, #8561, #8562, #8600, #8594, #8524, #8509, #8548, #8549, #8533, #8521, #8534, #8532, #8516, #8503, #8531, #8530, #8518, #8512, #8515, #8517, #8506, #8504, #8502, #8629, #8815, #8813, #8814, #8877, #8876, #8875, #8874, #8873, #9049, #9070, #9073, #9039, #9083, #8917, #8952, #8980, #8973, #8962, #9252, #9208, #9131, #9136, #9219, #9160, #9158, #9163, #9184, #9192, #9265, #9268, #8882, #8837, #8662, #8661, #8390, #9056, #8508, #8925, #8920, #9149, #9230, #9097, #8648, #9203, #8593).
|
||||
|
||||
### Maintenance
|
||||
Maintenance work includes refactoring, fixing small issues that don't affect end users. (#9256, #8627, #8756, #8735, #8966, #8864, #8747, #8892, #9057, #8921, #8949, #8941, #8942, #9108, #9125, #9155, #9153, #9176, #9447, #9444, #9436, #9438, #9430, #9200, #9210, #9055, #9014, #9004, #8999, #9154, #9148, #9283, #9246, #8888, #8900, #8871, #8861, #8858, #8791, #8807, #8751, #8703, #8696, #8693, #8677, #8686, #8665, #8660, #8386, #8371, #8410, #8578, #8574, #8483, #8443, #8454, #8733)
|
||||
|
||||
### CI
|
||||
- Build pip wheel with RMM support (#9383)
|
||||
- Other CI updates including updating dependencies and work on the CI infrastructure. (#9464, #9428, #8767, #9394, #9278, #9214, #9234, #9205, #9034, #9104, #8878, #9294, #8625, #8806, #8741, #8707, #8381, #8382, #8388, #8402, #8397, #8445, #8602, #8628, #8583, #8460, #9544)
|
||||
|
||||
## 1.7.6 (2023 Jun 16)
|
||||
|
||||
This is a patch release for bug fixes. The CRAN package for the R binding is kept at 1.7.5.
|
||||
|
||||
### Bug Fixes
|
||||
* Fix distributed training with mixed dense and sparse partitions. (#9272)
|
||||
* Fix monotone constraints on CPU with large trees. (#9122)
|
||||
* [spark] Make the spark model have the same UID as its estimator (#9022)
|
||||
* Optimize prediction with `QuantileDMatrix`. (#9096)
|
||||
|
||||
### Document
|
||||
* Improve doxygen (#8959)
|
||||
* Update the cuDF pip index URL. (#9106)
|
||||
|
||||
### Maintenance
|
||||
* Fix tests with pandas 2.0. (#9014)
|
||||
|
||||
## 1.7.5 (2023 Mar 30)
|
||||
This is a patch release for bug fixes.
|
||||
|
||||
* The C++ requirement is updated to C++17, along with which CUDA 11.8 is used as the default CUDA toolkit (CTK). (#8860, #8855, #8853)
|
||||
* Fix import for pyspark ranker. (#8692)
|
||||
* Fix Windows binary wheel to be compatible with Poetry (#8991)
|
||||
* Fix GPU hist with column sampling. (#8850)
|
||||
* Make sure iterative DMatrix is properly initialized. (#8997)
|
||||
* [R] Update link in document. (#8998)
|
||||
|
||||
## 1.7.4 (2023 Feb 16)
|
||||
This is a patch release for bug fixes.
|
||||
|
||||
* [R] Fix OpenMP detection on macOS. (#8684)
|
||||
* [Python] Make sure input numpy array is aligned. (#8690)
|
||||
* Fix feature interaction with column sampling in gpu_hist evaluator. (#8754)
|
||||
* Fix GPU L1 error. (#8749)
|
||||
* [PySpark] Fix feature types param (#8772)
|
||||
* Fix ranking with quantile dmatrix and group weight. (#8762)
|
||||
|
||||
## 1.7.3 (2023 Jan 6)
|
||||
This is a patch release for bug fixes.
|
||||
|
||||
* [Breaking] XGBoost Sklearn estimator method `get_params` no longer returns internally configured values. (#8634)
|
||||
* Fix linalg iterator, which may crash the L1 error. (#8603)
|
||||
* Fix loading pickled GPU model with a CPU-only XGBoost build. (#8632)
|
||||
* Fix inference with unseen categories with categorical features. (#8591, #8602)
|
||||
* CI fixes. (#8620, #8631, #8579)
|
||||
|
||||
## v1.7.2 (2022 Dec 8)
|
||||
This is a patch release for bug fixes.
|
||||
|
||||
* Work with newer thrust and libcudacxx (#8432)
|
||||
* Support null value in CUDA array interface namespace. (#8486)
|
||||
* Use `getsockname` instead of `SO_DOMAIN` on AIX. (#8437)
|
||||
* [pyspark] Make QDM optional based on a cuDF check (#8471)
|
||||
* [pyspark] sort qid for SparkRanker. (#8497)
|
||||
* [dask] Properly await async method client.wait_for_workers. (#8558)
|
||||
|
||||
* [R] Fix CRAN test notes. (#8428)
|
||||
|
||||
* [doc] Fix outdated document [skip ci]. (#8527)
|
||||
* [CI] Fix github action mismatched glibcxx. (#8551)
|
||||
|
||||
## v1.7.1 (2022 Nov 3)
|
||||
This is a patch release to incorporate the following hotfix:
|
||||
|
||||
* Add back xgboost.rabit for backwards compatibility (#8411)
|
||||
|
||||
|
||||
## v1.7.0 (2022 Oct 20)
|
||||
|
||||
We are excited to announce the feature-packed XGBoost 1.7 release. These release notes will walk through some of the major new features first, then summarize other improvements and language-binding-specific changes.
|
||||
|
||||
### PySpark
|
||||
|
||||
XGBoost 1.7 features initial support for PySpark integration. The new interface is adapted from the existing PySpark XGBoost interface developed by Databricks, with additional features like `QuantileDMatrix` and the rapidsai plugin (GPU pipeline) support. The new Spark XGBoost Python estimators not only benefit from PySpark ML facilities for powerful distributed computing but also enjoy the rest of the Python ecosystem. Users can define a custom objective, callbacks, and metrics in Python and use them with this interface on distributed clusters. The support is labeled as experimental, with more features to come in future releases. For a brief introduction please visit the tutorial on XGBoost's [document page](https://xgboost.readthedocs.io/en/latest/tutorials/spark_estimator.html). (#8355, #8344, #8335, #8284, #8271, #8283, #8250, #8231, #8219, #8245, #8217, #8200, #8173, #8172, #8145, #8117, #8131, #8088, #8082, #8085, #8066, #8068, #8067, #8020, #8385)
|
||||
|
||||
Due to its initial support status, the new interface has some limitations; categorical features and multi-output models are not yet supported.
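A minimal sketch of the estimator API (`raw_df` stands in for an existing Spark DataFrame with numeric columns `f0`, `f1`, `f2` and a `label` column):

```python
from pyspark.ml.feature import VectorAssembler
from xgboost.spark import SparkXGBClassifier

# Assemble feature columns into a single vector column, as usual for
# PySpark ML pipelines.
assembler = VectorAssembler(inputCols=["f0", "f1", "f2"], outputCol="features")
train_df = assembler.transform(raw_df)

# Custom Python objectives, metrics, and callbacks can be used with this
# estimator just like with the single-node scikit-learn interface.
clf = SparkXGBClassifier(features_col="features", label_col="label", num_workers=2)
model = clf.fit(train_df)
scored = model.transform(train_df)
```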
|
||||
|
||||
### Development of categorical data support
|
||||
More progress on the experimental support for categorical features. In 1.7, XGBoost can handle missing values in categorical features and features a new parameter, `max_cat_threshold`, which limits the number of categories that can be used in the split evaluation. The parameter is enabled when the partitioning algorithm is used and helps prevent over-fitting. Also, the sklearn interface can now accept the `feature_types` parameter, so categorical features can be used with input types other than dataframes. (#8280, #7821, #8285, #8080, #7948, #7858, #7853, #8212, #7957, #7937, #7934)
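A minimal sketch with a pandas categorical column (synthetic data; the threshold value is illustrative):

```python
import pandas as pd
import xgboost as xgb

df = pd.DataFrame({
    "color": pd.Categorical(["red", "green", "blue", "red"] * 32),
    "size": range(128),
})
y = [0, 1] * 64

# max_cat_threshold bounds how many categories are considered when
# evaluating a partition-based categorical split.
clf = xgb.XGBClassifier(
    tree_method="hist", enable_categorical=True, max_cat_threshold=16
)
clf.fit(df, y)
```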
|
||||
|
||||
|
||||
### Experimental support for federated learning and new communication collective
|
||||
|
||||
An exciting addition to XGBoost is the experimental federated learning support. The federated learning is implemented with a gRPC federated server that aggregates allreduce calls, and federated clients that train on local data and use existing tree methods (approx, hist, gpu_hist). Currently, this only supports horizontal federated learning (samples are split across participants, and each participant has all the features and labels). Future plans include vertical federated learning (features split across participants), and stronger privacy guarantees with homomorphic encryption and differential privacy. See [Demo with NVFlare integration](demo/nvflare/README.md) for example usage with nvflare.
|
||||
|
||||
As part of the work, XGBoost 1.7 has replaced the old Rabit module with the new collective module as the network communication interface, with added support for runtime backend selection. In previous versions, the backend was defined at compile time and could not be changed once built. In this new release, users can choose between `rabit` and `federated`. (#8029, #8351, #8350, #8342, #8340, #8325, #8279, #8181, #8027, #7958, #7831, #7879, #8257, #8316, #8242, #8057, #8203, #8038, #7965, #7930, #7911)
|
||||
|
||||
The feature is available in the public PyPI binary package for testing.
|
||||
|
||||
### Quantile DMatrix
|
||||
Before 1.7, XGBoost had an internal data structure called `DeviceQuantileDMatrix` (and its distributed version). We have now extended its support to CPU and renamed it to `QuantileDMatrix`. This data structure is used for optimizing memory usage for the `hist` and `gpu_hist` tree methods. The new feature helps reduce CPU memory usage significantly, especially for dense data. The new `QuantileDMatrix` can be initialized from both CPU and GPU data, and regardless of where the data comes from, the constructed instance can be used by both the CPU and GPU algorithms, including training and prediction (with some conversion overhead if the device of the data and the training algorithm don't match). Also, a new parameter `ref` is added to `QuantileDMatrix`, which can be used to construct validation/test datasets. Lastly, it's set as the default in the scikit-learn interface when a supported tree method is specified by users. (#7889, #7923, #8136, #8215, #8284, #8268, #8220, #8346, #8327, #8130, #8116, #8103, #8094, #8086, #7898, #8060, #8019, #8045, #7901, #7912, #7922)
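A minimal sketch of constructing training and validation `QuantileDMatrix` objects with shared quantile cuts (synthetic data):

```python
import numpy as np
import xgboost as xgb

X_train, y_train = np.random.rand(512, 8), np.random.rand(512)
X_valid, y_valid = np.random.rand(128, 8), np.random.rand(128)

dtrain = xgb.QuantileDMatrix(X_train, label=y_train)
# `ref` makes the validation set reuse the training set's histogram cuts
# instead of computing its own.
dvalid = xgb.QuantileDMatrix(X_valid, label=y_valid, ref=dtrain)

booster = xgb.train(
    {"tree_method": "hist"},
    dtrain,
    num_boost_round=10,
    evals=[(dvalid, "valid")],
)
```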
|
||||
|
||||
### Mean absolute error
|
||||
The mean absolute error is a new member of the collection of objectives in XGBoost. It's noteworthy since MAE has a zero Hessian, which is unusual for XGBoost, as XGBoost relies on Newton optimization. Without valid Hessian values, the convergence speed can be slow. As part of the support for MAE, we added line searches into the XGBoost training algorithm to overcome the difficulty of training without valid Hessian values. In the future, we will extend the line search to other objectives where appropriate, for faster convergence. (#8343, #8107, #7812, #8380)
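Using the new objective is a one-liner (synthetic data for illustration):

```python
import numpy as np
import xgboost as xgb

X, y = np.random.rand(256, 4), np.random.rand(256)

# MAE has a zero Hessian, so XGBoost applies the line search described
# above instead of relying on Newton steps alone.
booster = xgb.train(
    {"objective": "reg:absoluteerror", "tree_method": "hist"},
    xgb.DMatrix(X, label=y),
    num_boost_round=10,
)
```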
|
||||
|
||||
### XGBoost on Browser
|
||||
With the help of the [pyodide](https://github.com/pyodide/pyodide) project, you can now run XGBoost on browsers. (#7954, #8369)
|
||||
|
||||
### Experimental IPv6 Support for Dask
|
||||
|
||||
With the growing adoption of the new internet protocol, XGBoost has joined the club. In the latest release, the Dask interface can be used on IPv6 clusters; see XGBoost's Dask tutorial for details. (#8225, #8234)
|
||||
|
||||
### Optimizations
|
||||
We have new optimizations for both the `hist` and `gpu_hist` tree methods to make XGBoost's training even more efficient.
|
||||
|
||||
* Hist
|
||||
Hist now supports an optional by-column histogram build, which is automatically configured based on various conditions of the input data. This helps the XGBoost CPU hist algorithm scale better with different shapes of training datasets. (#8233, #8259). Also, the histogram build kernel can now better utilize CPU registers (#8218)
|
||||
|
||||
* GPU Hist
|
||||
GPU hist performance is significantly improved for wide datasets. GPU hist now supports batched node build, which reduces kernel latency and increases throughput. The improvement is particularly significant when growing deep trees with the default ``depthwise`` policy. (#7919, #8073, #8051, #8118, #7867, #7964, #8026)
|
||||
|
||||
### Breaking Changes
|
||||
Breaking changes made in the 1.7 release are summarized below.
|
||||
- The `grow_local_histmaker` updater is removed. This updater was rarely used in practice and had no tests. We decided to remove it and have XGBoost focus on other, more efficient algorithms. (#7992, #8091)
|
||||
- The single-precision histogram is removed due to its lack of accuracy caused by significant floating point error. In some cases, the error can be difficult to detect due to log-scale operations, which makes the parameter dangerous to use. (#7892, #7828)
|
||||
- Deprecated CUDA architectures are no longer supported in the release binaries. (#7774)
|
||||
- As part of the federated learning development, the `rabit` module is replaced with the new `collective` module. It's a drop-in replacement with added runtime backend selection, see the federated learning section for more details (#8257)
|
||||
|
||||
### General new features and improvements
|
||||
Before diving into package-specific changes, some general new features other than those listed at the beginning are summarized here.
|
||||
* Users of `DMatrix` and `QuantileDMatrix` can now get the data back from XGBoost. In previous versions, only getters for meta info like labels were available. The new method is available in Python (`DMatrix::get_data`) and C. (#8269, #8323)
|
||||
* In previous versions, the GPU histogram tree method could generate phantom gradients for missing values due to floating point error. We fixed this error in this release, and XGBoost is now much better equipped to handle floating point errors when training on GPU. (#8274, #8246)
|
||||
* Parameter validation is no longer experimental. (#8206)
|
||||
* C pointer parameters and JSON parameters are vigorously checked. (#8254, #8254)
|
||||
* Improved handling of JSON model input. (#7953, #7918)
|
||||
* Support IBM i OS (#7920, #8178)
|
||||
|
||||
### Fixes
|
||||
Some noteworthy bug fixes that are not related to specific language binding are listed in this section.
|
||||
* Rename misspelled config parameter for pseudo-Huber (#7904)
|
||||
* Fix feature weights with nested column sampling. (#8100)
|
||||
* Fix loading DMatrix binary in distributed env. (#8149)
|
||||
* Force auc.cc to be statically linked for unusual compiler platforms. (#8039)
|
||||
* New logic for detecting libomp on macos (#8384).
|
||||
|
||||
### Python Package
|
||||
* Python 3.8 is now the minimum required Python version. (#8071)
|
||||
* More progress on type hint support. Except for the new PySpark interface, the XGBoost module is fully typed. (#7742, #7945, #8302, #7914, #8052)
|
||||
* XGBoost now validates the feature names in `inplace_predict`, which also affects the predict function in scikit-learn estimators as it uses `inplace_predict` internally. (#8359)
|
||||
* Users can now get the data from `DMatrix` using `DMatrix::get_data` or `QuantileDMatrix::get_data`.
|
||||
* Show `libxgboost.so` path in build info. (#7893)
|
||||
* Raise import error when using the sklearn module while scikit-learn is missing. (#8049)
|
||||
* Use `config_context` in the sklearn interface. (#8141)
|
||||
|
||||
* Pandas dataframe handling is refactored to reduce data fragmentation. (#7843)
|
||||
* Support more pandas nullable types (#8262)
|
||||
* Remove pyarrow workaround. (#7884)
|
||||
|
||||
* Binary wheel size
|
||||
We aim to enable as many features as possible in XGBoost's default binary distribution on PyPI (the package installed with pip), but there's an upper limit on the size of the binary wheel. In 1.7, XGBoost reduces the size of the wheel by pruning unused CUDA architectures. (#8179, #8152, #8150)
|
||||
|
||||
* Fixes
|
||||
Some noteworthy fixes are listed here:
|
||||
- Fix the Dask interface with the latest cupy. (#8210)
|
||||
- Check cuDF lazily to avoid potential errors with cuda-python. (#8084)
|
||||
- Fix potential error in DMatrix constructor on 32-bit platform. (#8369)
|
||||
|
||||
* Maintenance work
|
||||
- Linter script is moved from dmlc-core to XGBoost with added support for formatting, mypy, and parallel run, along with some fixes (#7967, #8101, #8216)
|
||||
- We now require the use of `isort` and `black` for selected files. (#8137, #8096)
|
||||
- Code cleanups. (#7827)
|
||||
- Deprecate `use_label_encoder` in XGBClassifier. The label encoder has already been deprecated and removed in the previous version. These changes only affect the indicator parameter (#7822)
|
||||
- Remove the use of distutils. (#7770)
|
||||
- Refactor and fixes for tests (#8077, #8064, #8078, #8076, #8013, #8010, #8244, #7833)
|
||||
|
||||
* Documents
|
||||
- [dask] Fix potential error in demo. (#8079)
|
||||
- Improved documentation for the ranker. (#8356, #8347)
|
||||
- Indicate lack of py-xgboost-gpu on Windows (#8127)
|
||||
- Clarification for feature importance. (#8151)
|
||||
- Simplify Python getting started example (#8153)
|
||||
|
||||
### R Package
|
||||
We summarize improvements for the R package briefly here:
|
||||
* Feature info including names and types are now passed to DMatrix in preparation for categorical feature support. (#804)
|
||||
* XGBoost 1.7 can now gracefully load old R models from RDS for better compatibility with third-party tuning libraries (#7864)
|
||||
* The R package now can be built with parallel compilation, along with fixes for warnings in CRAN tests. (#8330)
|
||||
* Emit error early if DiagrammeR is missing (#8037)
|
||||
* Fix R package Windows build. (#8065)
|
||||
|
||||
### JVM Packages
|
||||
The consistency between JVM packages and other language bindings is greatly improved in 1.7, improvements range from model serialization format to the default value of hyper-parameters.
|
||||
|
||||
* Java package now supports feature names and feature types for DMatrix in preparation for categorical feature support. (#7966)
|
||||
* Models trained by the JVM packages can now be safely used with other language bindings. (#7896, #7907)
|
||||
* Users can specify the model format when saving models with a stream. (#7940, #7955)
|
||||
* The default value for training parameters is now sourced from XGBoost directly, which helps JVM packages be consistent with other packages. (#7938)
|
||||
* Set the correct objective if the user doesn't explicitly set it (#7781)
|
||||
* Auto-detection of MUSL is replaced by system properties (#7921)
|
||||
* Improved error message for launching tracker. (#7952, #7968)
|
||||
* Fix a race condition in parameter configuration. (#8025)
|
||||
* [Breaking] `timeoutRequestWorkers` is now removed. With the support for barrier mode, this parameter is no longer needed. (#7839)
|
||||
* Dependencies updates. (#7791, #8157, #7801, #8240)
|
||||
|
||||
### Documents
|
||||
- Document for the C interface is greatly improved and is now displayed at the [sphinx document page](https://xgboost.readthedocs.io/en/latest/c.html). Thanks to the breathe project, you can view the C API just like the Python API. (#8300)
|
||||
- We now avoid using XGBoost's internal text parser in demos and recommend that users use dedicated libraries for loading data whenever feasible. (#7753)
|
||||
- Python survival training demos are now displayed at [sphinx gallery](https://xgboost.readthedocs.io/en/latest/python/survival-examples/index.html). (#8328)
|
||||
- Some typos, links, format, and grammar fixes. (#7800, #7832, #7861, #8099, #8163, #8166, #8229, #8028, #8214, #7777, #7905, #8270, #8309, d70e59fef, #7806)
|
||||
- Updated winning solution under readme.md (#7862)
|
||||
- New security policy. (#8360)
|
||||
- GPU document is overhauled as we consider CUDA support to be feature-complete. (#8378)
|
||||
|
||||
### Maintenance
|
||||
* Code refactoring and cleanups. (#7850, #7826, #7910, #8332, #8204)
|
||||
* Reduce compiler warnings. (#7768, #7916, #8046, #8059, #7974, #8031, #8022)
|
||||
* Compiler workarounds. (#8211, #8314, #8226, #8093)
|
||||
* Dependencies update. (#8001, #7876, #7973, #8298, #7816)
|
||||
* Remove warnings emitted in previous versions. (#7815)
|
||||
* Small fixes occurred during development. (#8008)
|
||||
|
||||
### CI and Tests
|
||||
* We overhauled the CI infrastructure to reduce the CI cost and lift the maintenance burden. Jenkins is replaced with Buildkite for better automation, with which finer control of test runs is implemented to reduce overall cost. Also, we refactored some of the existing tests to reduce their runtime, reduced the size of Docker images, and removed multi-GPU C++ tests. Lastly, `pytest-timeout` is added as an optional dependency for running Python tests to keep the test time in check. (#7772, #8291, #8286, #8276, #8306, #8287, #8243, #8313, #8235, #8288, #8303, #8142, #8092, #8333, #8312, #8348)
|
||||
* New documents for how to reproduce the CI environment (#7971, #8297)
|
||||
* Improved automation for JVM release. (#7882)
|
||||
* GitHub Action security-related updates. (#8263, #8267, #8360)
|
||||
* Other fixes and maintenance work. (#8154, #7848, #8069, #7943)
|
||||
* Small updates and fixes to GitHub action pipelines. (#8364, #8321, #8241, #7950, #8011)
|
||||
|
||||
## v1.6.1 (2022 May 9)
|
||||
This is a patch release for bug fixes and Spark barrier mode support. The R package is unchanged.
|
||||
|
||||
@@ -1681,7 +2103,7 @@ This release marks a major milestone for the XGBoost project.
|
||||
## v0.90 (2019.05.18)
|
||||
|
||||
### XGBoost Python package drops Python 2.x (#4379, #4381)
|
||||
Python 2.x is reaching its end-of-life at the end of this year. [Many scientific Python packages are now moving to drop Python 2.x](https://python3statement.org/).
|
||||
Python 2.x is reaching its end-of-life at the end of this year. [Many scientific Python packages are now moving to drop Python 2.x](https://python3statement.github.io/).
|
||||
|
||||
### XGBoost4J-Spark now requires Spark 2.4.x (#4377)
|
||||
* Spark 2.3 is reaching its end-of-life soon. See discussion at #4389.
|
||||
|
||||
@@ -4,3 +4,5 @@
|
||||
^.*\.Rproj$
|
||||
^\.Rproj\.user$
|
||||
README.md
|
||||
^doc$
|
||||
^Meta$
|
||||
|
||||
@@ -1,42 +1,60 @@
|
||||
find_package(LibR REQUIRED)
|
||||
message(STATUS "LIBR_CORE_LIBRARY " ${LIBR_CORE_LIBRARY})
|
||||
|
||||
file(GLOB_RECURSE R_SOURCES
|
||||
file(
|
||||
GLOB_RECURSE R_SOURCES
|
||||
${CMAKE_CURRENT_LIST_DIR}/src/*.cc
|
||||
${CMAKE_CURRENT_LIST_DIR}/src/*.c)
|
||||
${CMAKE_CURRENT_LIST_DIR}/src/*.c
|
||||
)
|
||||
|
||||
# Use object library to expose symbols
|
||||
add_library(xgboost-r OBJECT ${R_SOURCES})
|
||||
if (ENABLE_ALL_WARNINGS)
|
||||
|
||||
if(ENABLE_ALL_WARNINGS)
|
||||
target_compile_options(xgboost-r PRIVATE -Wall -Wextra)
|
||||
endif (ENABLE_ALL_WARNINGS)
|
||||
target_compile_definitions(xgboost-r
|
||||
PUBLIC
|
||||
endif()
|
||||
|
||||
if(MSVC)
|
||||
# https://github.com/microsoft/LightGBM/pull/6061
|
||||
# MSVC doesn't work with anonymous types in structs. (R complex)
|
||||
#
|
||||
# syntax error: missing ';' before identifier 'private_data_c'
|
||||
#
|
||||
target_compile_definitions(xgboost-r PRIVATE -DR_LEGACY_RCOMPLEX)
|
||||
endif()
|
||||
|
||||
target_compile_definitions(
|
||||
xgboost-r PUBLIC
|
||||
-DXGBOOST_STRICT_R_MODE=1
|
||||
-DXGBOOST_CUSTOMIZE_GLOBAL_PRNG=1
|
||||
-DDMLC_LOG_BEFORE_THROW=0
|
||||
-DDMLC_DISABLE_STDIN=1
|
||||
-DDMLC_LOG_CUSTOMIZE=1
|
||||
-DRABIT_CUSTOMIZE_MSG_
|
||||
-DRABIT_STRICT_CXX98_)
|
||||
target_include_directories(xgboost-r
|
||||
PRIVATE
|
||||
)
|
||||
|
||||
target_include_directories(
|
||||
xgboost-r PRIVATE
|
||||
${LIBR_INCLUDE_DIRS}
|
||||
${PROJECT_SOURCE_DIR}/include
|
||||
${PROJECT_SOURCE_DIR}/dmlc-core/include
|
||||
${PROJECT_SOURCE_DIR}/rabit/include)
|
||||
)
|
||||
|
||||
target_link_libraries(xgboost-r PUBLIC ${LIBR_CORE_LIBRARY})
|
||||
if (USE_OPENMP)
|
||||
|
||||
if(USE_OPENMP)
|
||||
find_package(OpenMP REQUIRED)
|
||||
target_link_libraries(xgboost-r PUBLIC OpenMP::OpenMP_CXX OpenMP::OpenMP_C)
|
||||
endif (USE_OPENMP)
|
||||
endif()
|
||||
|
||||
set_target_properties(
|
||||
xgboost-r PROPERTIES
|
||||
CXX_STANDARD 14
|
||||
CXX_STANDARD 17
|
||||
CXX_STANDARD_REQUIRED ON
|
||||
POSITION_INDEPENDENT_CODE ON)
|
||||
POSITION_INDEPENDENT_CODE ON
|
||||
)
|
||||
|
||||
# Get compilation and link flags of xgboost-r and propagate to objxgboost
|
||||
target_link_libraries(objxgboost PUBLIC xgboost-r)
|
||||
|
||||
# Add all objects of xgboost-r to objxgboost
|
||||
target_sources(objxgboost INTERFACE $<TARGET_OBJECTS:xgboost-r>)
|
||||
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
Package: xgboost
|
||||
Type: Package
|
||||
Title: Extreme Gradient Boosting
|
||||
Version: 1.7.0.1
|
||||
Date: 2022-10-18
|
||||
Version: 2.2.0.0
|
||||
Date: 2024-06-03
|
||||
Authors@R: c(
|
||||
person("Tianqi", "Chen", role = c("aut"),
|
||||
email = "tianqi.tchen@gmail.com"),
|
||||
@@ -54,17 +54,19 @@ Suggests:
|
||||
Ckmeans.1d.dp (>= 3.3.1),
|
||||
vcd (>= 1.3),
|
||||
testthat,
|
||||
lintr,
|
||||
igraph (>= 1.0.1),
|
||||
float,
|
||||
crayon,
|
||||
titanic
|
||||
titanic,
|
||||
RhpcBLASctl,
|
||||
survival
|
||||
Depends:
|
||||
R (>= 3.3.0)
|
||||
R (>= 4.3.0)
|
||||
Imports:
|
||||
Matrix (>= 1.1-0),
|
||||
methods,
|
||||
data.table (>= 1.9.6),
|
||||
jsonlite (>= 1.0),
|
||||
RoxygenNote: 7.1.1
|
||||
SystemRequirements: GNU make, C++14
|
||||
jsonlite (>= 1.0)
|
||||
Roxygen: list(markdown = TRUE)
|
||||
RoxygenNote: 7.3.2
|
||||
Encoding: UTF-8
|
||||
SystemRequirements: GNU make, C++17
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
Copyright (c) 2014 by Tianqi Chen and Contributors
|
||||
Copyright (c) 2014-2023, Tianqi Chen and XBGoost Contributors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
|
||||
@@ -1,46 +1,62 @@
|
||||
# Generated by roxygen2: do not edit by hand
|
||||
|
||||
S3method("[",xgb.Booster)
|
||||
S3method("[",xgb.DMatrix)
|
||||
S3method("dimnames<-",xgb.DMatrix)
|
||||
S3method(coef,xgb.Booster)
|
||||
S3method(dim,xgb.DMatrix)
|
||||
S3method(dimnames,xgb.DMatrix)
|
||||
S3method(getinfo,xgb.Booster)
|
||||
S3method(getinfo,xgb.DMatrix)
|
||||
S3method(length,xgb.Booster)
|
||||
S3method(predict,xgb.Booster)
|
||||
S3method(predict,xgb.Booster.handle)
|
||||
S3method(print,xgb.Booster)
|
||||
S3method(print,xgb.DMatrix)
|
||||
S3method(print,xgb.cv.synchronous)
|
||||
S3method(print,xgboost)
|
||||
S3method(setinfo,xgb.Booster)
|
||||
S3method(setinfo,xgb.DMatrix)
|
||||
S3method(slice,xgb.DMatrix)
|
||||
S3method(variable.names,xgb.Booster)
|
||||
export("xgb.attr<-")
|
||||
export("xgb.attributes<-")
|
||||
export("xgb.config<-")
|
||||
export("xgb.parameters<-")
|
||||
export(cb.cv.predict)
|
||||
export(cb.early.stop)
|
||||
export(cb.evaluation.log)
|
||||
export(cb.gblinear.history)
|
||||
export(cb.print.evaluation)
|
||||
export(cb.reset.parameters)
|
||||
export(cb.save.model)
|
||||
export(getinfo)
|
||||
export(setinfo)
|
||||
export(slice)
|
||||
export(xgb.Booster.complete)
|
||||
export(xgb.Callback)
|
||||
export(xgb.DMatrix)
|
||||
export(xgb.DMatrix.hasinfo)
|
||||
export(xgb.DMatrix.save)
|
||||
export(xgb.DataBatch)
|
||||
export(xgb.DataIter)
|
||||
export(xgb.ExtMemDMatrix)
|
||||
export(xgb.QuantileDMatrix)
|
||||
export(xgb.QuantileDMatrix.from_iterator)
|
||||
export(xgb.attr)
|
||||
export(xgb.attributes)
|
||||
export(xgb.cb.cv.predict)
|
||||
export(xgb.cb.early.stop)
|
||||
export(xgb.cb.evaluation.log)
|
||||
export(xgb.cb.gblinear.history)
|
||||
export(xgb.cb.print.evaluation)
|
||||
export(xgb.cb.reset.parameters)
|
||||
export(xgb.cb.save.model)
|
||||
export(xgb.config)
|
||||
export(xgb.copy.Booster)
|
||||
export(xgb.create.features)
|
||||
export(xgb.cv)
|
||||
export(xgb.dump)
|
||||
export(xgb.gblinear.history)
|
||||
export(xgb.get.DMatrix.data)
|
||||
export(xgb.get.DMatrix.num.non.missing)
|
||||
export(xgb.get.DMatrix.qcut)
|
||||
export(xgb.get.config)
|
||||
export(xgb.get.num.boosted.rounds)
|
||||
export(xgb.ggplot.deepness)
|
||||
export(xgb.ggplot.importance)
|
||||
export(xgb.ggplot.shap.summary)
|
||||
export(xgb.importance)
|
||||
export(xgb.is.same.Booster)
|
||||
export(xgb.load)
|
||||
export(xgb.load.raw)
|
||||
export(xgb.model.dt.tree)
|
||||
@@ -52,19 +68,16 @@ export(xgb.plot.shap.summary)
|
||||
export(xgb.plot.tree)
|
||||
export(xgb.save)
|
||||
export(xgb.save.raw)
|
||||
export(xgb.serialize)
|
||||
export(xgb.set.config)
|
||||
export(xgb.slice.Booster)
|
||||
export(xgb.slice.DMatrix)
|
||||
export(xgb.train)
|
||||
export(xgb.unserialize)
|
||||
export(xgboost)
|
||||
import(methods)
|
||||
importClassesFrom(Matrix,CsparseMatrix)
|
||||
importClassesFrom(Matrix,dgCMatrix)
|
||||
importClassesFrom(Matrix,dgeMatrix)
|
||||
importFrom(Matrix,colSums)
|
||||
importClassesFrom(Matrix,dgRMatrix)
|
||||
importFrom(Matrix,sparse.model.matrix)
|
||||
importFrom(Matrix,sparseMatrix)
|
||||
importFrom(Matrix,sparseVector)
|
||||
importFrom(Matrix,t)
|
||||
importFrom(data.table,":=")
|
||||
importFrom(data.table,as.data.table)
|
||||
importFrom(data.table,data.table)
|
||||
@@ -82,8 +95,12 @@ importFrom(graphics,points)
|
||||
importFrom(graphics,title)
|
||||
importFrom(jsonlite,fromJSON)
|
||||
importFrom(jsonlite,toJSON)
|
||||
importFrom(methods,new)
|
||||
importFrom(stats,coef)
|
||||
importFrom(stats,median)
|
||||
importFrom(stats,predict)
|
||||
importFrom(stats,sd)
|
||||
importFrom(stats,variable.names)
|
||||
importFrom(utils,head)
|
||||
importFrom(utils,object.size)
|
||||
importFrom(utils,str)
|
||||
|
||||
File diff suppressed because it is too large
@@ -26,6 +26,44 @@ NVL <- function(x, val) {
|
||||
'multi:softprob', 'rank:pairwise', 'rank:ndcg', 'rank:map'))
|
||||
}
|
||||
|
||||
.RANKING_OBJECTIVES <- function() {
|
||||
return(c('rank:pairwise', 'rank:ndcg', 'rank:map'))
|
||||
}
|
||||
|
||||
.OBJECTIVES_NON_DEFAULT_MODE <- function() {
|
||||
return(c("reg:logistic", "binary:logitraw", "multi:softmax"))
|
||||
}
|
||||
|
||||
.BINARY_CLASSIF_OBJECTIVES <- function() {
|
||||
return(c("binary:logistic", "binary:hinge"))
|
||||
}
|
||||
|
||||
.MULTICLASS_CLASSIF_OBJECTIVES <- function() {
|
||||
return("multi:softprob")
|
||||
}
|
||||
|
||||
.SURVIVAL_RIGHT_CENSORING_OBJECTIVES <- function() { # nolint
|
||||
return(c("survival:cox", "survival:aft"))
|
||||
}
|
||||
|
||||
.SURVIVAL_ALL_CENSORING_OBJECTIVES <- function() { # nolint
|
||||
return("survival:aft")
|
||||
}
|
||||
|
||||
.REGRESSION_OBJECTIVES <- function() {
|
||||
return(c(
|
||||
"reg:squarederror", "reg:squaredlogerror", "reg:logistic", "reg:pseudohubererror",
|
||||
"reg:absoluteerror", "reg:quantileerror", "count:poisson", "reg:gamma", "reg:tweedie"
|
||||
))
|
||||
}
|
||||
|
||||
.MULTI_TARGET_OBJECTIVES <- function() {
|
||||
return(c(
|
||||
"reg:squarederror", "reg:squaredlogerror", "reg:logistic", "reg:pseudohubererror",
|
||||
"reg:quantileerror", "reg:gamma"
|
||||
))
|
||||
}
|
||||
|
||||
|
||||
#
|
||||
# Low-level functions for boosting --------------------------------------------
|
||||
@@ -38,11 +76,11 @@ check.booster.params <- function(params, ...) {
|
||||
stop("params must be a list")
|
||||
|
||||
# in R interface, allow for '.' instead of '_' in parameter names
|
||||
names(params) <- gsub("\\.", "_", names(params))
|
||||
names(params) <- gsub(".", "_", names(params), fixed = TRUE)
|
||||
|
||||
# merge parameters from the params and the dots-expansion
|
||||
dot_params <- list(...)
|
||||
names(dot_params) <- gsub("\\.", "_", names(dot_params))
|
||||
names(dot_params) <- gsub(".", "_", names(dot_params), fixed = TRUE)
|
||||
if (length(intersect(names(params),
|
||||
names(dot_params))) > 0)
|
||||
stop("Same parameters in 'params' and in the call are not allowed. Please check your 'params' list.")
|
||||
@@ -66,7 +104,7 @@ check.booster.params <- function(params, ...) {
|
||||
|
||||
# for multiclass, expect num_class to be set
|
||||
if (typeof(params[['objective']]) == "character" &&
|
||||
substr(NVL(params[['objective']], 'x'), 1, 6) == 'multi:' &&
|
||||
startsWith(NVL(params[['objective']], 'x'), 'multi:') &&
|
||||
as.numeric(NVL(params[['num_class']], 0)) < 2) {
|
||||
stop("'num_class' > 1 parameter must be set for multiclass classification")
|
||||
}
|
||||
@@ -82,7 +120,7 @@ check.booster.params <- function(params, ...) {
|
||||
|
||||
# interaction constraints parser (convert from list of column indices to string)
|
||||
if (!is.null(params[['interaction_constraints']]) &&
|
||||
typeof(params[['interaction_constraints']]) != "character"){
|
||||
typeof(params[['interaction_constraints']]) != "character") {
|
||||
# check input class
|
||||
if (!identical(class(params[['interaction_constraints']]), 'list')) stop('interaction_constraints should be class list')
|
||||
if (!all(unique(sapply(params[['interaction_constraints']], class)) %in% c('numeric', 'integer'))) {
|
||||
@@ -93,6 +131,14 @@ check.booster.params <- function(params, ...) {
|
||||
interaction_constraints <- sapply(params[['interaction_constraints']], function(x) paste0('[', paste(x, collapse = ','), ']'))
|
||||
params[['interaction_constraints']] <- paste0('[', paste(interaction_constraints, collapse = ','), ']')
|
||||
}
|
||||
|
||||
# for evaluation metrics, should generate multiple entries per metric
|
||||
if (NROW(params[['eval_metric']]) > 1) {
|
||||
eval_metrics <- as.list(params[["eval_metric"]])
|
||||
names(eval_metrics) <- rep("eval_metric", length(eval_metrics))
|
||||
params_without_ev_metrics <- within(params, rm("eval_metric"))
|
||||
params <- c(params_without_ev_metrics, eval_metrics)
|
||||
}
|
||||
return(params)
|
||||
}
|
||||
|
||||
@@ -134,27 +180,48 @@ check.custom.eval <- function(env = parent.frame()) {
|
||||
if (!is.null(env$feval) &&
|
||||
is.null(env$maximize) && (
|
||||
!is.null(env$early_stopping_rounds) ||
|
||||
has.callbacks(env$callbacks, 'cb.early.stop')))
|
||||
has.callbacks(env$callbacks, "early_stop")))
|
||||
stop("Please set 'maximize' to indicate whether the evaluation metric needs to be maximized or not")
|
||||
}
|
||||
|
||||
|
||||
# Update a booster handle for an iteration with dtrain data
|
||||
xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {
|
||||
if (!identical(class(booster_handle), "xgb.Booster.handle")) {
|
||||
stop("booster_handle must be of xgb.Booster.handle class")
|
||||
}
|
||||
xgb.iter.update <- function(bst, dtrain, iter, obj) {
|
||||
if (!inherits(dtrain, "xgb.DMatrix")) {
|
||||
stop("dtrain must be of xgb.DMatrix class")
|
||||
}
|
||||
handle <- xgb.get.handle(bst)
|
||||
|
||||
if (is.null(obj)) {
|
||||
.Call(XGBoosterUpdateOneIter_R, booster_handle, as.integer(iter), dtrain)
|
||||
.Call(XGBoosterUpdateOneIter_R, handle, as.integer(iter), dtrain)
|
||||
} else {
|
||||
pred <- predict(booster_handle, dtrain, outputmargin = TRUE, training = TRUE,
|
||||
ntreelimit = 0)
|
||||
pred <- predict(
|
||||
bst,
|
||||
dtrain,
|
||||
outputmargin = TRUE,
|
||||
training = TRUE
|
||||
)
|
||||
gpair <- obj(pred, dtrain)
|
||||
.Call(XGBoosterBoostOneIter_R, booster_handle, dtrain, gpair$grad, gpair$hess)
|
||||
n_samples <- dim(dtrain)[1]
|
||||
grad <- gpair$grad
|
||||
hess <- gpair$hess
|
||||
|
||||
if ((is.matrix(grad) && dim(grad)[1] != n_samples) ||
|
||||
(is.vector(grad) && length(grad) != n_samples) ||
|
||||
(is.vector(grad) != is.vector(hess))) {
|
||||
warning(paste(
|
||||
"Since 2.1.0, the shape of the gradient and hessian is required to be ",
|
||||
"(n_samples, n_targets) or (n_samples, n_classes). Will reshape assuming ",
|
||||
"column-major order.",
|
||||
sep = ""
|
||||
))
|
||||
grad <- matrix(grad, nrow = n_samples)
|
||||
hess <- matrix(hess, nrow = n_samples)
|
||||
}
|
||||
|
||||
.Call(
|
||||
XGBoosterTrainOneIter_R, handle, dtrain, iter, grad, hess
|
||||
)
|
||||
}
|
||||
return(TRUE)
|
||||
}
|
||||
@@ -163,23 +230,22 @@ xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {
|
||||
# Evaluate one iteration.
|
||||
# Returns a named vector of evaluation metrics
|
||||
# with the names in a 'datasetname-metricname' format.
|
||||
xgb.iter.eval <- function(booster_handle, watchlist, iter, feval = NULL) {
|
||||
if (!identical(class(booster_handle), "xgb.Booster.handle"))
|
||||
stop("class of booster_handle must be xgb.Booster.handle")
|
||||
xgb.iter.eval <- function(bst, evals, iter, feval) {
|
||||
handle <- xgb.get.handle(bst)
|
||||
|
||||
if (length(watchlist) == 0)
|
||||
if (length(evals) == 0)
|
||||
return(NULL)
|
||||
|
||||
evnames <- names(watchlist)
|
||||
evnames <- names(evals)
|
||||
if (is.null(feval)) {
|
||||
msg <- .Call(XGBoosterEvalOneIter_R, booster_handle, as.integer(iter), watchlist, as.list(evnames))
|
||||
msg <- .Call(XGBoosterEvalOneIter_R, handle, as.integer(iter), evals, as.list(evnames))
|
||||
mat <- matrix(strsplit(msg, '\\s+|:')[[1]][-1], nrow = 2)
|
||||
res <- structure(as.numeric(mat[2, ]), names = mat[1, ])
|
||||
} else {
|
||||
res <- sapply(seq_along(watchlist), function(j) {
|
||||
w <- watchlist[[j]]
|
||||
res <- sapply(seq_along(evals), function(j) {
|
||||
w <- evals[[j]]
|
||||
## predict using all trees
|
||||
preds <- predict(booster_handle, w, outputmargin = TRUE, iterationrange = c(1, 1))
|
||||
preds <- predict(bst, w, outputmargin = TRUE, iterationrange = "all")
|
||||
eval_res <- feval(preds, w)
|
||||
out <- eval_res$value
|
||||
names(out) <- paste0(evnames[j], "-", eval_res$metric)
|
||||
@@ -206,35 +272,45 @@ convert.labels <- function(labels, objective_name) {
|
||||
}
|
||||
|
||||
# Generates random (stratified if needed) CV folds
|
||||
generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
|
||||
generate.cv.folds <- function(nfold, nrows, stratified, label, group, params) {
|
||||
if (NROW(group)) {
|
||||
if (stratified) {
|
||||
warning(
|
||||
paste0(
|
||||
"Stratified splitting is not supported when using 'group' attribute.",
|
||||
" Will use unstratified splitting."
|
||||
)
|
||||
)
|
||||
}
|
||||
return(generate.group.folds(nfold, group))
|
||||
}
|
||||
objective <- params$objective
|
||||
if (!is.character(objective)) {
|
||||
warning("Will use unstratified splitting (custom objective used)")
|
||||
stratified <- FALSE
|
||||
}
|
||||
# cannot stratify if label is NULL
|
||||
if (stratified && is.null(label)) {
|
||||
warning("Will use unstratified splitting (no 'labels' available)")
|
||||
stratified <- FALSE
|
||||
}
|
||||
|
||||
# cannot do it for rank
|
||||
objective <- params$objective
|
||||
if (is.character(objective) && strtrim(objective, 5) == 'rank:') {
|
||||
stop("\n\tAutomatic generation of CV-folds is not implemented for ranking!\n",
|
||||
stop("\n\tAutomatic generation of CV-folds is not implemented for ranking without 'group' field!\n",
|
||||
"\tConsider providing pre-computed CV-folds through the 'folds=' parameter.\n")
|
||||
}
|
||||
# shuffle
|
||||
rnd_idx <- sample.int(nrows)
|
||||
if (stratified &&
|
||||
length(label) == length(rnd_idx)) {
|
||||
if (stratified && length(label) == length(rnd_idx)) {
|
||||
y <- label[rnd_idx]
|
||||
# WARNING: some heuristic logic is employed to identify classification setting!
|
||||
# - For classification, need to convert y labels to factor before making the folds,
|
||||
# and then do stratification by factor levels.
|
||||
# - For regression, leave y numeric and do stratification by quantiles.
|
||||
if (is.character(objective)) {
|
||||
y <- convert.labels(y, params$objective)
|
||||
} else {
|
||||
# If no 'objective' given in params, it means that user either wants to
|
||||
# use the default 'reg:squarederror' objective or has provided a custom
|
||||
# obj function. Here, assume classification setting when y has 5 or less
|
||||
# unique values:
|
||||
if (length(unique(y)) <= 5) {
|
||||
y <- factor(y)
|
||||
}
|
||||
y <- convert.labels(y, objective)
|
||||
}
|
||||
folds <- xgb.createFolds(y, nfold)
|
||||
folds <- xgb.createFolds(y = y, k = nfold)
|
||||
} else {
|
||||
# make simple non-stratified folds
|
||||
kstep <- length(rnd_idx) %/% nfold
|
||||
@@ -248,11 +324,33 @@ generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
|
||||
return(folds)
|
||||
}
|
||||
|
||||
generate.group.folds <- function(nfold, group) {
|
||||
ngroups <- length(group) - 1
|
||||
if (ngroups < nfold) {
|
||||
stop("DMatrix has fewer groups than folds.")
|
||||
}
|
||||
seq_groups <- seq_len(ngroups)
|
||||
indices <- lapply(seq_groups, function(gr) seq(group[gr] + 1, group[gr + 1]))
|
||||
assignments <- base::split(seq_groups, as.integer(seq_groups %% nfold))
|
||||
assignments <- unname(assignments)
|
||||
|
||||
out <- vector("list", nfold)
|
||||
randomized_groups <- sample(ngroups)
|
||||
for (idx in seq_len(nfold)) {
|
||||
groups_idx_test <- randomized_groups[assignments[[idx]]]
|
||||
groups_test <- indices[groups_idx_test]
|
||||
idx_test <- unlist(groups_test)
|
||||
attributes(idx_test)$group_test <- lengths(groups_test)
|
||||
attributes(idx_test)$group_train <- lengths(indices[-groups_idx_test])
|
||||
out[[idx]] <- idx_test
|
||||
}
|
||||
return(out)
|
||||
}
|
||||
|
||||
# Creates CV folds stratified by the values of y.
|
||||
# It was borrowed from caret::createFolds and simplified
|
||||
# by always returning an unnamed list of fold indices.
|
||||
xgb.createFolds <- function(y, k = 10)
|
||||
{
|
||||
xgb.createFolds <- function(y, k) {
|
||||
if (is.numeric(y)) {
|
||||
## Group the numeric data based on their magnitudes
|
||||
## and sample within those groups.
|
||||
@@ -312,7 +410,7 @@ xgb.createFolds <- function(y, k = 10)
|
||||
#' At this time, some of the parameter names were changed in order to make the code style more uniform.
|
||||
#' The deprecated parameters would be removed in the next release.
|
||||
#'
|
||||
#' To see all the current deprecated and new parameters, check the \code{xgboost:::depr_par_lut} table.
|
||||
#' To see all the current deprecated and new parameters, check the `xgboost:::depr_par_lut` table.
|
||||
#'
|
||||
#' A deprecation warning is shown when any of the deprecated parameters is used in a call.
|
||||
#' An additional warning is shown when there was a partial match to a deprecated parameter
|
||||
@@ -321,48 +419,100 @@ xgb.createFolds <- function(y, k = 10)
|
||||
#' @name xgboost-deprecated
|
||||
NULL
|
||||
|
||||
#' Do not use \code{\link[base]{saveRDS}} or \code{\link[base]{save}} for long-term archival of
|
||||
#' models. Instead, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}}.
|
||||
#' Model Serialization and Compatibility
|
||||
#'
|
||||
#' It is a common practice to use the built-in \code{\link[base]{saveRDS}} function (or
|
||||
#' \code{\link[base]{save}}) to persist R objects to the disk. While it is possible to persist
|
||||
#' \code{xgb.Booster} objects using \code{\link[base]{saveRDS}}, it is not advisable to do so if
|
||||
#' the model is to be accessed in the future. If you train a model with the current version of
|
||||
#' XGBoost and persist it with \code{\link[base]{saveRDS}}, the model is not guaranteed to be
|
||||
#' accessible in later releases of XGBoost. To ensure that your model can be accessed in future
|
||||
#' releases of XGBoost, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}} instead.
|
||||
#' @description
#' When it comes to serializing XGBoost models, it's possible to use R serializers such as
#' [save()] or [saveRDS()] to serialize an XGBoost R model, but XGBoost also provides
#' its own serializers with better compatibility guarantees, which allow loading
#' said models in other language bindings of XGBoost.
#'
#' Note that an `xgb.Booster` object (**as produced by [xgb.train()]**, see rest of the doc
#' for objects produced by [xgboost()]), outside of its core components, might also keep:
#' - Additional model configuration (accessible through [xgb.config()]), which includes
#'   model fitting parameters like `max_depth` and runtime parameters like `nthread`.
#'   These are not necessarily useful for prediction/importance/plotting.
#' - Additional R-specific attributes - e.g. results of callbacks, such as evaluation logs,
#'   which are kept as a `data.table` object, accessible through
#'   `attributes(model)$evaluation_log` if present.
#'
#' The first one (configurations) does not have the same compatibility guarantees as
#' the model itself, including attributes that are set and accessed through
#' [xgb.attributes()] - that is, such configuration might be lost after loading the
#' booster in a different XGBoost version, regardless of the serializer that was used.
#' These are saved when using [saveRDS()], but will be discarded if loaded into an
#' incompatible XGBoost version. They are not saved when using XGBoost's
#' serializers from its public interface including [xgb.save()] and [xgb.save.raw()].
#'
#' The second ones (R attributes) are not part of the standard XGBoost model structure,
#' and thus are not saved when using XGBoost's own serializers. These attributes are
#' only used for informational purposes, such as keeping track of evaluation metrics as
#' the model was fit, or saving the R call that produced the model, but are otherwise
#' not used for prediction / importance / plotting / etc.
#' These R attributes are only preserved when using R's serializers.
#'
#' In addition to the regular `xgb.Booster` objects produced by [xgb.train()], the
#' function [xgboost()] produces a different subclass `xgboost`, which keeps other
#' additional metadata as R attributes such as class names in classification problems,
#' and which has a dedicated `predict` method that uses different defaults. XGBoost's
#' own serializers can work with this `xgboost` class, but as they do not keep R
#' attributes, the resulting object, when deserialized, is downcast to the regular
#' `xgb.Booster` class (i.e. it loses the metadata, and the resulting object will use
#' `predict.xgb.Booster` instead of `predict.xgboost`) - for these `xgboost` objects,
#' `saveRDS` might thus be a better option if the extra functionalities are needed.
#'
#' Note that XGBoost models in R starting from version `2.1.0` and onwards, and
#' XGBoost models before version `2.1.0`, have a very different R object structure and
#' are incompatible with each other. Hence, models that were saved with R serializers
#' like [saveRDS()] or [save()] before version `2.1.0` will not work with later
#' `xgboost` versions and vice versa. Be aware that the structure of R model objects
#' could in theory change again in the future, so XGBoost's serializers
#' should be preferred for long-term storage.
#'
#' Furthermore, note that using the package `qs` for serialization will require
#' version 0.26 or higher of said package, and will have the same compatibility
#' restrictions as R serializers.
#'
#' @details
#' Use \code{\link{xgb.save}} to save the XGBoost model as a stand-alone file. You may opt into
#' Use [xgb.save()] to save the XGBoost model as a stand-alone file. You may opt into
#' the JSON format by specifying the JSON extension. To read the model back, use
#' \code{\link{xgb.load}}.
#' [xgb.load()].
#'
#' Use \code{\link{xgb.save.raw}} to save the XGBoost model as a sequence (vector) of raw bytes
#' Use [xgb.save.raw()] to save the XGBoost model as a sequence (vector) of raw bytes
#' in a future-proof manner. Future releases of XGBoost will be able to read the raw bytes and
#' re-construct the corresponding model. To read the model back, use \code{\link{xgb.load.raw}}.
#' The \code{\link{xgb.save.raw}} function is useful if you'd like to persist the XGBoost model
#' re-construct the corresponding model. To read the model back, use [xgb.load.raw()].
#' The [xgb.save.raw()] function is useful if you would like to persist the XGBoost model
#' as part of another R object.
#'
#' Note: Do not use \code{\link{xgb.serialize}} to store models long-term. It persists not only the
#' model but also internal configurations and parameters, and its format is not stable across
#' multiple XGBoost versions. Use \code{\link{xgb.serialize}} only for checkpointing.
#' Use [saveRDS()] if you require the R-specific attributes that a booster might have, such
#' as evaluation logs or the model class `xgboost` instead of `xgb.Booster`, but note that
#' future compatibility of such objects is outside XGBoost's control as it relies on R's
#' serialization format (see e.g. the details section in [serialize] and [save()] from base R).
#'
#' For more details and explanation about model persistence and archival, consult the page
#' \url{https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html}.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' data(agaricus.train, package = "xgboost")
#'
#' bst <- xgb.train(
#'   data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label),
#'   max_depth = 2,
#'   eta = 1,
#'   nthread = 2,
#'   nrounds = 2,
#'   objective = "binary:logistic"
#' )
#'
#' # Save as a stand-alone file; load it with xgb.load()
#' xgb.save(bst, 'xgb.model')
#' bst2 <- xgb.load('xgb.model')
#' fname <- file.path(tempdir(), "xgb_model.ubj")
#' xgb.save(bst, fname)
#' bst2 <- xgb.load(fname)
#'
#' # Save as a stand-alone file (JSON); load it with xgb.load()
#' xgb.save(bst, 'xgb.model.json')
#' bst2 <- xgb.load('xgb.model.json')
#' if (file.exists('xgb.model.json')) file.remove('xgb.model.json')
#' fname <- file.path(tempdir(), "xgb_model.json")
#' xgb.save(bst, fname)
#' bst2 <- xgb.load(fname)
#'
#' # Save as a raw byte vector; load it with xgb.load.raw()
#' xgb_bytes <- xgb.save.raw(bst)
@@ -373,12 +523,12 @@ NULL
#' # Persist the R object. Here, saveRDS() is okay, since it doesn't persist
#' # xgb.Booster directly. What's being persisted is the future-proof byte representation
#' # as given by xgb.save.raw().
#' saveRDS(obj, 'my_object.rds')
#' fname <- file.path(tempdir(), "my_object.Rds")
#' saveRDS(obj, fname)
#' # Read back the R object
#' obj2 <- readRDS('my_object.rds')
#' obj2 <- readRDS(fname)
#' # Re-construct xgb.Booster object from the bytes
#' bst2 <- xgb.load.raw(obj2$xgb_model_bytes)
#' if (file.exists('my_object.rds')) file.remove('my_object.rds')
#'
#' @name a-compatibility-note-for-saveRDS-save
NULL
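
A short illustrative sketch of the round-trip behavior documented above (hedged: `bst` is assumed to be a fitted `xgb.Booster`; this snippet is not part of the diff itself):

# XGBoost's own serializer keeps the model portable across versions and
# language bindings, but drops R-specific attributes such as evaluation logs:
raw_bytes <- xgb.save.raw(bst)
bst_restored <- xgb.load.raw(raw_bytes)  # plain 'xgb.Booster' again
# saveRDS() keeps the R attributes, at the cost of tying the object to the
# current version's R object structure:
saveRDS(bst, file.path(tempdir(), "bst.rds"))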

@@ -394,7 +544,8 @@ depr_par_lut <- matrix(c(
  'plot.height', 'plot_height',
  'plot.width', 'plot_width',
  'n_first_tree', 'trees',
  'dummy', 'DUMMY'
  'dummy', 'DUMMY',
  'watchlist', 'evals'
), ncol = 2, byrow = TRUE)
colnames(depr_par_lut) <- c('old', 'new')
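
A minimal sketch of how a lookup table like `depr_par_lut` can drive argument renaming (hedged: `rename_deprecated` is a hypothetical helper for illustration, not the package's actual `check.deprecation()` implementation):

rename_deprecated <- function(args, lut = depr_par_lut) {
  # 'lut' is a two-column character matrix with columns 'old' and 'new'
  hits <- match(names(args), lut[, "old"])
  for (i in which(!is.na(hits))) {
    warning(sprintf("'%s' is deprecated; use '%s' instead.",
                    lut[hits[i], "old"], lut[hits[i], "new"]))
    names(args)[i] <- lut[hits[i], "new"]
  }
  args
}
# e.g. list(watchlist = w) would be renamed to list(evals = w) with a warning,
# per the 'watchlist' -> 'evals' row added above.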
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -2,15 +2,17 @@
#'
#' Save xgb.DMatrix object to binary file
#'
#' @param dmatrix the \code{xgb.DMatrix} object
#' @param dmatrix the `xgb.DMatrix` object
#' @param fname the name of the file to write.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))
#' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
#' dtrain <- xgb.DMatrix('xgb.DMatrix.data')
#' if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
#' \dontshow{RhpcBLASctl::omp_set_num_threads(1)}
#' data(agaricus.train, package = "xgboost")
#'
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))
#' fname <- file.path(tempdir(), "xgb.DMatrix.data")
#' xgb.DMatrix.save(dtrain, fname)
#' dtrain <- xgb.DMatrix(fname)
#' @export
xgb.DMatrix.save <- function(dmatrix, fname) {
  if (typeof(fname) != "character")
@@ -1,17 +1,26 @@
#' Set and get global configuration
#'
#' Global configuration consists of a collection of parameters that can be applied in the global
#' scope. See \url{https://xgboost.readthedocs.io/en/stable/parameter.html} for the full list of
#' parameters supported in the global configuration. Use \code{xgb.set.config} to update the
#' values of one or more global-scope parameters. Use \code{xgb.get.config} to fetch the current
#' parameters supported in the global configuration. Use `xgb.set.config()` to update the
#' values of one or more global-scope parameters. Use `xgb.get.config()` to fetch the current
#' values of all global-scope parameters (listed in
#' \url{https://xgboost.readthedocs.io/en/stable/parameter.html}).
#'
#' @details
#' Note that serialization-related functions might use a globally-configured number of threads,
#' which is managed by the system's OpenMP (OMP) configuration instead. Typically, XGBoost methods
#' accept an `nthreads` parameter, but some methods like [readRDS()] might get executed before such
#' a parameter can be supplied.
#'
#' The number of OMP threads can in turn be configured for example through an environment variable
#' `OMP_NUM_THREADS` (needs to be set before R is started), or through `RhpcBLASctl::omp_set_num_threads`.
#' @rdname xgbConfig
#' @title Set and get global configuration
#' @name xgb.set.config, xgb.get.config
#' @export xgb.set.config xgb.get.config
#' @param ... List of parameters to be set, as keyword arguments
#' @return
#' \code{xgb.set.config} returns \code{TRUE} to signal success. \code{xgb.get.config} returns
#' `xgb.set.config()` returns `TRUE` to signal success. `xgb.get.config()` returns
#' a list containing all global-scope parameters and their values.
#'
#' @examples
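#' # Illustrative sketch of the get/set round trip (not from the original
#' # examples; assumes the documented 'verbosity' global parameter):
#' old_config <- xgb.get.config()
#' xgb.set.config(verbosity = 2)
#' xgb.get.config()$verbosity
#' do.call(xgb.set.config, old_config)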
@@ -1,20 +1,15 @@
#' Create new features from a previously learned model
#'
#' May improve the learning by adding new features to the training data based on the decision trees from a previously learned model.
#'
#' @param model decision tree boosting model learned on the original data
#' @param data original data (usually provided as a \code{dgCMatrix} matrix)
#' @param ... currently not used
#'
#' @return \code{dgCMatrix} matrix including both the original data and the new features.
#' May improve the learning by adding new features to the training data based on the
#' decision trees from a previously learned model.
#'
#' @details
#' This is the function inspired from the paragraph 3.1 of the paper:
#'
#' \strong{Practical Lessons from Predicting Clicks on Ads at Facebook}
#' **Practical Lessons from Predicting Clicks on Ads at Facebook**
#'
#' \emph{(Xinran He, Junfeng Pan, Ou Jin, Tianbing Xu, Bo Liu, Tao Xu, Yanxin Shi, Antoine Atallah, Ralf Herbrich, Stuart Bowers,
#' Joaquin Quinonero Candela)}
#' *(Xinran He, Junfeng Pan, Ou Jin, Tianbing Xu, Bo Liu, Tao Xu, Yanxin Shi, Antoine Atallah, Ralf Herbrich, Stuart Bowers,
#' Joaquin Quinonero Candela)*
#'
#' International Workshop on Data Mining for Online Advertising (ADKDD) - August 24, 2014
#'
@@ -33,11 +28,11 @@
#' where the first subtree has 3 leafs and the second 2 leafs. If an
#' instance ends up in leaf 2 in the first subtree and leaf 1 in
#' second subtree, the overall input to the linear classifier will
#' be the binary vector \code{[0, 1, 0, 1, 0]}, where the first 3 entries
#' be the binary vector `[0, 1, 0, 1, 0]`, where the first 3 entries
#' correspond to the leaves of the first subtree and last 2 to
#' those of the second subtree.
#'
#' [...]
#' ...
#'
#' We can understand boosted decision tree
#' based transformation as a supervised feature encoding that
@@ -45,16 +40,23 @@
#' vector. A traversal from root node to a leaf node represents
#' a rule on certain features."
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))
#' dtest <- with(agaricus.test, xgb.DMatrix(data, label = label))
#' @param model Decision tree boosting model learned on the original data.
#' @param data Original data (usually provided as a `dgCMatrix` matrix).
#' @param ... Currently not used.
#'
#' param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic')
#' @return A `dgCMatrix` matrix including both the original data and the new features.
#'
#' @examples
#' data(agaricus.train, package = "xgboost")
#' data(agaricus.test, package = "xgboost")
#'
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))
#' dtest <- with(agaricus.test, xgb.DMatrix(data, label = label, nthread = 2))
#'
#' param <- list(max_depth = 2, eta = 1, objective = 'binary:logistic')
#' nrounds = 4
#'
#' bst = xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)
#' bst <- xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)
#'
#' # Model accuracy without new features
#' accuracy.before <- sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label) /
@@ -65,9 +67,12 @@
#' new.features.test <- xgb.create.features(model = bst, agaricus.test$data)
#'
#' # learning with new features
#' new.dtrain <- xgb.DMatrix(data = new.features.train, label = agaricus.train$label)
#' new.dtest <- xgb.DMatrix(data = new.features.test, label = agaricus.test$label)
#' watchlist <- list(train = new.dtrain)
#' new.dtrain <- xgb.DMatrix(
#'   data = new.features.train, label = agaricus.train$label, nthread = 2
#' )
#' new.dtest <- xgb.DMatrix(
#'   data = new.features.test, label = agaricus.test$label, nthread = 2
#' )
#' bst <- xgb.train(params = param, data = new.dtrain, nrounds = nrounds, nthread = 2)
#'
#' # Model accuracy with new features
@@ -79,7 +84,7 @@
#' accuracy.after, "!\n"))
#'
#' @export
xgb.create.features <- function(model, data, ...){
xgb.create.features <- function(model, data, ...) {
  check.deprecation(...)
  pred_with_leaf <- predict(model, data, predleaf = TRUE)
  cols <- lapply(as.data.frame(pred_with_leaf), factor)
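  # Illustrative sketch (hedged; the rest of the function body is elided by
  # the diff): one-hot encoding the per-tree leaf indices collected above
  # yields the binary indicator features from the paper excerpt, e.g.:
  #   leaf <- factor(c(2, 1, 2))         # leaf reached by 3 instances in one tree
  #   stats::model.matrix(~ leaf - 1)    # one binary indicator column per leaf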
@@ -1,129 +1,155 @@
#' Cross Validation
#'
#' The cross validation function of xgboost
#' The cross validation function of xgboost.
#'
#' @param params the list of parameters. The complete list of parameters is
#'   available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below
#'   is a shorter summary:
#' \itemize{
#'   \item \code{objective} objective function, common ones are
#'   \itemize{
#'     \item \code{reg:squarederror} Regression with squared loss.
#'     \item \code{binary:logistic} logistic regression for classification.
#'     \item See \code{\link[=xgb.train]{xgb.train}()} for complete list of objectives.
#'   }
#'   \item \code{eta} step size of each boosting step
#'   \item \code{max_depth} maximum depth of the tree
#'   \item \code{nthread} number of threads used in training, if not set, all threads are used
#' }
#' @param params The list of parameters. The complete list of parameters is available in the
#'   [online documentation](http://xgboost.readthedocs.io/en/latest/parameter.html).
#'   Below is a shorter summary:
#' - `objective`: Objective function, common ones are
#'   - `reg:squarederror`: Regression with squared loss.
#'   - `binary:logistic`: Logistic regression for classification.
#'
#' See \code{\link{xgb.train}} for further details.
#' See also demo/ for walkthrough example in R.
#' @param data takes an \code{xgb.DMatrix}, \code{matrix}, or \code{dgCMatrix} as the input.
#' @param nrounds the max number of iterations
#' @param nfold the original dataset is randomly partitioned into \code{nfold} equal size subsamples.
#' @param label vector of response values. Should be provided only when data is an R-matrix.
#' @param missing is only used when input is a dense matrix. By default is set to NA, which means
#'   that NA values should be considered as 'missing' by the algorithm.
#'   Sometimes, 0 or other extreme value might be used to represent missing values.
#'   See [xgb.train()] for complete list of objectives.
#' - `eta`: Step size of each boosting step
#' - `max_depth`: Maximum depth of the tree
#' - `nthread`: Number of threads used in training. If not set, all threads are used
#'
#' See [xgb.train()] for further details.
#' See also demo for walkthrough example in R.
#'
#' Note that, while `params` accepts a `seed` entry and will use such parameter for model training if
#' supplied, this seed is not used for creation of train-test splits, which instead rely on R's own RNG
#' system - thus, for reproducible results, one needs to call the [set.seed()] function beforehand.
#' @param data An `xgb.DMatrix` object, with corresponding fields like `label` or bounds as required
#'   for model training by the objective.
#'
#'   Note that only the basic `xgb.DMatrix` class is supported - variants such as `xgb.QuantileDMatrix`
#'   or `xgb.ExtMemDMatrix` are not supported here.
#' @param nrounds The max number of iterations.
#' @param nfold The original dataset is randomly partitioned into `nfold` equal size subsamples.
#' @param prediction A logical value indicating whether to return the test fold predictions
#'   from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callback.
#' @param showsd \code{boolean}, whether to show standard deviation of cross validation
#' @param metrics list of evaluation metrics to be used in cross validation,
#'   from each CV model. This parameter engages the [xgb.cb.cv.predict()] callback.
#' @param showsd Logical value whether to show standard deviation of cross validation.
#' @param metrics List of evaluation metrics to be used in cross validation,
#'   when it is not specified, the evaluation metric is chosen according to objective function.
#'   Possible options are:
#' \itemize{
#'   \item \code{error} binary classification error rate
#'   \item \code{rmse} Root mean square error
#'   \item \code{logloss} negative log-likelihood function
#'   \item \code{mae} Mean absolute error
#'   \item \code{mape} Mean absolute percentage error
#'   \item \code{auc} Area under curve
#'   \item \code{aucpr} Area under PR curve
#'   \item \code{merror} Exact matching error, used to evaluate multi-class classification
#' }
#' @param obj customized objective function. Returns gradient and second order
#'   gradient with given prediction and dtrain.
#' @param feval customized evaluation function. Returns
#'   \code{list(metric='metric-name', value='metric-value')} with given
#'   prediction and dtrain.
#' @param stratified a \code{boolean} indicating whether sampling of folds should be stratified
#'   by the values of outcome labels.
#' @param folds \code{list} provides a possibility to use a list of pre-defined CV folds
#'   (each element must be a vector of test fold's indices). When folds are supplied,
#'   the \code{nfold} and \code{stratified} parameters are ignored.
#' @param train_folds \code{list} specifying which indices to use for training. If \code{NULL}
#'   (the default) all indices not specified in \code{folds} will be used for training.
#' @param verbose \code{boolean}, print the statistics during the process
#' @param print_every_n Print each n-th iteration evaluation messages when \code{verbose>0}.
#'   Default is 1 which means all messages are printed. This parameter is passed to the
#'   \code{\link{cb.print.evaluation}} callback.
#' @param early_stopping_rounds If \code{NULL}, the early stopping function is not triggered.
#'   If set to an integer \code{k}, training with a validation set will stop if the performance
#'   doesn't improve for \code{k} rounds.
#'   Setting this parameter engages the \code{\link{cb.early.stop}} callback.
#' @param maximize If \code{feval} and \code{early_stopping_rounds} are set,
#'   then this parameter must be set as well.
#'   When it is \code{TRUE}, it means the larger the evaluation score the better.
#'   This parameter is passed to the \code{\link{cb.early.stop}} callback.
#' @param callbacks a list of callback functions to perform various tasks during boosting.
#'   See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
#'   parameters' values. User can provide either existing or their own callback methods in order
#'   to customize the training process.
#' @param ... other parameters to pass to \code{params}.
#' - `error`: Binary classification error rate
#' - `rmse`: Root mean square error
#' - `logloss`: Negative log-likelihood function
#' - `mae`: Mean absolute error
#' - `mape`: Mean absolute percentage error
#' - `auc`: Area under curve
#' - `aucpr`: Area under PR curve
#' - `merror`: Exact matching error used to evaluate multi-class classification
#' @param obj Customized objective function. Returns gradient and second order
#'   gradient with given prediction and dtrain.
#' @param feval Customized evaluation function. Returns
#'   `list(metric='metric-name', value='metric-value')` with given prediction and dtrain.
#' @param stratified Logical flag indicating whether sampling of folds should be stratified
#'   by the values of outcome labels. For real-valued labels in regression objectives,
#'   stratification will be done by discretizing the labels into up to 5 buckets beforehand.
#'
#'   If passing "auto", will be set to `TRUE` if the objective in `params` is a classification
#'   objective (from XGBoost's built-in objectives, doesn't apply to custom ones), and to
#'   `FALSE` otherwise.
#'
#'   This parameter is ignored when `data` has a `group` field - in such case, the splitting
#'   will be based on whole groups (note that this might make the folds have different sizes).
#'
#'   Value `TRUE` here is **not** supported for custom objectives.
#' @param folds List with pre-defined CV folds (each element must be a vector of test fold's indices).
#'   When folds are supplied, the `nfold` and `stratified` parameters are ignored.
#'
#'   If `data` has a `group` field and the objective requires this field, each fold (list element)
#'   must additionally have two attributes (retrievable through `attributes`) named `group_test`
#'   and `group_train`, which should hold the `group` to assign through [setinfo.xgb.DMatrix()] to
#'   the resulting DMatrices.
#' @param train_folds List specifying which indices to use for training. If `NULL`
#'   (the default) all indices not specified in `folds` will be used for training.
#'
#'   This is not supported when `data` has `group` field.
#' @param verbose Logical flag. Should statistics be printed during the process?
#' @param print_every_n Print each nth iteration evaluation messages when `verbose > 0`.
#'   Default is 1 which means all messages are printed. This parameter is passed to the
#'   [xgb.cb.print.evaluation()] callback.
#' @param early_stopping_rounds If `NULL`, the early stopping function is not triggered.
#'   If set to an integer `k`, training with a validation set will stop if the performance
#'   doesn't improve for `k` rounds.
#'   Setting this parameter engages the [xgb.cb.early.stop()] callback.
#' @param maximize If `feval` and `early_stopping_rounds` are set,
#'   then this parameter must be set as well.
#'   When it is `TRUE`, it means the larger the evaluation score the better.
#'   This parameter is passed to the [xgb.cb.early.stop()] callback.
#' @param callbacks A list of callback functions to perform various tasks during boosting.
#'   See [xgb.Callback()]. Some of the callbacks are automatically created depending on the
#'   parameters' values. User can provide either existing or their own callback methods in order
#'   to customize the training process.
#' @param ... Other parameters to pass to `params`.
#'
#' @details
#' The original sample is randomly partitioned into \code{nfold} equal size subsamples.
#' The original sample is randomly partitioned into `nfold` equal size subsamples.
#'
#' Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model, and the remaining \code{nfold - 1} subsamples are used as training data.
#' Of the `nfold` subsamples, a single subsample is retained as the validation data for testing the model,
#' and the remaining `nfold - 1` subsamples are used as training data.
#'
#' The cross-validation process is then repeated \code{nrounds} times, with each of the \code{nfold} subsamples used exactly once as the validation data.
#' The cross-validation process is then repeated `nrounds` times, with each of the
#' `nfold` subsamples used exactly once as the validation data.
#'
#' All observations are used for both training and validation.
#'
#' Adapted from \url{https://en.wikipedia.org/wiki/Cross-validation_\%28statistics\%29}
#'
#' @return
#' An object of class \code{xgb.cv.synchronous} with the following elements:
#' \itemize{
#'   \item \code{call} a function call.
#'   \item \code{params} parameters that were passed to the xgboost library. Note that it does not
#'     capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
#'   \item \code{callbacks} callback functions that were either automatically assigned or
#'     explicitly passed.
#'   \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
#'     first column corresponding to iteration number and the rest corresponding to the
#'     CV-based evaluation means and standard deviations for the training and test CV-sets.
#'     It is created by the \code{\link{cb.evaluation.log}} callback.
#'   \item \code{niter} number of boosting iterations.
#'   \item \code{nfeatures} number of features in training data.
#'   \item \code{folds} the list of CV folds' indices - either those passed through the \code{folds}
#'     parameter or randomly generated.
#'   \item \code{best_iteration} iteration number with the best evaluation metric value
#'     (only available with early stopping).
#'   \item \code{best_ntreelimit} and the \code{ntreelimit} Deprecated attributes, use \code{best_iteration} instead.
#'   \item \code{pred} CV prediction values available when \code{prediction} is set.
#'     It is either vector or matrix (see \code{\link{cb.cv.predict}}).
#'   \item \code{models} a list of the CV folds' models. It is only available with the explicit
#'     setting of the \code{cb.cv.predict(save_models = TRUE)} callback.
#' }
#' An object of class 'xgb.cv.synchronous' with the following elements:
#' - `call`: Function call.
#' - `params`: Parameters that were passed to the xgboost library. Note that it does not
#'   capture parameters changed by the [xgb.cb.reset.parameters()] callback.
#' - `evaluation_log`: Evaluation history stored as a `data.table` with the
#'   first column corresponding to iteration number and the rest corresponding to the
#'   CV-based evaluation means and standard deviations for the training and test CV-sets.
#'   It is created by the [xgb.cb.evaluation.log()] callback.
#' - `niter`: Number of boosting iterations.
#' - `nfeatures`: Number of features in training data.
#' - `folds`: The list of CV folds' indices - either those passed through the `folds`
#'   parameter or randomly generated.
#' - `best_iteration`: Iteration number with the best evaluation metric value
#'   (only available with early stopping).
#'
#' Plus other potential elements that are the result of callbacks, such as a list `cv_predict` with
#' a sub-element `pred` when passing `prediction = TRUE`, which is added by the [xgb.cb.cv.predict()]
#' callback (note that one can also pass it manually under `callbacks` with different settings,
#' such as saving also the models created during cross validation); or a list `early_stop` which
#' will contain elements such as `best_iteration` when using the early stopping callback ([xgb.cb.early.stop()]).
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))
#' cv <- xgb.cv(data = dtrain, nrounds = 3, nthread = 2, nfold = 5, metrics = list("rmse","auc"),
#'              max_depth = 3, eta = 1, objective = "binary:logistic")
#' data(agaricus.train, package = "xgboost")
#'
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))
#'
#' cv <- xgb.cv(
#'   data = dtrain,
#'   nrounds = 3,
#'   nthread = 2,
#'   nfold = 5,
#'   metrics = list("rmse", "auc"),
#'   max_depth = 3,
#'   eta = 1, objective = "binary:logistic"
#' )
#' print(cv)
#' print(cv, verbose=TRUE)
#' print(cv, verbose = TRUE)
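#'
#' # Hedged sketch based on the RNG note above: seed R's RNG before calling
#' # xgb.cv() to make the fold splits reproducible (the 'seed' entry in
#' # 'params' does not govern fold creation):
#' set.seed(123)
#' cv <- xgb.cv(
#'   data = dtrain, nrounds = 3, nthread = 2, nfold = 5,
#'   objective = "binary:logistic"
#' )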
#'
#' @export
xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing = NA,
                   prediction = FALSE, showsd = TRUE, metrics=list(),
                   obj = NULL, feval = NULL, stratified = TRUE, folds = NULL, train_folds = NULL,
                   verbose = TRUE, print_every_n=1L,
xgb.cv <- function(params = list(), data, nrounds, nfold,
                   prediction = FALSE, showsd = TRUE, metrics = list(),
                   obj = NULL, feval = NULL, stratified = "auto", folds = NULL, train_folds = NULL,
                   verbose = TRUE, print_every_n = 1L,
                   early_stopping_rounds = NULL, maximize = NULL, callbacks = list(), ...) {

  check.deprecation(...)
  stopifnot(inherits(data, "xgb.DMatrix"))
  if (inherits(data, "xgb.DMatrix") && .Call(XGCheckNullPtr_R, data)) {
    stop("'data' is an invalid 'xgb.DMatrix' object. Must be constructed again.")
  }

  params <- check.booster.params(params, ...)
  # TODO: should we deprecate the redundant 'metrics' parameter?
@@ -133,19 +159,22 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
  check.custom.obj()
  check.custom.eval()

  #if (is.null(params[['eval_metric']]) && is.null(feval))
  #  stop("Either 'eval_metric' or 'feval' must be provided for CV")
  if (stratified == "auto") {
    if (is.character(params$objective)) {
      stratified <- (
        (params$objective %in% .CLASSIFICATION_OBJECTIVES())
        && !(params$objective %in% .RANKING_OBJECTIVES())
      )
    } else {
      stratified <- FALSE
    }
  }

  # Check the labels
  if ((inherits(data, 'xgb.DMatrix') && is.null(getinfo(data, 'label'))) ||
      (!inherits(data, 'xgb.DMatrix') && is.null(label))) {
    stop("Labels must be provided for CV either through xgb.DMatrix, or through 'label=' when 'data' is matrix")
  } else if (inherits(data, 'xgb.DMatrix')) {
    if (!is.null(label))
      warning("xgb.cv: label will be ignored, since data is of type xgb.DMatrix")
    cv_label <- getinfo(data, 'label')
  } else {
    cv_label <- label
  # Check the labels and groups
  cv_label <- getinfo(data, "label")
  cv_group <- getinfo(data, "group")
  if (!is.null(train_folds) && NROW(cv_group)) {
    stop("'train_folds' is not supported for DMatrix object with 'group' field.")
  }

  # CV folds
@@ -156,121 +185,171 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
  } else {
    if (nfold <= 1)
      stop("'nfold' must be > 1")
    folds <- generate.cv.folds(nfold, nrow(data), stratified, cv_label, params)
    folds <- generate.cv.folds(nfold, nrow(data), stratified, cv_label, cv_group, params)
  }

  # Potential TODO: sequential CV
  #if (strategy == 'sequential')
  #  stop('Sequential CV strategy is not yet implemented')
  # Callbacks
  tmp <- .process.callbacks(callbacks, is_cv = TRUE)
  callbacks <- tmp$callbacks
  cb_names <- tmp$cb_names
  rm(tmp)

  # Early stopping callback
  if (!is.null(early_stopping_rounds) && !("early_stop" %in% cb_names)) {
    callbacks <- add.callback(
      callbacks,
      xgb.cb.early.stop(
        early_stopping_rounds,
        maximize = maximize,
        verbose = verbose
      ),
      as_first_elt = TRUE
    )
  }
  # verbosity & evaluation printing callback:
  params <- c(params, list(silent = 1))
  print_every_n <- max(as.integer(print_every_n), 1L)
  if (!has.callbacks(callbacks, 'cb.print.evaluation') && verbose) {
    callbacks <- add.cb(callbacks, cb.print.evaluation(print_every_n, showsd = showsd))
  if (verbose && !("print_evaluation" %in% cb_names)) {
    callbacks <- add.callback(callbacks, xgb.cb.print.evaluation(print_every_n, showsd = showsd))
  }
  # evaluation log callback: always is on in CV
  evaluation_log <- list()
  if (!has.callbacks(callbacks, 'cb.evaluation.log')) {
    callbacks <- add.cb(callbacks, cb.evaluation.log())
  }
  # Early stopping callback
  stop_condition <- FALSE
  if (!is.null(early_stopping_rounds) &&
      !has.callbacks(callbacks, 'cb.early.stop')) {
    callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds,
                                                 maximize = maximize, verbose = verbose))
  if (!("evaluation_log" %in% cb_names)) {
    callbacks <- add.callback(callbacks, xgb.cb.evaluation.log())
  }
  # CV-predictions callback
  if (prediction &&
      !has.callbacks(callbacks, 'cb.cv.predict')) {
    callbacks <- add.cb(callbacks, cb.cv.predict(save_models = FALSE))
  if (prediction && !("cv_predict" %in% cb_names)) {
    callbacks <- add.callback(callbacks, xgb.cb.cv.predict(save_models = FALSE))
  }
  # Sort the callbacks into categories
  cb <- categorize.callbacks(callbacks)

  # create the booster-folds
  # train_folds
  dall <- xgb.get.DMatrix(data, label, missing)
  dall <- data
  bst_folds <- lapply(seq_along(folds), function(k) {
    dtest <- slice(dall, folds[[k]])
    dtest <- xgb.slice.DMatrix(dall, folds[[k]], allow_groups = TRUE)
    # code originally contributed by @RolandASc on stackoverflow
    if (is.null(train_folds))
      dtrain <- slice(dall, unlist(folds[-k]))
      dtrain <- xgb.slice.DMatrix(dall, unlist(folds[-k]), allow_groups = TRUE)
    else
      dtrain <- slice(dall, train_folds[[k]])
    handle <- xgb.Booster.handle(params, list(dtrain, dtest))
    list(dtrain = dtrain, bst = handle, watchlist = list(train = dtrain, test = dtest), index = folds[[k]])
      dtrain <- xgb.slice.DMatrix(dall, train_folds[[k]], allow_groups = TRUE)
    if (!is.null(attributes(folds[[k]])$group_test)) {
      setinfo(dtest, "group", attributes(folds[[k]])$group_test)
      setinfo(dtrain, "group", attributes(folds[[k]])$group_train)
    }
    bst <- xgb.Booster(
      params = params,
      cachelist = list(dtrain, dtest),
      modelfile = NULL
    )
    bst <- bst$bst
    list(dtrain = dtrain, bst = bst, evals = list(train = dtrain, test = dtest), index = folds[[k]])
  })
  rm(dall)
  # a "basket" to collect some results from callbacks
  basket <- list()

  # extract parameters that can affect the relationship b/w #trees and #iterations
  num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1) # nolint
  num_parallel_tree <- max(as.numeric(NVL(params[['num_parallel_tree']], 1)), 1) # nolint

  # those are fixed for CV (no training continuation)
  begin_iteration <- 1
  end_iteration <- nrounds

  .execute.cb.before.training(
    callbacks,
    bst_folds,
    dall,
    NULL,
    begin_iteration,
    end_iteration
  )

  # synchronous CV boosting: run CV folds' models within each iteration
  for (iteration in begin_iteration:end_iteration) {

    for (f in cb$pre_iter) f()
    .execute.cb.before.iter(
      callbacks,
      bst_folds,
      dall,
      NULL,
      iteration
    )

    msg <- lapply(bst_folds, function(fd) {
      xgb.iter.update(fd$bst, fd$dtrain, iteration - 1, obj)
      xgb.iter.eval(fd$bst, fd$watchlist, iteration - 1, feval)
      xgb.iter.update(
        bst = fd$bst,
        dtrain = fd$dtrain,
        iter = iteration - 1,
        obj = obj
      )
      xgb.iter.eval(
        bst = fd$bst,
        evals = fd$evals,
        iter = iteration - 1,
        feval = feval
      )
    })
    msg <- simplify2array(msg)
    bst_evaluation <- rowMeans(msg)
    bst_evaluation_err <- sqrt(rowMeans(msg^2) - bst_evaluation^2) # nolint

    for (f in cb$post_iter) f()
    should_stop <- .execute.cb.after.iter(
      callbacks,
      bst_folds,
      dall,
      NULL,
      iteration,
      msg
    )

    if (stop_condition) break
    if (should_stop) break
  }
  for (f in cb$finalize) f(finalize = TRUE)
  cb_outputs <- .execute.cb.after.training(
    callbacks,
    bst_folds,
    dall,
    NULL,
    iteration,
    msg
  )

  # the CV result
  ret <- list(
    call = match.call(),
    params = params,
    callbacks = callbacks,
    evaluation_log = evaluation_log,
    niter = end_iteration,
    nfeatures = ncol(data),
    niter = iteration,
    nfeatures = ncol(dall),
    folds = folds
  )
  ret <- c(ret, basket)
  ret <- c(ret, cb_outputs)

  class(ret) <- 'xgb.cv.synchronous'
  invisible(ret)
  return(invisible(ret))
}

#' Print xgb.cv result
#'
#' Prints formatted results of \code{xgb.cv}.
#' Prints formatted results of [xgb.cv()].
#'
#' @param x an \code{xgb.cv.synchronous} object
#' @param verbose whether to print detailed data
#' @param ... passed to \code{data.table.print}
#' @param x An `xgb.cv.synchronous` object.
#' @param verbose Whether to print detailed data.
#' @param ... Passed to `data.table.print()`.
#'
#' @details
#' When not verbose, it would only print the evaluation results,
#' including the best iteration (when available).
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.train, package = "xgboost")
#'
#' train <- agaricus.train
#' cv <- xgb.cv(data = train$data, label = train$label, nfold = 5, max_depth = 2,
#'              eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' cv <- xgb.cv(
#'   data = xgb.DMatrix(train$data, label = train$label),
#'   nfold = 5,
#'   max_depth = 2,
#'   eta = 1,
#'   nthread = 2,
#'   nrounds = 2,
#'   objective = "binary:logistic"
#' )
#' print(cv)
#' print(cv, verbose=TRUE)
#' print(cv, verbose = TRUE)
#'
#' @rdname print.xgb.cv
#' @method print xgb.cv.synchronous
@@ -290,23 +369,16 @@ print.xgb.cv.synchronous <- function(x, verbose = FALSE, ...) {
             paste0('"', unlist(x$params), '"'),
             sep = ' = ', collapse = ', '), '\n', sep = '')
  }
  if (!is.null(x$callbacks) && length(x$callbacks) > 0) {
    cat('callbacks:\n')
    lapply(callback.calls(x$callbacks), function(x) {
      cat(' ')
      print(x)
    })
  }

  for (n in c('niter', 'best_iteration', 'best_ntreelimit')) {
    if (is.null(x[[n]]))
  for (n in c('niter', 'best_iteration')) {
    if (is.null(x$early_stop[[n]]))
      next
    cat(n, ': ', x[[n]], '\n', sep = '')
    cat(n, ': ', x$early_stop[[n]], '\n', sep = '')
  }

  if (!is.null(x$pred)) {
  if (!is.null(x$cv_predict$pred)) {
    cat('pred:\n')
    str(x$pred)
    str(x$cv_predict$pred)
  }
  }

@@ -314,9 +386,9 @@ print.xgb.cv.synchronous <- function(x, verbose = FALSE, ...) {
    cat('evaluation_log:\n')
    print(x$evaluation_log, row.names = FALSE, ...)

    if (!is.null(x$best_iteration)) {
    if (!is.null(x$early_stop$best_iteration)) {
      cat('Best iteration:\n')
      print(x$evaluation_log[x$best_iteration], row.names = FALSE, ...)
      print(x$evaluation_log[x$early_stop$best_iteration], row.names = FALSE, ...)
    }
  invisible(x)
}
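
# Illustrative sketch (hedged): with the restructured result object, callback
# outputs live in sub-lists, e.g. for a 'cv' result from xgb.cv() run with
# early_stopping_rounds and prediction = TRUE:
#   cv$evaluation_log              # per-iteration metrics (data.table)
#   cv$early_stop$best_iteration   # added by xgb.cb.early.stop()
#   cv$cv_predict$pred             # added by xgb.cb.cv.predict()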
@@ -1,32 +1,44 @@
#' Dump an xgboost model in text format.
#' Dump an XGBoost model in text format.
#'
#' Dump an xgboost model in text format.
#' Dump an XGBoost model in text format.
#'
#' @param model the model object.
#' @param fname the name of the text file where to save the model text dump.
#'   If not provided or set to \code{NULL}, the model is returned as a \code{character} vector.
#' @param fmap feature map file representing feature types.
#'   See demo/ for walkthrough example in R, and
#'   \url{https://github.com/dmlc/xgboost/blob/master/demo/data/featmap.txt}
#'   for example format.
#' @param with_stats whether to dump some additional statistics about the splits.
#'   When this option is on, the model dump contains two additional values:
#'   gain is the approximate loss function gain we get in each split;
#'   cover is the sum of second order gradient in each node.
#' @param dump_format either 'text' or 'json' format could be specified.
#' @param ... currently not used
#' @param model The model object.
#' @param fname The name of the text file where to save the model text dump.
#'   If not provided or set to `NULL`, the model is returned as a character vector.
#' @param fmap Feature map file representing feature types. See demo/ for a walkthrough
#'   example in R, and \url{https://github.com/dmlc/xgboost/blob/master/demo/data/featmap.txt}
#'   to see an example of the value.
#' @param with_stats Whether to dump some additional statistics about the splits.
#'   When this option is on, the model dump contains two additional values:
#'   gain is the approximate loss function gain we get in each split;
#'   cover is the sum of second order gradient in each node.
#' @param dump_format Either 'text', 'json', or 'dot' (graphviz) format could be specified.
#'
#'   Format 'dot' for a single tree can be passed directly to packages that consume this format
#'   for graph visualization, such as function `DiagrammeR::grViz()`
#' @param ... Currently not used
#'
#' @return
#' If fname is not provided or set to \code{NULL} the function will return the model
#' as a \code{character} vector. Otherwise it will return \code{TRUE}.
#' If fname is not provided or set to `NULL` the function will return the model
#' as a character vector. Otherwise it will return `TRUE`.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' \dontshow{RhpcBLASctl::omp_set_num_threads(1)}
#' data(agaricus.train, package = "xgboost")
#' data(agaricus.test, package = "xgboost")
#'
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#'
#' bst <- xgb.train(
#'   data = xgb.DMatrix(train$data, label = train$label),
#'   max_depth = 2,
#'   eta = 1,
#'   nthread = 2,
#'   nrounds = 2,
#'   objective = "binary:logistic"
#' )
#'
#' # save the model in file 'xgb.model.dump'
#' dump_path = file.path(tempdir(), 'model.dump')
#' xgb.dump(bst, dump_path, with_stats = TRUE)
@@ -35,11 +47,15 @@
#' print(xgb.dump(bst, with_stats = TRUE))
#'
#' # print in JSON format:
#' cat(xgb.dump(bst, with_stats = TRUE, dump_format='json'))
#' cat(xgb.dump(bst, with_stats = TRUE, dump_format = "json"))
#'
#' # plot first tree leveraging the 'dot' format
#' if (requireNamespace('DiagrammeR', quietly = TRUE)) {
#'   DiagrammeR::grViz(xgb.dump(bst, dump_format = "dot")[[1L]])
#' }
#' @export
xgb.dump <- function(model, fname = NULL, fmap = "", with_stats=FALSE,
                     dump_format = c("text", "json"), ...) {
xgb.dump <- function(model, fname = NULL, fmap = "", with_stats = FALSE,
                     dump_format = c("text", "json", "dot"), ...) {
  check.deprecation(...)
  dump_format <- match.arg(dump_format)
  if (!inherits(model, "xgb.Booster"))
@@ -49,9 +65,16 @@ xgb.dump <- function(model, fname = NULL, fmap = "", with_stats=FALSE,
  if (!(is.null(fmap) || is.character(fmap)))
    stop("fmap: argument must be a character string (when provided)")

  model <- xgb.Booster.complete(model)
  model_dump <- .Call(XGBoosterDumpModel_R, model$handle, NVL(fmap, "")[1], as.integer(with_stats),
                      as.character(dump_format))
  model_dump <- .Call(
    XGBoosterDumpModel_R,
    xgb.get.handle(model),
    NVL(fmap, "")[1],
    as.integer(with_stats),
    as.character(dump_format)
  )
  if (dump_format == "dot") {
    # strip the leading "booster[N]" header from each per-tree dump
    return(sapply(model_dump, function(x) gsub("^booster\\[\\d+\\]\\n", "", x)))
  }

  if (is.null(fname))
    model_dump <- gsub('\t', '', model_dump, fixed = TRUE)

@@ -1,10 +1,9 @@
# ggplot backend for the xgboost plotting facilities

#' @rdname xgb.plot.importance
#' @export
xgb.ggplot.importance <- function(importance_matrix = NULL, top_n = NULL, measure = NULL,
                                  rel_to_first = FALSE, n_clusters = c(1:10), ...) {
                                  rel_to_first = FALSE, n_clusters = seq_len(10), ...) {

  importance_matrix <- xgb.plot.importance(importance_matrix, top_n = top_n, measure = measure,
                                           rel_to_first = rel_to_first, plot = FALSE, ...)
@@ -103,6 +102,27 @@ xgb.ggplot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med
#' @export
xgb.ggplot.shap.summary <- function(data, shap_contrib = NULL, features = NULL, top_n = 10, model = NULL,
                                    trees = NULL, target_class = NULL, approxcontrib = FALSE, subsample = NULL) {
  if (inherits(data, "xgb.DMatrix")) {
    stop(
      "'xgb.ggplot.shap.summary' is not compatible with 'xgb.DMatrix' objects. Try passing a matrix or data.frame."
    )
  }
  cols_categ <- NULL
  if (!is.null(model)) {
    ftypes <- getinfo(model, "feature_type")
    if (NROW(ftypes)) {
      if (length(ftypes) != ncol(data)) {
        stop(sprintf("'data' has incorrect number of columns (expected: %d, got: %d).", length(ftypes), ncol(data)))
      }
      cols_categ <- colnames(data)[ftypes == "c"]
    }
  } else if (inherits(data, "data.frame")) {
    cols_categ <- names(data)[sapply(data, function(x) is.factor(x) || is.character(x))]
  }
  if (NROW(cols_categ)) {
    warning("Categorical features are ignored in 'xgb.ggplot.shap.summary'.")
  }

  data_list <- xgb.shap.data(
    data = data,
    shap_contrib = shap_contrib,
@@ -115,6 +135,10 @@ xgb.ggplot.shap.summary <- function(data, shap_contrib = NULL, features = NULL,
    subsample = subsample,
    max_observations = 10000 # 10,000 samples per feature.
  )
  if (NROW(cols_categ)) {
    data_list <- lapply(data_list, function(x) x[, !(colnames(x) %in% cols_categ), drop = FALSE])
  }

  p_data <- prepare.ggplot.shap.data(data_list, normalize = TRUE)
  # Reverse factor levels so that the first level is at the top of the plot
  p_data[, "feature" := factor(feature, rev(levels(feature)))]
@@ -127,21 +151,20 @@ xgb.ggplot.shap.summary <- function(data, shap_contrib = NULL, features = NULL,
  p
}

#' Combine and melt feature values and SHAP contributions for sample
#' observations.
#' Combine feature values and SHAP values
#'
#' Conforms to data format required for ggplot functions.
#' Internal function used to combine and melt feature values and SHAP contributions
#' as required for ggplot functions related to SHAP.
#'
#' Internal utility function.
#'
#' @param data_list List containing 'data' and 'shap_contrib' returned by
#'   \code{xgb.shap.data()}.
#' @param normalize Whether to standardize feature values to have mean 0 and
#'   standard deviation 1 (useful for comparing multiple features on the same
#'   plot). Default \code{FALSE}.
#'
#' @return A data.table containing the observation ID, the feature name, the
#' @param data_list The result of `xgb.shap.data()`.
#' @param normalize Whether to standardize feature values to mean 0 and
#'   standard deviation 1. This is useful for comparing multiple features on the same
#'   plot. Default is `FALSE`. Note that it cannot be used when the data contains
#'   categorical features.
#' @return A `data.table` containing the observation ID, the feature name, the
#'   feature value (normalized if specified), and the SHAP contribution value.
#' @noRd
#' @keywords internal
prepare.ggplot.shap.data <- function(data_list, normalize = FALSE) {
  data <- data_list[["data"]]
  shap_contrib <- data_list[["shap_contrib"]]
@@ -162,14 +185,15 @@ prepare.ggplot.shap.data <- function(data_list, normalize = FALSE) {
  p_data
}

#' Scale feature value to have mean 0, standard deviation 1
#' Scale feature values
#'
#' This is used to compare multiple features on the same plot.
#' Internal utility function
#' Internal function that scales feature values to mean 0 and standard deviation 1.
#' Useful to compare multiple features on the same plot.
#'
#' @param x Numeric vector
#'
#' @return Numeric vector with mean 0 and sd 1.
#' @param x Numeric vector.
#' @return Numeric vector with mean 0 and standard deviation 1.
#' @noRd
#' @keywords internal
normalize <- function(x) {
  loc <- mean(x, na.rm = TRUE)
  scale <- stats::sd(x, na.rm = TRUE)
@@ -181,7 +205,7 @@ normalize <- function(x) {
# ... the plots
# cols number of columns
# internal utility function
multiplot <- function(..., cols = 1) {
multiplot <- function(..., cols) {
  plots <- list(...)
  num_plots <- length(plots)

@@ -1,110 +1,139 @@
#' Importance of features in a model.
#' Feature importance
#'
#' Creates a \code{data.table} of feature importances in a model.
#'
#' @param feature_names character vector of feature names. If the model already
#'   contains feature names, those would be used when \code{feature_names=NULL} (default value).
#'   Non-null \code{feature_names} could be provided to override those in the model.
#' @param model object of class \code{xgb.Booster}.
#' @param trees (only for the gbtree booster) an integer vector of tree indices that should be included
#'   into the importance calculation. If set to \code{NULL}, all trees of the model are parsed.
#'   It could be useful, e.g., in multiclass classification to get feature importances
#'   for each class separately. IMPORTANT: the tree index in xgboost models
#'   is zero-based (e.g., use \code{trees = 0:4} for first 5 trees).
#' @param data deprecated.
#' @param label deprecated.
#' @param target deprecated.
#' Creates a `data.table` of feature importances.
#'
#' @details
#'
#' This function works for both linear and tree models.
#'
#' For linear models, the importance is the absolute magnitude of linear coefficients.
#' For that reason, in order to obtain a meaningful ranking by importance for a linear model,
#' the features need to be on the same scale (which you also would want to do when using either
#' L1 or L2 regularization).
#' To obtain a meaningful ranking by importance for linear models, the features need to
#' be on the same scale (which is also recommended when using L1 or L2 regularization).
#'
#' @return
#' @param feature_names Character vector used to overwrite the feature names
#'   of the model. The default is `NULL` (use original feature names).
#' @param model Object of class `xgb.Booster`.
#' @param trees An integer vector of tree indices that should be included
#'   into the importance calculation (only for the "gbtree" booster).
#'   The default (`NULL`) parses all trees.
#'   It could be useful, e.g., in multiclass classification to get feature importances
#'   for each class separately. *Important*: the tree index in XGBoost models
#'   is zero-based (e.g., use `trees = 0:4` for the first five trees).
#' @param data Deprecated.
#' @param label Deprecated.
#' @param target Deprecated.
#' @return A `data.table` with the following columns:
#'
#' For a tree model, a \code{data.table} with the following columns:
#' \itemize{
#'   \item \code{Features} names of the features used in the model;
#'   \item \code{Gain} represents fractional contribution of each feature to the model based on
#'     the total gain of this feature's splits. Higher percentage means a more important
#'     predictive feature.
#'   \item \code{Cover} metric of the number of observation related to this feature;
#'   \item \code{Frequency} percentage representing the relative number of times
#'     a feature have been used in trees.
#' }
#' For a tree model:
#' - `Features`: Names of the features used in the model.
#' - `Gain`: Fractional contribution of each feature to the model based on
#'   the total gain of this feature's splits. Higher percentage means higher importance.
#' - `Cover`: Metric of the number of observation related to this feature.
#' - `Frequency`: Percentage of times a feature has been used in trees.
#'
#' A linear model's importance \code{data.table} has the following columns:
#' \itemize{
#'   \item \code{Features} names of the features used in the model;
#'   \item \code{Weight} the linear coefficient of this feature;
#'   \item \code{Class} (only for multiclass models) class label.
#' }
#' For a linear model:
#' - `Features`: Names of the features used in the model.
#' - `Weight`: Linear coefficient of this feature.
#' - `Class`: Class label (only for multiclass models).
#'
#' If \code{feature_names} is not provided and \code{model} doesn't have \code{feature_names},
#' index of the features will be used instead. Because the index is extracted from the model dump
#' If `feature_names` is not provided and `model` doesn't have `feature_names`,
#' the index of the features will be used instead. Because the index is extracted from the model dump
#' (based on C++ code), it starts at 0 (as in C/C++ or Python) instead of 1 (usual in R).
#'
#' @examples
#'
#' # binomial classification using gbtree:
#' data(agaricus.train, package='xgboost')
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' # binomial classification using "gbtree":
#' data(agaricus.train, package = "xgboost")
#'
#' bst <- xgb.train(
#'   data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label),
#'   max_depth = 2,
#'   eta = 1,
#'   nthread = 2,
#'   nrounds = 2,
#'   objective = "binary:logistic"
#' )
#'
#' xgb.importance(model = bst)
#'
#' # binomial classification using gblinear:
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, booster = "gblinear",
#'                eta = 0.3, nthread = 1, nrounds = 20, objective = "binary:logistic")
#' # binomial classification using "gblinear":
#' bst <- xgb.train(
#'   data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label),
#'   booster = "gblinear",
#'   eta = 0.3,
#'   nthread = 1,
#'   nrounds = 20, objective = "binary:logistic"
#' )
#'
#' xgb.importance(model = bst)
#'
#' # multiclass classification using gbtree:
|
||||
#' # multiclass classification using "gbtree":
|
||||
#' nclass <- 3
|
||||
#' nrounds <- 10
|
||||
#' mbst <- xgboost(data = as.matrix(iris[, -5]), label = as.numeric(iris$Species) - 1,
|
||||
#' max_depth = 3, eta = 0.2, nthread = 2, nrounds = nrounds,
|
||||
#' objective = "multi:softprob", num_class = nclass)
|
||||
#' mbst <- xgb.train(
|
||||
#' data = xgb.DMatrix(
|
||||
#' as.matrix(iris[, -5]),
|
||||
#' label = as.numeric(iris$Species) - 1
|
||||
#' ),
|
||||
#' max_depth = 3,
|
||||
#' eta = 0.2,
|
||||
#' nthread = 2,
|
||||
#' nrounds = nrounds,
|
||||
#' objective = "multi:softprob",
|
||||
#' num_class = nclass
|
||||
#' )
|
||||
#'
|
||||
#' # all classes clumped together:
|
||||
#' xgb.importance(model = mbst)
|
||||
#' # inspect importances separately for each class:
|
||||
#' xgb.importance(model = mbst, trees = seq(from=0, by=nclass, length.out=nrounds))
|
||||
#' xgb.importance(model = mbst, trees = seq(from=1, by=nclass, length.out=nrounds))
|
||||
#' xgb.importance(model = mbst, trees = seq(from=2, by=nclass, length.out=nrounds))
|
||||
#'
|
||||
#' # multiclass classification using gblinear:
|
||||
#' mbst <- xgboost(data = scale(as.matrix(iris[, -5])), label = as.numeric(iris$Species) - 1,
|
||||
#' booster = "gblinear", eta = 0.2, nthread = 1, nrounds = 15,
|
||||
#' objective = "multi:softprob", num_class = nclass)
|
||||
#' # inspect importances separately for each class:
|
||||
#' xgb.importance(
|
||||
#' model = mbst, trees = seq(from = 0, by = nclass, length.out = nrounds)
|
||||
#' )
|
||||
#' xgb.importance(
|
||||
#' model = mbst, trees = seq(from = 1, by = nclass, length.out = nrounds)
|
||||
#' )
|
||||
#' xgb.importance(
|
||||
#' model = mbst, trees = seq(from = 2, by = nclass, length.out = nrounds)
|
||||
#' )
|
||||
#'
|
||||
#' # multiclass classification using "gblinear":
|
||||
#' mbst <- xgb.train(
|
||||
#' data = xgb.DMatrix(
|
||||
#' scale(as.matrix(iris[, -5])),
|
||||
#' label = as.numeric(iris$Species) - 1
|
||||
#' ),
|
||||
#' booster = "gblinear",
|
||||
#' eta = 0.2,
|
||||
#' nthread = 1,
|
||||
#' nrounds = 15,
|
||||
#' objective = "multi:softprob",
|
||||
#' num_class = nclass
|
||||
#' )
|
||||
#'
|
||||
#' xgb.importance(model = mbst)
|
||||
#'
|
||||
#' @export
|
||||
xgb.importance <- function(feature_names = NULL, model = NULL, trees = NULL,
|
||||
data = NULL, label = NULL, target = NULL){
|
||||
xgb.importance <- function(model = NULL, feature_names = getinfo(model, "feature_name"), trees = NULL,
|
||||
data = NULL, label = NULL, target = NULL) {
|
||||
|
||||
if (!(is.null(data) && is.null(label) && is.null(target)))
|
||||
warning("xgb.importance: parameters 'data', 'label' and 'target' are deprecated")
|
||||
|
||||
if (!inherits(model, "xgb.Booster"))
|
||||
stop("model: must be an object of class xgb.Booster")
|
||||
|
||||
if (is.null(feature_names) && !is.null(model$feature_names))
|
||||
feature_names <- model$feature_names
|
||||
|
||||
if (!(is.null(feature_names) || is.character(feature_names)))
|
||||
stop("feature_names: Has to be a character vector")
|
||||
|
||||
model <- xgb.Booster.complete(model)
|
||||
config <- jsonlite::fromJSON(xgb.config(model))
|
||||
if (config$learner$gradient_booster$name == "gblinear") {
|
||||
handle <- xgb.get.handle(model)
|
||||
if (xgb.booster_type(model) == "gblinear") {
|
||||
args <- list(importance_type = "weight", feature_names = feature_names)
|
||||
results <- .Call(
|
||||
XGBoosterFeatureScore_R, model$handle, jsonlite::toJSON(args, auto_unbox = TRUE, null = "null")
|
||||
XGBoosterFeatureScore_R, handle, jsonlite::toJSON(args, auto_unbox = TRUE, null = "null")
|
||||
)
|
||||
names(results) <- c("features", "shape", "weight")
|
||||
n_classes <- if (length(results$shape) == 2) { results$shape[2] } else { 0 }
|
||||
if (length(results$shape) == 2) {
|
||||
n_classes <- results$shape[2]
|
||||
} else {
|
||||
n_classes <- 0
|
||||
}
|
||||
importance <- if (n_classes == 0) {
|
||||
data.table(Feature = results$features, Weight = results$weight)[order(-abs(Weight))]
|
||||
} else {
|
||||
@@ -118,7 +147,7 @@ xgb.importance <- function(feature_names = NULL, model = NULL, trees = NULL,
|
||||
for (importance_type in c("weight", "total_gain", "total_cover")) {
|
||||
args <- list(importance_type = importance_type, feature_names = feature_names, tree_idx = trees)
|
||||
results <- .Call(
|
||||
XGBoosterFeatureScore_R, model$handle, jsonlite::toJSON(args, auto_unbox = TRUE, null = "null")
|
||||
XGBoosterFeatureScore_R, handle, jsonlite::toJSON(args, auto_unbox = TRUE, null = "null")
|
||||
)
|
||||
names(results) <- c("features", "shape", importance_type)
|
||||
concatenated[
|
||||
|
||||
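A minimal sketch (an editor's illustration, not part of the diff) of the shape handling above: for a multiclass "gblinear" booster, the flat weight vector returned by XGBoosterFeatureScore_R carries one coefficient per feature and class, so it can be folded into a per-class table. `res` is a hypothetical stand-in for `results`, and a feature-major layout of the coefficients is assumed here.

library(data.table)
res <- list(features = paste0("f", 0:3), shape = c(4L, 3L), weight = rnorm(12))
n_classes <- if (length(res$shape) == 2) res$shape[2] else 0
importance <- data.table(
  Feature = rep(res$features, each = n_classes),  # feature-major layout assumed
  Weight  = res$weight,
  Class   = rep(seq_len(n_classes) - 1, times = length(res$features))
)[order(Class, -abs(Weight))]
importance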
@@ -1,54 +1,66 @@
#' Load xgboost model from binary file
#' Load XGBoost model from binary file
#'
#' Load xgboost model from the binary model file.
#' Load XGBoost model from binary model file.
#'
#' @param modelfile the name of the binary input file.
#' @param modelfile The name of the binary input file.
#'
#' @details
#' The input file is expected to contain a model saved in an xgboost model format
#' using either \code{\link{xgb.save}} or \code{\link{cb.save.model}} in R, or using some
#' appropriate methods from other xgboost interfaces. E.g., a model trained in Python and
#' saved from there in xgboost format, could be loaded from R.
#' The input file is expected to contain a model saved in an XGBoost model format
#' using either [xgb.save()] in R, or using some
#' appropriate methods from other XGBoost interfaces. E.g., a model trained in Python and
#' saved from there in XGBoost format, could be loaded from R.
#'
#' Note: a model saved as an R-object, has to be loaded using corresponding R-methods,
#' not \code{xgb.load}.
#' Note: a model saved as an R object has to be loaded using corresponding R-methods,
#' not by [xgb.load()].
#'
#' @return
#' An object of \code{xgb.Booster} class.
#' An object of `xgb.Booster` class.
#'
#' @seealso
#' \code{\link{xgb.save}}, \code{\link{xgb.Booster.complete}}.
#' @seealso [xgb.save()]
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' \dontshow{RhpcBLASctl::omp_set_num_threads(1)}
#' data(agaricus.train, package = "xgboost")
#' data(agaricus.test, package = "xgboost")
#'
#' ## Keep the number of threads to 1 for examples
#' nthread <- 1
#' data.table::setDTthreads(nthread)
#'
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#'                eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
#' xgb.save(bst, 'xgb.model')
#' bst <- xgb.load('xgb.model')
#' if (file.exists('xgb.model')) file.remove('xgb.model')
#' pred <- predict(bst, test$data)
#'
#' bst <- xgb.train(
#'   data = xgb.DMatrix(train$data, label = train$label),
#'   max_depth = 2,
#'   eta = 1,
#'   nthread = nthread,
#'   nrounds = 2,
#'   objective = "binary:logistic"
#' )
#'
#' fname <- file.path(tempdir(), "xgb.ubj")
#' xgb.save(bst, fname)
#' bst <- xgb.load(fname)
#' @export
xgb.load <- function(modelfile) {
  if (is.null(modelfile))
    stop("xgb.load: modelfile cannot be NULL")

  handle <- xgb.Booster.handle(modelfile = modelfile)
  bst <- xgb.Booster(
    params = list(),
    cachelist = list(),
    modelfile = modelfile
  )
  bst <- bst$bst
  # re-use modelfile if it is raw so we do not need to serialize
  if (typeof(modelfile) == "raw") {
    warning(
      paste(
        "The support for loading raw booster with `xgb.load` will be ",
        "discontinued in upcoming release. Use `xgb.load.raw` or",
        " `xgb.unserialize` instead. "
        "discontinued in upcoming release. Use `xgb.load.raw` instead. "
      )
    )
    bst <- xgb.handleToBooster(handle, modelfile)
  } else {
    bst <- xgb.handleToBooster(handle, NULL)
  }
  bst <- xgb.Booster.complete(bst, saveraw = TRUE)
  return(bst)
}

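A quick round-trip check (an editor's sketch, not part of the diff): a model reloaded with xgb.load() should reproduce the original predictions. This assumes `bst`, `train`, and `test` as created in the example above.

fname <- file.path(tempdir(), "xgb.ubj")
xgb.save(bst, fname)
bst2 <- xgb.load(fname)
# predictions from the reloaded booster should match the original
stopifnot(isTRUE(all.equal(predict(bst, test$data), predict(bst2, test$data))))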
@@ -1,23 +1,12 @@
#' Load serialised xgboost model from R's raw vector
#' Load serialised XGBoost model from R's raw vector
#'
#' User can generate raw memory buffer by calling xgb.save.raw
#'
#' @param buffer the buffer returned by xgb.save.raw
#' @param as_booster Return the loaded model as xgb.Booster instead of xgb.Booster.handle.
#' User can generate raw memory buffer by calling [xgb.save.raw()].
#'
#' @param buffer The buffer returned by [xgb.save.raw()].
#' @export
xgb.load.raw <- function(buffer, as_booster = FALSE) {
xgb.load.raw <- function(buffer) {
  cachelist <- list()
  handle <- .Call(XGBoosterCreate_R, cachelist)
  .Call(XGBoosterLoadModelFromRaw_R, handle, buffer)
  class(handle) <- "xgb.Booster.handle"

  if (as_booster) {
    booster <- list(handle = handle, raw = NULL)
    class(booster) <- "xgb.Booster"
    booster <- xgb.Booster.complete(booster, saveraw = TRUE)
    return(booster)
  } else {
    return (handle)
  }
  bst <- .Call(XGBoosterCreate_R, cachelist)
  .Call(XGBoosterLoadModelFromRaw_R, xgb.get.handle(bst), buffer)
  return(bst)
}

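The raw-vector path works the same way; a hedged sketch (an editor's illustration, not part of the diff), assuming `bst` and `test` from the earlier examples:

raw_model <- xgb.save.raw(bst)           # serialize to an R raw vector
bst_from_raw <- xgb.load.raw(raw_model)  # restore it
stopifnot(isTRUE(all.equal(predict(bst, test$data), predict(bst_from_raw, test$data))))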
@@ -1,68 +1,71 @@
#' Parse a boosted tree model text dump
#' Parse model text dump
#'
#' Parse a boosted tree model text dump into a \code{data.table} structure.
#' Parse a boosted tree model text dump into a `data.table` structure.
#'
#' @param feature_names character vector of feature names. If the model already
#'   contains feature names, those would be used when \code{feature_names=NULL} (default value).
#'   Non-null \code{feature_names} could be provided to override those in the model.
#' @param model object of class \code{xgb.Booster}
#' @param text \code{character} vector previously generated by the \code{xgb.dump}
#'   function (where parameter \code{with_stats = TRUE} should have been set).
#'   \code{text} takes precedence over \code{model}.
#' @param trees an integer vector of tree indices that should be parsed.
#'   If set to \code{NULL}, all trees of the model are parsed.
#'   It could be useful, e.g., in multiclass classification to get only
#'   the trees of one certain class. IMPORTANT: the tree index in xgboost models
#'   is zero-based (e.g., use \code{trees = 0:4} for first 5 trees).
#' @param use_int_id a logical flag indicating whether nodes in columns "Yes", "No", "Missing" should be
#'   represented as integers (when FALSE) or as "Tree-Node" character strings (when FALSE).
#' @param ... currently not used.
#' @param model Object of class `xgb.Booster`. If it contains feature names (they can
#'   be set through [setinfo()]), they will be used in the output from this function.
#' @param text Character vector previously generated by the function [xgb.dump()]
#'   (called with parameter `with_stats = TRUE`). `text` takes precedence over `model`.
#' @param trees An integer vector of tree indices that should be used. The default
#'   (`NULL`) uses all trees. Useful, e.g., in multiclass classification to get only
#'   the trees of one class. *Important*: the tree index in XGBoost models
#'   is zero-based (e.g., use `trees = 0:4` for the first five trees).
#' @param use_int_id A logical flag indicating whether nodes in columns "Yes", "No", and
#'   "Missing" should be represented as integers (when `TRUE`) or as "Tree-Node"
#'   character strings (when `FALSE`, default).
#' @param ... Currently not used.
#'
#' @return
#' A \code{data.table} with detailed information about model trees' nodes.
#' A `data.table` with detailed information about tree nodes. It has the following columns:
#' - `Tree`: integer ID of a tree in a model (zero-based index).
#' - `Node`: integer ID of a node in a tree (zero-based index).
#' - `ID`: character identifier of a node in a model (only when `use_int_id = FALSE`).
#' - `Feature`: for a branch node, a feature ID or name (when available);
#'   for a leaf node, it simply labels it as `"Leaf"`.
#' - `Split`: location of the split for a branch node (split condition is always "less than").
#' - `Yes`: ID of the next node when the split condition is met.
#' - `No`: ID of the next node when the split condition is not met.
#' - `Missing`: ID of the next node when the branch value is missing.
#' - `Gain`: either the split gain (change in loss) or the leaf value.
#' - `Cover`: metric related to the number of observations either seen by a split
#'   or collected by a leaf during training.
#'
#' The columns of the \code{data.table} are:
#'
#' \itemize{
#'   \item \code{Tree}: integer ID of a tree in a model (zero-based index)
#'   \item \code{Node}: integer ID of a node in a tree (zero-based index)
#'   \item \code{ID}: character identifier of a node in a model (only when \code{use_int_id=FALSE})
#'   \item \code{Feature}: for a branch node, it's a feature id or name (when available);
#'         for a leaf note, it simply labels it as \code{'Leaf'}
#'   \item \code{Split}: location of the split for a branch node (split condition is always "less than")
#'   \item \code{Yes}: ID of the next node when the split condition is met
#'   \item \code{No}: ID of the next node when the split condition is not met
#'   \item \code{Missing}: ID of the next node when branch value is missing
#'   \item \code{Quality}: either the split gain (change in loss) or the leaf value
#'   \item \code{Cover}: metric related to the number of observation either seen by a split
#'         or collected by a leaf during training.
#' }
#'
#' When \code{use_int_id=FALSE}, columns "Yes", "No", and "Missing" point to model-wide node identifiers
#' in the "ID" column. When \code{use_int_id=TRUE}, those columns point to node identifiers from
#' When `use_int_id = FALSE`, columns "Yes", "No", and "Missing" point to model-wide node identifiers
#' in the "ID" column. When `use_int_id = TRUE`, those columns point to node identifiers from
#' the corresponding trees in the "Node" column.
#'
#' @examples
#' # Basic use:
#'
#' data(agaricus.train, package='xgboost')
#' data(agaricus.train, package = "xgboost")
#' ## Keep the number of threads to 1 for examples
#' nthread <- 1
#' data.table::setDTthreads(nthread)
#'
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
#'                eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
#'
#' (dt <- xgb.model.dt.tree(colnames(agaricus.train$data), bst))
#' bst <- xgb.train(
#'   data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label),
#'   max_depth = 2,
#'   eta = 1,
#'   nthread = nthread,
#'   nrounds = 2,
#'   objective = "binary:logistic"
#' )
#'
#' # This bst model already has feature_names stored with it, so those would be used when
#' # feature_names is not set:
#' (dt <- xgb.model.dt.tree(model = bst))
#' dt <- xgb.model.dt.tree(bst)
#'
#' # How to match feature names of splits that are following a current 'Yes' branch:
#'
#' merge(dt, dt[, .(ID, Y.Feature=Feature)], by.x='Yes', by.y='ID', all.x=TRUE)[order(Tree,Node)]
#' merge(
#'   dt,
#'   dt[, .(ID, Y.Feature = Feature)], by.x = "Yes", by.y = "ID", all.x = TRUE
#' )[
#'   order(Tree, Node)
#' ]
#'
#' @export
xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
                              trees = NULL, use_int_id = FALSE, ...){
xgb.model.dt.tree <- function(model = NULL, text = NULL,
                              trees = NULL, use_int_id = FALSE, ...) {
  check.deprecation(...)

  if (!inherits(model, "xgb.Booster") && !is.character(text)) {
@@ -71,23 +74,22 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
         " (or NULL if 'model' was provided).")
  }

  if (is.null(feature_names) && !is.null(model) && !is.null(model$feature_names))
    feature_names <- model$feature_names

  if (!(is.null(feature_names) || is.character(feature_names))) {
    stop("feature_names: must be a character vector")
  }

  if (!(is.null(trees) || is.numeric(trees))) {
    stop("trees: must be a vector of integers.")
  }

  if (is.null(text)){
    text <- xgb.dump(model = model, with_stats = TRUE)
  feature_names <- NULL
  if (inherits(model, "xgb.Booster")) {
    feature_names <- xgb.feature_names(model)
  }

  if (length(text) < 2 ||
      sum(grepl('leaf=(\\d+)', text)) < 1) {
  from_text <- TRUE
  if (is.null(text)) {
    text <- xgb.dump(model = model, with_stats = TRUE)
    from_text <- FALSE
  }

  if (length(text) < 2 || !any(grepl('leaf=(-?\\d+)', text))) {
    stop("Non-tree model detected! This function can only be used with tree models.")
  }

@@ -106,16 +108,33 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
  } else {
    trees <- trees[trees >= 0 & trees <= max(td$Tree)]
  }
  td <- td[Tree %in% trees & !grepl('^booster', t)]
  td <- td[Tree %in% trees & !is.na(t) & !startsWith(t, 'booster')]

  td[, Node := as.integer(sub("^([0-9]+):.*", "\\1", t))]
  if (!use_int_id) td[, ID := add.tree.id(Node, Tree)]
  td[, isLeaf := grepl("leaf", t, fixed = TRUE)]

  # parse branch lines
  branch_rx <- paste0("f(\\d+)<(", anynumber_regex, ")\\] yes=(\\d+),no=(\\d+),missing=(\\d+),",
                      "gain=(", anynumber_regex, "),cover=(", anynumber_regex, ")")
  branch_cols <- c("Feature", "Split", "Yes", "No", "Missing", "Quality", "Cover")
  branch_rx_nonames <- paste0("f(\\d+)<(", anynumber_regex, ")\\] yes=(\\d+),no=(\\d+),missing=(\\d+),",
                              "gain=(", anynumber_regex, "),cover=(", anynumber_regex, ")")
  branch_rx_w_names <- paste0("\\d+:\\[(.+)<(", anynumber_regex, ")\\] yes=(\\d+),no=(\\d+),missing=(\\d+),",
                              "gain=(", anynumber_regex, "),cover=(", anynumber_regex, ")")
  text_has_feature_names <- FALSE
  if (NROW(feature_names)) {
    branch_rx <- branch_rx_w_names
    text_has_feature_names <- TRUE
  } else {
    # Note: when passing a text dump, it might or might not have feature names,
    # but that aspect is unknown from just the text attributes
    branch_rx <- branch_rx_nonames
    if (from_text) {
      if (sum(grepl(branch_rx_w_names, text)) > sum(grepl(branch_rx_nonames, text))) {
        branch_rx <- branch_rx_w_names
        text_has_feature_names <- TRUE
      }
    }
  }
  branch_cols <- c("Feature", "Split", "Yes", "No", "Missing", "Gain", "Cover")
  td[
    isLeaf == FALSE,
    (branch_cols) := {
@@ -125,7 +144,7 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
      xtr[, 3:5] <- add.tree.id(xtr[, 3:5], Tree)
      if (length(xtr) == 0) {
        as.data.table(
          list(Feature = "NA", Split = "NA", Yes = "NA", No = "NA", Missing = "NA", Quality = "NA", Cover = "NA")
          list(Feature = "NA", Split = "NA", Yes = "NA", No = "NA", Missing = "NA", Gain = "NA", Cover = "NA")
        )
      } else {
        as.data.table(xtr)
@@ -137,15 +156,17 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
  is_stump <- function() {
    return(length(td$Feature) == 1 && is.na(td$Feature))
  }
  if (!is.null(feature_names) && !is_stump()) {
    if (length(feature_names) <= max(as.numeric(td$Feature), na.rm = TRUE))
      stop("feature_names has less elements than there are features used in the model")
    td[isLeaf == FALSE, Feature := feature_names[as.numeric(Feature) + 1]]
  if (!text_has_feature_names) {
    if (!is.null(feature_names) && !is_stump()) {
      if (length(feature_names) <= max(as.numeric(td$Feature), na.rm = TRUE))
        stop("feature_names has less elements than there are features used in the model")
      td[isLeaf == FALSE, Feature := feature_names[as.numeric(Feature) + 1]]
    }
  }

  # parse leaf lines
  leaf_rx <- paste0("leaf=(", anynumber_regex, "),cover=(", anynumber_regex, ")")
  leaf_cols <- c("Feature", "Quality", "Cover")
  leaf_cols <- c("Feature", "Gain", "Cover")
  td[
    isLeaf == TRUE,
    (leaf_cols) := {
@@ -160,7 +181,7 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
  ]

  # convert some columns to numeric
  numeric_cols <- c("Split", "Quality", "Cover")
  numeric_cols <- c("Split", "Gain", "Cover")
  td[, (numeric_cols) := lapply(.SD, as.numeric), .SDcols = numeric_cols]
  if (use_int_id) {
    int_cols <- c("Yes", "No", "Missing")
@@ -173,7 +194,7 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
  td[order(Tree, Node)]
}

# Avoid error messages during CRAN check.
# Avoid notes during CRAN check.
# The reason is that these variables are never declared
# They are mainly column names inferred by Data.table...
globalVariables(c("Tree", "Node", "ID", "Feature", "t", "isLeaf", ".SD", ".SDcols"))

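A small sketch of working with the parsed table (an editor's illustration, not part of the diff), assuming `dt` from the example above; the documented columns make ad-hoc summaries straightforward:

dt[Feature == "Leaf", .N, by = Tree]    # number of leaves per tree
dt[Feature != "Leaf",                   # total split gain per feature
   .(TotalGain = sum(Gain)), by = Feature][order(-TotalGain)]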
@@ -1,62 +1,74 @@
#' Plot model trees deepness
#' Plot model tree depth
#'
#' Visualizes distributions related to depth of tree leafs.
#' \code{xgb.plot.deepness} uses base R graphics, while \code{xgb.ggplot.deepness} uses the ggplot backend.
#' Visualizes distributions related to the depth of tree leaves.
#' - `xgb.plot.deepness()` uses base R graphics, while
#' - `xgb.ggplot.deepness()` uses "ggplot2".
#'
#' @param model either an \code{xgb.Booster} model generated by the \code{xgb.train} function
#'   or a data.table result of the \code{xgb.model.dt.tree} function.
#' @param plot (base R barplot) whether a barplot should be produced.
#'   If FALSE, only a data.table is returned.
#' @param which which distribution to plot (see details).
#' @param ... other parameters passed to \code{barplot} or \code{plot}.
#' @param model Either an `xgb.Booster` model, or the "data.table" returned
#'   by [xgb.model.dt.tree()].
#' @param which Which distribution to plot (see details).
#' @param plot Should the plot be shown? Default is `TRUE`.
#' @param ... Other parameters passed to [graphics::barplot()] or [graphics::plot()].
#'
#' @details
#'
#' When \code{which="2x1"}, two distributions with respect to the leaf depth
#' When `which = "2x1"`, two distributions with respect to the leaf depth
#' are plotted on top of each other:
#' \itemize{
#'   \item the distribution of the number of leafs in a tree model at a certain depth;
#'   \item the distribution of average weighted number of observations ("cover")
#'         ending up in leafs at certain depth.
#' }
#' Those could be helpful in determining sensible ranges of the \code{max_depth}
#' and \code{min_child_weight} parameters.
#' 1. The distribution of the number of leaves in a tree model at a certain depth.
#' 2. The distribution of the average weighted number of observations ("cover")
#'    ending up in leaves at a certain depth.
#'
#' When \code{which="max.depth"} or \code{which="med.depth"}, plots of either maximum or median depth
#' per tree with respect to tree number are created. And \code{which="med.weight"} allows to see how
#' Those could be helpful in determining sensible ranges of the `max_depth`
#' and `min_child_weight` parameters.
#'
#' When `which = "max.depth"` or `which = "med.depth"`, plots of either maximum or
#' median depth per tree with respect to the tree number are created.
#'
#' Finally, `which = "med.weight"` shows how
#' a tree's median absolute leaf weight changes through the iterations.
#'
#' This function was inspired by the blog post
#' \url{https://github.com/aysent/random-forest-leaf-visualization}.
#' These functions have been inspired by the blog post
#' <https://github.com/aysent/random-forest-leaf-visualization>.
#'
#' @return
#' The return value of the two functions is as follows:
#' - `xgb.plot.deepness()`: A "data.table" (invisibly).
#'   Each row corresponds to a terminal leaf in the model. It contains its information
#'   about depth, cover, and weight (used in calculating predictions).
#'   If `plot = TRUE`, also a plot is shown.
#' - `xgb.ggplot.deepness()`: When `which = "2x1"`, a list of two "ggplot" objects,
#'   and a single "ggplot" object otherwise.
#'
#' Other than producing plots (when \code{plot=TRUE}), the \code{xgb.plot.deepness} function
#' silently returns a processed data.table where each row corresponds to a terminal leaf in a tree model,
#' and contains information about leaf's depth, cover, and weight (which is used in calculating predictions).
#'
#' The \code{xgb.ggplot.deepness} silently returns either a list of two ggplot graphs when \code{which="2x1"}
#' or a single ggplot graph for the other \code{which} options.
#'
#' @seealso
#'
#' \code{\link{xgb.train}}, \code{\link{xgb.model.dt.tree}}.
#' @seealso [xgb.train()] and [xgb.model.dt.tree()].
#'
#' @examples
#'
#' data(agaricus.train, package='xgboost')
#' data(agaricus.train, package = "xgboost")
#' ## Keep the number of threads to 2 for examples
#' nthread <- 2
#' data.table::setDTthreads(nthread)
#'
#' # Change max_depth to a higher number to get a more significant result
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 6,
#'                eta = 0.1, nthread = 2, nrounds = 50, objective = "binary:logistic",
#'                subsample = 0.5, min_child_weight = 2)
#' ## Change max_depth to a higher number to get a more significant result
#' bst <- xgb.train(
#'   data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label),
#'   max_depth = 6,
#'   nthread = nthread,
#'   nrounds = 50,
#'   objective = "binary:logistic",
#'   subsample = 0.5,
#'   min_child_weight = 2
#' )
#'
#' xgb.plot.deepness(bst)
#' xgb.ggplot.deepness(bst)
#'
#' xgb.plot.deepness(bst, which='max.depth', pch=16, col=rgb(0,0,1,0.3), cex=2)
#' xgb.plot.deepness(
#'   bst, which = "max.depth", pch = 16, col = rgb(0, 0, 1, 0.3), cex = 2
#' )
#'
#' xgb.plot.deepness(bst, which='med.weight', pch=16, col=rgb(0,0,1,0.3), cex=2)
#' xgb.plot.deepness(
#'   bst, which = "med.weight", pch = 16, col = rgb(0, 0, 1, 0.3), cex = 2
#' )
#'
#' @rdname xgb.plot.deepness
#' @export
@@ -80,7 +92,7 @@ xgb.plot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med.d
    stop("Model tree columns are not as expected!\n",
         "  Note that this function works only for tree models.")

  dt_depths <- merge(get.leaf.depth(dt_tree), dt_tree[, .(ID, Cover, Weight = Quality)], by = "ID")
  dt_depths <- merge(get.leaf.depth(dt_tree), dt_tree[, .(ID, Cover, Weight = Gain)], by = "ID")
  setkeyv(dt_depths, c("Tree", "ID"))
  # count by depth levels, and also calculate average cover at a depth
  dt_summaries <- dt_depths[, .(.N, Cover = mean(Cover)), Depth]
@@ -136,7 +148,7 @@ get.leaf.depth <- function(dt_tree) {
    # list of paths to each leaf in a tree
    paths <- lapply(paths_tmp$vpath, names)
    # combine into a resulting path lengths table for a tree
    data.table(Depth = sapply(paths, length), ID = To[Leaf == TRUE])
    data.table(Depth = lengths(paths), ID = To[Leaf == TRUE])
  }, by = Tree]
}

@@ -145,6 +157,6 @@ get.leaf.depth <- function(dt_tree) {
# They are mainly column names inferred by Data.table...
globalVariables(
  c(
    ".N", "N", "Depth", "Quality", "Cover", "Tree", "ID", "Yes", "No", "Feature", "Leaf", "Weight"
    ".N", "N", "Depth", "Gain", "Cover", "Tree", "ID", "Yes", "No", "Feature", "Leaf", "Weight"
  )
)

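With `plot = FALSE`, the per-leaf table documented above can be summarized directly; an editor's sketch (not part of the diff), assuming `bst` from the example:

dd <- xgb.plot.deepness(bst, plot = FALSE)
# leaves and average cover at each depth, mirroring the "2x1" panels
dd[, .(Leaves = .N, MeanCover = mean(Cover)), by = Depth][order(Depth)]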
@@ -1,59 +1,73 @@
#' Plot feature importance as a bar graph
#' Plot feature importance
#'
#' Represents previously calculated feature importance as a bar graph.
#' \code{xgb.plot.importance} uses base R graphics, while \code{xgb.ggplot.importance} uses the ggplot backend.
#'
#' @param importance_matrix a \code{data.table} returned by \code{\link{xgb.importance}}.
#' @param top_n maximal number of top features to include into the plot.
#' @param measure the name of importance measure to plot.
#'   When \code{NULL}, 'Gain' would be used for trees and 'Weight' would be used for gblinear.
#' @param rel_to_first whether importance values should be represented as relative to the highest ranked feature.
#'   See Details.
#' @param left_margin (base R barplot) allows to adjust the left margin size to fit feature names.
#'   When it is NULL, the existing \code{par('mar')} is used.
#' @param cex (base R barplot) passed as \code{cex.names} parameter to \code{barplot}.
#' @param plot (base R barplot) whether a barplot should be produced.
#'   If FALSE, only a data.table is returned.
#' @param n_clusters (ggplot only) a \code{numeric} vector containing the min and the max range
#'   of the possible number of clusters of bars.
#' @param ... other parameters passed to \code{barplot} (except horiz, border, cex.names, names.arg, and las).
#' - `xgb.plot.importance()` uses base R graphics, while
#' - `xgb.ggplot.importance()` uses "ggplot".
#'
#' @details
#' The graph represents each feature as a horizontal bar of length proportional to the importance of a feature.
#' Features are shown ranked in a decreasing importance order.
#' It works for importances from both \code{gblinear} and \code{gbtree} models.
#' The graph represents each feature as a horizontal bar of length proportional to the
#' importance of a feature. Features are sorted by decreasing importance.
#' It works for both "gblinear" and "gbtree" models.
#'
#' When \code{rel_to_first = FALSE}, the values would be plotted as they were in \code{importance_matrix}.
#' For gbtree model, that would mean being normalized to the total of 1
#' When `rel_to_first = FALSE`, the values would be plotted as in `importance_matrix`.
#' For a "gbtree" model, that would mean being normalized to the total of 1
#' ("what is feature's importance contribution relative to the whole model?").
#' For linear models, \code{rel_to_first = FALSE} would show actual values of the coefficients.
#' Setting \code{rel_to_first = TRUE} allows to see the picture from the perspective of
#' For linear models, `rel_to_first = FALSE` would show actual values of the coefficients.
#' Setting `rel_to_first = TRUE` lets you see the picture from the perspective of
#' "what is feature's importance contribution relative to the most important feature?"
#'
#' The ggplot-backend method also performs 1-D clustering of the importance values,
#' with bar colors corresponding to different clusters that have somewhat similar importance values.
#' The "ggplot" backend performs 1-D clustering of the importance values,
#' with bar colors corresponding to different clusters having similar importance values.
#'
#' @param importance_matrix A `data.table` as returned by [xgb.importance()].
#' @param top_n Maximal number of top features to include into the plot.
#' @param measure The name of importance measure to plot.
#'   When `NULL`, 'Gain' would be used for trees and 'Weight' would be used for gblinear.
#' @param rel_to_first Whether importance values should be represented as relative to
#'   the highest ranked feature, see Details.
#' @param left_margin Adjust the left margin size to fit feature names.
#'   When `NULL`, the existing `par("mar")` is used.
#' @param cex Passed as `cex.names` parameter to [graphics::barplot()].
#' @param plot Should the barplot be shown? Default is `TRUE`.
#' @param n_clusters A numeric vector containing the min and the max range
#'   of the possible number of clusters of bars.
#' @param ... Other parameters passed to [graphics::barplot()]
#'   (except `horiz`, `border`, `cex.names`, `names.arg`, and `las`).
#'   Only used in `xgb.plot.importance()`.
#' @return
#' The \code{xgb.plot.importance} function creates a \code{barplot} (when \code{plot=TRUE})
#' and silently returns a processed data.table with \code{n_top} features sorted by importance.
#' The return value depends on the function:
#' - `xgb.plot.importance()`: Invisibly, a "data.table" with `n_top` features sorted
#'   by importance. If `plot = TRUE`, the values are also plotted as barplot.
#' - `xgb.ggplot.importance()`: A customizable "ggplot" object.
#'   E.g., to change the title, set `+ ggtitle("A GRAPH NAME")`.
#'
#' The \code{xgb.ggplot.importance} function returns a ggplot graph which could be customized afterwards.
#' E.g., to change the title of the graph, add \code{+ ggtitle("A GRAPH NAME")} to the result.
#'
#' @seealso
#' \code{\link[graphics]{barplot}}.
#' @seealso [graphics::barplot()]
#'
#' @examples
#' data(agaricus.train)
#'
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 3,
#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' ## Keep the number of threads to 2 for examples
#' nthread <- 2
#' data.table::setDTthreads(nthread)
#'
#' bst <- xgb.train(
#'   data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label),
#'   max_depth = 3,
#'   eta = 1,
#'   nthread = nthread,
#'   nrounds = 2,
#'   objective = "binary:logistic"
#' )
#'
#' importance_matrix <- xgb.importance(colnames(agaricus.train$data), model = bst)
#' xgb.plot.importance(
#'   importance_matrix, rel_to_first = TRUE, xlab = "Relative importance"
#' )
#'
#' xgb.plot.importance(importance_matrix, rel_to_first = TRUE, xlab = "Relative importance")
#'
#' (gg <- xgb.ggplot.importance(importance_matrix, measure = "Frequency", rel_to_first = TRUE))
#' gg <- xgb.ggplot.importance(
#'   importance_matrix, measure = "Frequency", rel_to_first = TRUE
#' )
#' gg
#' gg + ggplot2::ylab("Frequency")
#'
#' @rdname xgb.plot.importance
@@ -82,7 +96,13 @@ xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure
  }

  # also aggregate, just in case when the values were not yet summed up by feature
  importance_matrix <- importance_matrix[, Importance := sum(get(measure)), by = Feature]
  importance_matrix <- importance_matrix[
    , lapply(.SD, sum)
    , .SDcols = setdiff(names(importance_matrix), "Feature")
    , by = Feature
  ][
    , Importance := get(measure)
  ]

  # make sure it's ordered
  importance_matrix <- importance_matrix[order(-abs(Importance))]
@@ -102,7 +122,9 @@ xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure
  original_mar <- par()$mar

  # reset margins so this function doesn't have side effects
  on.exit({par(mar = original_mar)})
  on.exit({
    par(mar = original_mar)
  })

  mar <- original_mar
  if (!is.null(left_margin))

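A standalone sketch of the aggregation change above (an editor's illustration, not part of the diff): summing every measure column by `Feature` before ranking, shown on a toy table with made-up values.

library(data.table)
im <- data.table(
  Feature = c("a", "b", "a"),
  Gain = c(0.2, 0.5, 0.3),
  Frequency = c(0.4, 0.2, 0.4)
)
# one row per feature, each measure column summed
im[, lapply(.SD, sum), .SDcols = setdiff(names(im), "Feature"), by = Feature]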
@@ -1,17 +1,8 @@
#' Project all trees on one tree and plot it
#' Project all trees on one tree
#'
#' Visualization of the ensemble of trees as a single collective unit.
#'
#' @param model produced by the \code{xgb.train} function.
#' @param feature_names names of each feature as a \code{character} vector.
#' @param features_keep number of features to keep in each position of the multi trees.
#' @param plot_width width in pixels of the graph to produce
#' @param plot_height height in pixels of the graph to produce
#' @param render a logical flag for whether the graph should be rendered (see Value).
#' @param ... currently not used
#'
#' @details
#'
#' This function tries to capture the complexity of a gradient boosted tree model
#' in a cohesive way by compressing an ensemble of trees into a single tree-graph representation.
#' The goal is to improve the interpretability of a model generally seen as black box.
@@ -24,49 +15,57 @@
#' Moreover, the trees tend to reuse the same features.
#'
#' The function projects each tree onto one, and keeps for each position the
#' \code{features_keep} first features (based on the Gain per feature measure).
#' `features_keep` first features (based on the Gain per feature measure).
#'
#' This function is inspired by this blog post:
#' \url{https://wellecks.wordpress.com/2015/02/21/peering-into-the-black-box-visualizing-lambdamart/}
#' <https://wellecks.wordpress.com/2015/02/21/peering-into-the-black-box-visualizing-lambdamart/>
#'
#' @return
#'
#' When \code{render = TRUE}:
#' returns a rendered graph object which is an \code{htmlwidget} of class \code{grViz}.
#' Similar to ggplot objects, it needs to be printed to see it when not running from command line.
#'
#' When \code{render = FALSE}:
#' silently returns a graph object which is of DiagrammeR's class \code{dgr_graph}.
#' This could be useful if one wants to modify some of the graph attributes
#' before rendering the graph with \code{\link[DiagrammeR]{render_graph}}.
#' @inheritParams xgb.plot.tree
#' @param features_keep Number of features to keep in each position of the multi trees,
#'   by default 5.
#' @inherit xgb.plot.tree return
#'
#' @examples
#'
#' data(agaricus.train, package='xgboost')
#' data(agaricus.train, package = "xgboost")
#'
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 15,
#'                eta = 1, nthread = 2, nrounds = 30, objective = "binary:logistic",
#'                min_child_weight = 50, verbose = 0)
#' ## Keep the number of threads to 2 for examples
#' nthread <- 2
#' data.table::setDTthreads(nthread)
#'
#' bst <- xgb.train(
#'   data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label),
#'   max_depth = 15,
#'   eta = 1,
#'   nthread = nthread,
#'   nrounds = 30,
#'   objective = "binary:logistic",
#'   min_child_weight = 50,
#'   verbose = 0
#' )
#'
#' p <- xgb.plot.multi.trees(model = bst, features_keep = 3)
#' print(p)
#'
#' \dontrun{
#' # Below is an example of how to save this plot to a file.
#' # Note that for `export_graph` to work, the DiagrammeRsvg and rsvg packages must also be installed.
#' # Note that for export_graph() to work, the {DiagrammeRsvg} and {rsvg} packages
#' # must also be installed.
#'
#' library(DiagrammeR)
#' gr <- xgb.plot.multi.trees(model=bst, features_keep = 3, render=FALSE)
#' export_graph(gr, 'tree.pdf', width=1500, height=600)
#'
#' gr <- xgb.plot.multi.trees(model = bst, features_keep = 3, render = FALSE)
#' export_graph(gr, "tree.pdf", width = 1500, height = 600)
#' }
#'
#' @export
xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5, plot_width = NULL, plot_height = NULL,
                                 render = TRUE, ...){
xgb.plot.multi.trees <- function(model, features_keep = 5, plot_width = NULL, plot_height = NULL,
                                 render = TRUE, ...) {
  if (!requireNamespace("DiagrammeR", quietly = TRUE)) {
    stop("DiagrammeR is required for xgb.plot.multi.trees")
  }
  check.deprecation(...)
  tree.matrix <- xgb.model.dt.tree(feature_names = feature_names, model = model)
  tree.matrix <- xgb.model.dt.tree(model = model)

  # first number of the path represents the tree, then the following numbers are related to the path to follow
  # root init
@@ -93,13 +92,13 @@ xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5,
    data.table::set(tree.matrix, j = nm, value = sub("^\\d+-", "", tree.matrix[[nm]]))

  nodes.dt <- tree.matrix[
      , .(Quality = sum(Quality))
      , .(Gain = sum(Gain))
      , by = .(abs.node.position, Feature)
    ][, .(Text = paste0(
        paste0(
          Feature[1:min(length(Feature), features_keep)],
          Feature[seq_len(min(length(Feature), features_keep))],
          " (",
          format(Quality[1:min(length(Quality), features_keep)], digits = 5),
          format(Gain[seq_len(min(length(Gain), features_keep))], digits = 5),
          ")"
        ),
        collapse = "\n"
@@ -110,11 +109,10 @@ xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5,

  edges.dt <- data.table::rbindlist(
    l = list(
      tree.matrix[Feature != "Leaf", .(abs.node.position, Yes)],
      tree.matrix[Feature != "Leaf", .(abs.node.position, No)]
      tree.matrix[Feature != "Leaf", .(From = abs.node.position, To = Yes)],
      tree.matrix[Feature != "Leaf", .(From = abs.node.position, To = No)]
    )
  )
  data.table::setnames(edges.dt, c("From", "To"))
  edges.dt <- edges.dt[, .N, .(From, To)]
  edges.dt[, N := NULL]


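An editor's sketch (not part of the diff) of the edge handling above: the Yes- and No-branches are stacked with shared column names and duplicate edges are collapsed via a grouped count, shown here on a toy table.

library(data.table)
edges <- rbindlist(list(
  data.table(From = c("0-0", "0-0"), To = c("0-1", "0-2")),
  data.table(From = "0-0", To = "0-1")   # duplicate edge to be collapsed
))
edges <- edges[, .N, .(From, To)]
edges[, N := NULL]
edges   # two unique edges remain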
@@ -1,106 +1,163 @@
|
||||
#' SHAP contribution dependency plots
|
||||
#' SHAP dependence plots
|
||||
#'
|
||||
#' Visualizing the SHAP feature contribution to prediction dependencies on feature value.
|
||||
#' Visualizes SHAP values against feature values to gain an impression of feature effects.
|
||||
#'
|
||||
#' @param data data as a \code{matrix} or \code{dgCMatrix}.
|
||||
#' @param shap_contrib a matrix of SHAP contributions that was computed earlier for the above
|
||||
#' \code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.
|
||||
#' @param features a vector of either column indices or of feature names to plot. When it is NULL,
|
||||
#' feature importance is calculated, and \code{top_n} high ranked features are taken.
|
||||
#' @param top_n when \code{features} is NULL, top_n [1, 100] most important features in a model are taken.
|
||||
#' @param model an \code{xgb.Booster} model. It has to be provided when either \code{shap_contrib}
|
||||
#' or \code{features} is missing.
|
||||
#' @param trees passed to \code{\link{xgb.importance}} when \code{features = NULL}.
|
||||
#' @param target_class is only relevant for multiclass models. When it is set to a 0-based class index,
|
||||
#' only SHAP contributions for that specific class are used.
|
||||
#' If it is not set, SHAP importances are averaged over all classes.
|
||||
#' @param approxcontrib passed to \code{\link{predict.xgb.Booster}} when \code{shap_contrib = NULL}.
|
||||
#' @param subsample a random fraction of data points to use for plotting. When it is NULL,
|
||||
#' it is set so that up to 100K data points are used.
|
||||
#' @param n_col a number of columns in a grid of plots.
|
||||
#' @param col color of the scatterplot markers.
|
||||
#' @param pch scatterplot marker.
|
||||
#' @param discrete_n_uniq a maximal number of unique values in a feature to consider it as discrete.
|
||||
#' @param discrete_jitter an \code{amount} parameter of jitter added to discrete features' positions.
|
||||
#' @param ylab a y-axis label in 1D plots.
|
||||
#' @param plot_NA whether the contributions of cases with missing values should also be plotted.
|
||||
#' @param col_NA a color of marker for missing value contributions.
|
||||
#' @param pch_NA a marker type for NA values.
|
||||
#' @param pos_NA a relative position of the x-location where NA values are shown:
|
||||
#' \code{min(x) + (max(x) - min(x)) * pos_NA}.
|
||||
#' @param plot_loess whether to plot loess-smoothed curves. The smoothing is only done for features with
|
||||
#' more than 5 distinct values.
|
||||
#' @param col_loess a color to use for the loess curves.
|
||||
#' @param span_loess the \code{span} parameter in \code{\link[stats]{loess}}'s call.
|
||||
#' @param which whether to do univariate or bivariate plotting. NOTE: only 1D is implemented so far.
|
||||
#' @param plot whether a plot should be drawn. If FALSE, only a list of matrices is returned.
|
||||
#' @param ... other parameters passed to \code{plot}.
|
||||
#' @param data The data to explain as a `matrix`, `dgCMatrix`, or `data.frame`.
|
||||
#' @param shap_contrib Matrix of SHAP contributions of `data`.
|
||||
#' The default (`NULL`) computes it from `model` and `data`.
|
||||
#' @param features Vector of column indices or feature names to plot. When `NULL`
|
||||
#' (default), the `top_n` most important features are selected by [xgb.importance()].
|
||||
#' @param top_n How many of the most important features (<= 100) should be selected?
|
||||
#' By default 1 for SHAP dependence and 10 for SHAP summary.
|
||||
#' Only used when `features = NULL`.
|
||||
#' @param model An `xgb.Booster` model. Only required when `shap_contrib = NULL` or
|
||||
#' `features = NULL`.
|
||||
#' @param trees Passed to [xgb.importance()] when `features = NULL`.
|
||||
#' @param target_class Only relevant for multiclass models. The default (`NULL`)
|
||||
#' averages the SHAP values over all classes. Pass a (0-based) class index
|
||||
#' to show only SHAP values of that class.
|
||||
#' @param approxcontrib Passed to `predict()` when `shap_contrib = NULL`.
|
||||
#' @param subsample Fraction of data points randomly picked for plotting.
|
||||
#' The default (`NULL`) will use up to 100k data points.
|
||||
#' @param n_col Number of columns in a grid of plots.
|
||||
#' @param col Color of the scatterplot markers.
|
||||
#' @param pch Scatterplot marker.
|
||||
#' @param discrete_n_uniq Maximal number of unique feature values to consider the
|
||||
#' feature as discrete.
|
||||
#' @param discrete_jitter Jitter amount added to the values of discrete features.
|
||||
#' @param ylab The y-axis label in 1D plots.
|
||||
#' @param plot_NA Should contributions of cases with missing values be plotted?
|
||||
#' Default is `TRUE`.
|
||||
#' @param col_NA Color of marker for missing value contributions.
|
||||
#' @param pch_NA Marker type for `NA` values.
|
||||
#' @param pos_NA Relative position of the x-location where `NA` values are shown:
|
||||
#' `min(x) + (max(x) - min(x)) * pos_NA`.
|
||||
#' @param plot_loess Should loess-smoothed curves be plotted? (Default is `TRUE`).
|
||||
#' The smoothing is only done for features with more than 5 distinct values.
|
||||
#' @param col_loess Color of loess curves.
|
||||
#' @param span_loess The `span` parameter of [stats::loess()].
|
||||
#' @param which Whether to do univariate or bivariate plotting. Currently, only "1d" is implemented.
|
||||
#' @param plot Should the plot be drawn? (Default is `TRUE`).
|
||||
#' If `FALSE`, only a list of matrices is returned.
|
||||
#' @param ... Other parameters passed to [graphics::plot()].
|
||||
#'
|
||||
#' @details
|
||||
#'
|
||||
#' These scatterplots represent how SHAP feature contributions depend of feature values.
|
||||
#' The similarity to partial dependency plots is that they also give an idea for how feature values
|
||||
#' affect predictions. However, in partial dependency plots, we usually see marginal dependencies
|
||||
#' of model prediction on feature value, while SHAP contribution dependency plots display the estimated
|
||||
#' contributions of a feature to model prediction for each individual case.
|
||||
#' The similarity to partial dependence plots is that they also give an idea for how feature values
|
||||
#' affect predictions. However, in partial dependence plots, we see marginal dependencies
|
||||
#' of model prediction on feature value, while SHAP dependence plots display the estimated
|
||||
#' contributions of a feature to the prediction for each individual case.
|
||||
#'
|
||||
#' When \code{plot_loess = TRUE} is set, feature values are rounded to 3 significant digits and
|
||||
#' weighted LOESS is computed and plotted, where weights are the numbers of data points
|
||||
#' When `plot_loess = TRUE`, feature values are rounded to three significant digits and
|
||||
#' weighted LOESS is computed and plotted, where the weights are the numbers of data points
|
||||
#' at each rounded value.
|
||||
#'
|
||||
#' Note: SHAP contributions are shown on the scale of model margin. E.g., for a logistic binomial objective,
|
||||
#' the margin is prediction before a sigmoidal transform into probability-like values.
|
||||
#' Note: SHAP contributions are on the scale of the model margin.
|
||||
#' E.g., for a logistic binomial objective, the margin is on log-odds scale.
|
||||
#' Also, since SHAP stands for "SHapley Additive exPlanation" (model prediction = sum of SHAP
|
||||
#' contributions for all features + bias), depending on the objective used, transforming SHAP
|
||||
#' contributions for a feature from the marginal to the prediction space is not necessarily
|
||||
#' a meaningful thing to do.
|
||||
#'
|
||||
#' @return
|
||||
#'
|
||||
#' In addition to producing plots (when \code{plot=TRUE}), it silently returns a list of two matrices:
|
||||
#' \itemize{
|
||||
#' \item \code{data} the values of selected features;
|
||||
#' \item \code{shap_contrib} the contributions of selected features.
|
||||
#' }
|
||||
#' In addition to producing plots (when `plot = TRUE`), it silently returns a list of two matrices:
|
||||
#' - `data`: Feature value matrix.
|
||||
#' - `shap_contrib`: Corresponding SHAP value matrix.
|
||||
#'
|
||||
#' @references
|
||||
#'
|
||||
#' Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions", NIPS Proceedings 2017, \url{https://arxiv.org/abs/1705.07874}
|
||||
#'
|
||||
#' Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles", \url{https://arxiv.org/abs/1706.06060}
|
||||
#' 1. Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions",
|
||||
#' NIPS Proceedings 2017, <https://arxiv.org/abs/1705.07874>
|
#' 2. Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles",
#'    <https://arxiv.org/abs/1706.06060>
#'
#' @examples
#'
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' data(agaricus.train, package = "xgboost")
#' data(agaricus.test, package = "xgboost")
#'
#' bst <- xgboost(agaricus.train$data, agaricus.train$label, nrounds = 50,
#'                eta = 0.1, max_depth = 3, subsample = .5,
#'                method = "hist", objective = "binary:logistic", nthread = 2, verbose = 0)
#' ## Keep the number of threads to 1 for examples
#' nthread <- 1
#' data.table::setDTthreads(nthread)
#' nrounds <- 20
#'
#' bst <- xgb.train(
#'   data = xgb.DMatrix(agaricus.train$data, agaricus.train$label),
#'   nrounds = nrounds,
#'   eta = 0.1,
#'   max_depth = 3,
#'   subsample = 0.5,
#'   objective = "binary:logistic",
#'   nthread = nthread,
#'   verbose = 0
#' )
#'
#' xgb.plot.shap(agaricus.test$data, model = bst, features = "odor=none")
#'
#' contr <- predict(bst, agaricus.test$data, predcontrib = TRUE)
#' xgb.plot.shap(agaricus.test$data, contr, model = bst, top_n = 12, n_col = 3)
#' xgb.ggplot.shap.summary(agaricus.test$data, contr, model = bst, top_n = 12) # Summary plot
#'
#' # multiclass example - plots for each class separately:
#' # Summary plot
#' xgb.ggplot.shap.summary(agaricus.test$data, contr, model = bst, top_n = 12)
#'
#' # Multiclass example - plots for each class separately:
#' nclass <- 3
#' nrounds <- 20
#' x <- as.matrix(iris[, -5])
#' set.seed(123)
#' is.na(x[sample(nrow(x) * 4, 30)]) <- TRUE # introduce some missing values
#' mbst <- xgboost(data = x, label = as.numeric(iris$Species) - 1, nrounds = nrounds,
#'                 max_depth = 2, eta = 0.3, subsample = .5, nthread = 2,
#'                 objective = "multi:softprob", num_class = nclass, verbose = 0)
#' trees0 <- seq(from=0, by=nclass, length.out=nrounds)
#'
#' mbst <- xgb.train(
#'   data = xgb.DMatrix(x, label = as.numeric(iris$Species) - 1),
#'   nrounds = nrounds,
#'   max_depth = 2,
#'   eta = 0.3,
#'   subsample = 0.5,
#'   nthread = nthread,
#'   objective = "multi:softprob",
#'   num_class = nclass,
#'   verbose = 0
#' )
#' trees0 <- seq(from = 0, by = nclass, length.out = nrounds)
#' col <- rgb(0, 0, 1, 0.5)
#' xgb.plot.shap(x, model = mbst, trees = trees0, target_class = 0, top_n = 4,
#'               n_col = 2, col = col, pch = 16, pch_NA = 17)
#' xgb.plot.shap(x, model = mbst, trees = trees0 + 1, target_class = 1, top_n = 4,
#'               n_col = 2, col = col, pch = 16, pch_NA = 17)
#' xgb.plot.shap(x, model = mbst, trees = trees0 + 2, target_class = 2, top_n = 4,
#'               n_col = 2, col = col, pch = 16, pch_NA = 17)
#' xgb.ggplot.shap.summary(x, model = mbst, target_class = 0, top_n = 4) # Summary plot
#'
#' xgb.plot.shap(
#'   x,
#'   model = mbst,
#'   trees = trees0,
#'   target_class = 0,
#'   top_n = 4,
#'   n_col = 2,
#'   col = col,
#'   pch = 16,
#'   pch_NA = 17
#' )
#'
#' xgb.plot.shap(
#'   x,
#'   model = mbst,
#'   trees = trees0 + 1,
#'   target_class = 1,
#'   top_n = 4,
#'   n_col = 2,
#'   col = col,
#'   pch = 16,
#'   pch_NA = 17
#' )
#'
#' xgb.plot.shap(
#'   x,
#'   model = mbst,
#'   trees = trees0 + 2,
#'   target_class = 2,
#'   top_n = 4,
#'   n_col = 2,
#'   col = col,
#'   pch = 16,
#'   pch_NA = 17
#' )
#'
#' # Summary plot
#' xgb.ggplot.shap.summary(x, model = mbst, target_class = 0, top_n = 4)
#'
#' @rdname xgb.plot.shap
#' @export
@@ -143,7 +200,7 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
    y <- shap_contrib[, f][ord]
    x_lim <- range(x, na.rm = TRUE)
    y_lim <- range(y, na.rm = TRUE)
    do_na <- plot_NA && any(is.na(x))
    do_na <- plot_NA && anyNA(x)
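    # anyNA(x) is equivalent to any(is.na(x)) but can return as soon as the
    # first NA is found, without allocating the intermediate logical vector.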
    if (do_na) {
      x_range <- diff(x_lim)
      loc_na <- min(x, na.rm = TRUE) + x_range * pos_NA
@@ -183,46 +240,56 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
  invisible(list(data = data, shap_contrib = shap_contrib))
}

#' SHAP contribution dependency summary plot
#' SHAP summary plot
#'
#' Compare SHAP contributions of different features.
#' Visualizes SHAP contributions of different features.
#'
#' A point plot (each point representing one sample from \code{data}) is
#' A point plot (each point representing one observation from `data`) is
#' produced for each feature, with the points plotted on the SHAP value axis.
#' Each point (observation) is coloured based on its feature value. The plot
#' hence allows us to see which features have a negative / positive contribution
#' Each point (observation) is coloured based on its feature value.
#'
#' The plot allows one to see which features have a negative / positive contribution
#' to the model prediction, and whether the contribution is different for larger
#' or smaller values of the feature. We effectively try to replicate the
#' \code{summary_plot} function from https://github.com/slundberg/shap.
#' or smaller values of the feature. Inspired by the summary plot of
#' <https://github.com/shap/shap>.
#'
#' @inheritParams xgb.plot.shap
#'
#' @return A \code{ggplot2} object.
#' @return A `ggplot2` object.
#' @export
#'
#' @examples # See \code{\link{xgb.plot.shap}}.
#' @seealso \code{\link{xgb.plot.shap}}, \code{\link{xgb.ggplot.shap.summary}},
#'   \url{https://github.com/slundberg/shap}
#' @examples
#' # See examples in xgb.plot.shap()
#'
#' @seealso [xgb.plot.shap()], [xgb.ggplot.shap.summary()],
#'   and the Python library <https://github.com/shap/shap>.
xgb.plot.shap.summary <- function(data, shap_contrib = NULL, features = NULL, top_n = 10, model = NULL,
                                  trees = NULL, target_class = NULL, approxcontrib = FALSE, subsample = NULL) {
  # Only ggplot implementation is available.
  xgb.ggplot.shap.summary(data, shap_contrib, features, top_n, model, trees, target_class, approxcontrib, subsample)
}

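# A minimal usage sketch of the wrapper above, assuming a fitted binary booster
# `bst` and a feature matrix `X` as in the examples of xgb.plot.shap():
#
#   contr <- predict(bst, X, predcontrib = TRUE)   # per-observation SHAP values
#   xgb.plot.shap.summary(X, contr, model = bst, top_n = 5)
#
# The call simply dispatches to xgb.ggplot.shap.summary() and returns its ggplot object.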
#' Prepare data for SHAP plots. To be used in xgb.plot.shap, xgb.plot.shap.summary, etc.
#' Internal utility function.
#' Prepare data for SHAP plots
#'
#' Internal function used in [xgb.plot.shap()], [xgb.plot.shap.summary()], etc.
#'
#' @inheritParams xgb.plot.shap
#' @param max_observations Maximum number of observations to consider.
#' @keywords internal
#' @noRd
#'
#' @return A list containing: 'data', a matrix containing sample observations
#'   and their feature values; 'shap_contrib', a matrix containing the SHAP contribution
#'   values for these observations.
#' @return
#' A list containing:
#' - `data`: The matrix of feature values.
#' - `shap_contrib`: The matrix with corresponding SHAP values.
xgb.shap.data <- function(data, shap_contrib = NULL, features = NULL, top_n = 1, model = NULL,
                          trees = NULL, target_class = NULL, approxcontrib = FALSE,
                          subsample = NULL, max_observations = 100000) {
  if (!is.matrix(data) && !inherits(data, "dgCMatrix"))
    stop("data: must be either matrix or dgCMatrix")
  if (!inherits(data, c("matrix", "dsparseMatrix", "data.frame")))
    stop("data: must be matrix, sparse matrix, or data.frame.")
  if (inherits(data, "data.frame") && length(class(data)) > 1L) {
    data <- as.data.frame(data)
  }

  if (is.null(shap_contrib) && (is.null(model) || !inherits(model, "xgb.Booster")))
    stop("when shap_contrib is not provided, one must provide an xgb.Booster model")
@@ -230,18 +297,31 @@ xgb.shap.data <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
  if (is.null(features) && (is.null(model) || !inherits(model, "xgb.Booster")))
    stop("when features are not provided, one must provide an xgb.Booster model to rank the features")

  last_dim <- function(v) dim(v)[length(dim(v))]

  if (!is.null(shap_contrib) &&
      (!is.matrix(shap_contrib) || nrow(shap_contrib) != nrow(data) || ncol(shap_contrib) != ncol(data) + 1))
      (!is.array(shap_contrib) || nrow(shap_contrib) != nrow(data) || last_dim(shap_contrib) != ncol(data) + 1))
    stop("shap_contrib is not compatible with the provided data")

  if (is.character(features) && is.null(colnames(data)))
    stop("either provide `data` with column names or provide `features` as column indices")

  if (is.null(model$feature_names) && model$nfeatures != ncol(data))
  model_feature_names <- NULL
  if (is.null(features) && !is.null(model)) {
    model_feature_names <- xgb.feature_names(model)
  }
  if (is.null(model_feature_names) && xgb.num_feature(model) != ncol(data))
    stop("if model has no feature_names, columns in `data` must match features in model")

  if (!is.null(subsample)) {
    idx <- sample(x = seq_len(nrow(data)), size = as.integer(subsample * nrow(data)), replace = FALSE)
    if (subsample <= 0 || subsample >= 1) {
      stop("'subsample' must be a number between zero and one (non-inclusive).")
    }
    sample_size <- as.integer(subsample * nrow(data))
    if (sample_size < 2) {
      stop("Sampling fraction involves less than 2 rows.")
    }
    idx <- sample(x = seq_len(nrow(data)), size = sample_size, replace = FALSE)
  } else {
    idx <- seq_len(min(nrow(data), max_observations))
  }
@@ -250,30 +330,50 @@ xgb.shap.data <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
    colnames(data) <- paste0("X", seq_len(ncol(data)))
  }

  if (!is.null(shap_contrib)) {
    if (is.list(shap_contrib)) { # multiclass: either choose a class or merge
      shap_contrib <- if (!is.null(target_class)) shap_contrib[[target_class + 1]] else Reduce("+", lapply(shap_contrib, abs))
    }
    shap_contrib <- shap_contrib[idx, ]
    if (is.null(colnames(shap_contrib))) {
      colnames(shap_contrib) <- paste0("X", seq_len(ncol(data)))
    }
  } else {
    shap_contrib <- predict(model, newdata = data, predcontrib = TRUE, approxcontrib = approxcontrib)
    if (is.list(shap_contrib)) { # multiclass: either choose a class or merge
      shap_contrib <- if (!is.null(target_class)) shap_contrib[[target_class + 1]] else Reduce("+", lapply(shap_contrib, abs))
  reshape_3d_shap_contrib <- function(shap_contrib, target_class) {
    # multiclass: either choose a class or merge
    if (is.list(shap_contrib)) {
      if (!is.null(target_class)) {
        shap_contrib <- shap_contrib[[target_class + 1]]
      } else {
        shap_contrib <- Reduce("+", lapply(shap_contrib, abs))
      }
    } else if (length(dim(shap_contrib)) > 2) {
      if (!is.null(target_class)) {
        orig_shape <- dim(shap_contrib)
        shap_contrib <- shap_contrib[, target_class + 1, , drop = TRUE]
        if (!is.matrix(shap_contrib)) {
          shap_contrib <- matrix(shap_contrib, orig_shape[c(1L, 3L)])
        }
      } else {
        shap_contrib <- apply(abs(shap_contrib), c(1L, 3L), sum)
      }
    }
    return(shap_contrib)
  }
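  # For multiclass models, predcontrib output arrives either as a list of
  # per-class matrices (older interface) or as a 3D array of shape
  # [rows, classes, features + 1] (newer interface); the helper above collapses
  # both cases to a single [rows, features + 1] matrix, summing |SHAP| across
  # classes when no target_class is requested.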

  if (is.null(shap_contrib)) {
    shap_contrib <- predict(
      model,
      newdata = data,
      predcontrib = TRUE,
      approxcontrib = approxcontrib
    )
  }
  shap_contrib <- reshape_3d_shap_contrib(shap_contrib, target_class)
  if (is.null(colnames(shap_contrib))) {
    colnames(shap_contrib) <- paste0("X", seq_len(ncol(data)))
  }

  if (is.null(features)) {
    if (!is.null(model$feature_names)) {
    if (!is.null(model_feature_names)) {
      imp <- xgb.importance(model = model, trees = trees)
    } else {
      imp <- xgb.importance(model = model, trees = trees, feature_names = colnames(data))
    }
    top_n <- top_n[1]
    if (top_n < 1 | top_n > 100) stop("top_n: must be an integer within [1, 100]")
    features <- imp$Feature[1:min(top_n, NROW(imp))]
    if (top_n < 1 || top_n > 100) stop("top_n: must be an integer within [1, 100]")
    features <- imp$Feature[seq_len(min(top_n, NROW(imp)))]
  }
  if (is.character(features)) {
    features <- match(features, colnames(data))

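# Shape convention the compatibility checks above rely on: for a single-class
# model, predict(..., predcontrib = TRUE) returns one column per feature plus a
# trailing bias column. A small sketch, assuming a fitted booster `bst`:
#
#   contr <- predict(bst, X, predcontrib = TRUE)
#   stopifnot(ncol(contr) == ncol(X) + 1)  # last column is the bias term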
@@ -1,74 +1,104 @@
#' Plot a boosted tree model
#' Plot boosted trees
#'
#' Read a tree model text dump and plot the model.
#'
#' @param feature_names names of each feature as a \code{character} vector.
#' @param model produced by the \code{xgb.train} function.
#' @param trees an integer vector of tree indices that should be visualized.
#'   If set to \code{NULL}, all trees of the model are included.
#'   IMPORTANT: the tree index in xgboost model is zero-based
#'   (e.g., use \code{trees = 0:2} for the first 3 trees in a model).
#' @param plot_width the width of the diagram in pixels.
#' @param plot_height the height of the diagram in pixels.
#' @param render a logical flag for whether the graph should be rendered (see Value).
#' @param show_node_id a logical flag for whether to show node id's in the graph.
#' @param ... currently not used.
#'
#' @details
#' When using `style="xgboost"`, the content of each node is visualized as follows:
#' - For non-terminal nodes, it will display the split condition (number or name if
#'   available, and the condition that would decide to which node to go next).
#' - Those nodes will be connected to their children by arrows that indicate whether the
#'   branch corresponds to the condition being met or not being met.
#' - Terminal (leaf) nodes contain the margin to add when ending there.
#'
#' The content of each node is organised that way:
#'
#' \itemize{
#'   \item Feature name.
#'   \item \code{Cover}: The sum of second order gradient of training data classified to the leaf.
#'     If it is square loss, this simply corresponds to the number of instances seen by a split
#'     or collected by a leaf during training.
#'     The deeper in the tree a node is, the lower this metric will be.
#'   \item \code{Gain} (for split nodes): the information gain metric of a split
#' When using `style="R"`, the content of each node is visualized like this:
#' - *Feature name*.
#' - *Cover:* The sum of second order gradients of training data.
#'   For the squared loss, this simply corresponds to the number of instances in the node.
#'   The deeper in the tree, the lower the value.
#' - *Gain* (for split nodes): Information gain metric of a split
#'   (corresponds to the importance of the node in the model).
#'   \item \code{Value} (for leafs): the margin value that the leaf may contribute to prediction.
#' }
#' The tree root nodes also indicate the Tree index (0-based).
#' - *Value* (for leaves): Margin value that the leaf may contribute to the prediction.
#'
#' The tree root nodes also indicate the tree index (0-based).
#'
#' The "Yes" branches are marked by the "< split_value" label.
#' The branches that also used for missing values are marked as bold
#' The branches also used for missing values are marked as bold
#' (as in "carrying extra capacity").
#'
#' This function uses \href{http://www.graphviz.org/}{GraphViz} as a backend of DiagrammeR.
#' This function uses [GraphViz](https://www.graphviz.org/) as DiagrammeR backend.
#'
#' @param model Object of class `xgb.Booster`. If it contains feature names (they can be set
#'   through [setinfo()]), they will be used in the output from this function.
#' @param trees An integer vector of tree indices that should be used.
#'   The default (`NULL`) uses all trees.
#'   Useful, e.g., in multiclass classification to get only
#'   the trees of one class. *Important*: the tree index in XGBoost models
#'   is zero-based (e.g., use `trees = 0:2` for the first three trees).
#' @param plot_width,plot_height Width and height of the graph in pixels.
#'   The values are passed to `DiagrammeR::render_graph()`.
#' @param render Should the graph be rendered or not? The default is `TRUE`.
#' @param show_node_id a logical flag for whether to show node IDs in the graph.
#' @param style Style to use for the plot:
#'   - `"xgboost"`: will use the plot style defined in the core XGBoost library,
#'     which is shared between different interfaces through the 'dot' format. This
#'     style was not available before version 2.1.0 in R. It always plots the trees
#'     vertically (from top to bottom).
#'   - `"R"`: will use the style defined in XGBoost's R interface, which predates
#'     the introduction of the standardized style from the core library. It might plot
#'     the trees horizontally (from left to right).
#'
#'   Note that `style="xgboost"` is only supported when all of the following conditions are met:
#'   - Only a single tree is being plotted.
#'   - Node IDs are not added to the graph.
#'   - The graph is being returned as `htmlwidget` (`render=TRUE`).
#' @param ... Currently not used.
#' @return
#'
#' When \code{render = TRUE}:
#' returns a rendered graph object which is an \code{htmlwidget} of class \code{grViz}.
#' Similar to ggplot objects, it needs to be printed to see it when not running from command line.
#'
#' When \code{render = FALSE}:
#' silently returns a graph object which is of DiagrammeR's class \code{dgr_graph}.
#' This could be useful if one wants to modify some of the graph attributes
#' before rendering the graph with \code{\link[DiagrammeR]{render_graph}}.
#' The value depends on the `render` parameter:
#' - If `render = TRUE` (default): Rendered graph object which is an htmlwidget of
#'   class `grViz`. Similar to "ggplot" objects, it needs to be printed when not
#'   running from the command line.
#' - If `render = FALSE`: Graph object which is of DiagrammeR's class `dgr_graph`.
#'   This could be useful if one wants to modify some of the graph attributes
#'   before rendering the graph with `DiagrammeR::render_graph()`.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.train, package = "xgboost")
#'
#' bst <- xgb.train(
#'   data = xgb.DMatrix(agaricus.train$data, agaricus.train$label),
#'   max_depth = 3,
#'   eta = 1,
#'   nthread = 2,
#'   nrounds = 2,
#'   objective = "binary:logistic"
#' )
#'
#' # plot the first tree (zero-based index), using the style from xgboost's core library
#' # (this plot should look identical to the ones generated from other
#' # interfaces like the python package for xgboost)
#' xgb.plot.tree(model = bst, trees = 0, style = "xgboost")
#'
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 3,
#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' # plot all the trees
#' xgb.plot.tree(model = bst)
#' xgb.plot.tree(model = bst, trees = NULL)
#'
#' # plot only the first tree and display the node ID:
#' xgb.plot.tree(model = bst, trees = 0, show_node_id = TRUE)
#'
#' \dontrun{
#' # Below is an example of how to save this plot to a file.
#' # Note that for `export_graph` to work, the DiagrammeRsvg and rsvg packages must also be installed.
#' # Note that for export_graph() to work, the {DiagrammeRsvg}
#' # and {rsvg} packages must also be installed.
#'
#' library(DiagrammeR)
#' gr <- xgb.plot.tree(model=bst, trees=0:1, render=FALSE)
#' export_graph(gr, 'tree.pdf', width=1500, height=1900)
#' export_graph(gr, 'tree.png', width=1500, height=1900)
#'
#' gr <- xgb.plot.tree(model = bst, trees = 0:1, render = FALSE)
#' export_graph(gr, "tree.pdf", width = 1500, height = 1900)
#' export_graph(gr, "tree.png", width = 1500, height = 1900)
#' }
#'
#' @export
xgb.plot.tree <- function(feature_names = NULL, model = NULL, trees = NULL, plot_width = NULL, plot_height = NULL,
                          render = TRUE, show_node_id = FALSE, ...){
xgb.plot.tree <- function(model = NULL, trees = NULL, plot_width = NULL, plot_height = NULL,
                          render = TRUE, show_node_id = FALSE, style = c("R", "xgboost"), ...) {
  check.deprecation(...)
  if (!inherits(model, "xgb.Booster")) {
    stop("model: Has to be an object of class xgb.Booster")
@@ -78,9 +108,20 @@ xgb.plot.tree <- function(feature_names = NULL, model = NULL, trees = NULL, plot
    stop("DiagrammeR package is required for xgb.plot.tree", call. = FALSE)
  }

  dt <- xgb.model.dt.tree(feature_names = feature_names, model = model, trees = trees)
  style <- as.character(head(style, 1L))
  stopifnot(style %in% c("R", "xgboost"))
  if (style == "xgboost") {
    if (NROW(trees) != 1L || !render || show_node_id) {
      stop("style='xgboost' is only supported for a single, rendered tree, without node IDs.")
    }

    dt[, label := paste0(Feature, "\nCover: ", Cover, ifelse(Feature == "Leaf", "\nValue: ", "\nGain: "), Quality)]
    txt <- xgb.dump(model, dump_format = "dot")
    return(DiagrammeR::grViz(txt[[trees + 1]], width = plot_width, height = plot_height))
  }
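  # The "xgboost" style delegates rendering to the core library:
  # xgb.dump(model, dump_format = "dot") returns one GraphViz 'dot' document per
  # tree, and DiagrammeR::grViz() renders the selected one as an htmlwidget,
  # which is why only a single rendered tree is supported in this mode.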

  dt <- xgb.model.dt.tree(model = model, trees = trees)

  dt[, label := paste0(Feature, "\nCover: ", Cover, ifelse(Feature == "Leaf", "\nValue: ", "\nGain: "), Gain)]
  if (show_node_id)
    dt[, label := paste0(ID, ": ", label)]
  dt[Node == 0, label := paste0("Tree ", Tree, "\n", label)]
@@ -147,4 +188,4 @@ xgb.plot.tree <- function(feature_names = NULL, model = NULL, trees = NULL, plot
# Avoid error messages during CRAN check.
# The reason is that these variables are never declared
# They are mainly column names inferred by Data.table...
globalVariables(c("Feature", "ID", "Cover", "Quality", "Split", "Yes", "No", "Missing", ".", "shape", "filledcolor", "label"))
globalVariables(c("Feature", "ID", "Cover", "Gain", "Split", "Yes", "No", "Missing", ".", "shape", "filledcolor", "label"))

@@ -1,38 +1,59 @@
#' Save xgboost model to binary file
#' Save XGBoost model to binary file
#'
#' Save xgboost model to a file in binary format.
#' Save XGBoost model to a file in binary or JSON format.
#'
#' @param model model object of \code{xgb.Booster} class.
#' @param fname name of the file to write.
#' @param model Model object of `xgb.Booster` class.
#' @param fname Name of the file to write. Its extension determines the serialization format:
#'   - ".ubj": Use the universal binary JSON format (recommended).
#'     This format uses binary types for e.g. floating point numbers, thereby preventing any loss
#'     of precision when converting to a human-readable JSON text or similar.
#'   - ".json": Use plain JSON, which is a human-readable format.
#'   - ".deprecated": Use **deprecated** binary format. This format will
#'     not be able to save attributes introduced after v1 of XGBoost, such as the "best_iteration"
#'     attribute that boosters might keep, nor feature names or user-specified attributes.
#'   - If the format is not specified by passing one of the file extensions above, it will
#'     default to UBJ (see the sketch after this function).
#'
#' @details
#' This methods allows to save a model in an xgboost-internal binary format which is universal
#' among the various xgboost interfaces. In R, the saved model file could be read-in later
#' using either the \code{\link{xgb.load}} function or the \code{xgb_model} parameter
#' of \code{\link{xgb.train}}.
#'
#' Note: a model can also be saved as an R-object (e.g., by using \code{\link[base]{readRDS}}
#' or \code{\link[base]{save}}). However, it would then only be compatible with R, and
#' corresponding R-methods would need to be used to load it. Moreover, persisting the model with
#' \code{\link[base]{readRDS}} or \code{\link[base]{save}} will cause compatibility problems in
#' future versions of XGBoost. Consult \code{\link{a-compatibility-note-for-saveRDS-save}} to learn
#' how to persist models in a future-proof way, i.e. to make the model accessible in future
#' This method allows saving a model in an XGBoost-internal binary or text format which is universal
#' among the various xgboost interfaces. In R, the saved model file could be read later
#' using either the [xgb.load()] function or the `xgb_model` parameter of [xgb.train()].
#'
#' Note: a model can also be saved as an R object (e.g., by using [readRDS()]
#' or [save()]). However, it would then only be compatible with R, and
#' corresponding R methods would need to be used to load it. Moreover, persisting the model with
#' [readRDS()] or [save()] might cause compatibility problems in
#' future versions of XGBoost. Consult [a-compatibility-note-for-saveRDS-save] to learn
#' how to persist models in a future-proof way, i.e., to make the model accessible in future
#' releases of XGBoost.
#'
#' @seealso
#' \code{\link{xgb.load}}, \code{\link{xgb.Booster.complete}}.
#' @seealso [xgb.load()]
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' \dontshow{RhpcBLASctl::omp_set_num_threads(1)}
#' data(agaricus.train, package = "xgboost")
#' data(agaricus.test, package = "xgboost")
#'
#' ## Keep the number of threads to 1 for examples
#' nthread <- 1
#' data.table::setDTthreads(nthread)
#'
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' xgb.save(bst, 'xgb.model')
#' bst <- xgb.load('xgb.model')
#' if (file.exists('xgb.model')) file.remove('xgb.model')
#' pred <- predict(bst, test$data)
#'
#' bst <- xgb.train(
#'   data = xgb.DMatrix(train$data, label = train$label),
#'   max_depth = 2,
#'   eta = 1,
#'   nthread = nthread,
#'   nrounds = 2,
#'   objective = "binary:logistic"
#' )
#'
#' fname <- file.path(tempdir(), "xgb.ubj")
#' xgb.save(bst, fname)
#' bst <- xgb.load(fname)
#' @export
xgb.save <- function(model, fname) {
  if (typeof(fname) != "character")
@@ -41,8 +62,7 @@ xgb.save <- function(model, fname) {
    stop("model must be xgb.Booster.",
         if (inherits(model, "xgb.DMatrix")) " Use xgb.DMatrix.save to save an xgb.DMatrix object." else "")
  }
  model <- xgb.Booster.complete(model, saveraw = FALSE)
  fname <- path.expand(fname)
  .Call(XGBoosterSaveModel_R, model$handle, fname[1])
  .Call(XGBoosterSaveModel_R, xgb.get.handle(model), enc2utf8(fname[1]))
  return(TRUE)
}

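# A short sketch of the extension-based dispatch described above (file names
# are illustrative; any path works as long as it carries one of the extensions):
#
#   xgb.save(bst, file.path(tempdir(), "model.ubj"))         # binary JSON (recommended)
#   xgb.save(bst, file.path(tempdir(), "model.json"))        # plain-text JSON
#   xgb.save(bst, file.path(tempdir(), "model.deprecated"))  # legacy binary format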
@@ -1,31 +1,40 @@
#' Save xgboost model to R's raw vector,
#' user can call xgb.load.raw to load the model back from raw vector
#' Save XGBoost model to R's raw vector
#'
#' Save xgboost model from xgboost or xgb.train
#' Save XGBoost model from [xgboost()] or [xgb.train()].
#' Call [xgb.load.raw()] to load the model back from a raw vector.
#'
#' @param model the model object.
#' @param raw_format The format for encoding the booster. Available options are
#' \itemize{
#'   \item \code{json}: Encode the booster into JSON text document.
#'   \item \code{ubj}: Encode the booster into Universal Binary JSON.
#'   \item \code{deprecated}: Encode the booster into old customized binary format.
#' }
#'
#' Right now the default is \code{deprecated} but will be changed to \code{ubj} in upcoming release.
#' @param model The model object.
#' @param raw_format The format for encoding the booster:
#'   - "json": Encode the booster into JSON text document.
#'   - "ubj": Encode the booster into Universal Binary JSON.
#'   - "deprecated": Encode the booster into old customized binary format.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' \dontshow{RhpcBLASctl::omp_set_num_threads(1)}
#' data(agaricus.train, package = "xgboost")
#' data(agaricus.test, package = "xgboost")
#'
#' ## Keep the number of threads to 1 for examples
#' nthread <- 1
#' data.table::setDTthreads(nthread)
#'
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#'
#' bst <- xgb.train(
#'   data = xgb.DMatrix(train$data, label = train$label),
#'   max_depth = 2,
#'   eta = 1,
#'   nthread = nthread,
#'   nrounds = 2,
#'   objective = "binary:logistic"
#' )
#'
#' raw <- xgb.save.raw(bst)
#' bst <- xgb.load.raw(raw)
#' pred <- predict(bst, test$data)
#'
#' @export
xgb.save.raw <- function(model, raw_format = "deprecated") {
xgb.save.raw <- function(model, raw_format = "ubj") {
  handle <- xgb.get.handle(model)
  args <- list(format = raw_format)
  .Call(XGBoosterSaveModelToRaw_R, handle, jsonlite::toJSON(args, auto_unbox = TRUE))

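# Round-trip sketch of the raw-vector formats (the default is now "ubj"):
#
#   raw_ubj  <- xgb.save.raw(bst)                        # Universal Binary JSON
#   raw_json <- xgb.save.raw(bst, raw_format = "json")   # human-readable JSON
#   bst2 <- xgb.load.raw(raw_ubj)                        # restore the booster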
@@ -1,21 +0,0 @@
#' Serialize the booster instance into R's raw vector. The serialization method differs
#' from \code{\link{xgb.save.raw}} as the latter one saves only the model but not
#' parameters. This serialization format is not stable across different xgboost versions.
#'
#' @param booster the booster instance
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' raw <- xgb.serialize(bst)
#' bst <- xgb.unserialize(raw)
#'
#' @export
xgb.serialize <- function(booster) {
  handle <- xgb.get.handle(booster)
  .Call(XGBoosterSerializeToBuffer_R, handle)
}
@@ -1,208 +1,268 @@
#' eXtreme Gradient Boosting Training
#'
#' \code{xgb.train} is an advanced interface for training an xgboost model.
#' The \code{xgboost} function is a simpler wrapper for \code{xgb.train}.
#' `xgb.train()` is an advanced interface for training an xgboost model.
#' The [xgboost()] function is a simpler wrapper for `xgb.train()`.
#'
#' @param params the list of parameters. The complete list of parameters is
#'   available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below
#'   is a shorter summary:
#'   available in the [online documentation](http://xgboost.readthedocs.io/en/latest/parameter.html).
#'   Below is a shorter summary:
#'
#' 1. General Parameters
#' **1. General Parameters**
#'
#' \itemize{
#'   \item \code{booster} which booster to use, can be \code{gbtree} or \code{gblinear}. Default: \code{gbtree}.
#' }
#' - `booster`: Which booster to use, can be `gbtree` or `gblinear`. Default: `gbtree`.
#'
#' 2. Booster Parameters
#' **2. Booster Parameters**
#'
#' 2.1. Parameters for Tree Booster
#' **2.1. Parameters for Tree Booster**
#' - `eta`: The learning rate: scale the contribution of each tree by a factor of `0 < eta < 1`
#'   when it is added to the current approximation.
#'   Used to prevent overfitting by making the boosting process more conservative.
#'   Lower value for `eta` implies larger value for `nrounds`: low `eta` value means model
#'   more robust to overfitting but slower to compute. Default: 0.3.
#' - `gamma`: Minimum loss reduction required to make a further partition on a leaf node of the tree.
#'   The larger, the more conservative the algorithm will be.
#' - `max_depth`: Maximum depth of a tree. Default: 6.
#' - `min_child_weight`: Minimum sum of instance weight (hessian) needed in a child.
#'   If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight,
#'   then the building process will give up further partitioning.
#'   In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node.
#'   The larger, the more conservative the algorithm will be. Default: 1.
#' - `subsample`: Subsample ratio of the training instance.
#'   Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees
#'   and this will prevent overfitting. It makes computation shorter (because less data to analyse).
#'   It is advised to use this parameter with `eta` and increase `nrounds`. Default: 1.
#' - `colsample_bytree`: Subsample ratio of columns when constructing each tree. Default: 1.
#' - `lambda`: L2 regularization term on weights. Default: 1.
#' - `alpha`: L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0.
#' - `num_parallel_tree`: Experimental parameter. Number of trees to grow per round.
#'   Useful to test Random Forest through XGBoost.
#'   (set `colsample_bytree < 1`, `subsample < 1` and `round = 1`) accordingly.
#'   Default: 1.
#' - `monotone_constraints`: A numerical vector consisting of `1`, `0` and `-1` with its length
#'   equal to the number of features in the training data.
#'   `1` is increasing, `-1` is decreasing and `0` is no constraint.
#' - `interaction_constraints`: A list of vectors specifying feature indices of permitted interactions.
#'   Each item of the list represents one permitted interaction where specified features are allowed to interact with each other.
#'   Feature index values should start from `0` (`0` references the first column).
#'   Leave argument unspecified for no interaction constraints.
#'
#' \itemize{
#'   \item \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model more robust to overfitting but slower to compute. Default: 0.3
#'   \item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. the larger, the more conservative the algorithm will be.
#'   \item \code{max_depth} maximum depth of a tree. Default: 6
#'   \item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
#'   \item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
#'   \item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
#'   \item \code{lambda} L2 regularization term on weights. Default: 1
#'   \item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
#'   \item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through XGBoost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
#'   \item \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length equals to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
#'   \item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave argument unspecified for no interaction constraints.
#' }
#' **2.2. Parameters for Linear Booster**
#'
#' 2.2. Parameters for Linear Booster
#' - `lambda`: L2 regularization term on weights. Default: 0.
#' - `lambda_bias`: L2 regularization term on bias. Default: 0.
#' - `alpha`: L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0.
#'
#' \itemize{
#'   \item \code{lambda} L2 regularization term on weights. Default: 0
#'   \item \code{lambda_bias} L2 regularization term on bias. Default: 0
#'   \item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
#' }
#' **3. Task Parameters**
#'
#' 3. Task Parameters
#' - `objective`: Specifies the learning task and the corresponding learning objective.
#'   Users can pass a self-defined function to it. The default objective options are below:
#'   - `reg:squarederror`: Regression with squared loss (default).
#'   - `reg:squaredlogerror`: Regression with squared log loss \eqn{1/2 \cdot (\log(pred + 1) - \log(label + 1))^2}.
#'     All inputs are required to be greater than -1.
#'     Also, see metric rmsle for possible issue with this objective.
#'   - `reg:logistic`: Logistic regression.
#'   - `reg:pseudohubererror`: Regression with Pseudo Huber loss, a twice differentiable alternative to absolute loss.
#'   - `binary:logistic`: Logistic regression for binary classification. Output probability.
#'   - `binary:logitraw`: Logistic regression for binary classification, output score before logistic transformation.
#'   - `binary:hinge`: Hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities.
#'   - `count:poisson`: Poisson regression for count data, output mean of Poisson distribution.
#'     The parameter `max_delta_step` is set to 0.7 by default in poisson regression
#'     (used to safeguard optimization).
#'   - `survival:cox`: Cox regression for right censored survival time data (negative values are considered right censored).
#'     Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional
#'     hazard function \eqn{h(t) = h_0(t) \cdot HR}).
#'   - `survival:aft`: Accelerated failure time model for censored survival time data. See
#'     [Survival Analysis with Accelerated Failure Time](https://xgboost.readthedocs.io/en/latest/tutorials/aft_survival_analysis.html)
#'     for details.
#'     The parameter `aft_loss_distribution` specifies the Probability Density Function
#'     used by `survival:aft` and the `aft-nloglik` metric.
#'   - `multi:softmax`: Set xgboost to do multiclass classification using the softmax objective.
#'     Class is represented by a number and should be from 0 to `num_class - 1`.
#'   - `multi:softprob`: Same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be
#'     further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging
#'     to each class.
#'   - `rank:pairwise`: Set XGBoost to do ranking task by minimizing the pairwise loss.
#'   - `rank:ndcg`: Use LambdaMART to perform list-wise ranking where
#'     [Normalized Discounted Cumulative Gain (NDCG)](https://en.wikipedia.org/wiki/Discounted_cumulative_gain) is maximized.
#'   - `rank:map`: Use LambdaMART to perform list-wise ranking where
#'     [Mean Average Precision (MAP)](https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision)
#'     is maximized.
#'   - `reg:gamma`: Gamma regression with log-link. Output is a mean of gamma distribution.
#'     It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be
#'     [gamma-distributed](https://en.wikipedia.org/wiki/Gamma_distribution#Applications).
#'   - `reg:tweedie`: Tweedie regression with log-link.
#'     It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be
#'     [Tweedie-distributed](https://en.wikipedia.org/wiki/Tweedie_distribution#Applications).
#'
#' \itemize{
#'   \item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
#'   \itemize{
#'     \item \code{reg:squarederror} Regression with squared loss (Default).
#'     \item \code{reg:squaredlogerror}: regression with squared log loss \eqn{1/2 * (log(pred + 1) - log(label + 1))^2}. All inputs are required to be greater than -1. Also, see metric rmsle for possible issue with this objective.
#'     \item \code{reg:logistic} logistic regression.
#'     \item \code{reg:pseudohubererror}: regression with Pseudo Huber loss, a twice differentiable alternative to absolute loss.
#'     \item \code{binary:logistic} logistic regression for binary classification. Output probability.
#'     \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
#'     \item \code{binary:hinge}: hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities.
#'     \item \code{count:poisson}: Poisson regression for count data, output mean of Poisson distribution. \code{max_delta_step} is set to 0.7 by default in poisson regression (used to safeguard optimization).
#'     \item \code{survival:cox}: Cox regression for right censored survival time data (negative values are considered right censored). Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional hazard function \code{h(t) = h0(t) * HR}).
#'     \item \code{survival:aft}: Accelerated failure time model for censored survival time data. See \href{https://xgboost.readthedocs.io/en/latest/tutorials/aft_survival_analysis.html}{Survival Analysis with Accelerated Failure Time} for details.
#'     \item \code{aft_loss_distribution}: Probability Density Function used by \code{survival:aft} and \code{aft-nloglik} metric.
#'     \item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}.
#'     \item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
#'     \item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss.
#'     \item \code{rank:ndcg}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Discounted_cumulative_gain}{Normalized Discounted Cumulative Gain (NDCG)} is maximized.
#'     \item \code{rank:map}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision}{Mean Average Precision (MAP)} is maximized.
#'     \item \code{reg:gamma}: gamma regression with log-link. Output is a mean of gamma distribution. It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Gamma_distribution#Applications}{gamma-distributed}.
#'     \item \code{reg:tweedie}: Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Tweedie_distribution#Applications}{Tweedie-distributed}.
#'   }
#'   \item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
#'   \item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: metric will be assigned according to objective (rmse for regression, and error for classification, mean average precision for ranking). List is provided in detail section.
#' }
#' For custom objectives, one should pass a function taking as input the current predictions (as a numeric
#' vector or matrix) and the training data (as an `xgb.DMatrix` object) that will return a list with elements
#' `grad` and `hess`, which should be numeric vectors or matrices with the number of rows matching
#' the number of rows in the training data (same shape as the predictions that are passed as input to the function).
#' For multi-valued custom objectives, these should have shape `[nrows, ntargets]`. Note that negative values of
#' the Hessian will be clipped, so one might consider using the expected Hessian (Fisher information) if the
#' objective is non-convex.
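#'
#' A minimal sketch of such a custom objective (binary logistic loss on the raw
#' margin; `getinfo()` retrieves the labels from the DMatrix):
#'
#' logregobj <- function(preds, dtrain) {
#'   labels <- getinfo(dtrain, "label")
#'   preds <- 1 / (1 + exp(-preds))  # transform raw margin to probability
#'   grad <- preds - labels          # first-order gradient
#'   hess <- preds * (1 - preds)     # second-order gradient
#'   list(grad = grad, hess = hess)
#' }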
#'
#' @param data training dataset. \code{xgb.train} accepts only an \code{xgb.DMatrix} as the input.
#'   \code{xgboost}, in addition, also accepts \code{matrix}, \code{dgCMatrix}, or name of a local data file.
#' @param nrounds max number of boosting iterations.
#' @param watchlist named list of xgb.DMatrix datasets to use for evaluating model performance.
#'   Metrics specified in either \code{eval_metric} or \code{feval} will be computed for each
#'   of these datasets during each boosting iteration, and stored in the end as a field named
#'   \code{evaluation_log} in the resulting object. When either \code{verbose>=1} or
#'   \code{\link{cb.print.evaluation}} callback is engaged, the performance results are continuously
#'   printed out during the training.
#'   E.g., specifying \code{watchlist=list(validation1=mat1, validation2=mat2)} allows to track
#'   the performance of each round's model on mat1 and mat2.
#' @param obj customized objective function. Returns gradient and second order
#'   gradient with given prediction and dtrain.
#' @param feval customized evaluation function. Returns
#'   \code{list(metric='metric-name', value='metric-value')} with given
#'   prediction and dtrain.
#'   See the tutorials [Custom Objective and Evaluation Metric](https://xgboost.readthedocs.io/en/stable/tutorials/custom_metric_obj.html)
#'   and [Advanced Usage of Custom Objectives](https://xgboost.readthedocs.io/en/stable/tutorials/advanced_custom_obj)
#'   for more information about custom objectives.
#'
#' - `base_score`: The initial prediction score of all instances, global bias. Default: 0.5.
#' - `eval_metric`: Evaluation metrics for validation data.
#'   Users can pass a self-defined function to it.
#'   Default: metric will be assigned according to objective
#'   (rmse for regression, and error for classification, mean average precision for ranking).
#'   List is provided in detail section.
#' @param data Training dataset. `xgb.train()` accepts only an `xgb.DMatrix` as the input.
#'   [xgboost()], in addition, also accepts `matrix`, `dgCMatrix`, or name of a local data file.
#' @param nrounds Max number of boosting iterations.
#' @param evals Named list of `xgb.DMatrix` datasets to use for evaluating model performance.
#'   Metrics specified in either `eval_metric` or `feval` will be computed for each
#'   of these datasets during each boosting iteration, and stored in the end as a field named
#'   `evaluation_log` in the resulting object. When either `verbose>=1` or
#'   [xgb.cb.print.evaluation()] callback is engaged, the performance results are continuously
#'   printed out during the training.
#'   E.g., specifying `evals=list(validation1=mat1, validation2=mat2)` allows tracking
#'   the performance of each round's model on mat1 and mat2.
#' @param obj Customized objective function. Should take two arguments: the first one will be the
#'   current predictions (either a numeric vector or matrix depending on the number of targets / classes),
#'   and the second one will be the `data` DMatrix object that is used for training.
#'
#'   It should return a list with two elements `grad` and `hess` (in that order), as either
#'   numeric vectors or numeric matrices depending on the number of targets / classes (same
#'   dimension as the predictions that are passed as first argument).
#' @param feval Customized evaluation function. Just like `obj`, should take two arguments, with
#'   the first one being the predictions and the second one the `data` DMatrix.
#'
#'   Should return a list with two elements `metric` (name that will be displayed for this metric,
#'   should be a string / character), and `value` (the number that the function calculates, should
#'   be a numeric scalar).
#'
#'   Note that even if passing `feval`, objectives also have an associated default metric that
#'   will be evaluated in addition to it. In order to disable the built-in metric, one can pass
#'   parameter `disable_default_eval_metric = TRUE`.
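#'
#'   A matching sketch of a custom metric in this shape (binary classification
#'   error computed on the raw margin):
#'
#'   evalerror <- function(preds, dtrain) {
#'     labels <- getinfo(dtrain, "label")
#'     err <- mean((preds > 0) != labels)  # threshold the margin at zero
#'     list(metric = "custom-error", value = err)
#'   }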
#' @param verbose If 0, xgboost will stay silent. If 1, it will print information about performance.
#'   If 2, some additional information will be printed out.
#'   Note that setting \code{verbose > 0} automatically engages the
#'   \code{cb.print.evaluation(period=1)} callback function.
#' @param print_every_n Print each n-th iteration evaluation messages when \code{verbose>0}.
#'   Default is 1 which means all messages are printed. This parameter is passed to the
#'   \code{\link{cb.print.evaluation}} callback.
#' @param early_stopping_rounds If \code{NULL}, the early stopping function is not triggered.
#'   If set to an integer \code{k}, training with a validation set will stop if the performance
#'   doesn't improve for \code{k} rounds.
#'   Setting this parameter engages the \code{\link{cb.early.stop}} callback.
#' @param maximize If \code{feval} and \code{early_stopping_rounds} are set,
#'   then this parameter must be set as well.
#'   When it is \code{TRUE}, it means the larger the evaluation score the better.
#'   This parameter is passed to the \code{\link{cb.early.stop}} callback.
#' @param save_period when it is non-NULL, model is saved to disk after every \code{save_period} rounds,
#'   0 means save at the end. The saving is handled by the \code{\link{cb.save.model}} callback.
#'   If 2, some additional information will be printed out.
#'   Note that setting `verbose > 0` automatically engages the
#'   `xgb.cb.print.evaluation(period=1)` callback function.
#' @param print_every_n Print each nth iteration evaluation messages when `verbose>0`.
#'   Default is 1 which means all messages are printed. This parameter is passed to the
#'   [xgb.cb.print.evaluation()] callback.
#' @param early_stopping_rounds If `NULL`, the early stopping function is not triggered.
#'   If set to an integer `k`, training with a validation set will stop if the performance
#'   doesn't improve for `k` rounds. Setting this parameter engages the [xgb.cb.early.stop()] callback.
#' @param maximize If `feval` and `early_stopping_rounds` are set, then this parameter must be set as well.
#'   When it is `TRUE`, it means the larger the evaluation score the better.
#'   This parameter is passed to the [xgb.cb.early.stop()] callback.
#' @param save_period When not `NULL`, model is saved to disk after every `save_period` rounds.
#'   0 means save at the end. The saving is handled by the [xgb.cb.save.model()] callback.
#' @param save_name the name or path for periodically saved model file.
#' @param xgb_model a previously built model to continue the training from.
#'   Could be either an object of class \code{xgb.Booster}, or its raw data, or the name of a
#'   file with a previously saved model.
#' @param callbacks a list of callback functions to perform various task during boosting.
#'   See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
#'   parameters' values. User can provide either existing or their own callback methods in order
#'   to customize the training process.
#' @param ... other parameters to pass to \code{params}.
#' @param label vector of response values. Should not be provided when data is
#'   a local data file name or an \code{xgb.DMatrix}.
#' @param missing by default is set to NA, which means that NA values should be considered as 'missing'
#'   by the algorithm. Sometimes, 0 or other extreme value might be used to represent missing values.
#'   This parameter is only used when input is a dense matrix.
#' @param weight a vector indicating the weight for each row of the input.
#' @param xgb_model A previously built model to continue the training from.
#'   Could be either an object of class `xgb.Booster`, or its raw data, or the name of a
#'   file with a previously saved model.
#' @param callbacks A list of callback functions to perform various task during boosting.
#'   See [xgb.Callback()]. Some of the callbacks are automatically created depending on the
#'   parameters' values. User can provide either existing or their own callback methods in order
#'   to customize the training process.
#'
#'   Note that some callbacks might try to leave attributes in the resulting model object,
#'   such as an evaluation log (a `data.table` object) - be aware that these objects are kept
#'   as R attributes, and thus do not get saved when using XGBoost's own serializers like
#'   [xgb.save()] (but are kept when using R serializers like [saveRDS()]).
#' @param ... Other parameters to pass to `params`.
#'
#' @return An object of class `xgb.Booster`.
#'
#' @details
#' These are the training functions for \code{xgboost}.
#' These are the training functions for [xgboost()].
#'
#' The \code{xgb.train} interface supports advanced features such as \code{watchlist},
#' The `xgb.train()` interface supports advanced features such as `evals`,
#' customized objective and evaluation metric functions, therefore it is more flexible
#' than the \code{xgboost} interface.
#' than the [xgboost()] interface.
#'
#' Parallelization is automatically enabled if \code{OpenMP} is present.
#' Number of threads can also be manually specified via \code{nthread} parameter.
#' Parallelization is automatically enabled if OpenMP is present.
#' Number of threads can also be manually specified via the `nthread` parameter.
#'
#' While in other interfaces, the random seed defaults to zero, in R, if a parameter `seed`
#' is not manually supplied, it will generate a random seed through R's own random number generator,
#' whose seed in turn is controllable through `set.seed`. If `seed` is passed, it will override the
#' RNG from R.
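#'
#' For instance (sketch, assuming a `dtrain` DMatrix and a `params` list), the
#' following two runs should produce the same model, because R's RNG seeds
#' XGBoost's internal `seed`:
#'
#' set.seed(123); bst1 <- xgb.train(params, dtrain, nrounds = 10)
#' set.seed(123); bst2 <- xgb.train(params, dtrain, nrounds = 10)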
#'
|
||||
#' The evaluation metric is chosen automatically by XGBoost (according to the objective)
|
||||
#' when the \code{eval_metric} parameter is not provided.
|
||||
#' User may set one or several \code{eval_metric} parameters.
|
||||
#' when the `eval_metric` parameter is not provided.
|
||||
#' User may set one or several `eval_metric` parameters.
|
||||
#' Note that when using a customized metric, only this single metric can be used.
|
||||
#' The following is the list of built-in metrics for which XGBoost provides optimized implementation:
|
||||
#' \itemize{
|
||||
#' \item \code{rmse} root mean square error. \url{https://en.wikipedia.org/wiki/Root_mean_square_error}
|
||||
#' \item \code{logloss} negative log-likelihood. \url{https://en.wikipedia.org/wiki/Log-likelihood}
|
||||
#' \item \code{mlogloss} multiclass logloss. \url{https://scikit-learn.org/stable/modules/generated/sklearn.metrics.log_loss.html}
|
||||
#' \item \code{error} Binary classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
|
||||
#' By default, it uses the 0.5 threshold for predicted values to define negative and positive instances.
|
||||
#' Different threshold (e.g., 0.) could be specified as "error@0."
|
||||
#' \item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
|
||||
#' \item \code{mae} Mean absolute error
|
||||
#' \item \code{mape} Mean absolute percentage error
|
||||
#' \item \code{auc} Area under the curve. \url{https://en.wikipedia.org/wiki/Receiver_operating_characteristic#'Area_under_curve} for ranking evaluation.
|
||||
#' \item \code{aucpr} Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation.
|
||||
#' \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{https://en.wikipedia.org/wiki/NDCG}
|
||||
#' }
|
||||
#' - `rmse`: Root mean square error. \url{https://en.wikipedia.org/wiki/Root_mean_square_error}
|
||||
#' - `logloss`: Negative log-likelihood. \url{https://en.wikipedia.org/wiki/Log-likelihood}
|
||||
#' - `mlogloss`: Multiclass logloss. \url{https://scikit-learn.org/stable/modules/generated/sklearn.metrics.log_loss.html}
|
||||
#' - `error`: Binary classification error rate. It is calculated as `(# wrong cases) / (# all cases)`.
|
||||
#' By default, it uses the 0.5 threshold for predicted values to define negative and positive instances.
|
||||
#' A different threshold (e.g., 0.) can be specified as `error@0`.
|
||||
#' - `merror`: Multiclass classification error rate. It is calculated as `(# wrong cases) / (# all cases)`.
|
||||
#' - `mae`: Mean absolute error.
|
||||
#' - `mape`: Mean absolute percentage error.
|
||||
#' - `auc`: Area under the curve.
|
||||
#' \url{https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_curve} for ranking evaluation.
|
||||
#' - `aucpr`: Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation.
|
||||
#' - `ndcg`: Normalized Discounted Cumulative Gain (for ranking task). \url{https://en.wikipedia.org/wiki/NDCG}
|
||||
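For instance (a sketch, with `dtrain` and `evals` as in the examples further below), several metrics from this list can be tracked at once, and the classification threshold of `error` can be changed through the `@` suffix:

params <- list(
  objective = "binary:logistic",
  eval_metric = list("logloss", "error", "error@0.7")  # all three are evaluated each round
)
bst <- xgb.train(params, dtrain, nrounds = 2, evals = evals)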
#'
|
||||
#' The following callbacks are automatically created when certain parameters are set:
|
||||
#' \itemize{
|
||||
#' \item \code{cb.print.evaluation} is turned on when \code{verbose > 0};
|
||||
#' and the \code{print_every_n} parameter is passed to it.
|
||||
#' \item \code{cb.evaluation.log} is on when \code{watchlist} is present.
|
||||
#' \item \code{cb.early.stop}: when \code{early_stopping_rounds} is set.
|
||||
#' \item \code{cb.save.model}: when \code{save_period > 0} is set.
|
||||
#' }
|
||||
#' - [xgb.cb.print.evaluation()] is turned on when `verbose > 0` and the `print_every_n`
|
||||
#' parameter is passed to it.
|
||||
#' - [xgb.cb.evaluation.log()] is on when `evals` is present.
|
||||
#' - [xgb.cb.early.stop()]: When `early_stopping_rounds` is set.
|
||||
#' - [xgb.cb.save.model()]: When `save_period > 0` is set.
|
||||
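To make the mapping concrete, the two calls in this sketch should behave equivalently (callback constructors as named above; `params`, `dtrain` and `evals` as in the examples further below):

# implicit: the early-stopping callback is created from the parameter
bst <- xgb.train(params, dtrain, nrounds = 25, evals = evals, early_stopping_rounds = 3)

# explicit: pass the same callback yourself
bst <- xgb.train(params, dtrain, nrounds = 25, evals = evals,
                 callbacks = list(xgb.cb.early.stop(3)))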
#'
|
||||
#' @return
|
||||
#' An object of class \code{xgb.Booster} with the following elements:
|
||||
#' \itemize{
|
||||
#' \item \code{handle} a handle (pointer) to the xgboost model in memory.
|
||||
#' \item \code{raw} a cached memory dump of the xgboost model saved as R's \code{raw} type.
|
||||
#' \item \code{niter} number of boosting iterations.
|
||||
#' \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
|
||||
#' first column corresponding to iteration number and the rest corresponding to evaluation
|
||||
#' metrics' values. It is created by the \code{\link{cb.evaluation.log}} callback.
|
||||
#' \item \code{call} a function call.
|
||||
#' \item \code{params} parameters that were passed to the xgboost library. Note that it does not
|
||||
#' capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
|
||||
#' \item \code{callbacks} callback functions that were either automatically assigned or
|
||||
#' explicitly passed.
|
||||
#' \item \code{best_iteration} iteration number with the best evaluation metric value
|
||||
#' (only available with early stopping).
|
||||
#' \item \code{best_score} the best evaluation metric value
|
||||
#' (only available with early stopping).
|
||||
#' \item \code{feature_names} names of the training dataset features
|
||||
#' (only when column names were defined in training data).
|
||||
#' \item \code{nfeatures} number of features in training data.
|
||||
#' }
|
||||
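With the old-style return value described above, these elements are plain list fields, so after a run with early stopping one might inspect them roughly as follows (a sketch; `best_iteration` and `best_score` exist only when early stopping was used):

bst$best_iteration        # iteration with the best evaluation metric value
bst$best_score            # the metric value at that iteration
head(bst$evaluation_log)  # per-iteration evaluation history (a data.table)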
#' Note that objects of type `xgb.Booster` as returned by this function behave a bit differently
|
||||
#' from typical R objects (it's an 'altrep' list class), and it makes a separation between
|
||||
#' internal booster attributes (restricted to jsonifyable data), accessed through [xgb.attr()]
|
||||
#' and shared between interfaces through serialization functions like [xgb.save()]; and
|
||||
#' R-specific attributes (typically the result from a callback), accessed through [attributes()]
|
||||
#' and [attr()], which are otherwise
|
||||
#' only used in the R interface, only kept when using R's serializers like [saveRDS()], and
|
||||
#' not used in any way by functions like `predict.xgb.Booster()`.
|
||||
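A short sketch of that distinction (the attribute names used here are arbitrary):

# internal booster attribute: restricted to jsonifyable data, kept by xgb.save(),
# and shared with the other XGBoost interfaces
xgb.attr(bst, "note") <- "saved with the model"
xgb.attr(bst, "note")

# R-specific attribute: kept by saveRDS() but not by xgb.save()
attr(bst, "r_note") <- "R-side only"
attributes(bst)$r_note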
#'
|
||||
#' @seealso
|
||||
#' \code{\link{callbacks}},
|
||||
#' \code{\link{predict.xgb.Booster}},
|
||||
#' \code{\link{xgb.cv}}
|
||||
#' Be aware that one such R attribute that is automatically added is `params` - this attribute
|
||||
#' is assigned from the `params` argument to this function, and is only meant to serve as a
|
||||
#' reference for what went into the booster, but is not used in other methods that take a booster
|
||||
#' object - so for example, changing the booster's configuration requires calling `xgb.config<-`
|
||||
#' or `xgb.parameters<-`, while simply modifying `attributes(model)$params$<...>` will have no
|
||||
#' effect elsewhere.
|
||||
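That is, to actually change the booster's configuration one would write something along these lines, rather than editing the stored attribute:

xgb.parameters(bst) <- list(eta = 0.1)  # takes effect in the booster itself

attributes(bst)$params$eta <- 0.1       # only edits the R-side record; no effect elsewhere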
#'
|
||||
#' @seealso [xgb.Callback()], [predict.xgb.Booster()], [xgb.cv()]
|
||||
#'
|
||||
#' @references
|
||||
#'
|
||||
#' Tianqi Chen and Carlos Guestrin, "XGBoost: A Scalable Tree Boosting System",
|
||||
#' 22nd SIGKDD Conference on Knowledge Discovery and Data Mining, 2016, \url{https://arxiv.org/abs/1603.02754}
|
||||
#'
|
||||
#' @examples
|
||||
#' data(agaricus.train, package='xgboost')
|
||||
#' data(agaricus.test, package='xgboost')
|
||||
#' data(agaricus.train, package = "xgboost")
|
||||
#' data(agaricus.test, package = "xgboost")
|
||||
#'
|
||||
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))
|
||||
#' dtest <- with(agaricus.test, xgb.DMatrix(data, label = label))
|
||||
#' watchlist <- list(train = dtrain, eval = dtest)
|
||||
#' ## Keep the number of threads to 1 for examples
|
||||
#' nthread <- 1
|
||||
#' data.table::setDTthreads(nthread)
|
||||
#'
|
||||
#' dtrain <- with(
|
||||
#' agaricus.train, xgb.DMatrix(data, label = label, nthread = nthread)
|
||||
#' )
|
||||
#' dtest <- with(
|
||||
#' agaricus.test, xgb.DMatrix(data, label = label, nthread = nthread)
|
||||
#' )
|
||||
#' evals <- list(train = dtrain, eval = dtest)
|
||||
#'
|
||||
#' ## A simple xgb.train example:
|
||||
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
|
||||
#' objective = "binary:logistic", eval_metric = "auc")
|
||||
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)
|
||||
#' param <- list(
|
||||
#' max_depth = 2,
|
||||
#' eta = 1,
|
||||
#' nthread = nthread,
|
||||
#' objective = "binary:logistic",
|
||||
#' eval_metric = "auc"
|
||||
#' )
|
||||
#' bst <- xgb.train(param, dtrain, nrounds = 2, evals = evals, verbose = 0)
|
||||
#'
|
||||
#'
|
||||
#' ## An xgb.train example where custom objective and evaluation metric are used:
|
||||
#' ## An xgb.train example where custom objective and evaluation metric are
|
||||
#' ## used:
|
||||
#' logregobj <- function(preds, dtrain) {
|
||||
#' labels <- getinfo(dtrain, "label")
|
||||
#' preds <- 1/(1 + exp(-preds))
|
||||
@@ -218,40 +278,69 @@
|
||||
#'
|
||||
#' # These functions could be used by passing them either:
|
||||
#' # as 'objective' and 'eval_metric' parameters in the params list:
|
||||
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
|
||||
#' objective = logregobj, eval_metric = evalerror)
|
||||
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)
|
||||
#' param <- list(
|
||||
#' max_depth = 2,
|
||||
#' eta = 1,
|
||||
#' nthread = nthread,
|
||||
#' objective = logregobj,
|
||||
#' eval_metric = evalerror
|
||||
#' )
|
||||
#' bst <- xgb.train(param, dtrain, nrounds = 2, evals = evals, verbose = 0)
|
||||
#'
|
||||
#' # or through the ... arguments:
|
||||
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2)
|
||||
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
|
||||
#' objective = logregobj, eval_metric = evalerror)
|
||||
#' param <- list(max_depth = 2, eta = 1, nthread = nthread)
|
||||
#' bst <- xgb.train(
|
||||
#' param,
|
||||
#' dtrain,
|
||||
#' nrounds = 2,
|
||||
#' evals = evals,
|
||||
#' verbose = 0,
|
||||
#' objective = logregobj,
|
||||
#' eval_metric = evalerror
|
||||
#' )
|
||||
#'
|
||||
#' # or as dedicated 'obj' and 'feval' parameters of xgb.train:
|
||||
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
|
||||
#' obj = logregobj, feval = evalerror)
|
||||
#' bst <- xgb.train(
|
||||
#' param, dtrain, nrounds = 2, evals = evals, obj = logregobj, feval = evalerror
|
||||
#' )
|
||||
#'
|
||||
#'
|
||||
#' ## An xgb.train example of using variable learning rates at each iteration:
|
||||
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
|
||||
#' objective = "binary:logistic", eval_metric = "auc")
|
||||
#' param <- list(
|
||||
#' max_depth = 2,
|
||||
#' eta = 1,
|
||||
#' nthread = nthread,
|
||||
#' objective = "binary:logistic",
|
||||
#' eval_metric = "auc"
|
||||
#' )
|
||||
#' my_etas <- list(eta = c(0.5, 0.1))
|
||||
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
|
||||
#' callbacks = list(cb.reset.parameters(my_etas)))
|
||||
#'
|
||||
#' bst <- xgb.train(
|
||||
#' param,
|
||||
#' dtrain,
|
||||
#' nrounds = 2,
|
||||
#' evals = evals,
|
||||
#' verbose = 0,
|
||||
#' callbacks = list(xgb.cb.reset.parameters(my_etas))
|
||||
#' )
|
||||
#'
|
||||
#' ## Early stopping:
|
||||
#' bst <- xgb.train(param, dtrain, nrounds = 25, watchlist,
|
||||
#' early_stopping_rounds = 3)
|
||||
#' bst <- xgb.train(
|
||||
#' param, dtrain, nrounds = 25, evals = evals, early_stopping_rounds = 3
|
||||
#' )
|
||||
#'
|
||||
#' ## An 'xgboost' interface example:
|
||||
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
|
||||
#' max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
|
||||
#' objective = "binary:logistic")
|
||||
#' bst <- xgboost(
|
||||
#' x = agaricus.train$data,
|
||||
#' y = factor(agaricus.train$label),
|
||||
#' params = list(max_depth = 2, eta = 1),
|
||||
#' nthread = nthread,
|
||||
#' nrounds = 2
|
||||
#' )
|
||||
#' pred <- predict(bst, agaricus.test$data)
|
||||
#'
|
||||
#' @rdname xgb.train
|
||||
#' @export
|
||||
xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
|
||||
xgb.train <- function(params = list(), data, nrounds, evals = list(),
|
||||
obj = NULL, feval = NULL, verbose = 1, print_every_n = 1L,
|
||||
early_stopping_rounds = NULL, maximize = NULL,
|
||||
save_period = NULL, save_name = "xgboost.model",
|
||||
@@ -264,71 +353,78 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
|
||||
check.custom.obj()
|
||||
check.custom.eval()
|
||||
|
||||
# data & watchlist checks
|
||||
# data & evals checks
|
||||
dtrain <- data
|
||||
if (!inherits(dtrain, "xgb.DMatrix"))
|
||||
stop("second argument dtrain must be xgb.DMatrix")
|
||||
if (length(watchlist) > 0) {
|
||||
if (typeof(watchlist) != "list" ||
|
||||
!all(vapply(watchlist, inherits, logical(1), what = 'xgb.DMatrix')))
|
||||
stop("watchlist must be a list of xgb.DMatrix elements")
|
||||
evnames <- names(watchlist)
|
||||
if (length(evals) > 0) {
|
||||
if (typeof(evals) != "list" ||
|
||||
!all(vapply(evals, inherits, logical(1), what = 'xgb.DMatrix')))
|
||||
stop("'evals' must be a list of xgb.DMatrix elements")
|
||||
evnames <- names(evals)
|
||||
if (is.null(evnames) || any(evnames == ""))
|
||||
stop("each element of the watchlist must have a name tag")
|
||||
stop("each element of 'evals' must have a name tag")
|
||||
}
|
||||
# Handle multiple evaluation metrics given as a list
|
||||
for (m in params$eval_metric) {
|
||||
params <- c(params, list(eval_metric = m))
|
||||
}
|
||||
|
||||
# evaluation printing callback
|
||||
params <- c(params)
|
||||
print_every_n <- max(as.integer(print_every_n), 1L)
|
||||
if (!has.callbacks(callbacks, 'cb.print.evaluation') &&
|
||||
verbose) {
|
||||
callbacks <- add.cb(callbacks, cb.print.evaluation(print_every_n))
|
||||
params['validate_parameters'] <- TRUE
|
||||
if (!("seed" %in% names(params))) {
|
||||
params[["seed"]] <- sample(.Machine$integer.max, size = 1)
|
||||
}
|
||||
# evaluation log callback: it is automatically enabled when watchlist is provided
|
||||
evaluation_log <- list()
|
||||
if (!has.callbacks(callbacks, 'cb.evaluation.log') &&
|
||||
length(watchlist) > 0) {
|
||||
callbacks <- add.cb(callbacks, cb.evaluation.log())
|
||||
|
||||
# callbacks
|
||||
tmp <- .process.callbacks(callbacks, is_cv = FALSE)
|
||||
callbacks <- tmp$callbacks
|
||||
cb_names <- tmp$cb_names
|
||||
rm(tmp)
|
||||
|
||||
# Early stopping callback (should always come first)
|
||||
if (!is.null(early_stopping_rounds) && !("early_stop" %in% cb_names)) {
|
||||
callbacks <- add.callback(
|
||||
callbacks,
|
||||
xgb.cb.early.stop(
|
||||
early_stopping_rounds,
|
||||
maximize = maximize,
|
||||
verbose = verbose
|
||||
),
|
||||
as_first_elt = TRUE
|
||||
)
|
||||
}
|
||||
# evaluation printing callback
|
||||
print_every_n <- max(as.integer(print_every_n), 1L)
|
||||
if (verbose && !("print_evaluation" %in% cb_names)) {
|
||||
callbacks <- add.callback(callbacks, xgb.cb.print.evaluation(print_every_n))
|
||||
}
|
||||
# evaluation log callback: it is automatically enabled when 'evals' is provided
|
||||
if (length(evals) && !("evaluation_log" %in% cb_names)) {
|
||||
callbacks <- add.callback(callbacks, xgb.cb.evaluation.log())
|
||||
}
|
||||
# Model saving callback
|
||||
if (!is.null(save_period) &&
|
||||
!has.callbacks(callbacks, 'cb.save.model')) {
|
||||
callbacks <- add.cb(callbacks, cb.save.model(save_period, save_name))
|
||||
}
|
||||
# Early stopping callback
|
||||
stop_condition <- FALSE
|
||||
if (!is.null(early_stopping_rounds) &&
|
||||
!has.callbacks(callbacks, 'cb.early.stop')) {
|
||||
callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds,
|
||||
maximize = maximize, verbose = verbose))
|
||||
}
|
||||
|
||||
# Sort the callbacks into categories
|
||||
cb <- categorize.callbacks(callbacks)
|
||||
params['validate_parameters'] <- TRUE
|
||||
if (!is.null(params[['seed']])) {
|
||||
warning("xgb.train: `seed` is ignored in R package. Use `set.seed()` instead.")
|
||||
if (!is.null(save_period) && !("save_model" %in% cb_names)) {
|
||||
callbacks <- add.callback(callbacks, xgb.cb.save.model(save_period, save_name))
|
||||
}
|
||||
|
||||
# The tree updating process would need slightly different handling
|
||||
is_update <- NVL(params[['process_type']], '.') == 'update'
|
||||
|
||||
# Construct a booster (either a new one or load from xgb_model)
|
||||
handle <- xgb.Booster.handle(params, append(watchlist, dtrain), xgb_model)
|
||||
bst <- xgb.handleToBooster(handle)
|
||||
bst <- xgb.Booster(
|
||||
params = params,
|
||||
cachelist = append(evals, dtrain),
|
||||
modelfile = xgb_model
|
||||
)
|
||||
niter_init <- bst$niter
|
||||
bst <- bst$bst
|
||||
.Call(
|
||||
XGBoosterCopyInfoFromDMatrix_R,
|
||||
xgb.get.handle(bst),
|
||||
dtrain
|
||||
)
|
||||
|
||||
# extract parameters that can affect the relationship b/w #trees and #iterations
|
||||
num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1)
|
||||
num_parallel_tree <- max(as.numeric(NVL(params[['num_parallel_tree']], 1)), 1)
|
||||
|
||||
# When the 'xgb_model' was set, find out how many boosting iterations it has
|
||||
niter_init <- 0
|
||||
if (!is.null(xgb_model)) {
|
||||
niter_init <- as.numeric(xgb.attr(bst, 'niter')) + 1
|
||||
if (length(niter_init) == 0) {
|
||||
niter_init <- xgb.ntree(bst) %/% (num_parallel_tree * num_class)
|
||||
}
|
||||
}
|
||||
if (is_update && nrounds > niter_init)
|
||||
stop("nrounds cannot be larger than ", niter_init, " (nrounds of xgb_model)")
|
||||
|
||||
@@ -336,49 +432,83 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
|
||||
begin_iteration <- niter_skip + 1
|
||||
end_iteration <- niter_skip + nrounds
|
||||
|
||||
.execute.cb.before.training(
|
||||
callbacks,
|
||||
bst,
|
||||
dtrain,
|
||||
evals,
|
||||
begin_iteration,
|
||||
end_iteration
|
||||
)
|
||||
|
||||
# the main loop for boosting iterations
|
||||
for (iteration in begin_iteration:end_iteration) {
|
||||
|
||||
for (f in cb$pre_iter) f()
|
||||
.execute.cb.before.iter(
|
||||
callbacks,
|
||||
bst,
|
||||
dtrain,
|
||||
evals,
|
||||
iteration
|
||||
)
|
||||
|
||||
xgb.iter.update(bst$handle, dtrain, iteration - 1, obj)
|
||||
xgb.iter.update(
|
||||
bst = bst,
|
||||
dtrain = dtrain,
|
||||
iter = iteration - 1,
|
||||
obj = obj
|
||||
)
|
||||
|
||||
if (length(watchlist) > 0)
|
||||
bst_evaluation <- xgb.iter.eval(bst$handle, watchlist, iteration - 1, feval)
|
||||
|
||||
xgb.attr(bst$handle, 'niter') <- iteration - 1
|
||||
|
||||
for (f in cb$post_iter) f()
|
||||
|
||||
if (stop_condition) break
|
||||
}
|
||||
for (f in cb$finalize) f(finalize = TRUE)
|
||||
|
||||
bst <- xgb.Booster.complete(bst, saveraw = TRUE)
|
||||
|
||||
# store the total number of boosting iterations
|
||||
bst$niter <- end_iteration
|
||||
|
||||
# store the evaluation results
|
||||
if (length(evaluation_log) > 0 &&
|
||||
nrow(evaluation_log) > 0) {
|
||||
# include the previous compatible history when available
|
||||
if (inherits(xgb_model, 'xgb.Booster') &&
|
||||
!is_update &&
|
||||
!is.null(xgb_model$evaluation_log) &&
|
||||
isTRUE(all.equal(colnames(evaluation_log),
|
||||
colnames(xgb_model$evaluation_log)))) {
|
||||
evaluation_log <- rbindlist(list(xgb_model$evaluation_log, evaluation_log))
|
||||
bst_evaluation <- NULL
|
||||
if (length(evals) > 0) {
|
||||
bst_evaluation <- xgb.iter.eval(
|
||||
bst = bst,
|
||||
evals = evals,
|
||||
iter = iteration - 1,
|
||||
feval = feval
|
||||
)
|
||||
}
|
||||
bst$evaluation_log <- evaluation_log
|
||||
|
||||
should_stop <- .execute.cb.after.iter(
|
||||
callbacks,
|
||||
bst,
|
||||
dtrain,
|
||||
evals,
|
||||
iteration,
|
||||
bst_evaluation
|
||||
)
|
||||
|
||||
if (should_stop) break
|
||||
}
|
||||
|
||||
bst$call <- match.call()
|
||||
bst$params <- params
|
||||
bst$callbacks <- callbacks
|
||||
if (!is.null(colnames(dtrain)))
|
||||
bst$feature_names <- colnames(dtrain)
|
||||
bst$nfeatures <- ncol(dtrain)
|
||||
cb_outputs <- .execute.cb.after.training(
|
||||
callbacks,
|
||||
bst,
|
||||
dtrain,
|
||||
evals,
|
||||
iteration,
|
||||
bst_evaluation
|
||||
)
|
||||
|
||||
extra_attrs <- list(
|
||||
call = match.call(),
|
||||
params = params
|
||||
)
|
||||
|
||||
curr_attrs <- attributes(bst)
|
||||
if (NROW(curr_attrs)) {
|
||||
curr_attrs <- curr_attrs[
|
||||
setdiff(
|
||||
names(curr_attrs),
|
||||
c(names(extra_attrs), names(cb_outputs))
|
||||
)
|
||||
]
|
||||
}
|
||||
curr_attrs <- c(extra_attrs, curr_attrs)
|
||||
if (NROW(cb_outputs)) {
|
||||
curr_attrs <- c(curr_attrs, cb_outputs)
|
||||
}
|
||||
attributes(bst) <- curr_attrs
|
||||
|
||||
return(bst)
|
||||
}
|
||||
|
||||
@@ -1,41 +0,0 @@
|
||||
#' Load the instance back from \code{\link{xgb.serialize}}
|
||||
#'
|
||||
#' @param buffer the buffer containing booster instance saved by \code{\link{xgb.serialize}}
|
||||
#' @param handle An \code{xgb.Booster.handle} object which will be overwritten with
|
||||
#' the new deserialized object. Must be a null handle (e.g. when loading the model through
|
||||
#' `readRDS`). If not provided, a new handle will be created.
|
||||
#' @return An \code{xgb.Booster.handle} object.
|
||||
#'
|
||||
#' @export
|
||||
xgb.unserialize <- function(buffer, handle = NULL) {
|
||||
cachelist <- list()
|
||||
if (is.null(handle)) {
|
||||
handle <- .Call(XGBoosterCreate_R, cachelist)
|
||||
} else {
|
||||
if (!is.null.handle(handle))
|
||||
stop("'handle' is not null/empty. Cannot overwrite existing handle.")
|
||||
.Call(XGBoosterCreateInEmptyObj_R, cachelist, handle)
|
||||
}
|
||||
tryCatch(
|
||||
.Call(XGBoosterUnserializeFromBuffer_R, handle, buffer),
|
||||
error = function(e) {
|
||||
error_msg <- conditionMessage(e)
|
||||
m <- regexec("(src[\\\\/]learner.cc:[0-9]+): Check failed: (header == serialisation_header_)",
|
||||
error_msg, perl = TRUE)
|
||||
groups <- regmatches(error_msg, m)[[1]]
|
||||
if (length(groups) == 3) {
|
||||
warning(paste("The model had been generated by XGBoost version 1.0.0 or earlier and was ",
|
||||
"loaded from a RDS file. We strongly ADVISE AGAINST using saveRDS() ",
|
||||
"function, to ensure that your model can be read in current and upcoming ",
|
||||
"XGBoost releases. Please use xgb.save() instead to preserve models for the ",
|
||||
"long term. For more details and explanation, see ",
|
||||
"https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html",
|
||||
sep = ""))
|
||||
.Call(XGBoosterLoadModelFromRaw_R, handle, buffer)
|
||||
} else {
|
||||
stop(e)
|
||||
}
|
||||
})
|
||||
class(handle) <- "xgb.Booster.handle"
|
||||
return (handle)
|
||||
}
|
||||
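For reference, typical usage of this now-removed function paired it with `xgb.serialize()`, roughly as in this sketch; per the warning in the code above, `xgb.save()` is the recommended route for long-term storage:

buf <- xgb.serialize(bst)        # raw vector holding the full booster state
handle <- xgb.unserialize(buf)   # restore it into a fresh handle

xgb.save(bst, "xgboost.model")   # preferred for persistence across versions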
[File diff suppressed because it is too large]
R-package/config.h.in (new file, 66 lines)
@@ -0,0 +1,66 @@
|
||||
/* config.h.in. Generated from configure.ac by autoheader. */
|
||||
|
||||
/* Define if building universal (internal helper macro) */
|
||||
#undef AC_APPLE_UNIVERSAL_BUILD
|
||||
|
||||
/* Define to 1 if you have the <inttypes.h> header file. */
|
||||
#undef HAVE_INTTYPES_H
|
||||
|
||||
/* Define to 1 if you have the <stdint.h> header file. */
|
||||
#undef HAVE_STDINT_H
|
||||
|
||||
/* Define to 1 if you have the <stdio.h> header file. */
|
||||
#undef HAVE_STDIO_H
|
||||
|
||||
/* Define to 1 if you have the <stdlib.h> header file. */
|
||||
#undef HAVE_STDLIB_H
|
||||
|
||||
/* Define to 1 if you have the <strings.h> header file. */
|
||||
#undef HAVE_STRINGS_H
|
||||
|
||||
/* Define to 1 if you have the <string.h> header file. */
|
||||
#undef HAVE_STRING_H
|
||||
|
||||
/* Define to 1 if you have the <sys/stat.h> header file. */
|
||||
#undef HAVE_SYS_STAT_H
|
||||
|
||||
/* Define to 1 if you have the <sys/types.h> header file. */
|
||||
#undef HAVE_SYS_TYPES_H
|
||||
|
||||
/* Define to 1 if you have the <unistd.h> header file. */
|
||||
#undef HAVE_UNISTD_H
|
||||
|
||||
/* Define to the address where bug reports for this package should be sent. */
|
||||
#undef PACKAGE_BUGREPORT
|
||||
|
||||
/* Define to the full name of this package. */
|
||||
#undef PACKAGE_NAME
|
||||
|
||||
/* Define to the full name and version of this package. */
|
||||
#undef PACKAGE_STRING
|
||||
|
||||
/* Define to the one symbol short name of this package. */
|
||||
#undef PACKAGE_TARNAME
|
||||
|
||||
/* Define to the home page for this package. */
|
||||
#undef PACKAGE_URL
|
||||
|
||||
/* Define to the version of this package. */
|
||||
#undef PACKAGE_VERSION
|
||||
|
||||
/* Define to 1 if all of the C90 standard headers exist (not just the ones
|
||||
required in a freestanding environment). This macro is provided for
|
||||
backward compatibility; new code need not use it. */
|
||||
#undef STDC_HEADERS
|
||||
|
||||
/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
|
||||
significant byte first (like Motorola and SPARC, unlike Intel). */
|
||||
#if defined AC_APPLE_UNIVERSAL_BUILD
|
||||
# if defined __BIG_ENDIAN__
|
||||
# define WORDS_BIGENDIAN 1
|
||||
# endif
|
||||
#else
|
||||
# ifndef WORDS_BIGENDIAN
|
||||
# undef WORDS_BIGENDIAN
|
||||
# endif
|
||||
#endif
|
||||
R-package/configure (vendored, 2386 lines)
[File diff suppressed because it is too large]
@@ -2,10 +2,25 @@
|
||||
|
||||
AC_PREREQ(2.69)
|
||||
|
||||
AC_INIT([xgboost],[1.7.0],[],[xgboost],[])
|
||||
AC_INIT([xgboost],[2.2.0],[],[xgboost],[])
|
||||
|
||||
# Use this line to set CC variable to a C compiler
|
||||
AC_PROG_CC
|
||||
: ${R_HOME=`R RHOME`}
|
||||
if test -z "${R_HOME}"; then
|
||||
echo "could not determine R_HOME"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
CXX17=`"${R_HOME}/bin/R" CMD config CXX17`
|
||||
CXX17STD=`"${R_HOME}/bin/R" CMD config CXX17STD`
|
||||
CXX="${CXX17} ${CXX17STD}"
|
||||
CXXFLAGS=`"${R_HOME}/bin/R" CMD config CXXFLAGS`
|
||||
|
||||
CC=`"${R_HOME}/bin/R" CMD config CC`
|
||||
CFLAGS=`"${R_HOME}/bin/R" CMD config CFLAGS`
|
||||
CPPFLAGS=`"${R_HOME}/bin/R" CMD config CPPFLAGS`
|
||||
|
||||
LDFLAGS=`"${R_HOME}/bin/R" CMD config LDFLAGS`
|
||||
AC_LANG(C++)
|
||||
|
||||
### Check whether backtrace() is part of libc or the external lib libexecinfo
|
||||
AC_MSG_CHECKING([Backtrace lib])
|
||||
@@ -13,11 +28,22 @@ AC_MSG_RESULT([])
|
||||
AC_CHECK_LIB([execinfo], [backtrace], [BACKTRACE_LIB=-lexecinfo], [BACKTRACE_LIB=''])
|
||||
|
||||
### Endian detection
|
||||
AC_MSG_CHECKING([endian])
|
||||
AC_MSG_RESULT([])
|
||||
AC_RUN_IFELSE([AC_LANG_PROGRAM([[#include <stdint.h>]], [[const uint16_t endianness = 256; return !!(*(const uint8_t *)&endianness);]])],
|
||||
[ENDIAN_FLAG="-DDMLC_CMAKE_LITTLE_ENDIAN=1"],
|
||||
[ENDIAN_FLAG="-DDMLC_CMAKE_LITTLE_ENDIAN=0"])
|
||||
AC_ARG_VAR(USE_LITTLE_ENDIAN, "Whether to build with little endian (checks at compile time if unset)")
|
||||
AS_IF([test -z "${USE_LITTLE_ENDIAN+x}"], [
|
||||
AC_MSG_NOTICE([Checking system endianness as USE_LITTLE_ENDIAN is unset])
|
||||
AC_MSG_CHECKING([system endianness])
|
||||
AC_C_BIGENDIAN(
|
||||
[AC_MSG_RESULT([using big endian])
|
||||
ENDIAN_FLAG="-DDMLC_CMAKE_LITTLE_ENDIAN=0"],
|
||||
[AC_MSG_RESULT([using little endian])
|
||||
ENDIAN_FLAG="-DDMLC_CMAKE_LITTLE_ENDIAN=1"],
|
||||
[AC_MSG_RESULT([unknown])
|
||||
AC_MSG_ERROR([Could not determine endianness. Please set USE_LITTLE_ENDIAN])]
|
||||
)
|
||||
], [
|
||||
AC_MSG_NOTICE([Forcing endianness to: ${USE_LITTLE_ENDIAN}])
|
||||
ENDIAN_FLAG="-DDMLC_CMAKE_LITTLE_ENDIAN=${USE_LITTLE_ENDIAN}"
|
||||
])
|
||||
|
||||
OPENMP_CXXFLAGS=""
|
||||
|
||||
@@ -28,12 +54,19 @@ fi
|
||||
|
||||
if test `uname -s` = "Darwin"
|
||||
then
|
||||
OPENMP_CXXFLAGS='-Xclang -fopenmp'
|
||||
OPENMP_LIB='-lomp'
|
||||
if command -v brew &> /dev/null
|
||||
then
|
||||
HOMEBREW_LIBOMP_PREFIX=`brew --prefix libomp`
|
||||
else
|
||||
# Homebrew not found
|
||||
HOMEBREW_LIBOMP_PREFIX=''
|
||||
fi
|
||||
OPENMP_CXXFLAGS="-Xpreprocessor -fopenmp -I${HOMEBREW_LIBOMP_PREFIX}/include"
|
||||
OPENMP_LIB="-lomp -L${HOMEBREW_LIBOMP_PREFIX}/lib"
|
||||
ac_pkg_openmp=no
|
||||
AC_MSG_CHECKING([whether OpenMP will work in a package])
|
||||
AC_LANG_CONFTEST([AC_LANG_PROGRAM([[#include <omp.h>]], [[ return (omp_get_max_threads() <= 1); ]])])
|
||||
${CC} -o conftest conftest.c ${CPPFLAGS} ${LDFLAGS} ${OPENMP_LIB} ${OPENMP_CXXFLAGS} 2>/dev/null && ./conftest && ac_pkg_openmp=yes
|
||||
${CXX} -o conftest conftest.cpp ${CPPFLAGS} ${LDFLAGS} ${OPENMP_LIB} ${OPENMP_CXXFLAGS} 2>/dev/null && ./conftest && ac_pkg_openmp=yes
|
||||
AC_MSG_RESULT([${ac_pkg_openmp}])
|
||||
if test "${ac_pkg_openmp}" = no; then
|
||||
OPENMP_CXXFLAGS=''
|
||||
@@ -51,4 +84,5 @@ AC_SUBST(OPENMP_LIB)
|
||||
AC_SUBST(ENDIAN_FLAG)
|
||||
AC_SUBST(BACKTRACE_LIB)
|
||||
AC_CONFIG_FILES([src/Makevars])
|
||||
AC_CONFIG_HEADERS([config.h])
|
||||
AC_OUTPUT
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
basic_walkthrough Basic feature walkthrough
|
||||
caret_wrapper Use xgboost to train in caret library
|
||||
custom_objective Customize loss function, and evaluation metric
|
||||
boost_from_prediction Boosting from existing prediction
|
||||
predict_first_ntree Predicting using first n trees
|
||||
generalized_linear_model Generalized Linear Model
|
||||
cross_validation Cross validation
|
||||
create_sparse_matrix Create Sparse Matrix
|
||||
predict_leaf_indices Predicting the corresponding leaves
|
||||
early_stopping Early Stop in training
|
||||
poisson_regression Poisson regression on count data
|
||||
tweedie_regression Tweedie regression
|
||||
gpu_accelerated GPU-accelerated tree building algorithms
|
||||
interaction_constraints Interaction constraints among features
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
XGBoost R Feature Walkthrough
|
||||
====
|
||||
* [Basic walkthrough of wrappers](basic_walkthrough.R)
|
||||
* [Train a xgboost model from caret library](caret_wrapper.R)
|
||||
* [Customize loss function, and evaluation metric](custom_objective.R)
|
||||
* [Boosting from existing prediction](boost_from_prediction.R)
|
||||
* [Predicting using first n trees](predict_first_ntree.R)
|
||||
* [Generalized Linear Model](generalized_linear_model.R)
|
||||
* [Cross validation](cross_validation.R)
|
||||
* [Create a sparse matrix from a dense one](create_sparse_matrix.R)
|
||||
* [Use GPU-accelerated tree building algorithms](gpu_accelerated.R)
|
||||
|
||||
Benchmarks
|
||||
====
|
||||
* [Starter script for Kaggle Higgs Boson](../../demo/kaggle-higgs)
|
||||
|
||||
Notes
|
||||
====
|
||||
* Contributions of examples and benchmarks are more than welcome!
|
||||
* If you'd like to share how you use xgboost to solve your problem, send a pull request :)
|
||||
@@ -1,112 +0,0 @@
|
||||
require(xgboost)
|
||||
require(methods)
|
||||
|
||||
# we load in the agaricus dataset
|
||||
# In this example, we are aiming to predict whether a mushroom is edible
|
||||
data(agaricus.train, package = 'xgboost')
|
||||
data(agaricus.test, package = 'xgboost')
|
||||
train <- agaricus.train
|
||||
test <- agaricus.test
|
||||
# the loaded data is stored in a sparseMatrix, and the label is a numeric vector in {0,1}
|
||||
class(train$label)
|
||||
class(train$data)
|
||||
|
||||
#-------------Basic Training using XGBoost-----------------
|
||||
# this is the basic usage of xgboost: you can put a matrix in the data field
|
||||
# note: we are putting in a sparse matrix here; xgboost naturally handles sparse input
|
||||
# use a sparse matrix when your features are sparse (e.g. when you are using one-hot encoded vectors)
|
||||
print("Training xgboost with sparseMatrix")
|
||||
bst <- xgboost(data = train$data, label = train$label, max_depth = 2, eta = 1, nrounds = 2,
|
||||
nthread = 2, objective = "binary:logistic")
|
||||
# alternatively, you can put in dense matrix, i.e. basic R-matrix
|
||||
print("Training xgboost with Matrix")
|
||||
bst <- xgboost(data = as.matrix(train$data), label = train$label, max_depth = 2, eta = 1, nrounds = 2,
|
||||
nthread = 2, objective = "binary:logistic")
|
||||
|
||||
# you can also put in an xgb.DMatrix object, which stores label, data and other metadata needed for advanced features
|
||||
print("Training xgboost with xgb.DMatrix")
|
||||
dtrain <- xgb.DMatrix(data = train$data, label = train$label)
|
||||
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, nthread = 2,
|
||||
objective = "binary:logistic")
|
||||
|
||||
# Verbose = 0,1,2
|
||||
print("Train xgboost with verbose 0, no message")
|
||||
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2,
|
||||
nthread = 2, objective = "binary:logistic", verbose = 0)
|
||||
print("Train xgboost with verbose 1, print evaluation metric")
|
||||
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2,
|
||||
nthread = 2, objective = "binary:logistic", verbose = 1)
|
||||
print("Train xgboost with verbose 2, also print information about tree")
|
||||
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2,
|
||||
nthread = 2, objective = "binary:logistic", verbose = 2)
|
||||
|
||||
# you can also specify data as file path to a LIBSVM format input
|
||||
# since we do not have this file with us, the following line is just for illustration
|
||||
# bst <- xgboost(data = 'agaricus.train.svm', max_depth = 2, eta = 1, nrounds = 2,objective = "binary:logistic")
|
||||
|
||||
#--------------------basic prediction using xgboost--------------
|
||||
# you can do prediction using the following line
|
||||
# you can put in Matrix, sparseMatrix, or xgb.DMatrix
|
||||
pred <- predict(bst, test$data)
|
||||
err <- mean(as.numeric(pred > 0.5) != test$label)
|
||||
print(paste("test-error=", err))
|
||||
|
||||
#-------------------save and load models-------------------------
|
||||
# save model to binary local file
|
||||
xgb.save(bst, "xgboost.model")
|
||||
# load binary model to R
|
||||
bst2 <- xgb.load("xgboost.model")
|
||||
pred2 <- predict(bst2, test$data)
|
||||
# pred2 should be identical to pred
|
||||
print(paste("sum(abs(pred2-pred))=", sum(abs(pred2 - pred))))
|
||||
|
||||
# save model to R's raw vector
|
||||
raw <- xgb.save.raw(bst)
|
||||
# load binary model to R
|
||||
bst3 <- xgb.load.raw(raw)
|
||||
pred3 <- predict(bst3, test$data)
|
||||
# pred3 should be identical to pred
|
||||
print(paste("sum(abs(pred3-pred))=", sum(abs(pred3 - pred))))
|
||||
|
||||
#----------------Advanced features --------------
|
||||
# to use advanced features, we need to put data in xgb.DMatrix
|
||||
dtrain <- xgb.DMatrix(data = train$data, label = train$label)
|
||||
dtest <- xgb.DMatrix(data = test$data, label = test$label)
|
||||
#---------------Using watchlist----------------
|
||||
# watchlist is a list of xgb.DMatrix objects, each of them tagged with a name
|
||||
watchlist <- list(train = dtrain, test = dtest)
|
||||
# to train with watchlist, use xgb.train, which contains more advanced features
|
||||
# watchlist allows us to monitor the evaluation result on all data in the list
|
||||
print("Train xgboost using xgb.train with watchlist")
|
||||
bst <- xgb.train(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, watchlist = watchlist,
|
||||
nthread = 2, objective = "binary:logistic")
|
||||
# we can change evaluation metrics, or use multiple evaluation metrics
|
||||
print("train xgboost using xgb.train with watchlist, watch logloss and error")
|
||||
bst <- xgb.train(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, watchlist = watchlist,
|
||||
eval_metric = "error", eval_metric = "logloss",
|
||||
nthread = 2, objective = "binary:logistic")
|
||||
|
||||
# xgb.DMatrix can also be saved using xgb.DMatrix.save
|
||||
xgb.DMatrix.save(dtrain, "dtrain.buffer")
|
||||
# to load it in, simply call xgb.DMatrix
|
||||
dtrain2 <- xgb.DMatrix("dtrain.buffer")
|
||||
bst <- xgb.train(data = dtrain2, max_depth = 2, eta = 1, nrounds = 2, watchlist = watchlist,
|
||||
nthread = 2, objective = "binary:logistic")
|
||||
# information can be extracted from xgb.DMatrix using getinfo
|
||||
label <- getinfo(dtest, "label")
|
||||
pred <- predict(bst, dtest)
|
||||
err <- as.numeric(sum(as.integer(pred > 0.5) != label)) / length(label)
|
||||
print(paste("test-error=", err))
|
||||
|
||||
# You can dump the tree you learned using xgb.dump into a text file
|
||||
dump_path <- file.path(tempdir(), 'dump.raw.txt')
|
||||
xgb.dump(bst, dump_path, with_stats = TRUE)
|
||||
|
||||
# Finally, you can check which features are the most important.
|
||||
print("Most important features (look at column Gain):")
|
||||
imp_matrix <- xgb.importance(feature_names = colnames(train$data), model = bst)
|
||||
print(imp_matrix)
|
||||
|
||||
# Feature importance bar plot by gain
|
||||
print("Feature importance Plot : ")
|
||||
print(xgb.plot.importance(importance_matrix = imp_matrix))
|
||||
@@ -1,26 +0,0 @@
|
||||
require(xgboost)
|
||||
# load in the agaricus dataset
|
||||
data(agaricus.train, package = 'xgboost')
|
||||
data(agaricus.test, package = 'xgboost')
|
||||
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
|
||||
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
|
||||
|
||||
watchlist <- list(eval = dtest, train = dtrain)
|
||||
###
|
||||
# advanced: start from an initial base prediction
|
||||
#
|
||||
print('start running example to start from an initial prediction')
|
||||
# train xgboost for 1 round
|
||||
param <- list(max_depth = 2, eta = 1, nthread = 2, objective = 'binary:logistic')
|
||||
bst <- xgb.train(param, dtrain, 1, watchlist)
|
||||
# Note: we need the margin value instead of the transformed prediction in set_base_margin
|
||||
# predicting with outputmargin=TRUE will always give you margin values before the logistic transformation
|
||||
ptrain <- predict(bst, dtrain, outputmargin = TRUE)
|
||||
ptest <- predict(bst, dtest, outputmargin = TRUE)
|
||||
# set the base_margin property of dtrain and dtest
|
||||
# base margin is the base prediction we will boost from
|
||||
setinfo(dtrain, "base_margin", ptrain)
|
||||
setinfo(dtest, "base_margin", ptest)
|
||||
|
||||
print('this is result of boost from initial prediction')
|
||||
bst <- xgb.train(params = param, data = dtrain, nrounds = 1, watchlist = watchlist)
|
||||
@@ -1,35 +0,0 @@
|
||||
# install development version of caret library that contains xgboost models
|
||||
devtools::install_github("topepo/caret/pkg/caret")
|
||||
require(caret)
|
||||
require(xgboost)
|
||||
require(data.table)
|
||||
require(vcd)
|
||||
require(e1071)
|
||||
|
||||
# Load Arthritis dataset in memory.
|
||||
data(Arthritis)
|
||||
# Create a copy of the dataset with the data.table package (data.table is 100% compatible with R data frames, but its syntax is a lot more consistent and its performance is really good).
|
||||
df <- data.table(Arthritis, keep.rownames = FALSE)
|
||||
|
||||
# Let's add some new categorical features to see if it helps. Of course these features are highly correlated with the Age feature. Usually that's not a good thing in ML, but tree algorithms (including boosted trees) are able to select the best features, even in the case of highly correlated ones.
|
||||
# For the first feature, we create age groups by rounding the real age. Note that we transform it to a factor (categorical data) so the algorithm treats the groups as independent values.
|
||||
df[, AgeDiscret := as.factor(round(Age / 10, 0))]
|
||||
|
||||
# Here is an even stronger simplification of the real age, with an arbitrary split at 30 years old. I chose this value arbitrarily. We will see later whether simplifying the information based on arbitrary values is a good strategy (I am sure you already have an idea of how well it will work!).
|
||||
df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
|
||||
|
||||
# We remove ID as there is nothing to learn from this feature (it will just add some noise as the dataset is small).
|
||||
df[, ID := NULL]
|
||||
|
||||
#-------------Basic Training using XGBoost in caret Library-----------------
|
||||
# Set up control parameters for caret::train
|
||||
# Here we use 10-fold cross-validation, repeated twice, with random search for tuning hyper-parameters.
|
||||
fitControl <- trainControl(method = "repeatedcv", number = 10, repeats = 2, search = "random")
|
||||
# train a xgbTree model using caret::train
|
||||
model <- train(factor(Improved)~., data = df, method = "xgbTree", trControl = fitControl)
|
||||
|
||||
# Instead of trees for our boosters, you can also fit a linear regression or logistic regression model using xgbLinear
|
||||
# model <- train(factor(Improved)~., data = df, method = "xgbLinear", trControl = fitControl)
|
||||
|
||||
# See model results
|
||||
print(model)
|
||||
@@ -1,89 +0,0 @@
|
||||
require(xgboost)
|
||||
require(Matrix)
|
||||
require(data.table)
|
||||
if (!require(vcd)) {
|
||||
install.packages('vcd') #Available in CRAN. Used for its dataset with categorical values.
|
||||
require(vcd)
|
||||
}
|
||||
# According to its documentation, XGBoost works only on numbers.
|
||||
# Sometimes the dataset we have to work with contains categorical data.
|
||||
# A categorical variable is one which has a fixed number of values. For example, if for each observation a variable called "Colour" can take only "red", "blue" or "green" as its value, it is a categorical variable.
|
||||
#
|
||||
# In R, a categorical variable is called a factor.
|
||||
# Type ?factor in console for more information.
|
||||
#
|
||||
# In this demo we will see how to transform a dense dataframe with categorical variables to a sparse matrix before analyzing it in XGBoost.
|
||||
# The method we are going to see is usually called "one hot encoding".
|
||||
|
||||
#load Arthritis dataset in memory.
|
||||
data(Arthritis)
|
||||
|
||||
# create a copy of the dataset with the data.table package (data.table is 100% compatible with R data frames, but its syntax is a lot more consistent and its performance is really good).
|
||||
df <- data.table(Arthritis, keep.rownames = FALSE)
|
||||
|
||||
# Let's have a look to the data.table
|
||||
cat("Print the dataset\n")
|
||||
print(df)
|
||||
|
||||
# 2 columns have factor type, one has ordinal type (an ordinal variable is a categorical variable whose values can be ordered, here: None > Some > Marked).
|
||||
cat("Structure of the dataset\n")
|
||||
str(df)
|
||||
|
||||
# Let's add some new categorical features to see if it helps. Of course these features are highly correlated with the Age feature. Usually that's not a good thing in ML, but tree algorithms (including boosted trees) are able to select the best features, even in the case of highly correlated ones.
|
||||
|
||||
# For the first feature, we create age groups by rounding the real age. Note that we transform it to a factor (categorical data) so the algorithm treats the groups as independent values.
|
||||
df[, AgeDiscret := as.factor(round(Age / 10, 0))]
|
||||
|
||||
# Here is an even stronger simplification of the real age, with an arbitrary split at 30 years old. I chose this value arbitrarily. We will see later whether simplifying the information based on arbitrary values is a good strategy (I am sure you already have an idea of how well it will work!).
|
||||
df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
|
||||
|
||||
# We remove ID as there is nothing to learn from this feature (it will just add some noise as the dataset is small).
|
||||
df[, ID := NULL]
|
||||
|
||||
# List the different values for the column Treatment: Placebo, Treated.
|
||||
cat("Values of the categorical feature Treatment\n")
|
||||
print(levels(df[, Treatment]))
|
||||
|
||||
# Next step, we will transform the categorical data to dummy variables.
|
||||
# This method is also called one hot encoding.
|
||||
# The purpose is to transform each value of each categorical feature in one binary feature.
|
||||
#
|
||||
# For example, the column Treatment will be replaced by two columns, Placebo and Treated. Each of them will be binary. An observation which had the value Placebo in column Treatment before the transformation will have, after the transformation, the value 1 in the new column Placebo and the value 0 in the new column Treated.
|
||||
#
|
||||
# The formula Improved ~ . - 1 used below means: transform all categorical features except the column Improved to binary values.
|
||||
# Column Improved is excluded because it will be our output column, the one we want to predict.
|
||||
sparse_matrix <- sparse.model.matrix(Improved ~ . - 1, data = df)
|
||||
|
||||
cat("Encoding of the sparse Matrix\n")
|
||||
print(sparse_matrix)
|
||||
|
||||
# Create the output vector (not sparse)
|
||||
# 1. Set, for all rows, field in Y column to 0;
|
||||
# 2. set Y to 1 when Improved == Marked;
|
||||
# 3. Return Y column
|
||||
output_vector <- df[, Y := 0][Improved == "Marked", Y := 1][, Y]
|
||||
|
||||
# Following is the same process as other demo
|
||||
cat("Learning...\n")
|
||||
bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 9,
|
||||
eta = 1, nthread = 2, nrounds = 10, objective = "binary:logistic")
|
||||
|
||||
importance <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst)
|
||||
print(importance)
|
||||
# According to the matrix below, the most important feature in this dataset for predicting whether the treatment will work is Age. The second most important feature is whether a placebo was received. Sex is third. Then we see our generated features (AgeDiscret); we can see that their contribution is very low (Gain column).
|
||||
|
||||
# Do these results make sense?
|
||||
# Let's run a Chi-squared test between each of these features and the outcome.
|
||||
|
||||
print(chisq.test(df$Age, df$Y))
|
||||
# The Pearson chi-squared statistic between Age and illness disappearing is 35.
|
||||
|
||||
print(chisq.test(df$AgeDiscret, df$Y))
|
||||
# Our first simplification of Age gives a chi-squared statistic of 8.
|
||||
|
||||
print(chisq.test(df$AgeCat, df$Y))
|
||||
# The essentially arbitrary split I made between young and old at 30 years gives a low chi-squared statistic of 2. It's a result we might expect: maybe in my mind being over 30 counts as old (I am 32 and starting to feel old, which may explain it), but for the illness we are studying, the vulnerable age is not the same. Don't let your "gut" lower the quality of your model. In "data science", there is science :-)
|
||||
|
||||
# As you can see, in general, destroying information by simplifying it won't improve your model; the Chi-squared tests demonstrate that. But in more complex cases, creating a new feature from an existing one that makes the link with the outcome more obvious may help the algorithm and improve the model. The case studied here is not complex enough to show that; check the Kaggle forums for some challenging datasets.
|
||||
# However, it's almost always worse when you add arbitrary rules.
|
||||
# Moreover, notice that even though we added some useless new features highly correlated with other features, the boosted tree algorithm was still able to choose the best one, which in this case is Age. A linear model may not be as robust in this scenario.
|
||||
@@ -1,51 +0,0 @@
|
||||
require(xgboost)
|
||||
# load in the agaricus dataset
|
||||
data(agaricus.train, package = 'xgboost')
|
||||
data(agaricus.test, package = 'xgboost')
|
||||
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
|
||||
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
|
||||
|
||||
nrounds <- 2
|
||||
param <- list(max_depth = 2, eta = 1, nthread = 2, objective = 'binary:logistic')
|
||||
|
||||
cat('running cross validation\n')
|
||||
# do cross validation, this will print result out as
|
||||
# [iteration] metric_name:mean_value+std_value
|
||||
# std_value is standard deviation of the metric
|
||||
xgb.cv(param, dtrain, nrounds, nfold = 5, metrics = {'error'})
|
||||
|
||||
cat('running cross validation, disable standard deviation display\n')
|
||||
# do cross validation; with showsd = FALSE this will print the result out as
|
||||
# [iteration] metric_name:mean_value
|
||||
# (the standard deviation of the metric is not shown)
|
||||
xgb.cv(param, dtrain, nrounds, nfold = 5,
|
||||
metrics = 'error', showsd = FALSE)
|
||||
|
||||
###
|
||||
# you can also do cross validation with customized loss function
|
||||
# See custom_objective.R
|
||||
##
|
||||
print ('running cross validation, with customized loss function')
|
||||
|
||||
logregobj <- function(preds, dtrain) {
|
||||
labels <- getinfo(dtrain, "label")
|
||||
preds <- 1 / (1 + exp(-preds))
|
||||
grad <- preds - labels
|
||||
hess <- preds * (1 - preds)
|
||||
return(list(grad = grad, hess = hess))
|
||||
}
|
||||
evalerror <- function(preds, dtrain) {
|
||||
labels <- getinfo(dtrain, "label")
|
||||
err <- as.numeric(sum(labels != (preds > 0))) / length(labels)
|
||||
return(list(metric = "error", value = err))
|
||||
}
|
||||
|
||||
param <- list(max_depth = 2, eta = 1,
|
||||
objective = logregobj, eval_metric = evalerror)
|
||||
# train with customized objective
|
||||
xgb.cv(params = param, data = dtrain, nrounds = nrounds, nfold = 5)
|
||||
|
||||
# do cross validation with prediction values for each fold
|
||||
res <- xgb.cv(params = param, data = dtrain, nrounds = nrounds, nfold = 5, prediction = TRUE)
|
||||
res$evaluation_log
|
||||
length(res$pred)
|
||||
@@ -1,65 +0,0 @@
|
||||
require(xgboost)
|
||||
# load in the agaricus dataset
|
||||
data(agaricus.train, package = 'xgboost')
|
||||
data(agaricus.test, package = 'xgboost')
|
||||
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
|
||||
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
|
||||
|
||||
# note: for a customized objective function, we leave 'objective' as the default
|
||||
# note: what we get in prediction is the margin value
|
||||
# you must know what you are doing
|
||||
watchlist <- list(eval = dtest, train = dtrain)
|
||||
num_round <- 2
|
||||
|
||||
# user-defined objective function: given predictions, return the gradient and second-order gradient
|
||||
# this is log-likelihood loss
|
||||
logregobj <- function(preds, dtrain) {
|
||||
labels <- getinfo(dtrain, "label")
|
||||
preds <- 1 / (1 + exp(-preds))
|
||||
grad <- preds - labels
|
||||
hess <- preds * (1 - preds)
|
||||
return(list(grad = grad, hess = hess))
|
||||
}
|
||||
|
||||
# user-defined evaluation function; returns a pair (metric_name, result)
|
||||
# NOTE: with a customized loss function, the default prediction value is the margin,
|
||||
# which may make built-in evaluation metrics not function properly.
|
||||
# For example, with logistic loss the prediction is the score before the logistic transformation,
|
||||
# while the built-in evaluation error assumes the input is after the logistic transformation.
|
||||
# Keep this in mind when you use the customization; you may need to write a customized evaluation function.
|
||||
evalerror <- function(preds, dtrain) {
|
||||
labels <- getinfo(dtrain, "label")
|
||||
err <- as.numeric(sum(labels != (preds > 0))) / length(labels)
|
||||
return(list(metric = "error", value = err))
|
||||
}
|
||||
|
||||
param <- list(max_depth = 2, eta = 1, nthread = 2, verbosity = 0,
|
||||
objective = logregobj, eval_metric = evalerror)
|
||||
print ('start training with user customized objective')
|
||||
# training with customized objective, we can also do step by step training
|
||||
# simply look at xgboost.py's implementation of train
|
||||
bst <- xgb.train(param, dtrain, num_round, watchlist)
|
||||
|
||||
#
|
||||
# there can be cases where you want additional information
|
||||
# to be considered besides the properties of the DMatrix you can get via getinfo
|
||||
# you can set additional information as attributes of the DMatrix
|
||||
|
||||
# set label attribute of dtrain to be label, we use label as an example, it can be anything
|
||||
attr(dtrain, 'label') <- getinfo(dtrain, 'label')
|
||||
# this is new customized objective, where you can access things you set
|
||||
# same thing applies to customized evaluation function
|
||||
logregobjattr <- function(preds, dtrain) {
|
||||
# now you can access the attribute in customized function
|
||||
labels <- attr(dtrain, 'label')
|
||||
preds <- 1 / (1 + exp(-preds))
|
||||
grad <- preds - labels
|
||||
hess <- preds * (1 - preds)
|
||||
return(list(grad = grad, hess = hess))
|
||||
}
|
||||
param <- list(max_depth = 2, eta = 1, nthread = 2, verbosity = 0,
|
||||
objective = logregobjattr, eval_metric = evalerror)
|
||||
print ('start training with user customized objective, with additional attributes in DMatrix')
|
||||
# training with customized objective, we can also do step by step training
|
||||
# simply look at xgboost.py's implementation of train
|
||||
bst <- xgb.train(param, dtrain, num_round, watchlist)
|
||||
@@ -1,40 +0,0 @@
|
||||
require(xgboost)
|
||||
# load in the agaricus dataset
|
||||
data(agaricus.train, package = 'xgboost')
|
||||
data(agaricus.test, package = 'xgboost')
|
||||
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
|
||||
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
|
||||
# note: for a customized objective function, we leave 'objective' as the default
|
||||
# note: what we get in prediction is the margin value
|
||||
# you must know what you are doing
|
||||
param <- list(max_depth = 2, eta = 1, nthread = 2, verbosity = 0)
|
||||
watchlist <- list(eval = dtest)
|
||||
num_round <- 20
|
||||
# user-defined objective function: given predictions, return the gradient and second-order gradient
|
||||
# this is log-likelihood loss
|
||||
logregobj <- function(preds, dtrain) {
|
||||
labels <- getinfo(dtrain, "label")
|
||||
preds <- 1 / (1 + exp(-preds))
|
||||
grad <- preds - labels
|
||||
hess <- preds * (1 - preds)
|
||||
return(list(grad = grad, hess = hess))
|
||||
}
|
||||
# user-defined evaluation function; returns a pair (metric_name, result)
|
||||
# NOTE: with a customized loss function, the default prediction value is the margin,
|
||||
# which may make built-in evaluation metrics not function properly.
|
||||
# For example, with logistic loss the prediction is the score before the logistic transformation,
|
||||
# while the built-in evaluation error assumes the input is after the logistic transformation.
|
||||
# Keep this in mind when you use the customization; you may need to write a customized evaluation function.
|
||||
evalerror <- function(preds, dtrain) {
|
||||
labels <- getinfo(dtrain, "label")
|
||||
err <- as.numeric(sum(labels != (preds > 0))) / length(labels)
|
||||
return(list(metric = "error", value = err))
|
||||
}
|
||||
print ('start training with early Stopping setting')
|
||||
|
||||
bst <- xgb.train(param, dtrain, num_round, watchlist,
|
||||
objective = logregobj, eval_metric = evalerror, maximize = FALSE,
|
||||
early_stopping_round = 3)
|
||||
bst <- xgb.cv(param, dtrain, num_round, nfold = 5,
|
||||
objective = logregobj, eval_metric = evalerror,
|
||||
maximize = FALSE, early_stopping_rounds = 3)
|
||||
@@ -1,33 +0,0 @@
|
||||
require(xgboost)
|
||||
# load in the agaricus dataset
|
||||
data(agaricus.train, package = 'xgboost')
|
||||
data(agaricus.test, package = 'xgboost')
|
||||
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
|
||||
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
|
||||
##
|
||||
# this script demonstrates how to fit a generalized linear model in xgboost
|
||||
# basically, we are using a linear model instead of trees for our boosters
|
||||
# you can fit a linear regression or a logistic regression model
|
||||
##
|
||||
|
||||
# change booster to gblinear, so that we are fitting a linear model
|
||||
# alpha is the L1 regularizer
|
||||
# lambda is the L2 regularizer
|
||||
# you can also set lambda_bias which is L2 regularizer on the bias term
|
||||
param <- list(objective = "binary:logistic", booster = "gblinear",
|
||||
nthread = 2, alpha = 0.0001, lambda = 1)
|
||||
|
||||
# normally, you do not need to set eta (step_size)
|
||||
# XGBoost uses a parallel coordinate descent algorithm (shotgun);
|
||||
# there could be an effect on convergence with parallelization in certain cases
|
||||
# setting eta to a smaller value, e.g. 0.5, can make the optimization more stable
|
||||
|
||||
##
|
||||
# the rest of settings are the same
|
||||
##
|
||||
watchlist <- list(eval = dtest, train = dtrain)
|
||||
num_round <- 2
|
||||
bst <- xgb.train(param, dtrain, num_round, watchlist)
|
||||
ypred <- predict(bst, dtest)
|
||||
labels <- getinfo(dtest, 'label')
|
||||
cat('error of preds=', mean(as.numeric(ypred > 0.5) != labels), '\n')
|
||||
@@ -1,45 +0,0 @@
|
||||
# An example of using GPU-accelerated tree building algorithms
|
||||
#
|
||||
# NOTE: it can only run if you have a CUDA-enabled GPU and the package was
|
||||
# specially compiled with GPU support.
|
||||
#
|
||||
# For the current functionality, see
|
||||
# https://xgboost.readthedocs.io/en/latest/gpu/index.html
|
||||
#
|
||||
|
||||
library('xgboost')
|
||||
|
||||
# Simulate N x p random matrix with some binomial response dependent on pp columns
|
||||
set.seed(111)
|
||||
N <- 1000000
|
||||
p <- 50
|
||||
pp <- 25
|
||||
X <- matrix(runif(N * p), ncol = p)
|
||||
betas <- 2 * runif(pp) - 1
|
||||
sel <- sort(sample(p, pp))
|
||||
m <- X[, sel] %*% betas - 1 + rnorm(N)
|
||||
y <- rbinom(N, 1, plogis(m))
|
||||
|
||||
tr <- sample.int(N, N * 0.75)
|
||||
dtrain <- xgb.DMatrix(X[tr, ], label = y[tr])
|
||||
dtest <- xgb.DMatrix(X[-tr, ], label = y[-tr])
|
||||
wl <- list(train = dtrain, test = dtest)
|
||||
|
||||
# An example of running 'gpu_hist' algorithm
|
||||
# which is
|
||||
# - similar to the 'hist'
|
||||
# - the fastest option for moderately large datasets
|
||||
# - current limitations: max_depth < 16, does not implement guided loss
|
||||
# You can use tree_method = 'gpu_exact' for another GPU-accelerated algorithm,
|
||||
# which is slower, more memory-hungry, but does not use binning.
|
||||
param <- list(objective = 'reg:logistic', eval_metric = 'auc', subsample = 0.5, nthread = 4,
|
||||
max_bin = 64, tree_method = 'gpu_hist')
|
||||
pt <- proc.time()
|
||||
bst_gpu <- xgb.train(param, dtrain, watchlist = wl, nrounds = 50)
|
||||
proc.time() - pt
|
||||
|
||||
# Compare to the 'hist' algorithm:
|
||||
param$tree_method <- 'hist'
|
||||
pt <- proc.time()
|
||||
bst_hist <- xgb.train(param, dtrain, watchlist = wl, nrounds = 50)
|
||||
proc.time() - pt
|
||||
@@ -1,113 +0,0 @@
library(xgboost)
library(data.table)

set.seed(1024)

# Function to obtain a list of interactions fitted in trees, requires input of maximum depth
treeInteractions <- function(input_tree, input_max_depth) {
  ID_merge <- i.id <- i.feature <- NULL  # Suppress warning "no visible binding for global variable"

  trees <- data.table::copy(input_tree)  # copy tree input to prevent overwriting
  if (input_max_depth < 2) return(list())  # no interactions if max depth < 2
  if (nrow(input_tree) == 1) return(list())

  # Attach parent nodes
  for (i in 2:input_max_depth) {
    if (i == 2) trees[, ID_merge := ID] else trees[, ID_merge := get(paste0('parent_', i - 2))]
    parents_left <- trees[!is.na(Split), list(i.id = ID, i.feature = Feature, ID_merge = Yes)]
    parents_right <- trees[!is.na(Split), list(i.id = ID, i.feature = Feature, ID_merge = No)]

    data.table::setorderv(trees, 'ID_merge')
    data.table::setorderv(parents_left, 'ID_merge')
    data.table::setorderv(parents_right, 'ID_merge')

    trees <- merge(trees, parents_left, by = 'ID_merge', all.x = TRUE)
    trees[!is.na(i.id), c(paste0('parent_', i - 1), paste0('parent_feat_', i - 1))
          := list(i.id, i.feature)]
    trees[, c('i.id', 'i.feature') := NULL]

    trees <- merge(trees, parents_right, by = 'ID_merge', all.x = TRUE)
    trees[!is.na(i.id), c(paste0('parent_', i - 1), paste0('parent_feat_', i - 1))
          := list(i.id, i.feature)]
    trees[, c('i.id', 'i.feature') := NULL]
  }

  # Extract nodes with interactions
  interaction_trees <- trees[!is.na(Split) & !is.na(parent_1),
                             c('Feature', paste0('parent_feat_', 1:(input_max_depth - 1))),
                             with = FALSE]
  interaction_trees_split <- split(interaction_trees, seq_len(nrow(interaction_trees)))
  interaction_list <- lapply(interaction_trees_split, as.character)

  # Remove NAs (no parent interaction)
  interaction_list <- lapply(interaction_list, function(x) x[!is.na(x)])

  # Remove non-interactions (same variable)
  interaction_list <- lapply(interaction_list, unique)  # remove same variables
  interaction_length <- sapply(interaction_list, length)
  interaction_list <- interaction_list[interaction_length > 1]
  interaction_list <- unique(lapply(interaction_list, sort))
  return(interaction_list)
}

# Generate sample data
x <- list()
for (i in 1:10) {
  x[[i]] <- i * rnorm(1000, 10)
}
x <- as.data.table(x)

y <- -1 * x[, rowSums(.SD)] + x[['V1']] * x[['V2']] + x[['V3']] * x[['V4']] * x[['V5']] +
  rnorm(1000, 0.001) + 3 * sin(x[['V7']])

train <- as.matrix(x)

# Interaction constraint list (column names form)
interaction_list <- list(c('V1', 'V2'), c('V3', 'V4', 'V5'))

# Convert interaction constraint list into feature index form
cols2ids <- function(object, col_names) {
  LUT <- seq_along(col_names) - 1
  names(LUT) <- col_names
  rapply(object, function(x) LUT[x], classes = "character", how = "replace")
}
interaction_list_fid <- cols2ids(interaction_list, colnames(train))

# Fit model with interaction constraints
bst <- xgboost(data = train, label = y, max_depth = 4,
               eta = 0.1, nthread = 2, nrounds = 1000,
               interaction_constraints = interaction_list_fid)

bst_tree <- xgb.model.dt.tree(colnames(train), bst)
bst_interactions <- treeInteractions(bst_tree, 4)
# interactions are constrained to combinations of V1*V2 and V3*V4*V5

# Fit model without interaction constraints
bst2 <- xgboost(data = train, label = y, max_depth = 4,
                eta = 0.1, nthread = 2, nrounds = 1000)

bst2_tree <- xgb.model.dt.tree(colnames(train), bst2)
bst2_interactions <- treeInteractions(bst2_tree, 4)  # many more interactions

# Fit model with both interaction and monotonicity constraints
bst3 <- xgboost(data = train, label = y, max_depth = 4,
                eta = 0.1, nthread = 2, nrounds = 1000,
                interaction_constraints = interaction_list_fid,
                monotone_constraints = c(-1, 0, 0, 0, 0, 0, 0, 0, 0, 0))

bst3_tree <- xgb.model.dt.tree(colnames(train), bst3)
bst3_interactions <- treeInteractions(bst3_tree, 4)
# interactions are still constrained to combinations of V1*V2 and V3*V4*V5

# Show monotonic constraints still apply by checking scores after incrementing V1
x1 <- sort(unique(x[['V1']]))
for (i in seq_along(x1)) {
  testdata <- copy(x[, -('V1')])
  testdata[['V1']] <- x1[i]
  testdata <- testdata[, paste0('V', 1:10), with = FALSE]
  pred <- predict(bst3, as.matrix(testdata))

  # Should not print anything, due to the monotonic (decreasing) constraint on V1
  if (i > 1) if (any(pred > prev_pred)) print(i)
  prev_pred <- pred
}
@@ -1,6 +0,0 @@
data(mtcars)
head(mtcars)
bst <- xgboost(data = as.matrix(mtcars[, -11]), label = mtcars[, 11],
               objective = 'count:poisson', nrounds = 5)
pred <- predict(bst, as.matrix(mtcars[, -11]))
sqrt(mean((pred - mtcars[, 11]) ^ 2))
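# A hedged extension sketch (not part of the original demo): for count data
# with varying exposure, a log-exposure offset can be supplied as the base
# margin of the DMatrix; the 'exposure' vector here is purely illustrative.
exposure <- runif(nrow(mtcars), 0.5, 2)
dm <- xgb.DMatrix(as.matrix(mtcars[, -11]), label = mtcars[, 11])
setinfo(dm, 'base_margin', log(exposure))
bst_offset <- xgb.train(list(objective = 'count:poisson'), dm, nrounds = 5)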
@@ -1,23 +0,0 @@
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)

param <- list(max_depth = 2, eta = 1, objective = 'binary:logistic')
watchlist <- list(eval = dtest, train = dtrain)
nrounds <- 2

# train the model for two rounds
bst <- xgb.train(param, dtrain, nrounds, nthread = 2, watchlist)
cat('start testing prediction from first n trees\n')
labels <- getinfo(dtest, 'label')

### predict using the first tree only
ypred1 <- predict(bst, dtest, ntreelimit = 1)
# by default, we predict using all the trees
ypred2 <- predict(bst, dtest)

cat('error of ypred1=', mean(as.numeric(ypred1 > 0.5) != labels), '\n')
cat('error of ypred2=', mean(as.numeric(ypred2 > 0.5) != labels), '\n')
@@ -1,55 +0,0 @@
require(xgboost)
require(data.table)
require(Matrix)

set.seed(1982)

# load in the agaricus dataset
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label)

param <- list(max_depth = 2, eta = 1, objective = 'binary:logistic')
nrounds <- 4

# train the model for four rounds
bst <- xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)

# Model accuracy without new features
accuracy.before <- (sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label)
                    / length(agaricus.test$label))

# per-tree leaf index predictions (using all the trees)
pred_with_leaf <- predict(bst, dtest, predleaf = TRUE)
head(pred_with_leaf)

create.new.tree.features <- function(model, original.features) {
  pred_with_leaf <- predict(model, original.features, predleaf = TRUE)
  cols <- list()
  for (i in 1:model$niter) {
    # the max is not the real max, but that does not matter for the purpose of adding features
    leaf.id <- sort(unique(pred_with_leaf[, i]))
    cols[[i]] <- factor(x = pred_with_leaf[, i], levels = leaf.id)
  }
  cbind(original.features, sparse.model.matrix(~ . - 1, as.data.frame(cols)))
}

# Convert the per-tree leaf indices to one-hot encoded features
new.features.train <- create.new.tree.features(bst, agaricus.train$data)
new.features.test <- create.new.tree.features(bst, agaricus.test$data)
colnames(new.features.test) <- colnames(new.features.train)

# learning with new features
new.dtrain <- xgb.DMatrix(data = new.features.train, label = agaricus.train$label)
new.dtest <- xgb.DMatrix(data = new.features.test, label = agaricus.test$label)
watchlist <- list(train = new.dtrain)
bst <- xgb.train(params = param, data = new.dtrain, nrounds = nrounds, nthread = 2)

# Model accuracy with new features
accuracy.after <- (sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label)
                   / length(agaricus.test$label))

# Here the accuracy was already good and is now perfect.
cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now",
          accuracy.after, "!\n"))
@@ -1,14 +0,0 @@
# running all scripts in the demo folder
demo(basic_walkthrough, package = 'xgboost')
demo(custom_objective, package = 'xgboost')
demo(boost_from_prediction, package = 'xgboost')
demo(predict_first_ntree, package = 'xgboost')
demo(generalized_linear_model, package = 'xgboost')
demo(cross_validation, package = 'xgboost')
demo(create_sparse_matrix, package = 'xgboost')
demo(predict_leaf_indices, package = 'xgboost')
demo(early_stopping, package = 'xgboost')
demo(poisson_regression, package = 'xgboost')
demo(caret_wrapper, package = 'xgboost')
demo(tweedie_regression, package = 'xgboost')
#demo(gpu_accelerated, package = 'xgboost') # can only run when built with GPU support
@@ -1,49 +0,0 @@
library(xgboost)
library(data.table)
library(cplm)
library(Matrix)  # for sparse.model.matrix()

data(AutoClaim)

# auto insurance dataset analyzed by Yip and Yau (2005)
dt <- data.table(AutoClaim)

# exclude these columns from the model matrix
exclude <- c('POLICYNO', 'PLCYDATE', 'CLM_FREQ5', 'CLM_AMT5', 'CLM_FLAG', 'IN_YY')

# retain the missing values
# NOTE: this dataset comes ready out of the box
options(na.action = 'na.pass')
x <- sparse.model.matrix(~ . - 1, data = dt[, -exclude, with = FALSE])
options(na.action = 'na.omit')

# response
y <- dt[, CLM_AMT5]

d_train <- xgb.DMatrix(data = x, label = y, missing = NA)

# the tweedie_variance_power parameter determines the shape of the
# distribution
# - closer to 1 is more Poisson-like, and the mass
#   is more concentrated near zero
# - closer to 2 is more gamma-like, and the mass spreads to the
#   right with less concentration near zero

params <- list(
  objective = 'reg:tweedie',
  eval_metric = 'rmse',
  tweedie_variance_power = 1.4,
  max_depth = 6,
  eta = 1)

bst <- xgb.train(
  data = d_train,
  params = params,
  maximize = FALSE,
  watchlist = list(train = d_train),
  nrounds = 20)

var_imp <- xgb.importance(attr(x, 'Dimnames')[[2]], model = bst)

preds <- predict(bst, d_train)

rmse <- sqrt(mean((y - preds) ^ 2))
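# A hedged follow-up sketch (not part of the original demo): refitting with a
# variance power closer to 2 shows the gamma-like end of the family on the
# same d_train, for side-by-side comparison with the 1.4 fit above.
params_gamma_like <- modifyList(params, list(tweedie_variance_power = 1.9))
bst_gamma_like <- xgb.train(data = d_train, params = params_gamma_like,
                            watchlist = list(train = d_train), nrounds = 20)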
@@ -55,7 +55,7 @@ message(sprintf("Creating '%s' from '%s'", OUT_DEF_FILE, IN_DLL_FILE))
}

# use objdump to dump all the symbols
OBJDUMP_FILE <- "objdump-out.txt"
OBJDUMP_FILE <- file.path(tempdir(), "objdump-out.txt")
.pipe_shell_command_to_stdout(
    command = "objdump"
    , args = c("-p", IN_DLL_FILE)
@@ -79,9 +79,9 @@ end_of_table <- empty_lines[empty_lines > start_index][1L]

# Read the contents of the table
exported_symbols <- objdump_results[(start_index + 1L):end_of_table]
exported_symbols <- gsub("\t", "", exported_symbols)
exported_symbols <- gsub("\t", "", exported_symbols, fixed = TRUE)
exported_symbols <- gsub(".*\\] ", "", exported_symbols)
exported_symbols <- gsub(" ", "", exported_symbols)
exported_symbols <- gsub(" ", "", exported_symbols, fixed = TRUE)

# Write R.def file
writeLines(
@@ -2,48 +2,101 @@
% Please edit documentation in R/utils.R
\name{a-compatibility-note-for-saveRDS-save}
\alias{a-compatibility-note-for-saveRDS-save}
\title{Do not use \code{\link[base]{saveRDS}} or \code{\link[base]{save}} for long-term archival of
models. Instead, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}}.}
\title{Model Serialization and Compatibility}
\description{
It is a common practice to use the built-in \code{\link[base]{saveRDS}} function (or
\code{\link[base]{save}}) to persist R objects to the disk. While it is possible to persist
\code{xgb.Booster} objects using \code{\link[base]{saveRDS}}, it is not advisable to do so if
the model is to be accessed in the future. If you train a model with the current version of
XGBoost and persist it with \code{\link[base]{saveRDS}}, the model is not guaranteed to be
accessible in later releases of XGBoost. To ensure that your model can be accessed in future
releases of XGBoost, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}} instead.
When it comes to serializing XGBoost models, it's possible to use R serializers such as
\code{\link[=save]{save()}} or \code{\link[=saveRDS]{saveRDS()}} to serialize an XGBoost R model, but XGBoost also provides
its own serializers with better compatibility guarantees, which allow loading
said models in other language bindings of XGBoost.

Note that an \code{xgb.Booster} object (\strong{as produced by \code{\link[=xgb.train]{xgb.train()}}}, see rest of the doc
for objects produced by \code{\link[=xgboost]{xgboost()}}), outside of its core components, might also keep:
\itemize{
\item Additional model configuration (accessible through \code{\link[=xgb.config]{xgb.config()}}), which includes
model fitting parameters like \code{max_depth} and runtime parameters like \code{nthread}.
These are not necessarily useful for prediction/importance/plotting.
\item Additional R specific attributes - e.g. results of callbacks, such as evaluation logs,
which are kept as a \code{data.table} object, accessible through
\code{attributes(model)$evaluation_log} if present.
}

The first one (configurations) does not have the same compatibility guarantees as
the model itself, including attributes that are set and accessed through
\code{\link[=xgb.attributes]{xgb.attributes()}} - that is, such configuration might be lost after loading the
booster in a different XGBoost version, regardless of the serializer that was used.
These are saved when using \code{\link[=saveRDS]{saveRDS()}}, but will be discarded if loaded into an
incompatible XGBoost version. They are not saved when using XGBoost's
serializers from its public interface, including \code{\link[=xgb.save]{xgb.save()}} and \code{\link[=xgb.save.raw]{xgb.save.raw()}}.

The second ones (R attributes) are not part of the standard XGBoost model structure,
and thus are not saved when using XGBoost's own serializers. These attributes are
only used for informational purposes, such as keeping track of evaluation metrics as
the model was fit, or saving the R call that produced the model, but are otherwise
not used for prediction / importance / plotting / etc.
These R attributes are only preserved when using R's serializers.

In addition to the regular \code{xgb.Booster} objects produced by \code{\link[=xgb.train]{xgb.train()}}, the
function \code{\link[=xgboost]{xgboost()}} produces a different subclass \code{xgboost}, which keeps other
additional metadata as R attributes such as class names in classification problems,
and which has a dedicated \code{predict} method that uses different defaults. XGBoost's
own serializers can work with this \code{xgboost} class, but as they do not keep R
attributes, the resulting object, when deserialized, is downcast to the regular
\code{xgb.Booster} class (i.e. it loses the metadata, and the resulting object will use
\code{predict.xgb.Booster} instead of \code{predict.xgboost}) - for these \code{xgboost} objects,
\code{saveRDS} might thus be a better option if the extra functionalities are needed.

Note that XGBoost models in R from version \verb{2.1.0} onwards and XGBoost models
from before version \verb{2.1.0} have very different R object structures and
are incompatible with each other. Hence, models that were saved with R serializers
like \code{\link[=saveRDS]{saveRDS()}} or \code{\link[=save]{save()}} before version \verb{2.1.0} will not work with later
\code{xgboost} versions and vice versa. Be aware that the structure of R model objects
could in theory change again in the future, so XGBoost's serializers
should be preferred for long-term storage.

Furthermore, note that using the package \code{qs} for serialization will require
version 0.26 or higher of said package, and will have the same compatibility
restrictions as R serializers.
}
\details{
Use \code{\link{xgb.save}} to save the XGBoost model as a stand-alone file. You may opt into
Use \code{\link[=xgb.save]{xgb.save()}} to save the XGBoost model as a stand-alone file. You may opt into
the JSON format by specifying the JSON extension. To read the model back, use
\code{\link{xgb.load}}.
\code{\link[=xgb.load]{xgb.load()}}.

Use \code{\link{xgb.save.raw}} to save the XGBoost model as a sequence (vector) of raw bytes
Use \code{\link[=xgb.save.raw]{xgb.save.raw()}} to save the XGBoost model as a sequence (vector) of raw bytes
in a future-proof manner. Future releases of XGBoost will be able to read the raw bytes and
re-construct the corresponding model. To read the model back, use \code{\link{xgb.load.raw}}.
The \code{\link{xgb.save.raw}} function is useful if you'd like to persist the XGBoost model
re-construct the corresponding model. To read the model back, use \code{\link[=xgb.load.raw]{xgb.load.raw()}}.
The \code{\link[=xgb.save.raw]{xgb.save.raw()}} function is useful if you would like to persist the XGBoost model
as part of another R object.

Note: Do not use \code{\link{xgb.serialize}} to store models long-term. It persists not only the
model but also internal configurations and parameters, and its format is not stable across
multiple XGBoost versions. Use \code{\link{xgb.serialize}} only for checkpointing.
Use \code{\link[=saveRDS]{saveRDS()}} if you require the R-specific attributes that a booster might have, such
as evaluation logs or the model class \code{xgboost} instead of \code{xgb.Booster}, but note that
future compatibility of such objects is outside XGBoost's control, as it relies on R's
serialization format (see e.g. the details section in \link{serialize} and \code{\link[=save]{save()}} from base R).

For more details and explanation about model persistence and archival, consult the page
\url{https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html}.
}
\examples{
data(agaricus.train, package='xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
               eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
data(agaricus.train, package = "xgboost")

bst <- xgb.train(
  data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label),
  max_depth = 2,
  eta = 1,
  nthread = 2,
  nrounds = 2,
  objective = "binary:logistic"
)

# Save as a stand-alone file; load it with xgb.load()
xgb.save(bst, 'xgb.model')
bst2 <- xgb.load('xgb.model')
fname <- file.path(tempdir(), "xgb_model.ubj")
xgb.save(bst, fname)
bst2 <- xgb.load(fname)

# Save as a stand-alone file (JSON); load it with xgb.load()
xgb.save(bst, 'xgb.model.json')
bst2 <- xgb.load('xgb.model.json')
if (file.exists('xgb.model.json')) file.remove('xgb.model.json')
fname <- file.path(tempdir(), "xgb_model.json")
xgb.save(bst, fname)
bst2 <- xgb.load(fname)

# Save as a raw byte vector; load it with xgb.load.raw()
xgb_bytes <- xgb.save.raw(bst)
@@ -54,11 +107,11 @@ obj <- list(xgb_model_bytes = xgb.save.raw(bst), description = "My first XGBoost
# Persist the R object. Here, saveRDS() is okay, since it doesn't persist
# xgb.Booster directly. What's being persisted is the future-proof byte representation
# as given by xgb.save.raw().
saveRDS(obj, 'my_object.rds')
fname <- file.path(tempdir(), "my_object.Rds")
saveRDS(obj, fname)
# Read back the R object
obj2 <- readRDS('my_object.rds')
obj2 <- readRDS(fname)
# Re-construct xgb.Booster object from the bytes
bst2 <- xgb.load.raw(obj2$xgb_model_bytes)
if (file.exists('my_object.rds')) file.remove('my_object.rds')

}
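# A hedged sketch of the 'qs' route mentioned above (assumes qs >= 0.26 is
# installed; it shares the compatibility caveats of R's own serializers, so
# XGBoost's serializers remain preferable for long-term storage):
fname_qs <- file.path(tempdir(), "xgb_model.qs")
qs::qsave(bst, fname_qs)
bst_back <- qs::qread(fname_qs)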
@@ -16,18 +16,17 @@ This data set is originally from the Mushroom data set,
UCI Machine Learning Repository.
}
\details{
This data set includes the following fields:

It includes the following fields:
\itemize{
\item \code{label} the label for each record
\item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns.
\item \code{label}: The label for each record.
\item \code{data}: A sparse Matrix of 'dgCMatrix' class with 126 columns.
}
}
\references{
https://archive.ics.uci.edu/ml/datasets/Mushroom
\url{https://archive.ics.uci.edu/ml/datasets/Mushroom}

Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
\url{http://archive.ics.uci.edu/ml}. Irvine, CA: University of California,
School of Information and Computer Science.
}
\keyword{datasets}

@@ -16,18 +16,17 @@ This data set is originally from the Mushroom data set,
UCI Machine Learning Repository.
}
\details{
This data set includes the following fields:

It includes the following fields:
\itemize{
\item \code{label} the label for each record
\item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns.
\item \code{label}: The label for each record.
\item \code{data}: A sparse Matrix of 'dgCMatrix' class with 126 columns.
}
}
\references{
https://archive.ics.uci.edu/ml/datasets/Mushroom
\url{https://archive.ics.uci.edu/ml/datasets/Mushroom}

Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
\url{http://archive.ics.uci.edu/ml}. Irvine, CA: University of California,
School of Information and Computer Science.
}
\keyword{datasets}

@@ -1,37 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callbacks.R
\name{callbacks}
\alias{callbacks}
\title{Callback closures for booster training.}
\description{
These are used to perform various service tasks either during boosting iterations or at the end.
This approach helps to modularize many of such tasks without bloating the main training methods.
}
\details{
By default, a callback function is run after each boosting iteration.
An R attribute \code{is_pre_iteration} can be set on a callback to define a pre-iteration function instead.

When a callback function has a \code{finalize} parameter, its finalizer part will also be run after
the boosting is completed.

WARNING: side effects! Be aware that these callback functions access and modify things in
the environment from which they are called, which is a fairly uncommon thing to do in R.

To write a custom callback closure, make sure you first understand the main concepts of R environments.
Check either the R documentation on \code{\link[base]{environment}} or the
\href{http://adv-r.had.co.nz/Environments.html}{Environments chapter} from the "Advanced R"
book by Hadley Wickham. Further, the best option is to read the code of some of the existing callbacks -
choose ones that do something similar to what you want to achieve. Also, you would need to get familiar
with the objects available inside of the \code{xgb.train} and \code{xgb.cv} internal environments.
}
\seealso{
\code{\link{cb.print.evaluation}},
\code{\link{cb.evaluation.log}},
\code{\link{cb.reset.parameters}},
\code{\link{cb.early.stop}},
\code{\link{cb.save.model}},
\code{\link{cb.cv.predict}},
\code{\link{xgb.train}},
\code{\link{xgb.cv}}
}
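# A hedged sketch of a custom callback following the conventions described
# above (all names here are illustrative, not an official API example): it
# reads 'iteration' from its calling frame and prints a heartbeat every
# 'period' iterations.
cb.heartbeat <- function(period = 10) {
  callback <- function(env = parent.frame()) {
    # 'iteration' is one of the values xgb.train sets in its internal environment
    if (env$iteration %% period == 0)
      cat("still boosting, iteration", env$iteration, "\n")
  }
  attr(callback, 'call') <- match.call()
  attr(callback, 'name') <- 'cb.heartbeat'
  callback
}
# e.g. xgb.train(param, dtrain, nrounds, callbacks = list(cb.heartbeat(5)))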
@@ -1,43 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callbacks.R
\name{cb.cv.predict}
\alias{cb.cv.predict}
\title{Callback closure for returning cross-validation based predictions.}
\usage{
cb.cv.predict(save_models = FALSE)
}
\arguments{
\item{save_models}{a flag for whether to save the folds' models.}
}
\value{
Predictions are returned inside of the \code{pred} element, which is either a vector or a matrix,
depending on the number of prediction outputs per data row. The order of predictions corresponds
to the order of rows in the original dataset. Note that when a custom \code{folds} list is
provided in \code{xgb.cv}, the predictions are only returned properly when this list is a
non-overlapping list of k sets of indices, as in a standard k-fold CV. The predictions are not
meaningful when user-provided folds have overlapping indices, as in, e.g., random sampling splits.
When some of the indices in the training dataset are not included in the user-provided \code{folds},
their prediction value will be \code{NA}.
}
\description{
Callback closure for returning cross-validation based predictions.
}
\details{
This callback function saves predictions for all of the test folds,
and also allows saving the folds' models.

It is a "finalizer" callback and it uses early stopping information whenever it is available;
thus it must be run after the early stopping callback if early stopping is used.

Callback function expects the following values to be set in its calling frame:
\code{bst_folds},
\code{basket},
\code{data},
\code{end_iteration},
\code{params},
\code{num_parallel_tree},
\code{num_class}.
}
\seealso{
\code{\link{callbacks}}
}
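# A hedged usage sketch (dtrain as in the demos above, not defined here):
# collect out-of-fold predictions and keep the per-fold models.
cv <- xgb.cv(params = list(objective = "binary:logistic", max_depth = 2, eta = 1),
             data = dtrain, nrounds = 10, nfold = 5,
             callbacks = list(cb.cv.predict(save_models = TRUE)))
str(cv$pred)  # one out-of-fold prediction per row of dtrain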
@@ -1,63 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callbacks.R
\name{cb.early.stop}
\alias{cb.early.stop}
\title{Callback closure to activate early stopping.}
\usage{
cb.early.stop(
  stopping_rounds,
  maximize = FALSE,
  metric_name = NULL,
  verbose = TRUE
)
}
\arguments{
\item{stopping_rounds}{The number of rounds with no improvement in
the evaluation metric in order to stop the training.}

\item{maximize}{whether to maximize the evaluation metric.}

\item{metric_name}{the name of an evaluation column to use as a criterion for early
stopping. If not set, the last column is used.
Let's say the test data in \code{watchlist} was labelled as \code{dtest},
and one wants to use the AUC in test data for early stopping regardless of where
it is in the \code{watchlist}; then one of the following would need to be set:
\code{metric_name='dtest-auc'} or \code{metric_name='dtest_auc'}.
All dash '-' characters in metric names are considered equivalent to '_'.}

\item{verbose}{whether to print the early stopping information.}
}
\description{
Callback closure to activate early stopping.
}
\details{
This callback function determines the condition for early stopping
by setting the \code{stop_condition = TRUE} flag in its calling frame.

The following additional fields are assigned to the model's R object:
\itemize{
\item \code{best_score} the evaluation score at the best iteration
\item \code{best_iteration} at which boosting iteration the best score has occurred (1-based index)
}
The same values are also stored as xgb-attributes:
\itemize{
\item \code{best_iteration} is stored as a 0-based iteration index (for interoperability of binary models)
\item \code{best_msg} message string is also stored.
}

At least one data element is required in the evaluation watchlist for early stopping to work.

Callback function expects the following values to be set in its calling frame:
\code{stop_condition},
\code{bst_evaluation},
\code{rank},
\code{bst} (or \code{bst_folds} and \code{basket}),
\code{iteration},
\code{begin_iteration},
\code{end_iteration},
\code{num_parallel_tree}.
}
\seealso{
\code{\link{callbacks}},
\code{\link{xgb.attr}}
}
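# A hedged usage sketch (hypothetical dtrain/dtest, not defined here): stop
# when the test AUC has not improved for 10 rounds, regardless of where the
# test data sits in the watchlist.
bst <- xgb.train(list(objective = "binary:logistic", eval_metric = "auc"),
                 dtrain, nrounds = 500,
                 watchlist = list(train = dtrain, dtest = dtest),
                 callbacks = list(cb.early.stop(stopping_rounds = 10,
                                                maximize = TRUE,
                                                metric_name = "dtest-auc")))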
@@ -1,31 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callbacks.R
\name{cb.evaluation.log}
\alias{cb.evaluation.log}
\title{Callback closure for logging the evaluation history}
\usage{
cb.evaluation.log()
}
\description{
Callback closure for logging the evaluation history
}
\details{
This callback function appends the current iteration evaluation results \code{bst_evaluation},
available in the calling parent frame, to the \code{evaluation_log} list in the calling frame.

The finalizer callback (called with \code{finalize = TRUE} at the end) converts
the \code{evaluation_log} list into a final data.table.

The iteration evaluation result \code{bst_evaluation} must be a named numeric vector.

Note: in the column names of the final data.table, the dash '-' character is replaced with
the underscore '_' in order to make the column names more like regular R identifiers.

Callback function expects the following values to be set in its calling frame:
\code{evaluation_log},
\code{bst_evaluation},
\code{iteration}.
}
\seealso{
\code{\link{callbacks}}
}
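# A hedged sketch: the final data.table assembled by this callback ends up as
# the model's 'evaluation_log' element ('bst' being any model returned by
# xgb.train with a non-empty watchlist).
head(bst$evaluation_log)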
@@ -1,94 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callbacks.R
\name{cb.gblinear.history}
\alias{cb.gblinear.history}
\title{Callback closure for collecting the model coefficients history of a gblinear booster
during its training.}
\usage{
cb.gblinear.history(sparse = FALSE)
}
\arguments{
\item{sparse}{when set to FALSE/TRUE, a dense/sparse matrix is used to store the result.
The sparse format is useful when one expects only a subset of coefficients to be non-zero,
e.g. when using the "thrifty" feature selector with a fairly small number of top features
selected per iteration.}
}
\value{
Results are stored in the \code{coefs} element of the closure.
The \code{\link{xgb.gblinear.history}} convenience function provides an easy way to access it.
With \code{xgb.train}, it is either a dense or a sparse matrix.
With \code{xgb.cv}, it is a list (one element per fold) of such matrices.
}
\description{
Callback closure for collecting the model coefficients history of a gblinear booster
during its training.
}
\details{
To keep things fast and simple, the gblinear booster does not internally store the history of linear
model coefficients at each boosting iteration. This callback provides a workaround for storing
the coefficients' path, by extracting them after each training iteration.

Callback function expects the following values to be set in its calling frame:
\code{bst} (or \code{bst_folds}).
}
\examples{
#### Binary classification:
#
# In the iris dataset, it is hard to linearly separate the Versicolor class from the rest
# without considering the 2nd order interactions:
x <- model.matrix(Species ~ .^2, iris)[, -1]
colnames(x)
dtrain <- xgb.DMatrix(scale(x), label = 1 * (iris$Species == "versicolor"))
param <- list(booster = "gblinear", objective = "reg:logistic", eval_metric = "auc",
              lambda = 0.0003, alpha = 0.0003, nthread = 2)
# For 'shotgun', which is the default linear updater, using high eta values may result in
# unstable behaviour in some datasets. With this simple dataset, however, the high learning
# rate does not break the convergence, but allows us to illustrate the typical pattern of
# "stochastic explosion" behaviour of this lock-free algorithm at early boosting iterations.
bst <- xgb.train(param, dtrain, list(tr = dtrain), nrounds = 200, eta = 1.,
                 callbacks = list(cb.gblinear.history()))
# Extract the coefficients' path and plot them vs boosting iteration number:
coef_path <- xgb.gblinear.history(bst)
matplot(coef_path, type = 'l')

# With the deterministic coordinate descent updater, it is safer to use higher learning rates.
# Will try the classical componentwise boosting, which selects a single best feature per round:
bst <- xgb.train(param, dtrain, list(tr = dtrain), nrounds = 200, eta = 0.8,
                 updater = 'coord_descent', feature_selector = 'thrifty', top_k = 1,
                 callbacks = list(cb.gblinear.history()))
matplot(xgb.gblinear.history(bst), type = 'l')
# Componentwise boosting is known to have a similar effect to Lasso regularization.
# Try experimenting with various values of top_k, eta, nrounds,
# as well as different feature_selectors.

# For xgb.cv:
bst <- xgb.cv(param, dtrain, nfold = 5, nrounds = 100, eta = 0.8,
              callbacks = list(cb.gblinear.history()))
# coefficients in the CV fold #3
matplot(xgb.gblinear.history(bst)[[3]], type = 'l')


#### Multiclass classification:
#
dtrain <- xgb.DMatrix(scale(x), label = as.numeric(iris$Species) - 1)
param <- list(booster = "gblinear", objective = "multi:softprob", num_class = 3,
              lambda = 0.0003, alpha = 0.0003, nthread = 2)
# For the default linear updater 'shotgun', it is sometimes helpful
# to use a smaller eta to reduce instability
bst <- xgb.train(param, dtrain, list(tr = dtrain), nrounds = 70, eta = 0.5,
                 callbacks = list(cb.gblinear.history()))
# Will plot the coefficient paths separately for each class:
matplot(xgb.gblinear.history(bst, class_index = 0), type = 'l')
matplot(xgb.gblinear.history(bst, class_index = 1), type = 'l')
matplot(xgb.gblinear.history(bst, class_index = 2), type = 'l')

# CV:
bst <- xgb.cv(param, dtrain, nfold = 5, nrounds = 70, eta = 0.5,
              callbacks = list(cb.gblinear.history(FALSE)))
# 1st fold of 1st class
matplot(xgb.gblinear.history(bst, class_index = 0)[[1]], type = 'l')

}
\seealso{
\code{\link{callbacks}}, \code{\link{xgb.gblinear.history}}.
}
@@ -1,29 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callbacks.R
\name{cb.print.evaluation}
\alias{cb.print.evaluation}
\title{Callback closure for printing the result of evaluation}
\usage{
cb.print.evaluation(period = 1, showsd = TRUE)
}
\arguments{
\item{period}{results are printed every \code{period} iterations.}

\item{showsd}{whether standard deviations should be printed (when available).}
}
\description{
Callback closure for printing the result of evaluation
}
\details{
The callback function prints the result of evaluation at every \code{period} iterations.
The initial and the last iteration's evaluations are always printed.

Callback function expects the following values to be set in its calling frame:
\code{bst_evaluation} (also \code{bst_evaluation_err} when available),
\code{iteration},
\code{begin_iteration},
\code{end_iteration}.
}
\seealso{
\code{\link{callbacks}}
}
@@ -1,33 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callbacks.R
\name{cb.save.model}
\alias{cb.save.model}
\title{Callback closure for saving a model file.}
\usage{
cb.save.model(save_period = 0, save_name = "xgboost.model")
}
\arguments{
\item{save_period}{save the model to disk after every
\code{save_period} iterations; 0 means save the model at the end.}

\item{save_name}{the name or path for the saved model file.
It can contain a \code{\link[base]{sprintf}} formatting specifier
to include the integer iteration number in the file name.
E.g., with \code{save_name} = 'xgboost_%04d.model',
the file saved at iteration 50 would be named "xgboost_0050.model".}
}
\description{
Callback closure for saving a model file.
}
\details{
This callback function allows saving an xgb-model file, either periodically
after every \code{save_period} iterations or at the end.

Callback function expects the following values to be set in its calling frame:
\code{bst},
\code{iteration},
\code{begin_iteration},
\code{end_iteration}.
}
\seealso{
\code{\link{callbacks}}
}
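# A hedged usage sketch (param/dtrain as in earlier demos, not defined here):
# checkpoint every 10 iterations, embedding the iteration number in the name.
bst <- xgb.train(param, dtrain, nrounds = 50,
                 watchlist = list(train = dtrain),
                 callbacks = list(cb.save.model(save_period = 10,
                                                save_name = "xgboost_%04d.model")))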
R-package/man/coef.xgb.Booster.Rd
@@ -0,0 +1,54 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.Booster.R
\name{coef.xgb.Booster}
\alias{coef.xgb.Booster}
\title{Extract coefficients from linear booster}
\usage{
\method{coef}{xgb.Booster}(object, ...)
}
\arguments{
\item{object}{A fitted booster of 'gblinear' type.}

\item{...}{Not used.}
}
\value{
The extracted coefficients:
\itemize{
\item If there is only one coefficient per column in the data, they will be returned as a
vector, potentially containing the feature names if available, with the intercept
as the first element.
\item If there is more than one coefficient per column in the data (e.g. when using
\code{objective="multi:softmax"}), they will be returned as a matrix with dimensions equal
to \verb{[num_features, num_cols]}, with the intercepts as the first row. Note that the column
(classes in multi-class classification) dimension will not be named.
}

The intercept returned here will include the 'base_score' parameter (unlike the 'bias'
or the last coefficient in the model dump, which doesn't have 'base_score' added to it),
hence one should get the same values from calling \code{predict(..., outputmargin = TRUE)} and
from performing a matrix multiplication with \code{model.matrix(~., ...)}.

Be aware that the coefficients are obtained by first converting them to strings and
back, so there will always be some very small loss of precision compared to the actual
coefficients as used by \link{predict.xgb.Booster}.
}
\description{
Extracts the coefficients from a 'gblinear' booster object,
as produced by \code{\link[=xgb.train]{xgb.train()}} when using parameter \code{booster="gblinear"}.

Note: this function will error out if passed a booster model
which is not of "gblinear" type.
}
\examples{
library(xgboost)

data(mtcars)

y <- mtcars[, 1]
x <- as.matrix(mtcars[, -1])

dm <- xgb.DMatrix(data = x, label = y, nthread = 1)
params <- list(booster = "gblinear", nthread = 1)
model <- xgb.train(data = dm, params = params, nrounds = 2)
coef(model)
}
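# A hedged check of the \value note above (reusing x, dm and model from the
# example): margin predictions should match the linear predictor built from
# the extracted coefficients, up to the string round-trip precision loss.
co <- coef(model)
margin <- predict(model, dm, outputmargin = TRUE)
manual <- as.numeric(cbind(1, x) %*% co)  # intercept column first, as in co
all.equal(unname(margin), manual, tolerance = 1e-4)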
@@ -13,13 +13,14 @@
Returns a vector of numbers of rows and of columns in an \code{xgb.DMatrix}.
}
\details{
Note: since \code{nrow} and \code{ncol} internally use \code{dim}, they can also
Note: since \code{\link[=nrow]{nrow()}} and \code{\link[=ncol]{ncol()}} internally use \code{\link[=dim]{dim()}}, they can also
be directly used with an \code{xgb.DMatrix} object.
}
\examples{
data(agaricus.train, package='xgboost')
data(agaricus.train, package = "xgboost")

train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
dtrain <- xgb.DMatrix(train$data, label = train$label, nthread = 2)

stopifnot(nrow(dtrain) == nrow(train$data))
stopifnot(ncol(dtrain) == ncol(train$data))

@@ -10,26 +10,27 @@
\method{dimnames}{xgb.DMatrix}(x) <- value
}
\arguments{
\item{x}{object of class \code{xgb.DMatrix}}
\item{x}{Object of class \code{xgb.DMatrix}.}

\item{value}{a list of two elements: the first one is ignored
\item{value}{A list of two elements: the first one is ignored
and the second one is column names}
}
\description{
Only column names are supported for \code{xgb.DMatrix}, thus setting of
row names would have no effect and returned row names would be NULL.
row names would have no effect and returned row names would be \code{NULL}.
}
\details{
Generic \code{dimnames} methods are used by \code{colnames}.
Since row names are irrelevant, it is recommended to use \code{colnames} directly.
Generic \code{\link[=dimnames]{dimnames()}} methods are used by \code{\link[=colnames]{colnames()}}.
Since row names are irrelevant, it is recommended to use \code{\link[=colnames]{colnames()}} directly.
}
\examples{
data(agaricus.train, package='xgboost')
data(agaricus.train, package = "xgboost")

train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
dtrain <- xgb.DMatrix(train$data, label = train$label, nthread = 2)
dimnames(dtrain)
colnames(dtrain)
colnames(dtrain) <- make.names(1:ncol(train$data))
print(dtrain, verbose=TRUE)
print(dtrain, verbose = TRUE)

}

@@ -1,44 +1,97 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.DMatrix.R
\name{getinfo}
% Please edit documentation in R/xgb.Booster.R, R/xgb.DMatrix.R
\name{getinfo.xgb.Booster}
\alias{getinfo.xgb.Booster}
\alias{setinfo.xgb.Booster}
\alias{getinfo}
\alias{getinfo.xgb.DMatrix}
\title{Get information of an xgb.DMatrix object}
\alias{setinfo}
\alias{setinfo.xgb.DMatrix}
\title{Get or set information of xgb.DMatrix and xgb.Booster objects}
\usage{
getinfo(object, ...)
\method{getinfo}{xgb.Booster}(object, name)

\method{getinfo}{xgb.DMatrix}(object, name, ...)
\method{setinfo}{xgb.Booster}(object, name, info)

getinfo(object, name)

\method{getinfo}{xgb.DMatrix}(object, name)

setinfo(object, name, info)

\method{setinfo}{xgb.DMatrix}(object, name, info)
}
\arguments{
\item{object}{Object of class \code{xgb.DMatrix}}
\item{object}{Object of class \code{xgb.DMatrix} or \code{xgb.Booster}.}

\item{...}{other parameters}
\item{name}{The name of the information field to get (see details).}

\item{name}{the name of the information field to get (see details)}
\item{info}{The specific field of information to set.}
}
\value{
For \code{getinfo()}, will return the requested field. For \code{setinfo()},
will always return value \code{TRUE} if it succeeds.
}
\description{
Get information of an xgb.DMatrix object
Get or set information of xgb.DMatrix and xgb.Booster objects
}
\details{
The \code{name} field can be one of the following:

The \code{name} field can be one of the following for \code{xgb.DMatrix}:
\itemize{
\item \code{label}: label XGBoost learn from ;
\item \code{weight}: to do a weight rescale ;
\item \code{base_margin}: base margin is the base prediction XGBoost will boost from ;
\item \code{nrow}: number of rows of the \code{xgb.DMatrix}.

\item label
\item weight
\item base_margin
\item label_lower_bound
\item label_upper_bound
\item group
\item feature_type
\item feature_name
\item nrow
}

\code{group} can be set up by \code{setinfo} but cannot be retrieved by \code{getinfo}.
See the documentation for \code{\link[=xgb.DMatrix]{xgb.DMatrix()}} for more information about these fields.

For \code{xgb.Booster}, can be one of the following:
\itemize{
\item \code{feature_type}
\item \code{feature_name}
}

Note that, while 'qid' cannot be retrieved, it is possible to get the equivalent 'group'
for a DMatrix that had 'qid' assigned.

\strong{Important}: when calling \code{\link[=setinfo]{setinfo()}}, the objects are modified in-place. See
\code{\link[=xgb.copy.Booster]{xgb.copy.Booster()}} for an idea of how this in-place assignment works.

See the documentation for \code{\link[=xgb.DMatrix]{xgb.DMatrix()}} for possible fields that can be set
(which correspond to arguments in that function).

Note that the following fields are allowed in the construction of an \code{xgb.DMatrix}
but \strong{are not} allowed here:
\itemize{
\item data
\item missing
\item silent
\item nthread
}
}
\examples{
data(agaricus.train, package='xgboost')
dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))
data(agaricus.train, package = "xgboost")

labels <- getinfo(dtrain, 'label')
setinfo(dtrain, 'label', 1-labels)
dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))

labels2 <- getinfo(dtrain, 'label')
stopifnot(all(labels2 == 1-labels))
labels <- getinfo(dtrain, "label")
setinfo(dtrain, "label", 1 - labels)

labels2 <- getinfo(dtrain, "label")
stopifnot(all(labels2 == 1 - labels))
data(agaricus.train, package = "xgboost")

dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))

labels <- getinfo(dtrain, "label")
setinfo(dtrain, "label", 1 - labels)

labels2 <- getinfo(dtrain, "label")
stopifnot(all.equal(labels2, 1 - labels))
}

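# A hedged sketch of the booster-side fields listed above ('bst' being any
# booster trained on the agaricus data; not defined in this file):
setinfo(bst, "feature_name", colnames(agaricus.train$data))
getinfo(bst, "feature_name")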
@@ -1,18 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.ggplot.R
\name{normalize}
\alias{normalize}
\title{Scale feature values to have mean 0, standard deviation 1}
\usage{
normalize(x)
}
\arguments{
\item{x}{Numeric vector}
}
\value{
Numeric vector with mean 0 and sd 1.
}
\description{
This is used to compare multiple features on the same plot.
Internal utility function.
}
@@ -2,113 +2,183 @@
|
||||
% Please edit documentation in R/xgb.Booster.R
|
||||
\name{predict.xgb.Booster}
|
||||
\alias{predict.xgb.Booster}
|
||||
\alias{predict.xgb.Booster.handle}
|
||||
\title{Predict method for eXtreme Gradient Boosting model}
|
||||
\title{Predict method for XGBoost model}
|
||||
\usage{
|
||||
\method{predict}{xgb.Booster}(
|
||||
object,
|
||||
newdata,
|
||||
missing = NA,
|
||||
outputmargin = FALSE,
|
||||
ntreelimit = NULL,
|
||||
predleaf = FALSE,
|
||||
predcontrib = FALSE,
|
||||
approxcontrib = FALSE,
|
||||
predinteraction = FALSE,
|
||||
reshape = FALSE,
|
||||
training = FALSE,
|
||||
iterationrange = NULL,
|
||||
strict_shape = FALSE,
|
||||
avoid_transpose = FALSE,
|
||||
validate_features = FALSE,
|
||||
base_margin = NULL,
|
||||
...
|
||||
)
|
||||
|
||||
\method{predict}{xgb.Booster.handle}(object, ...)
|
||||
}
|
||||
\arguments{
|
||||
\item{object}{Object of class \code{xgb.Booster} or \code{xgb.Booster.handle}}
|
||||
\item{object}{Object of class \code{xgb.Booster}.}
|
||||
|
||||
\item{newdata}{takes \code{matrix}, \code{dgCMatrix}, \code{dgRMatrix}, \code{dsparseVector},
|
||||
local data file or \code{xgb.DMatrix}.
|
||||
\item{newdata}{Takes \code{data.frame}, \code{matrix}, \code{dgCMatrix}, \code{dgRMatrix}, \code{dsparseVector},
|
||||
local data file, or \code{xgb.DMatrix}.
|
||||
|
||||
For single-row predictions on sparse data, it's recommended to use CSR format. If passing
|
||||
a sparse vector, it will take it as a row vector.}
|
||||
For single-row predictions on sparse data, it is recommended to use CSR format. If passing
|
||||
a sparse vector, it will take it as a row vector.
|
||||
|
||||
\item{missing}{Missing is only used when input is dense matrix. Pick a float value that represents
|
||||
missing values in data (e.g., sometimes 0 or some other extreme value is used).}
|
||||
Note that, for repeated predictions on the same data, one might want to create a DMatrix to
|
||||
pass here instead of passing R types like matrices or data frames, as predictions will be
|
||||
faster on DMatrix.
|
||||
|
||||
\item{outputmargin}{whether the prediction should be returned in the for of original untransformed
|
||||
sum of predictions from boosting iterations' results. E.g., setting \code{outputmargin=TRUE} for
|
||||
logistic regression would result in predictions for log-odds instead of probabilities.}
|
||||
If \code{newdata} is a \code{data.frame}, be aware that:
|
||||
\itemize{
|
||||
\item Columns will be converted to numeric if they aren't already, which could potentially make
|
||||
the operation slower than in an equivalent \code{matrix} object.
|
||||
\item The order of the columns must match with that of the data from which the model was fitted
|
||||
(i.e. columns will not be referenced by their names, just by their order in the data).
|
||||
\item If the model was fitted to data with categorical columns, these columns must be of
|
||||
\code{factor} type here, and must use the same encoding (i.e. have the same levels).
|
||||
\item If \code{newdata} contains any \code{factor} columns, they will be converted to base-0
|
||||
encoding (same as during DMatrix creation) - hence, one should not pass a \code{factor}
|
||||
under a column which during training had a different type.
|
||||
}}
|
||||
|
||||
\item{ntreelimit}{Deprecated, use \code{iterationrange} instead.}
|
||||
\item{missing}{Float value that represents missing values in data
|
||||
(e.g., 0 or some other extreme value).
|
||||
|
||||
\item{predleaf}{whether predict leaf index.}
|
||||
This parameter is not used when \code{newdata} is an \code{xgb.DMatrix} - in such cases,
|
||||
should pass this as an argument to the DMatrix constructor instead.}
|
||||
|
||||
\item{predcontrib}{whether to return feature contributions to individual predictions (see Details).}
|
||||
\item{outputmargin}{Whether the prediction should be returned in the form of
|
||||
original untransformed sum of predictions from boosting iterations' results.
|
||||
E.g., setting \code{outputmargin = TRUE} for logistic regression would return log-odds
|
||||
instead of probabilities.}
|
||||
|
||||
\item{approxcontrib}{whether to use a fast approximation for feature contributions (see Details).}
|
||||
\item{predleaf}{Whether to predict per-tree leaf indices.}
|
||||
|
||||
\item{predinteraction}{whether to return contributions of feature interactions to individual predictions (see Details).}
|
||||
\item{predcontrib}{Whether to return feature contributions to individual predictions (see Details).}
|
||||
|
||||
\item{reshape}{whether to reshape the vector of predictions to a matrix form when there are several
|
||||
prediction outputs per case. This option has no effect when either of predleaf, predcontrib,
|
||||
or predinteraction flags is TRUE.}
|
||||
\item{approxcontrib}{Whether to use a fast approximation for feature contributions (see Details).}
|
||||
|
||||
\item{training}{whether is the prediction result used for training. For dart booster,
|
||||
\item{predinteraction}{Whether to return contributions of feature interactions to individual predictions (see Details).}
|
||||
|
||||
\item{training}{Whether the prediction result is used for training. For dart booster,
|
||||
training predicting will perform dropout.}
|
||||
|
||||
\item{iterationrange}{Specifies which layer of trees are used in prediction. For
|
||||
example, if a random forest is trained with 100 rounds. Specifying
|
||||
`iterationrange=(1, 21)`, then only the forests built during [1, 21) (half open set)
|
||||
rounds are used in this prediction. It's 1-based index just like R vector. When set
|
||||
to \code{c(1, 1)} XGBoost will use all trees.}
|
||||
\item{iterationrange}{Sequence of rounds/iterations from the model to use for prediction, specified by passing
|
||||
a two-dimensional vector with the start and end numbers in the sequence (same format as R's \code{seq} - i.e.
|
||||
base-1 indexing, and inclusive of both ends).
|
||||
|
||||
\item{strict_shape}{Default is \code{FALSE}. When it's set to \code{TRUE}, output
|
||||
type and shape of prediction are invariant to model type.}
|
||||
For example, passing \code{c(1,20)} will predict using the first twenty iterations, while passing \code{c(1,1)} will
|
||||
predict using only the first one.
|
||||
|
||||
\item{...}{Parameters passed to \code{predict.xgb.Booster}}
|
||||
If passing \code{NULL}, will either stop at the best iteration if the model used early stopping, or use all
|
||||
of the iterations (rounds) otherwise.
|
||||
|
||||
If passing "all", will use all of the rounds regardless of whether the model had early stopping or not.}
|
||||
|
||||
\item{strict_shape}{Whether to always return an array with the same dimensions for the given prediction mode
|
||||
regardless of the model type - meaning that, for example, both a multi-class and a binary classification
|
||||
model would generate output arrays with the same number of dimensions, with the 'class' dimension having
|
||||
size equal to '1' for the binary model.
|
||||
|
||||
If passing \code{FALSE} (the default), dimensions will be simplified according to the model type, so that a
|
||||
binary classification model for example would not have a redundant dimension for 'class'.
|
||||
|
||||
See documentation for the return type for the exact shape of the output arrays for each prediction mode.}
|
||||
|
||||
\item{avoid_transpose}{Whether to output the resulting predictions in the same memory layout in which they
|
||||
are generated by the core XGBoost library, without transposing them to match the expected output shape.
|
||||
|
||||
Internally, XGBoost uses row-major order for the predictions it generates, while R arrays use column-major
|
||||
order, hence the result needs to be transposed in order to have the expected shape when represented as
|
||||
an R array or matrix, which might be a slow operation.
|
||||
|
||||
If passing \code{TRUE}, then the result will have dimensions in reverse order - for example, rows
|
||||
will be the last dimensions instead of the first dimension.}
|
||||
|
||||
\item{validate_features}{When \code{TRUE}, validate that the Booster's and newdata's
feature_names match (only applicable when both \code{object} and \code{newdata} have feature names).

If the column names differ and \code{newdata} is not an \code{xgb.DMatrix}, will try to reorder
the columns in \code{newdata} to match with the booster's.

If the booster has feature types and \code{newdata} is either an \code{xgb.DMatrix} or
\code{data.frame}, will additionally verify that categorical columns are of the
correct type in \code{newdata}, throwing an error if they do not match.

If passing \code{FALSE}, it is assumed that the feature names and types are the same,
and come in the same order as in the training data.

Note that this check might add some sizable latency to the predictions, so it's
recommended to disable it for performance-sensitive applications.}

\item{base_margin}{Base margin used for boosting from existing model.

Note that, if \code{newdata} is an \code{xgb.DMatrix} object, this argument will
be ignored, as it needs to be added to the DMatrix instead (e.g. by passing it as
an argument in its constructor, or by calling \code{\link[=setinfo.xgb.DMatrix]{setinfo.xgb.DMatrix()}}).}

\item{...}{Not used.}
}
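
A small usage sketch combining several of these arguments (a minimal sketch, assuming \code{bst} and
\code{test} from the binary-classification example below; object names are illustrative only):

# first three rounds only, keeping the full array shape
p <- predict(bst, test$data, iterationrange = c(1, 3), strict_shape = TRUE)
dim(p)  # c(nrow(test$data), 1) for a binary model

# row-major layout as produced by the core library (dimensions reversed)
p_raw <- predict(bst, test$data, strict_shape = TRUE, avoid_transpose = TRUE)
dim(p_raw)  # c(1, nrow(test$data))

# check that newdata's column names match the booster's feature names
p_chk <- predict(bst, test$data, validate_features = TRUE)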

\value{
A numeric vector or array, with corresponding dimensions depending on the prediction mode and on
parameter \code{strict_shape} as follows:

If passing \code{strict_shape=FALSE}:\itemize{
\item For regression or binary classification: a vector of length \code{nrows}.
\item For multi-class and multi-target objectives: a matrix of dimensions \verb{[nrows, ngroups]}.

Note that objective variant \code{multi:softmax} defaults towards predicting the most likely class (a vector
\code{nrows}) instead of per-class probabilities.
\item For \code{predleaf}: a matrix with one column per tree.

For multi-class / multi-target, they will be arranged so that columns in the output will have
the leafs from one group followed by leafs of the other group (e.g. order will be \code{group1:feat1},
\code{group1:feat2}, ..., \code{group2:feat1}, \code{group2:feat2}, ...).
\item For \code{predcontrib}: when not multi-class / multi-target, a matrix with dimensions
\verb{[nrows, nfeats+1]}. The last "+ 1" column corresponds to the baseline value.

For multi-class and multi-target objectives, will be an array with dimensions \verb{[nrows, ngroups, nfeats+1]}.

The contribution values are on the scale of untransformed margin (e.g., for binary classification,
the values are log-odds deviations from the baseline).
\item For \code{predinteraction}: when not multi-class / multi-target, the output is a 3D array of
dimensions \verb{[nrows, nfeats+1, nfeats+1]}. The off-diagonal (in the last two dimensions)
elements represent different feature interaction contributions. The array is symmetric w.r.t. the last
two dimensions. The "+ 1" columns correspond to the baselines. Summing this array along the last
dimension should produce practically the same result as \code{predcontrib = TRUE}.

For multi-class and multi-target, will be a 4D array with dimensions \verb{[nrows, ngroups, nfeats+1, nfeats+1]}.
}

If passing \code{strict_shape=TRUE}, the result is always an array:
\itemize{
\item For normal predictions, the dimension is \verb{[nrows, ngroups]}.
\item For \code{predcontrib=TRUE}, the dimension is \verb{[nrows, ngroups, nfeats+1]}.
\item For \code{predinteraction=TRUE}, the dimension is \verb{[nrows, ngroups, nfeats+1, nfeats+1]}.
\item For \code{predleaf=TRUE}, the dimension is \verb{[nrows, niter, ngroups, num_parallel_tree]}.
}

If passing \code{avoid_transpose=TRUE}, then the dimensions in all cases will be in reverse order - for
example, for \code{predinteraction}, they will be \verb{[nfeats+1, nfeats+1, ngroups, nrows]}
instead of \verb{[nrows, ngroups, nfeats+1, nfeats+1]}.
}
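
As a quick sanity check of the shapes listed above (a minimal, self-contained sketch on a small
regression problem; it is not part of the package's own examples):

data(mtcars)
x <- as.matrix(mtcars[, -1])
bst_mt <- xgb.train(
  data = xgb.DMatrix(x, label = mtcars$mpg, nthread = 1),
  params = list(objective = "reg:squarederror", nthread = 1),
  nrounds = 4
)
str(predict(bst_mt, x))                        # vector of length nrow(x)
str(predict(bst_mt, x, predcontrib = TRUE))    # matrix [nrow(x), ncol(x) + 1]
str(predict(bst_mt, x, predleaf = TRUE))       # matrix with one column per tree
str(predict(bst_mt, x, strict_shape = TRUE))   # array [nrow(x), 1]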

\description{
Predict values on data based on XGBoost model.
}
\details{
Note that \code{iterationrange} would currently do nothing for predictions from "gblinear",
since "gblinear" doesn't keep its boosting history.

One possible practical application of the \code{predleaf} option is to use the model
as a generator of new features which capture non-linearity and interactions,
e.g., as implemented in \code{\link[=xgb.create.features]{xgb.create.features()}}.

Setting \code{predcontrib = TRUE} allows calculating contributions of each feature to
individual predictions. For "gblinear" booster, feature contributions are simply linear terms
@@ -122,21 +192,37 @@ With \code{predinteraction = TRUE}, SHAP values of contributions of interaction
are computed. Note that this operation might be rather expensive in terms of compute and memory.
Since it quadratically depends on the number of features, it is recommended to perform selection
of the most important features first. See below about the format of the returned results.

The \code{predict()} method uses as many threads as defined in the \code{xgb.Booster} object (all by default).
If you want to change their number, assign a new number to \code{nthread} using \code{\link[=xgb.parameters<-]{xgb.parameters<-()}}.
Note that converting a matrix to \code{\link[=xgb.DMatrix]{xgb.DMatrix()}} uses multiple threads too.
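
For instance, to force single-threaded prediction (a small sketch; \code{bst} here stands for any
fitted booster, as in the examples below):

xgb.parameters(bst) <- list(nthread = 1)
pred <- predict(bst, test$data)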
}

\examples{
## binary classification:

data(agaricus.train, package = "xgboost")
data(agaricus.test, package = "xgboost")

## Keep the number of threads to 2 for examples
nthread <- 2
data.table::setDTthreads(nthread)

train <- agaricus.train
test <- agaricus.test

bst <- xgb.train(
  data = xgb.DMatrix(train$data, label = train$label),
  max_depth = 2,
  eta = 0.5,
  nthread = nthread,
  nrounds = 5,
  objective = "binary:logistic"
)

# use all trees by default
pred <- predict(bst, test$data)
# use only the 1st tree
pred1 <- predict(bst, test$data, iterationrange = c(1, 1))

# Predicting tree leafs:
# the result is an nsamples X ntrees matrix
@@ -151,7 +237,7 @@ str(pred_contr)
summary(rowSums(pred_contr) - qlogis(pred))
# for the 1st record, let's inspect its features that had non-zero contribution to prediction:
contr1 <- pred_contr[1, ]
contr1 <- contr1[-length(contr1)]  # drop intercept
contr1 <- contr1[contr1 != 0]  # drop non-contributing features
contr1 <- contr1[order(abs(contr1))]  # order by contribution magnitude
old_mar <- par("mar")
@@ -164,39 +250,59 @@ par(mar = old_mar)

lb <- as.numeric(iris$Species) - 1
num_class <- 3

set.seed(11)
bst <- xgb.train(
  data = xgb.DMatrix(as.matrix(iris[, -5]), label = lb),
  max_depth = 4,
  eta = 0.5,
  nthread = 2,
  nrounds = 10,
  subsample = 0.5,
  objective = "multi:softprob",
  num_class = num_class
)

# predict for softprob returns num_class probability numbers per case:
pred <- predict(bst, as.matrix(iris[, -5]))
str(pred)
# reshape it to a num_class-columns matrix
pred <- matrix(pred, ncol = num_class, byrow = TRUE)
# convert the probabilities to softmax labels
pred_labels <- max.col(pred) - 1
# the following should result in the same error as seen in the last iteration
sum(pred_labels != lb) / length(lb)

# compare with predictions from softmax:
set.seed(11)
bst <- xgb.train(
  data = xgb.DMatrix(as.matrix(iris[, -5]), label = lb),
  max_depth = 4,
  eta = 0.5,
  nthread = 2,
  nrounds = 10,
  subsample = 0.5,
  objective = "multi:softmax",
  num_class = num_class
)

pred <- predict(bst, as.matrix(iris[, -5]))
str(pred)
all.equal(pred, pred_labels)
# prediction from using only 5 iterations should result
# in the same error as seen in iteration 5:
pred5 <- predict(bst, as.matrix(iris[, -5]), iterationrange = c(1, 5))
sum(pred5 != lb) / length(lb)

}
\references{
\enumerate{
\item Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions",
NIPS Proceedings 2017, \url{https://arxiv.org/abs/1705.07874}
\item Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles",
\url{https://arxiv.org/abs/1706.06060}
}
}
\seealso{
\code{\link[=xgb.train]{xgb.train()}}
}

@@ -1,27 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.ggplot.R
\name{prepare.ggplot.shap.data}
\alias{prepare.ggplot.shap.data}
\title{Combine and melt feature values and SHAP contributions for sample
observations.}
\usage{
prepare.ggplot.shap.data(data_list, normalize = FALSE)
}
\arguments{
\item{data_list}{List containing 'data' and 'shap_contrib' returned by
\code{xgb.shap.data()}.}

\item{normalize}{Whether to standardize feature values to have mean 0 and
standard deviation 1 (useful for comparing multiple features on the same
plot). Default \code{FALSE}.}
}
\value{
A data.table containing the observation ID, the feature name, the
feature value (normalized if specified), and the SHAP contribution value.
}
\description{
Conforms to data format required for ggplot functions.
}
\details{
Internal utility function.
}
@@ -4,26 +4,33 @@
\alias{print.xgb.Booster}
\title{Print xgb.Booster}
\usage{
\method{print}{xgb.Booster}(x, ...)
}
\arguments{
\item{x}{An \code{xgb.Booster} object.}

\item{...}{Not used.}
}
\value{
The same \code{x} object, returned invisibly.
}
\description{
Print information about \code{xgb.Booster}.
}
\examples{
data(agaricus.train, package = "xgboost")
train <- agaricus.train

bst <- xgb.train(
  data = xgb.DMatrix(train$data, label = train$label),
  max_depth = 2,
  eta = 1,
  nthread = 2,
  nrounds = 2,
  objective = "binary:logistic"
)

attr(bst, "myattr") <- "memo"

print(bst)
}

@@ -7,21 +7,22 @@
\method{print}{xgb.DMatrix}(x, verbose = FALSE, ...)
}
\arguments{
\item{x}{An xgb.DMatrix object.}

\item{verbose}{Whether to print colnames (when present).}

\item{...}{Not currently used.}
}
\description{
Print information about xgb.DMatrix.
Currently it displays dimensions and presence of info-fields and colnames.
}
\examples{
data(agaricus.train, package = "xgboost")

dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label, nthread = 2))
dtrain

print(dtrain, verbose = TRUE)
}

@@ -7,25 +7,33 @@
\method{print}{xgb.cv.synchronous}(x, verbose = FALSE, ...)
}
\arguments{
\item{x}{An \code{xgb.cv.synchronous} object.}

\item{verbose}{Whether to print detailed data.}

\item{...}{Passed to \code{data.table.print()}.}
}
\description{
Prints formatted results of \code{\link[=xgb.cv]{xgb.cv()}}.
}
\details{
When not verbose, it would only print the evaluation results,
including the best iteration (when available).
}
\examples{
data(agaricus.train, package = "xgboost")

train <- agaricus.train
cv <- xgb.cv(
  data = xgb.DMatrix(train$data, label = train$label),
  nfold = 5,
  max_depth = 2,
  eta = 1,
  nthread = 2,
  nrounds = 2,
  objective = "binary:logistic"
)
print(cv)
print(cv, verbose = TRUE)
}

@@ -1,42 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.DMatrix.R
\name{setinfo}
\alias{setinfo}
\alias{setinfo.xgb.DMatrix}
\title{Set information of an xgb.DMatrix object}
\usage{
setinfo(object, ...)

\method{setinfo}{xgb.DMatrix}(object, name, info, ...)
}
\arguments{
\item{object}{Object of class "xgb.DMatrix"}

\item{...}{other parameters}

\item{name}{the name of the field to set}

\item{info}{the specific field of information to set}
}
\description{
Set information of an xgb.DMatrix object
}
\details{
The \code{name} field can be one of the following:

\itemize{
\item \code{label}: label XGBoost learns from;
\item \code{weight}: to do a weight rescale;
\item \code{base_margin}: base margin is the base prediction XGBoost will boost from;
\item \code{group}: number of rows in each group (to use with \code{rank:pairwise} objective).
}
}
\examples{
data(agaricus.train, package='xgboost')
dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))

labels <- getinfo(dtrain, 'label')
setinfo(dtrain, 'label', 1-labels)
labels2 <- getinfo(dtrain, 'label')
stopifnot(all.equal(labels2, 1-labels))
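
# The other fields listed in 'Details' can be set the same way - a small
# illustrative sketch (the weight and margin values here are made up):
setinfo(dtrain, 'weight', rep(1, length(labels)))
setinfo(dtrain, 'base_margin', rep(0.5, length(labels)))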
}

@@ -1,39 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.DMatrix.R
\name{slice}
\alias{slice}
\alias{slice.xgb.DMatrix}
\alias{[.xgb.DMatrix}
\title{Get a new DMatrix containing the specified rows of
original xgb.DMatrix object}
\usage{
slice(object, ...)

\method{slice}{xgb.DMatrix}(object, idxset, ...)

\method{[}{xgb.DMatrix}(object, idxset, colset = NULL)
}
\arguments{
\item{object}{Object of class "xgb.DMatrix"}

\item{...}{other parameters (currently not used)}

\item{idxset}{an integer vector of indices of rows needed}

\item{colset}{currently not used (columns subsetting is not available)}
}
\description{
Get a new DMatrix containing the specified rows of
original xgb.DMatrix object
}
\examples{
data(agaricus.train, package='xgboost')
dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))

dsub <- slice(dtrain, 1:42)
labels1 <- getinfo(dsub, 'label')
dsub <- dtrain[1:42, ]
labels2 <- getinfo(dsub, 'label')
all.equal(labels1, labels2)
}

R-package/man/variable.names.xgb.Booster.Rd
@@ -0,0 +1,22 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.Booster.R
\name{variable.names.xgb.Booster}
\alias{variable.names.xgb.Booster}
\title{Get Features Names from Booster}
\usage{
\method{variable.names}{xgb.Booster}(object, ...)
}
\arguments{
\item{object}{An \code{xgb.Booster} object.}

\item{...}{Not used.}
}
\description{
Returns the feature / variable / column names from a fitted
booster object, which are set automatically during the call to \code{\link[=xgb.train]{xgb.train()}}
from the DMatrix names, or which can be set manually through \code{\link[=setinfo]{setinfo()}}.

If the object doesn't have feature names, will return \code{NULL}.

It is equivalent to calling \code{getinfo(object, "feature_name")}.
}
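
A minimal sketch of its use (assuming a booster fitted on data that carries column names;
this example is not part of the original page):

data(agaricus.train, package = "xgboost")
bst <- xgb.train(
  data = xgb.DMatrix(agaricus.train$data, label = agaricus.train$label, nthread = 1),
  params = list(objective = "binary:logistic", nthread = 1),
  nrounds = 2
)
variable.names(bst)  # same as getinfo(bst, "feature_name")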

@@ -1,52 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.Booster.R
\name{xgb.Booster.complete}
\alias{xgb.Booster.complete}
\title{Restore missing parts of an incomplete xgb.Booster object.}
\usage{
xgb.Booster.complete(object, saveraw = TRUE)
}
\arguments{
\item{object}{object of class \code{xgb.Booster}}

\item{saveraw}{a flag indicating whether to append \code{raw} Booster memory dump data
when it doesn't already exist.}
}
\value{
An object of \code{xgb.Booster} class.
}
\description{
It attempts to complete an \code{xgb.Booster} object by restoring either its missing
raw model memory dump (when it has no \code{raw} data but its \code{xgb.Booster.handle} is valid)
or its missing internal handle (when its \code{xgb.Booster.handle} is not valid
but it has a raw Booster memory dump).
}
\details{
While this method is primarily for internal use, it might be useful in some practical situations.

E.g., when an \code{xgb.Booster} model is saved as an R object and then is loaded as an R object,
its handle (pointer) to an internal xgboost model would be invalid. The majority of xgboost methods
should still work for such a model object since those methods would be using
\code{xgb.Booster.complete} internally. However, one might find it to be more efficient to call the
\code{xgb.Booster.complete} function explicitly once after loading a model as an R-object.
That would prevent further repeated implicit reconstruction of an internal booster model.
}
\examples{
data(agaricus.train, package='xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
               eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
saveRDS(bst, "xgb.model.rds")

# Warning: The resulting RDS file is only compatible with the current XGBoost version.
# Refer to the section titled "a-compatibility-note-for-saveRDS-save".
bst1 <- readRDS("xgb.model.rds")
if (file.exists("xgb.model.rds")) file.remove("xgb.model.rds")
# the handle is invalid:
print(bst1$handle)

bst1 <- xgb.Booster.complete(bst1)
# now the handle points to a valid internal booster model:
print(bst1$handle)
}

R-package/man/xgb.Callback.Rd
@@ -0,0 +1,243 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callbacks.R
\name{xgb.Callback}
\alias{xgb.Callback}
\title{XGBoost Callback Constructor}
\usage{
xgb.Callback(
  cb_name = "custom_callback",
  env = new.env(),
  f_before_training = function(env, model, data, evals, begin_iteration, end_iteration)
    NULL,
  f_before_iter = function(env, model, data, evals, iteration) NULL,
  f_after_iter = function(env, model, data, evals, iteration, iter_feval) NULL,
  f_after_training = function(env, model, data, evals, iteration, final_feval,
    prev_cb_res) NULL
)
}
\arguments{
\item{cb_name}{Name for the callback.

If the callback produces some non-NULL result (from executing the function passed under
\code{f_after_training}), that result will be added as an R attribute to the resulting booster
(or as a named element in the result of CV), with the attribute name specified here.

Names of callbacks must be unique - i.e. there cannot be two callbacks with the same name.}

\item{env}{An environment object that will be passed to the different functions in the callback.
Note that this environment will not be shared with other callbacks.}

\item{f_before_training}{A function that will be executed before the training has started.

If passing \code{NULL} for this or for the other function inputs, then no function will be executed.

If passing a function, it will be called with parameters supplied as non-named arguments
matching the function signatures that are shown in the default value for each function argument.}

\item{f_before_iter}{A function that will be executed before each boosting round.

This function can signal whether the training should be finalized or not, by outputting
a value that evaluates to \code{TRUE} - i.e. if the output from the function provided here at
a given round is \code{TRUE}, then training will be stopped before the current iteration happens.

Return values of \code{NULL} will be interpreted as \code{FALSE}.}

\item{f_after_iter}{A function that will be executed after each boosting round.

This function can signal whether the training should be finalized or not, by outputting
a value that evaluates to \code{TRUE} - i.e. if the output from the function provided here at
a given round is \code{TRUE}, then training will be stopped at that round.

Return values of \code{NULL} will be interpreted as \code{FALSE}.}

\item{f_after_training}{A function that will be executed after training is finished.

This function can optionally output something non-NULL, which will become part of the R
attributes of the booster (assuming one passes \code{keep_extra_attributes=TRUE} to \code{\link[=xgb.train]{xgb.train()}})
under the name supplied for parameter \code{cb_name} in the case of \code{\link[=xgb.train]{xgb.train()}}; or a part
of the named elements in the result of \code{\link[=xgb.cv]{xgb.cv()}}.}
}
\value{
An \code{xgb.Callback} object, which can be passed to \code{\link[=xgb.train]{xgb.train()}} or \code{\link[=xgb.cv]{xgb.cv()}}.
}
\description{
Constructor for defining the structure of callback functions that can be executed
at different stages of model training (before / after training, before / after each boosting
iteration).
}
\details{
Arguments that will be passed to the supplied functions are as follows:
\itemize{
\item env The same environment that is passed under argument \code{env}.

It may be modified by the functions in order to e.g. keep track of what happens
across iterations or similar.

This environment is only used by the functions supplied to the callback, and will
not be kept after the model fitting function terminates (see parameter \code{f_after_training}).
\item model The booster object when using \code{\link[=xgb.train]{xgb.train()}}, or the folds when using \code{\link[=xgb.cv]{xgb.cv()}}.

For \code{\link[=xgb.cv]{xgb.cv()}}, folds are a list with a structure as follows:
\itemize{
\item \code{dtrain}: The training data for the fold (as an \code{xgb.DMatrix} object).
\item \code{bst}: The \code{xgb.Booster} object for the fold.
\item \code{evals}: A list containing two DMatrices, with names \code{train} and \code{test}
(\code{test} is the held-out data for the fold).
\item \code{index}: The indices of the hold-out data for that fold (base-1 indexing),
from which the \code{test} entry in \code{evals} was obtained.
}

This object should \strong{not} be in-place modified in ways that conflict with the
training (e.g. resetting the parameters for a training update in a way that resets
the number of rounds to zero in order to overwrite rounds).

Note that any R attributes that are assigned to the booster during the callback functions
will not be kept thereafter, as the booster object variable is not re-assigned during
training. It is however possible to set C-level attributes of the booster through
\code{\link[=xgb.attr]{xgb.attr()}} or \code{\link[=xgb.attributes]{xgb.attributes()}}, which should remain available for the rest
of the iterations and after the training is done.

For keeping variables across iterations, it's recommended to use \code{env} instead.
\item data The data to which the model is being fit, as an \code{xgb.DMatrix} object.

Note that, for \code{\link[=xgb.cv]{xgb.cv()}}, this will be the full data, while data for the specific
folds can be found in the \code{model} object.
\item evals The evaluation data, as passed under argument \code{evals} to \code{\link[=xgb.train]{xgb.train()}}.

For \code{\link[=xgb.cv]{xgb.cv()}}, this will always be \code{NULL}.
\item begin_iteration Index of the first boosting iteration that will be executed (base-1 indexing).

This will typically be '1', but when using training continuation, depending on the
parameters for updates, boosting rounds will be continued from where the previous
model ended, in which case this will be larger than 1.
\item end_iteration Index of the last boosting iteration that will be executed
(base-1 indexing, inclusive of this end).

It should match with argument \code{nrounds} passed to \code{\link[=xgb.train]{xgb.train()}} or \code{\link[=xgb.cv]{xgb.cv()}}.

Note that boosting might be interrupted before reaching this last iteration, for
example by using the early stopping callback \code{\link[=xgb.cb.early.stop]{xgb.cb.early.stop()}}.
\item iteration Index of the iteration number that is being executed (first iteration
will be the same as parameter \code{begin_iteration}, then next one will add +1, and so on).
\item iter_feval Evaluation metrics for \code{evals} that were supplied, either
determined by the objective, or by parameter \code{feval}.

For \code{\link[=xgb.train]{xgb.train()}}, this will be a named vector with one entry per element in
\code{evals}, where the names are determined as 'evals name' + '-' + 'metric name' - for
example, if \code{evals} contains an entry named "tr" and the metric is "rmse",
this will be a one-element vector with name "tr-rmse".

For \code{\link[=xgb.cv]{xgb.cv()}}, this will be a 2d matrix with dimensions \verb{[length(evals), nfolds]},
where the row names will follow the same naming logic as the one-dimensional vector
that is passed in \code{\link[=xgb.train]{xgb.train()}}.

Note that, internally, the built-in callbacks such as \link{xgb.cb.print.evaluation} summarize
this table by calculating the row-wise means and standard deviations.
\item final_feval The evaluation results after the last boosting round is executed
(same format as \code{iter_feval}, and will be the exact same input as passed under
\code{iter_feval} to the last round that is executed during model fitting).
\item prev_cb_res Result from a previous run of a callback sharing the same name
(as given by parameter \code{cb_name}) when conducting training continuation, if there
was any in the booster R attributes.

Sometimes, one might want to append the new results to the previous one, and this will
be done automatically by the built-in callbacks such as \link{xgb.cb.evaluation.log},
which will append the new rows to the previous table.

If no such previous callback result is available (which it never will when fitting
a model from start instead of updating an existing model), this will be \code{NULL}.

For \code{\link[=xgb.cv]{xgb.cv()}}, which doesn't support training continuation, this will always be \code{NULL}.
}

The following names (\code{cb_name} values) are reserved for internal callbacks:
\itemize{
\item print_evaluation
\item evaluation_log
\item reset_parameters
\item early_stop
\item save_model
\item cv_predict
\item gblinear_history
}

The following names are reserved for other non-callback attributes:
\itemize{
\item names
\item class
\item call
\item params
\item niter
\item nfeatures
\item folds
}

When using the built-in early stopping callback (\link{xgb.cb.early.stop}), said callback
will always be executed before the others, as it sets some booster C-level attributes
that other callbacks might also use. Otherwise, the order of execution will match with
the order in which the callbacks are passed to the model fitting function.
}
\examples{
# Example constructing a custom callback that calculates
# squared error on the training data (no separate test set),
# and outputs the per-iteration results.
ssq_callback <- xgb.Callback(
  cb_name = "ssq",
  f_before_training = function(env, model, data, evals,
                               begin_iteration, end_iteration) {
    # A vector to keep track of a number at each iteration
    env$logs <- rep(NA_real_, end_iteration - begin_iteration + 1)
  },
  f_after_iter = function(env, model, data, evals, iteration, iter_feval) {
    # This calculates the sum of squared errors on the training data.
    # Note that this can be better done by passing an 'evals' entry,
    # but this demonstrates a way in which callbacks can be structured.
    pred <- predict(model, data)
    err <- pred - getinfo(data, "label")
    sq_err <- sum(err^2)
    env$logs[iteration] <- sq_err
    cat(
      sprintf(
        "Squared error at iteration \%d: \%.2f\n",
        iteration, sq_err
      )
    )

    # A return value of 'TRUE' here would signal to finalize the training
    return(FALSE)
  },
  f_after_training = function(env, model, data, evals, iteration,
                              final_feval, prev_cb_res) {
    return(env$logs)
  }
)

data(mtcars)

y <- mtcars$mpg
x <- as.matrix(mtcars[, -1])

dm <- xgb.DMatrix(x, label = y, nthread = 1)
model <- xgb.train(
  data = dm,
  params = list(objective = "reg:squarederror", nthread = 1),
  nrounds = 5,
  callbacks = list(ssq_callback),
  keep_extra_attributes = TRUE
)

# Result from 'f_after_iter' will be available as an attribute
attributes(model)$ssq
}
\seealso{
Built-in callbacks:
\itemize{
\item \link{xgb.cb.print.evaluation}
\item \link{xgb.cb.evaluation.log}
\item \link{xgb.cb.reset.parameters}
\item \link{xgb.cb.early.stop}
\item \link{xgb.cb.save.model}
\item \link{xgb.cb.cv.predict}
\item \link{xgb.cb.gblinear.history}
}
}
@@ -2,44 +2,197 @@
% Please edit documentation in R/xgb.DMatrix.R
\name{xgb.DMatrix}
\alias{xgb.DMatrix}
\alias{xgb.QuantileDMatrix}
\title{Construct xgb.DMatrix object}
\usage{
xgb.DMatrix(
  data,
  label = NULL,
  weight = NULL,
  base_margin = NULL,
  missing = NA,
  silent = FALSE,
  feature_names = colnames(data),
  feature_types = NULL,
  nthread = NULL,
  group = NULL,
  qid = NULL,
  label_lower_bound = NULL,
  label_upper_bound = NULL,
  feature_weights = NULL,
  data_split_mode = "row"
)

xgb.QuantileDMatrix(
  data,
  label = NULL,
  weight = NULL,
  base_margin = NULL,
  missing = NA,
  feature_names = colnames(data),
  feature_types = NULL,
  nthread = NULL,
  group = NULL,
  qid = NULL,
  label_lower_bound = NULL,
  label_upper_bound = NULL,
  feature_weights = NULL,
  ref = NULL,
  max_bin = NULL
)
}
\arguments{
\item{data}{Data from which to create a DMatrix, which can then be used for fitting models or
for getting predictions out of a fitted model.

Supported input types are as follows:\itemize{
\item \code{matrix} objects, with types \code{numeric}, \code{integer}, or \code{logical}.
\item \code{data.frame} objects, with columns of types \code{numeric}, \code{integer}, \code{logical}, or \code{factor}.

Note that xgboost uses base-0 encoding for categorical types, hence \code{factor} types (which use base-1
encoding) will be converted inside the function call. Be aware that the encoding used for \code{factor}
types is not kept as part of the model, so in subsequent calls to \code{predict}, it is the user's
responsibility to ensure that factor columns have the same levels as the ones from which the DMatrix
was constructed (a short sketch of this handling follows this argument list).

Other column types are not supported.
\item CSR matrices, as class \code{dgRMatrix} from package \code{Matrix}.
\item CSC matrices, as class \code{dgCMatrix} from package \code{Matrix}. These are \strong{not} supported for
\code{xgb.QuantileDMatrix}.
\item Single-row CSR matrices, as class \code{dsparseVector} from package \code{Matrix}, which is interpreted
as a single row (only when making predictions from a fitted model).
\item Text files in a supported format, passed as a \code{character} variable containing the URI path to
the file, with an optional format specifier.

These are \strong{not} supported for \code{xgb.QuantileDMatrix}. Supported formats are:\itemize{
\item XGBoost's own binary format for DMatrices, as produced by \code{\link[=xgb.DMatrix.save]{xgb.DMatrix.save()}}.
\item SVMLight (a.k.a. LibSVM) format for CSR matrices. This format can be signaled by suffix
\code{?format=libsvm} at the end of the file path. It will be the default format if not
otherwise specified.
\item CSV files (comma-separated values). This format can be specified by adding suffix
\code{?format=csv} at the end of the file path. It will \strong{not} be auto-deduced from file extensions.
}

Be aware that the format of the file will not be auto-deduced - for example, if a file is named 'file.csv',
it will not look at the extension or file contents to determine that it is a comma-separated value.
Instead, the format must be specified following the URI format, so the input to \code{data} should be passed
like this: \code{"file.csv?format=csv"} (or \code{"file.csv?format=csv&label_column=0"} if the first column
corresponds to the labels).

For more information about passing text files as input, see the articles
\href{https://xgboost.readthedocs.io/en/stable/tutorials/input_format.html}{Text Input Format of DMatrix} and
\href{https://xgboost.readthedocs.io/en/stable/python/python_intro.html#python-data-interface}{Data Interface}.
}}

\item{label}{Label of the training data. For classification problems, should be passed encoded as
integers with numeration starting at zero.}

\item{weight}{Weight for each instance.

Note that, for ranking task, weights are per-group. In ranking task, one weight
is assigned to each group (not each data point). This is because we
only care about the relative ordering of data points within each group,
so it doesn't make sense to assign weights to individual data points.}

\item{base_margin}{Base margin used for boosting from existing model.

In the case of multi-output models, one can also pass multi-dimensional base_margin.}

\item{missing}{A float value to represent missing values in data (not used when creating DMatrix
from text files). It is useful to change when a zero, infinite, or some other
extreme value represents missing values in data.}

\item{silent}{whether to suppress printing an informational message after loading from a file.}

\item{feature_names}{Set names for features. Overrides column names in data frame and matrix.

Note: columns are not referenced by name when calling \code{predict}, so the column order there
must be the same as in the DMatrix construction, regardless of the column names.}

\item{feature_types}{Set types for features.

If \code{data} is a \code{data.frame} and \code{feature_types} is not supplied,
feature types will be deduced automatically from the column types.

Otherwise, one can pass a character vector with the same length as number of columns in \code{data},
with the following possible values:
\itemize{
\item "c", which represents categorical columns.
\item "q", which represents numeric columns.
\item "int", which represents integer columns.
\item "i", which represents logical (boolean) columns.
}

Note that, while categorical types are treated differently from the rest for model fitting
purposes, the other types do not influence the generated model, but have effects in other
functionalities such as feature importances.

\strong{Important}: Categorical features, if specified manually through \code{feature_types}, must
be encoded as integers with numeration starting at zero, and the same encoding needs to be
applied when passing data to \code{\link[=predict]{predict()}}. Even if passing \code{factor} types, the encoding will
not be saved, so make sure that \code{factor} columns passed to \code{predict} have the same \code{levels}.}

\item{nthread}{Number of threads used for creating DMatrix.}

\item{group}{Group size for all ranking groups.}

\item{qid}{Query ID for data samples, used for ranking.}

\item{label_lower_bound}{Lower bound for survival training.}

\item{label_upper_bound}{Upper bound for survival training.}

\item{feature_weights}{Set feature weights for column sampling.}

\item{data_split_mode}{When passing a URI (as R \code{character}) as input, this signals
whether to split by row or column. Allowed values are \code{"row"} and \code{"col"}.

In distributed mode, the file is split accordingly; otherwise this is only an indicator on
how the file was split beforehand. Defaults to row.

This is not used when \code{data} is not a URI.}

\item{ref}{The training dataset that provides quantile information, needed when creating
validation/test datasets with \code{\link[=xgb.QuantileDMatrix]{xgb.QuantileDMatrix()}}. Supplying the training DMatrix
as a reference means that the same quantisation applied to the training data is
applied to the validation/test data.}

\item{max_bin}{The number of histogram bins, should be consistent with the training parameter
\code{max_bin}.

This is only supported when constructing a QuantileDMatrix.}
}
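
A short sketch of the \code{data.frame}/\code{factor} handling and the manual \code{feature_types}
declaration described above (column names and values are made up for illustration):

df <- data.frame(
  x1 = c(0.5, 1.2, 3.4, 2.2),
  x2 = factor(c("a", "b", "a", "c"))  # categorical; converted to base-0 codes internally
)
dm <- xgb.DMatrix(df, label = c(0, 1, 0, 1), nthread = 1)

# equivalent manual declaration on a numeric matrix:
m <- cbind(x1 = c(0.5, 1.2, 3.4, 2.2), x2 = c(0, 1, 0, 2))
dm2 <- xgb.DMatrix(m, label = c(0, 1, 0, 1), feature_types = c("q", "c"), nthread = 1)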

\value{
An \code{xgb.DMatrix} object. If calling \code{xgb.QuantileDMatrix}, it will have additional
subclass \code{xgb.QuantileDMatrix}.
}
\description{
Construct an 'xgb.DMatrix' object from a given data source, which can then be passed to functions
such as \code{\link[=xgb.train]{xgb.train()}} or \code{\link[=predict]{predict()}}.
}
\details{
Function \code{xgb.QuantileDMatrix()} will construct a DMatrix with quantization for the histogram
method already applied to it, which can be used to reduce memory usage (compared to using a
regular DMatrix first and then creating a quantization out of it) when using the histogram
method (\code{tree_method = "hist"}, which is the default algorithm), but is not usable for the
sorted-indices method (\code{tree_method = "exact"}), nor for the approximate method
(\code{tree_method = "approx"}).

Note that DMatrix objects are not serializable through R functions such as \code{\link[=saveRDS]{saveRDS()}} or \code{\link[=save]{save()}}.
If a DMatrix gets serialized and then de-serialized (for example, when saving data in an R session or caching
chunks in an Rmd file), the resulting object will not be usable anymore and will need to be reconstructed
from the original source of data.
}
\examples{
data(agaricus.train, package = "xgboost")

## Keep the number of threads to 1 for examples
nthread <- 1
data.table::setDTthreads(nthread)
dtrain <- with(
  agaricus.train, xgb.DMatrix(data, label = label, nthread = nthread)
)
fname <- file.path(tempdir(), "xgb.DMatrix.data")
xgb.DMatrix.save(dtrain, fname)
dtrain <- xgb.DMatrix(fname)
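
# Sketch: pre-quantized DMatrix for tree_method = "hist"; 'ref' ties the
# validation data to the training data's quantile cuts (illustrative only)
data(mtcars)
x <- as.matrix(mtcars[, -1])
dtrain_q <- xgb.QuantileDMatrix(x, label = mtcars$mpg, nthread = nthread)
dvalid_q <- xgb.QuantileDMatrix(
  x[1:10, ], label = mtcars$mpg[1:10], ref = dtrain_q, nthread = nthread
)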
}

Some files were not shown because too many files have changed in this diff.