Compare commits

565 commits: `release_0.` ... `v1.1.0`
*(Commit table: 565 commits identified only by abbreviated SHA1, `f5d4fddafe` through `5f34078fba`; no other commit metadata was captured.)*
**`.clang-tidy`** (40 changes)

```diff
@@ -1,21 +1,21 @@
-Checks: 'modernize-*,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
+Checks: 'modernize-*,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,-modernize-avoid-c-arrays,-modernize-use-trailing-return-type,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
 CheckOptions:
 - { key: readability-identifier-naming.ClassCase, value: CamelCase }
 - { key: readability-identifier-naming.StructCase, value: CamelCase }
 - { key: readability-identifier-naming.TypeAliasCase, value: CamelCase }
 - { key: readability-identifier-naming.TypedefCase, value: CamelCase }
 - { key: readability-identifier-naming.TypeTemplateParameterCase, value: CamelCase }
 - { key: readability-identifier-naming.MemberCase, value: lower_case }
 - { key: readability-identifier-naming.PrivateMemberSuffix, value: '_' }
 - { key: readability-identifier-naming.ProtectedMemberSuffix, value: '_' }
 - { key: readability-identifier-naming.EnumCase, value: CamelCase }
 - { key: readability-identifier-naming.EnumConstant, value: CamelCase }
 - { key: readability-identifier-naming.EnumConstantPrefix, value: k }
 - { key: readability-identifier-naming.GlobalConstantCase, value: CamelCase }
 - { key: readability-identifier-naming.GlobalConstantPrefix, value: k }
 - { key: readability-identifier-naming.StaticConstantCase, value: CamelCase }
 - { key: readability-identifier-naming.StaticConstantPrefix, value: k }
 - { key: readability-identifier-naming.ConstexprVariableCase, value: CamelCase }
 - { key: readability-identifier-naming.ConstexprVariablePrefix, value: k }
 - { key: readability-identifier-naming.FunctionCase, value: CamelCase }
 - { key: readability-identifier-naming.NamespaceCase, value: lower_case }
```
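The `readability-identifier-naming` keys above pin down a single C++ naming scheme for the codebase. As a rough illustration, the following hypothetical snippet (not taken from the repository) satisfies every rule listed:

```cpp
#include <cstdint>

namespace tree_util {                      // NamespaceCase: lower_case

constexpr int32_t kDefaultMaxDepth = 6;    // ConstexprVariableCase: CamelCase, prefix 'k'

enum class SplitKind { kExact, kHist };    // EnumCase / EnumConstant: CamelCase, prefix 'k'

class NodeStats {                          // ClassCase: CamelCase
 public:
  double SumHessian() const;               // FunctionCase: CamelCase

 private:
  double sum_hess_{0.0};                   // MemberCase: lower_case, PrivateMemberSuffix '_'
};

}  // namespace tree_util
```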
**`.github/FUNDING.yml`** (new file, 1 addition)

```diff
@@ -0,0 +1 @@
+open_collective: xgboost
```
**`.gitignore`** (21 changes)

```diff
@@ -17,7 +17,7 @@
 *.tar.gz
 *conf
 *buffer
-*model
+*.model
 *pyc
 *.train
 *.test
@@ -65,14 +65,11 @@ nb-configuration*
 .pydevproject
 .settings/
 build
 config.mk
 /xgboost
 *.data
 build_plugin
-.idea
 recommonmark/
 tags
-*.iml
 *.class
 target
 *.swp
@@ -90,5 +87,19 @@ lib/
 # spark
 metastore_db

 plugin/updater_gpu/test/cpp/data
+/include/xgboost/build_config.h
+
+# files from R-package source install
+**/config.status
+R-package/src/Makevars
+
+# Visual Studio Code
+/.vscode/
+
+# IntelliJ/CLion
+.idea
+*.iml
+/cmake-build-debug/
+
+# GDB
+.gdb_history
```
**`.travis.yml`** (73 changes)

```diff
@@ -1,80 +1,55 @@
 # disable sudo for container build.
 sudo: required

-# Enabling test on Linux and OS X
+# Enabling test OS X
 os:
   - linux
   - osx

-osx_image: xcode9.3
+osx_image: xcode10.1
+dist: bionic

 # Use Build Matrix to do lint and build seperately
 env:
   matrix:
     # code lint
     - TASK=lint
     # r package test
     - TASK=r_test
     # python package test
     - TASK=python_test
     - TASK=python_lightweight_test
     # test installation of Python source distribution
     - TASK=python_sdist_test
     # java package test
     - TASK=java_test
     # cmake test
     - TASK=cmake_test
     # c++ test
     - TASK=cpp_test
     # distributed test
     - TASK=distributed_test
     # address sanitizer test
     - TASK=sanitizer_test

   global:
     - secure: "PR16i9F8QtNwn99C5NDp8nptAS+97xwDtXEJJfEiEVhxPaaRkOp0MPWhogCaK0Eclxk1TqkgWbdXFknwGycX620AzZWa/A1K3gAs+GrpzqhnPMuoBJ0Z9qxXTbSJvCyvMbYwVrjaxc/zWqdMU8waWz8A7iqKGKs/SqbQ3rO6v7c="
     - secure: "dAGAjBokqm/0nVoLMofQni/fWIBcYSmdq4XvCBX1ZAMDsWnuOfz/4XCY6h2lEI1rVHZQ+UdZkc9PioOHGPZh5BnvE49/xVVWr9c4/61lrDOlkD01ZjSAeoV0fAZq+93V/wPl4QV+MM+Sem9hNNzFSbN5VsQLAiWCSapWsLdKzqA="

 matrix:
   exclude:
     - os: osx
       env: TASK=lint
     - os: osx
       env: TASK=cmake_test
     - os: linux
       env: TASK=r_test
     - os: osx
       env: TASK=python_lightweight_test
     - os: osx
       env: TASK=cpp_test
     - os: osx
       env: TASK=distributed_test
     - os: osx
       env: TASK=sanitizer_test
       env: TASK=python_test
     - os: linux
       env: TASK=java_test
     - os: linux
       env: TASK=cmake_test

-# dependent apt packages
+# dependent brew packages
 addons:
   apt:
     sources:
       - llvm-toolchain-trusty-5.0
       - ubuntu-toolchain-r-test
       - george-edison55-precise-backports
     packages:
       - clang
       - clang-tidy-5.0
       - cmake-data
       - doxygen
       - wget
       - libcurl4-openssl-dev
       - unzip
       - graphviz
       - gcc-4.8
       - g++-4.8
       - gcc-7
       - g++-7
   homebrew:
     packages:
       - gcc@7
       - cmake
       - libomp
       - graphviz
       - openssl
       - libgit2
       - wget
       - r
     update: true

 before_install:
-  - source dmlc-core/scripts/travis/travis_setup_env.sh
-  - export PYTHONPATH=${PYTHONPATH}:${PWD}/python-package
+  - source tests/travis/travis_setup_env.sh
+  - if [ "${TASK}" != "python_sdist_test" ]; then export PYTHONPATH=${PYTHONPATH}:${PWD}/python-package; fi
   - echo "MAVEN_OPTS='-Xmx2g -XX:MaxPermSize=1024m -XX:ReservedCodeCacheSize=512m -Dorg.slf4j.simpleLogger.defaultLogLevel=error'" > ~/.mavenrc

 install:
@@ -89,7 +64,7 @@ cache:
   - ${HOME}/.cache/pip

 before_cache:
-  - dmlc-core/scripts/travis/travis_before_cache.sh
+  - tests/travis/travis_before_cache.sh

 after_failure:
   - tests/travis/travis_after_failure.sh
```
**`CMakeLists.txt`** (530 changes)

```diff
@@ -1,344 +1,276 @@
-cmake_minimum_required (VERSION 3.2)
-project(xgboost)
+cmake_minimum_required(VERSION 3.13)
+project(xgboost LANGUAGES CXX C VERSION 1.1.0)
 include(cmake/Utils.cmake)
-list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake/modules")
-find_package(OpenMP)
+list(APPEND CMAKE_MODULE_PATH "${xgboost_SOURCE_DIR}/cmake/modules")
 cmake_policy(SET CMP0022 NEW)
 cmake_policy(SET CMP0079 NEW)
 cmake_policy(SET CMP0063 NEW)

 if ((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUAL 3.13))
   cmake_policy(SET CMP0077 NEW)
 endif ((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUAL 3.13))

 message(STATUS "CMake version ${CMAKE_VERSION}")

 if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0)
   message(FATAL_ERROR "GCC version must be at least 5.0!")
 endif()

 include(${xgboost_SOURCE_DIR}/cmake/FindPrefetchIntrinsics.cmake)
 find_prefetch_intrinsics()
 include(${xgboost_SOURCE_DIR}/cmake/Version.cmake)
 write_version()
 set_default_configuration_release()
 msvc_use_static_runtime()

 # Options
 ## GPUs
 option(USE_CUDA "Build with GPU acceleration" OFF)
 option(USE_NCCL "Build with multiple GPUs support" OFF)
 set(GPU_COMPUTE_VER "" CACHE STRING
   "Space separated list of compute versions to be built against, e.g. '35 61'")

 #-- Options
 option(BUILD_C_DOC "Build documentation for C APIs using Doxygen." OFF)
 option(USE_OPENMP "Build with OpenMP support." ON)
 option(BUILD_STATIC_LIB "Build static library" OFF)
 ## Bindings
 option(JVM_BINDINGS "Build JVM bindings" OFF)
 option(R_LIB "Build shared library for R package" OFF)

-## Devs
+## Dev
 option(USE_DEBUG_OUTPUT "Dump internal training results like gradients and predictions to stdout.
   Should only be used for debugging." OFF)
 option(GOOGLE_TEST "Build google tests" OFF)
 option(USE_DMLC_GTEST "Use google tests bundled with dmlc-core submodule" OFF)
 option(USE_NVTX "Build with cuda profiling annotations. Developers only." OFF)
 set(NVTX_HEADER_DIR "" CACHE PATH "Path to the stand-alone nvtx header")
 option(RABIT_MOCK "Build rabit with mock" OFF)
 option(HIDE_CXX_SYMBOLS "Build shared library and hide all C++ symbols" OFF)
 ## CUDA
 option(USE_CUDA "Build with GPU acceleration" OFF)
 option(USE_NCCL "Build with NCCL to enable distributed GPU support." OFF)
 option(BUILD_WITH_SHARED_NCCL "Build with shared NCCL library." OFF)
 set(GPU_COMPUTE_VER "" CACHE STRING
   "Semicolon separated list of compute versions to be built against, e.g. '35;61'")
 ## Copied From dmlc
 option(USE_HDFS "Build with HDFS support" OFF)
 option(USE_AZURE "Build with AZURE support" OFF)
 option(USE_S3 "Build with S3 support" OFF)
 ## Sanitizers
 option(USE_SANITIZER "Use santizer flags" OFF)
 option(SANITIZER_PATH "Path to sanitizes.")
 set(ENABLED_SANITIZERS "address" "leak" CACHE STRING
   "Semicolon separated list of sanitizer names. E.g 'address;leak'. Supported sanitizers are
-  address, leak and thread.")
+  address, leak, undefined and thread.")
 option(GOOGLE_TEST "Build google tests" OFF)

-# Plugins
+## Plugins
 option(PLUGIN_LZ4 "Build lz4 plugin" OFF)
 option(PLUGIN_DENSE_PARSER "Build dense parser plugin" OFF)

 # Deprecation warning
 if(USE_AVX)
   message(WARNING "The option 'USE_AVX' is deprecated as experimental AVX features have been removed from xgboost.")
 endif()
 #-- Checks for building XGBoost
 if (USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug)))
   message(SEND_ERROR "Do not enable `USE_DEBUG_OUTPUT' with release build.")
 endif (USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug)))
 if (USE_NCCL AND NOT (USE_CUDA))
   message(SEND_ERROR "`USE_NCCL` must be enabled with `USE_CUDA` flag.")
 endif (USE_NCCL AND NOT (USE_CUDA))
 if (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
   message(SEND_ERROR "Build XGBoost with -DUSE_NCCL=ON to enable BUILD_WITH_SHARED_NCCL.")
 endif (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
 if (JVM_BINDINGS AND R_LIB)
   message(SEND_ERROR "`R_LIB' is not compatible with `JVM_BINDINGS' as they both have customized configurations.")
 endif (JVM_BINDINGS AND R_LIB)
 if (R_LIB AND GOOGLE_TEST)
   message(WARNING "Some C++ unittests will fail with `R_LIB` enabled,
   as R package redirects some functions to R runtime implementation.")
 endif (R_LIB AND GOOGLE_TEST)
 if (USE_AVX)
   message(SEND_ERROR "The option 'USE_AVX' is deprecated as experimental AVX features have been removed from XGBoost.")
 endif (USE_AVX)

 # Compiler flags
 set(CMAKE_CXX_STANDARD 11)
 set(CMAKE_CXX_STANDARD_REQUIRED ON)
 if(OpenMP_CXX_FOUND OR OPENMP_FOUND)
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
 endif()
 set(CMAKE_POSITION_INDEPENDENT_CODE ON)
 if(MSVC)
   # Multithreaded compilation
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP")
 else()
   # Correct error for GCC 5 and cuda
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_MWAITXINTRIN_H_INCLUDED -D_FORCE_INLINES")
   # Performance
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -funroll-loops")
 endif()
 if(WIN32 AND MINGW)
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -static-libstdc++")
 endif()

 # Check existence of software pre-fetching
 include(CheckCXXSourceCompiles)
 check_cxx_source_compiles("
 #include <xmmintrin.h>
 int main() {
   char data = 0;
   const char* address = &data;
   _mm_prefetch(address, _MM_HINT_NTA);
   return 0;
 }
 " XGBOOST_MM_PREFETCH_PRESENT)
 check_cxx_source_compiles("
 int main() {
   char data = 0;
   const char* address = &data;
   __builtin_prefetch(address, 0, 0);
   return 0;
 }
 " XGBOOST_BUILTIN_PREFETCH_PRESENT)
```
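The two `check_cxx_source_compiles` probes record whether `_mm_prefetch` or `__builtin_prefetch` compiles on the host toolchain, and the results feed the generated `build_config.h` shown earlier. A minimal sketch of how such detected intrinsics are typically consumed, assuming the generated header defines macros named after the check variables (the macro plumbing here is an assumption, not the repository's actual header):

```cpp
#if defined(XGBOOST_MM_PREFETCH_PRESENT)
#include <xmmintrin.h>  // _mm_prefetch, _MM_HINT_NTA
#endif

// Hint the CPU to pull `address` into cache ahead of use.
// Falls back to a no-op when neither intrinsic was detected at configure time.
inline void PrefetchHint(const void* address) {
#if defined(XGBOOST_MM_PREFETCH_PRESENT)
  _mm_prefetch(static_cast<const char*>(address), _MM_HINT_NTA);
#elif defined(XGBOOST_BUILTIN_PREFETCH_PRESENT)
  __builtin_prefetch(address, 0, 0);  // read access, low temporal locality
#else
  (void)address;  // no prefetch support available
#endif
}
```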
```diff
-# Sanitizer
-if(USE_SANITIZER)
+#-- Sanitizer
+if (USE_SANITIZER)
   include(cmake/Sanitizer.cmake)
   enable_sanitizers("${ENABLED_SANITIZERS}")
-endif(USE_SANITIZER)
+endif (USE_SANITIZER)
```
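`enable_sanitizers` turns on compiler instrumentation such as `-fsanitize=address` for the whole build. For context, this is the kind of defect an address-sanitized `testxgboost` binary aborts on at runtime; the snippet is a contrived example, unrelated to the real test suite:

```cpp
#include <vector>

int main() {
  std::vector<int> grad(10, 0);
  // Heap-buffer-overflow: reads one element past the end. An address-sanitized
  // build reports the bad read with a stack trace and exits non-zero; a plain
  // release build may silently return garbage instead.
  return grad.data()[10];
}
```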
```diff
 if (USE_CUDA)
   SET(USE_OPENMP ON CACHE BOOL "CUDA requires OpenMP" FORCE)
   # `export CXX=' is ignored by CMake CUDA.
   set(CMAKE_CUDA_HOST_COMPILER ${CMAKE_CXX_COMPILER})
   message(STATUS "Configured CUDA host compiler: ${CMAKE_CUDA_HOST_COMPILER}")

   enable_language(CUDA)
   set(GEN_CODE "")
   format_gencode_flags("${GPU_COMPUTE_VER}" GEN_CODE)
   message(STATUS "CUDA GEN_CODE: ${GEN_CODE}")
 endif (USE_CUDA)

 find_package(Threads REQUIRED)

 if (USE_OPENMP)
   if (APPLE)
     # Require CMake 3.16+ on Mac OSX, as previous versions of CMake had trouble locating
     # OpenMP on Mac. See https://github.com/dmlc/xgboost/pull/5146#issuecomment-568312706
     cmake_minimum_required(VERSION 3.16)
   endif (APPLE)
   find_package(OpenMP REQUIRED)
 endif (USE_OPENMP)

 # dmlc-core
 add_subdirectory(dmlc-core)
 set(LINK_LIBRARIES dmlc rabit)

 # enable custom logging
 add_definitions(-DDMLC_LOG_CUSTOMIZE=1)

 # compiled code customizations for R package
 if(R_LIB)
   add_definitions(
     -DXGBOOST_STRICT_R_MODE=1
     -DXGBOOST_CUSTOMIZE_GLOBAL_PRNG=1
     -DDMLC_LOG_BEFORE_THROW=0
     -DDMLC_DISABLE_STDIN=1
     -DDMLC_LOG_CUSTOMIZE=1
     -DRABIT_CUSTOMIZE_MSG_
     -DRABIT_STRICT_CXX98_
   )
 endif()

 # Gather source files
 include_directories (
   ${PROJECT_SOURCE_DIR}/include
   ${PROJECT_SOURCE_DIR}/dmlc-core/include
   ${PROJECT_SOURCE_DIR}/rabit/include
 )

 # Generate configurable header
 set(CMAKE_LOCAL "${PROJECT_SOURCE_DIR}/cmake")
 set(INCLUDE_ROOT "${PROJECT_SOURCE_DIR}/include")
 message(STATUS "${CMAKE_LOCAL}/build_config.h.in -> ${INCLUDE_ROOT}/xgboost/build_config.h")
 configure_file("${CMAKE_LOCAL}/build_config.h.in" "${INCLUDE_ROOT}/xgboost/build_config.h")

 file(GLOB_RECURSE SOURCES
   src/*.cc
   src/*.h
   include/*.h
 )

 # Only add main function for executable target
 list(REMOVE_ITEM SOURCES ${PROJECT_SOURCE_DIR}/src/cli_main.cc)

 file(GLOB_RECURSE CUDA_SOURCES
   src/*.cu
   src/*.cuh
 )

 # Add plugins to source files
 if(PLUGIN_LZ4)
   list(APPEND SOURCES plugin/lz4/sparse_page_lz4_format.cc)
   link_libraries(lz4)
 endif()
 if(PLUGIN_DENSE_PARSER)
   list(APPEND SOURCES plugin/dense_parser/dense_libsvm.cc)
 endif()
 msvc_use_static_runtime()
 add_subdirectory(${xgboost_SOURCE_DIR}/dmlc-core)
 set_target_properties(dmlc PROPERTIES
   CXX_STANDARD 11
   CXX_STANDARD_REQUIRED ON
   POSITION_INDEPENDENT_CODE ON)
 list(APPEND LINKED_LIBRARIES_PRIVATE dmlc)

 # rabit
 # TODO: Use CMakeLists.txt from rabit.
 set(RABIT_SOURCES
   rabit/src/allreduce_base.cc
   rabit/src/allreduce_robust.cc
   rabit/src/engine.cc
   rabit/src/c_api.cc
 )
 set(RABIT_EMPTY_SOURCES
   rabit/src/engine_empty.cc
   rabit/src/c_api.cc
 )
 set(RABIT_BUILD_DMLC OFF)
 set(DMLC_ROOT ${xgboost_SOURCE_DIR}/dmlc-core)
 set(RABIT_WITH_R_LIB ${R_LIB})
 add_subdirectory(rabit)

 if(MINGW OR R_LIB)
   # build a dummy rabit library
   add_library(rabit STATIC ${RABIT_EMPTY_SOURCES})
 if (RABIT_MOCK)
   list(APPEND LINKED_LIBRARIES_PRIVATE rabit_mock_static)
 else()
   add_library(rabit STATIC ${RABIT_SOURCES})
 endif()
   list(APPEND LINKED_LIBRARIES_PRIVATE rabit)
 endif(RABIT_MOCK)
 foreach(lib rabit rabit_base rabit_empty rabit_mock rabit_mock_static)
   # Explicitly link dmlc to rabit, so that configured header (build_config.h)
   # from dmlc is correctly applied to rabit.
   if (TARGET ${lib})
     target_link_libraries(${lib} dmlc ${CMAKE_THREAD_LIBS_INIT})
     if (HIDE_CXX_SYMBOLS) # Hide all C++ symbols from Rabit
       set_target_properties(${lib} PROPERTIES CXX_VISIBILITY_PRESET hidden)
     endif (HIDE_CXX_SYMBOLS)
   endif (TARGET ${lib})
 endforeach()

 if (GENERATE_COMPILATION_DATABASE)
   set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
 endif (GENERATE_COMPILATION_DATABASE)
 # Exports some R specific definitions and objects
 if (R_LIB)
   add_subdirectory(${xgboost_SOURCE_DIR}/R-package)
 endif (R_LIB)

 if(USE_CUDA AND (NOT GENERATE_COMPILATION_DATABASE))
   find_package(CUDA 8.0 REQUIRED)
   cmake_minimum_required(VERSION 3.5)
 # core xgboost
 list(APPEND LINKED_LIBRARIES_PRIVATE Threads::Threads ${CMAKE_THREAD_LIBS_INIT})
 add_subdirectory(${xgboost_SOURCE_DIR}/plugin)
 add_subdirectory(${xgboost_SOURCE_DIR}/src)
 target_link_libraries(objxgboost PUBLIC dmlc)
 set(XGBOOST_OBJ_SOURCES "${XGBOOST_OBJ_SOURCES};$<TARGET_OBJECTS:objxgboost>")

   add_definitions(-DXGBOOST_USE_CUDA)
 #-- library
 if (BUILD_STATIC_LIB)
   add_library(xgboost STATIC ${XGBOOST_OBJ_SOURCES})
 else (BUILD_STATIC_LIB)
   add_library(xgboost SHARED ${XGBOOST_OBJ_SOURCES})
 endif (BUILD_STATIC_LIB)

   include_directories(cub)
 #-- Hide all C++ symbols
 if (HIDE_CXX_SYMBOLS)
   set_target_properties(objxgboost PROPERTIES CXX_VISIBILITY_PRESET hidden)
   set_target_properties(xgboost PROPERTIES CXX_VISIBILITY_PRESET hidden)
 endif (HIDE_CXX_SYMBOLS)

   if(USE_NCCL)
     find_package(Nccl REQUIRED)
     cuda_include_directories(${NCCL_INCLUDE_DIR})
     add_definitions(-DXGBOOST_USE_NCCL)
   endif()
 target_include_directories(xgboost
   INTERFACE
   $<INSTALL_INTERFACE:${CMAKE_INSTALL_PREFIX}/include>
   $<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}/include>)
 target_link_libraries(xgboost PRIVATE ${LINKED_LIBRARIES_PRIVATE})

   set(GENCODE_FLAGS "")
   format_gencode_flags("${GPU_COMPUTE_VER}" GENCODE_FLAGS)
   message("cuda architecture flags: ${GENCODE_FLAGS}")
 # This creates its own shared library `xgboost4j'.
 if (JVM_BINDINGS)
   add_subdirectory(${xgboost_SOURCE_DIR}/jvm-packages)
 endif (JVM_BINDINGS)
 #-- End shared library

   set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS};--expt-extended-lambda;--expt-relaxed-constexpr;${GENCODE_FLAGS};-lineinfo;")
   if(NOT MSVC)
     set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS};-Xcompiler -fPIC; -Xcompiler -Werror; -std=c++11")
   endif()
 #-- CLI for xgboost
 add_executable(runxgboost ${xgboost_SOURCE_DIR}/src/cli_main.cc ${XGBOOST_OBJ_SOURCES})

   cuda_add_library(gpuxgboost ${CUDA_SOURCES} STATIC)
 target_include_directories(runxgboost
   PRIVATE
   ${xgboost_SOURCE_DIR}/include
   ${xgboost_SOURCE_DIR}/dmlc-core/include
   ${xgboost_SOURCE_DIR}/rabit/include)
 target_link_libraries(runxgboost PRIVATE ${LINKED_LIBRARIES_PRIVATE})
 set_target_properties(
   runxgboost PROPERTIES
   OUTPUT_NAME xgboost
   CXX_STANDARD 11
   CXX_STANDARD_REQUIRED ON)
 #-- End CLI for xgboost

   if(USE_NCCL)
     link_directories(${NCCL_LIBRARY})
     target_link_libraries(gpuxgboost ${NCCL_LIB_NAME})
   endif()
   list(APPEND LINK_LIBRARIES gpuxgboost)
 set_output_directory(runxgboost ${xgboost_SOURCE_DIR})
 set_output_directory(xgboost ${xgboost_SOURCE_DIR}/lib)
 # Ensure these two targets do not build simultaneously, as they produce outputs with conflicting names
 add_dependencies(xgboost runxgboost)

 elseif (USE_CUDA AND GENERATE_COMPILATION_DATABASE)
   # Enable CUDA language to generate a compilation database.
   cmake_minimum_required(VERSION 3.8)

   find_package(CUDA 8.0 REQUIRED)
   enable_language(CUDA)
   set(CMAKE_CUDA_COMPILER clang++)
   set(CUDA_SEPARABLE_COMPILATION ON)
   if (NOT CLANG_CUDA_GENCODE)
     set(CLANG_CUDA_GENCODE "--cuda-gpu-arch=sm_35")
   endif (NOT CLANG_CUDA_GENCODE)
   set(CMAKE_CUDA_FLAGS " -Wno-deprecated ${CLANG_CUDA_GENCODE} -fPIC ${GENCODE} -std=c++11 -x cuda")
   message(STATUS "CMAKE_CUDA_FLAGS: ${CMAKE_CUDA_FLAGS}")

   add_library(gpuxgboost STATIC ${CUDA_SOURCES})

   if(USE_NCCL)
     find_package(Nccl REQUIRED)
     target_include_directories(gpuxgboost PUBLIC ${NCCL_INCLUDE_DIR})
     target_compile_definitions(gpuxgboost PUBLIC -DXGBOOST_USE_NCCL)
     target_link_libraries(gpuxgboost PUBLIC ${NCCL_LIB_NAME})
   endif()

   target_compile_definitions(gpuxgboost PUBLIC -DXGBOOST_USE_CUDA)
   # A hack for CMake to make arguments valid for clang++
   string(REPLACE "-x cu" "-x cuda" CMAKE_CUDA_COMPILE_PTX_COMPILATION
     ${CMAKE_CUDA_COMPILE_PTX_COMPILATION})
   string(REPLACE "-x cu" "-x cuda" CMAKE_CUDA_COMPILE_WHOLE_COMPILATION
     ${CMAKE_CUDA_COMPILE_WHOLE_COMPILATION})
   string(REPLACE "-x cu" "-x cuda" CMAKE_CUDA_COMPILE_SEPARABLE_COMPILATION
     ${CMAKE_CUDA_COMPILE_SEPARABLE_COMPILATION})
   target_include_directories(gpuxgboost PUBLIC cub)
 endif()

 # flags and sources for R-package
 if(R_LIB)
   file(GLOB_RECURSE R_SOURCES
     R-package/src/*.h
     R-package/src/*.c
     R-package/src/*.cc
   )
   list(APPEND SOURCES ${R_SOURCES})
 endif()

 add_library(objxgboost OBJECT ${SOURCES})

 # building shared library for R package
 if(R_LIB)
   find_package(LibR REQUIRED)

   list(APPEND LINK_LIBRARIES "${LIBR_CORE_LIBRARY}")
   MESSAGE(STATUS "LIBR_CORE_LIBRARY " ${LIBR_CORE_LIBRARY})

   # Shared library target for the R package
   add_library(xgboost SHARED $<TARGET_OBJECTS:objxgboost>)
   include_directories(xgboost
     "${LIBR_INCLUDE_DIRS}"
     "${PROJECT_SOURCE_DIR}"
   )

   target_link_libraries(xgboost ${LINK_LIBRARIES})
   # R uses no lib prefix in shared library names of its packages
 #-- Installing XGBoost
 if (R_LIB)
   set_target_properties(xgboost PROPERTIES PREFIX "")
-  if(APPLE)
+  if (APPLE)
     set_target_properties(xgboost PROPERTIES SUFFIX ".so")
-  endif()
+  endif (APPLE)
   setup_rpackage_install_target(xgboost ${CMAKE_CURRENT_BINARY_DIR})
   # use a dummy location for any other remaining installs
   set(CMAKE_INSTALL_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/dummy_inst")
 endif (R_LIB)
 if (MINGW)
   set_target_properties(xgboost PROPERTIES PREFIX "")
 endif (MINGW)

 # main targets: shared library & exe
 else()
   # Executable
   add_executable(runxgboost $<TARGET_OBJECTS:objxgboost> src/cli_main.cc)
   set_target_properties(runxgboost PROPERTIES
     OUTPUT_NAME xgboost
   )
   set_output_directory(runxgboost ${PROJECT_SOURCE_DIR})
   target_link_libraries(runxgboost ${LINK_LIBRARIES})
 if (BUILD_C_DOC)
   include(cmake/Doc.cmake)
   run_doxygen()
 endif (BUILD_C_DOC)

   # Shared library
   add_library(xgboost SHARED $<TARGET_OBJECTS:objxgboost>)
   target_link_libraries(xgboost ${LINK_LIBRARIES})
   set_output_directory(xgboost ${PROJECT_SOURCE_DIR}/lib)
   if(MINGW)
     # remove the 'lib' prefix to conform to windows convention for shared library names
     set_target_properties(xgboost PROPERTIES PREFIX "")
   endif()
 include(GNUInstallDirs)
 # Install all headers. Please note that currently the C++ headers does not form an "API".
 install(DIRECTORY ${xgboost_SOURCE_DIR}/include/xgboost
   DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})

 # Ensure these two targets do not build simultaneously, as they produce outputs with conflicting names
   add_dependencies(xgboost runxgboost)
 endif()
 install(TARGETS xgboost runxgboost
   EXPORT XGBoostTargets
   ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
   LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
   RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
   INCLUDES DESTINATION ${LIBLEGACY_INCLUDE_DIRS})
 install(EXPORT XGBoostTargets
   FILE XGBoostTargets.cmake
   NAMESPACE xgboost::
   DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/xgboost)
```
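With the targets and namespace exported as above, downstream code links the installed library and uses the C API headers from `include/xgboost` (the C++ headers, as the comment above notes, are not a stable API). A minimal sketch of a consumer, under the assumption that the library was installed to a standard prefix and that the installed C API exposes the `XGBoostVersion` function:

```cpp
// Hypothetical downstream file demo.cc; build with: c++ demo.cc -lxgboost
#include <xgboost/c_api.h>  // installed by the DIRECTORY install rule above

#include <cstdio>

int main() {
  int major = 0, minor = 0, patch = 0;
  // Query the runtime version of the linked library (assumed C API call).
  XGBoostVersion(&major, &minor, &patch);
  std::printf("linked against XGBoost %d.%d.%d\n", major, minor, patch);
  return 0;
}
```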
```diff
 # JVM
 if(JVM_BINDINGS)
   find_package(JNI QUIET REQUIRED)
 include(CMakePackageConfigHelpers)
 configure_package_config_file(
   ${CMAKE_CURRENT_LIST_DIR}/cmake/xgboost-config.cmake.in
   ${CMAKE_CURRENT_BINARY_DIR}/cmake/xgboost-config.cmake
   INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/xgboost)
 write_basic_package_version_file(
   ${CMAKE_BINARY_DIR}/cmake/xgboost-config-version.cmake
   VERSION ${XGBOOST_VERSION}
   COMPATIBILITY AnyNewerVersion)
 install(
   FILES
   ${CMAKE_BINARY_DIR}/cmake/xgboost-config.cmake
   ${CMAKE_BINARY_DIR}/cmake/xgboost-config-version.cmake
   DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/xgboost)

   add_library(xgboost4j SHARED
     $<TARGET_OBJECTS:objxgboost>
     jvm-packages/xgboost4j/src/native/xgboost4j.cpp)
   target_include_directories(xgboost4j
     PRIVATE ${JNI_INCLUDE_DIRS}
     PRIVATE jvm-packages/xgboost4j/src/native)
   target_link_libraries(xgboost4j
     ${LINK_LIBRARIES}
     ${JAVA_JVM_LIBRARY})
   set_output_directory(xgboost4j ${PROJECT_SOURCE_DIR}/lib)
 endif()

-# Test
-if(GOOGLE_TEST)
+#-- Test
+if (GOOGLE_TEST)
   enable_testing()
   find_package(GTest REQUIRED)
   # Unittests.
   add_subdirectory(${xgboost_SOURCE_DIR}/tests/cpp)
   add_test(
     NAME TestXGBoostLib
     COMMAND testxgboost
     WORKING_DIRECTORY ${xgboost_BINARY_DIR})

   file(GLOB_RECURSE TEST_SOURCES "tests/cpp/*.cc")
   auto_source_group("${TEST_SOURCES}")
   # CLI tests
   configure_file(
     ${xgboost_SOURCE_DIR}/tests/cli/machine.conf.in
     ${xgboost_BINARY_DIR}/tests/cli/machine.conf
     @ONLY)
   add_test(
     NAME TestXGBoostCLI
     COMMAND runxgboost ${xgboost_BINARY_DIR}/tests/cli/machine.conf
     WORKING_DIRECTORY ${xgboost_BINARY_DIR})
   set_tests_properties(TestXGBoostCLI
     PROPERTIES
     PASS_REGULAR_EXPRESSION ".*test-rmse:0.087.*")
 endif (GOOGLE_TEST)

 if(USE_CUDA AND (NOT GENERATE_COMPILATION_DATABASE))
   file(GLOB_RECURSE CUDA_TEST_SOURCES "tests/cpp/*.cu")
   cuda_include_directories(${GTEST_INCLUDE_DIRS})
   cuda_compile(CUDA_TEST_OBJS ${CUDA_TEST_SOURCES})
 elseif (USE_CUDA AND GENERATE_COMPILATION_DATABASE)
   file(GLOB_RECURSE CUDA_TEST_SOURCES "tests/cpp/*.cu")
 else()
   set(CUDA_TEST_OBJS "")
 endif()

 if (USE_CUDA AND GENERATE_COMPILATION_DATABASE)
   add_executable(testxgboost ${TEST_SOURCES} ${CUDA_TEST_SOURCES}
     $<TARGET_OBJECTS:objxgboost>)
   target_include_directories(testxgboost PRIVATE cub)
 else ()
   add_executable(testxgboost ${TEST_SOURCES} ${CUDA_TEST_OBJS}
     $<TARGET_OBJECTS:objxgboost>)
 endif ()

 set_output_directory(testxgboost ${PROJECT_SOURCE_DIR})
 target_include_directories(testxgboost
   PRIVATE ${GTEST_INCLUDE_DIRS})
 target_link_libraries(testxgboost ${GTEST_LIBRARIES} ${LINK_LIBRARIES})

 add_test(TestXGBoost testxgboost)
 endif()

 # Group sources
 auto_source_group("${SOURCES}")
 # For MSVC: Call msvc_use_static_runtime() once again to completely
 # replace /MD with /MT. See https://github.com/dmlc/xgboost/issues/4462
 # for issues caused by mixing of /MD and /MT flags
 msvc_use_static_runtime()
```
```diff
@@ -2,34 +2,42 @@ Contributors of DMLC/XGBoost
 ============================
 XGBoost has been developed and is used by a group of active community members. Everyone is more than welcome to contribute; it is a great way to make the project better and more accessible to more users.

-Committers
+Project Management Committee(PMC)
 ----------
-Committers are people who have made substantial contribution to the project and granted write access to the project.
+The Project Management Committee(PMC) consists of a group of active committers that moderate the discussion, manage the project release, and propose new committer/PMC members.

 * [Tianqi Chen](https://github.com/tqchen), University of Washington
   - Tianqi is a Ph.D. student working on large-scale machine learning. He is the creator of the project.
 * [Tong He](https://github.com/hetong007), Amazon AI
   - Tong is an applied scientist in Amazon AI. He is the maintainer of XGBoost R package.
 * [Vadim Khotilovich](https://github.com/khotilov)
   - Vadim contributes many improvements in R and core packages.
 * [Bing Xu](https://github.com/antinucleon)
   - Bing is the original creator of XGBoost Python package and currently the maintainer of [XGBoost.jl](https://github.com/antinucleon/XGBoost.jl).
 * [Michael Benesty](https://github.com/pommedeterresautee)
   - Michael is a lawyer and data scientist in France. He is the creator of XGBoost interactive analysis module in R.
 * [Yuan Tang](https://github.com/terrytangyuan), Ant Financial
   - Yuan is a software engineer in Ant Financial. He contributed mostly in R and Python packages.
 * [Nan Zhu](https://github.com/CodingCat), Uber
   - Nan is a software engineer in Uber. He contributed mostly in JVM packages.
 * [Sergei Lebedev](https://github.com/superbobry), Criteo
   - Sergei is a software engineer in Criteo. He contributed mostly in JVM packages.
 * [Hongliang Liu](https://github.com/phunterlau)
 * [Scott Lundberg](http://scottlundberg.com/), University of Washington
   - Scott is a Ph.D. student at University of Washington. He is the creator of SHAP, a unified approach to explain the output of machine learning models such as decision tree ensembles. He also helps maintain the XGBoost Julia package.
 * [Jiaming Yuan](https://github.com/trivialfis)
   - Jiaming contributed to the GPU algorithms. He has also introduced new abstractions to improve the quality of the C++ codebase.
 * [Hyunsu Cho](http://hyunsu-cho.io/), NVIDIA
   - Hyunsu is the maintainer of the XGBoost Python package. He also manages the Jenkins continuous integration system (https://xgboost-ci.net/). He is the initial author of the CPU 'hist' updater.
 * [Rory Mitchell](https://github.com/RAMitchell), University of Waikato
   - Rory is a Ph.D. student at University of Waikato. He is the original creator of the GPU training algorithms. He improved the CMake build system and continuous integration.
 * [Hyunsu Cho](http://hyunsu-cho.io/), Amazon AI
   - Hyunsu is an applied scientist in Amazon AI. He is the maintainer of the XGBoost Python package. He also manages the Jenkins continuous integration system (https://xgboost-ci.net/). He is the initial author of the CPU 'hist' updater.
 * [Jiaming](https://github.com/trivialfis)
   - Jiaming contributed to the GPU algorithms. He has also introduced new abstractions to improve the quality of the C++ codebase.
 * [Hongliang Liu](https://github.com/phunterlau)

 Committers
 ----------
 Committers are people who have made substantial contribution to the project and granted write access to the project.

 * [Tong He](https://github.com/hetong007), Amazon AI
   - Tong is an applied scientist in Amazon AI. He is the maintainer of XGBoost R package.
 * [Vadim Khotilovich](https://github.com/khotilov)
   - Vadim contributes many improvements in R and core packages.
 * [Bing Xu](https://github.com/antinucleon)
   - Bing is the original creator of XGBoost Python package and currently the maintainer of [XGBoost.jl](https://github.com/antinucleon/XGBoost.jl).
 * [Sergei Lebedev](https://github.com/superbobry), Criteo
   - Sergei is a software engineer in Criteo. He contributed mostly in JVM packages.
 * [Scott Lundberg](http://scottlundberg.com/), University of Washington
   - Scott is a Ph.D. student at University of Washington. He is the creator of SHAP, a unified approach to explain the output of machine learning models such as decision tree ensembles. He also helps maintain the XGBoost Julia package.

 Become a Committer
 ------------------
@@ -88,3 +96,9 @@ List of Contributors
 * [Chen Qin](https://github.com/chenqin)
 * [Sam Wilkinson](https://samwilkinson.io)
 * [Matthew Jones](https://github.com/mt-jones)
+* [Jiaxiang Li](https://github.com/JiaxiangBU)
+* [Bryan Woods](https://github.com/bryan-woods)
+  - Bryan added support for cross-validation for the ranking objective
+* [Haoda Fu](https://github.com/fuhaoda)
+* [Evan Kepner](https://github.com/EvanKepner)
+  - Evan Kepner added support for os.PathLike file paths in Python
```
536
Jenkinsfile
vendored
536
Jenkinsfile
vendored
@@ -3,125 +3,433 @@
|
||||
// Jenkins pipeline
|
||||
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
|
||||
|
||||
// Command to run command inside a docker container
|
||||
dockerRun = 'tests/ci_build/ci_build.sh'
|
||||
|
||||
import groovy.transform.Field
|
||||
|
||||
/* Unrestricted tasks: tasks that do NOT generate artifacts */
|
||||
|
||||
// Command to run command inside a docker container
|
||||
def dockerRun = 'tests/ci_build/ci_build.sh'
|
||||
// Utility functions
|
||||
@Field
|
||||
def utils
|
||||
|
||||
def buildMatrix = [
|
||||
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "9.2", "multiGpu": true],
|
||||
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "9.2" ],
|
||||
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
|
||||
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": false, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
|
||||
]
|
||||
def commit_id // necessary to pass a variable from one stage to another
|
||||
|
||||
pipeline {
|
||||
// Each stage specify its own agent
|
||||
agent none
|
||||
// Each stage specify its own agent
|
||||
agent none
|
||||
|
||||
// Setup common job properties
|
||||
options {
|
||||
ansiColor('xterm')
|
||||
timestamps()
|
||||
timeout(time: 120, unit: 'MINUTES')
|
||||
buildDiscarder(logRotator(numToKeepStr: '10'))
|
||||
}
|
||||
|
||||
// Build stages
|
||||
stages {
|
||||
stage('Jenkins: Get sources') {
|
||||
agent {
|
||||
label 'unrestricted'
|
||||
}
|
||||
steps {
|
||||
script {
|
||||
utils = load('tests/ci_build/jenkins_tools.Groovy')
|
||||
utils.checkoutSrcs()
|
||||
}
|
||||
stash name: 'srcs', excludes: '.git/'
|
||||
milestone label: 'Sources ready', ordinal: 1
|
||||
}
|
||||
}
|
||||
stage('Jenkins: Build & Test') {
|
||||
steps {
|
||||
script {
|
||||
parallel (buildMatrix.findAll{it['enabled']}.collectEntries{ c ->
|
||||
def buildName = utils.getBuildName(c)
|
||||
utils.buildFactory(buildName, c, false, this.&buildPlatformCmake)
|
||||
} + [ "clang-tidy" : { buildClangTidyJob() } ])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Build platform and test it via cmake.
|
||||
*/
|
||||
def buildPlatformCmake(buildName, conf, nodeReq, dockerTarget) {
|
||||
def opts = utils.cmakeOptions(conf)
|
||||
// Destination dir for artifacts
|
||||
def distDir = "dist/${buildName}"
|
||||
def dockerArgs = ""
|
||||
if (conf["withGpu"]) {
|
||||
dockerArgs = "--build-arg CUDA_VERSION=" + conf["cudaVersion"]
|
||||
}
|
||||
def test_suite = conf["withGpu"] ? (conf["multiGpu"] ? "mgpu" : "gpu") : "cpu"
|
||||
// Build node - this is returned result
|
||||
retry(1) {
|
||||
node(nodeReq) {
|
||||
unstash name: 'srcs'
|
||||
echo """
|
||||
|===== XGBoost CMake build =====
|
||||
| dockerTarget: ${dockerTarget}
|
||||
| cmakeOpts : ${opts}
|
||||
|=========================
|
||||
""".stripMargin('|')
|
||||
// Invoke command inside docker
|
||||
sh """
|
||||
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/build_via_cmake.sh ${opts}
|
||||
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/test_${test_suite}.sh
|
||||
"""
|
||||
if (!conf["multiGpu"]) {
|
||||
sh """
|
||||
${dockerRun} ${dockerTarget} ${dockerArgs} bash -c "cd python-package; rm -f dist/*; python setup.py bdist_wheel --universal"
|
||||
rm -rf "${distDir}"; mkdir -p "${distDir}/py"
|
||||
cp xgboost "${distDir}"
|
||||
cp -r python-package/dist "${distDir}/py"
|
||||
# Test the wheel for compatibility on a barebones CPU container
|
||||
${dockerRun} release ${dockerArgs} bash -c " \
|
||||
pip install --user python-package/dist/xgboost-*-none-any.whl && \
|
||||
pytest -v --fulltrace -s tests/python"
|
||||
# Test the wheel for compatibility on CUDA 10.0 container
|
||||
${dockerRun} gpu --build-arg CUDA_VERSION=10.0 bash -c " \
|
||||
pip install --user python-package/dist/xgboost-*-none-any.whl && \
|
||||
pytest -v -s --fulltrace -m '(not mgpu) and (not slow)' tests/python-gpu"
|
||||
"""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Run a clang-tidy job on a GPU machine
|
||||
*/
|
||||
def buildClangTidyJob() {
|
||||
def nodeReq = "linux && gpu && unrestricted"
|
||||
node(nodeReq) {
|
||||
unstash name: 'srcs'
|
||||
echo "Running clang-tidy job..."
|
||||
// Invoke command inside docker
|
||||
// Install Google Test and Python yaml
|
||||
dockerTarget = "clang_tidy"
|
||||
dockerArgs = "--build-arg CUDA_VERSION=9.2"
|
||||
sh """
|
||||
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/clang_tidy.sh
|
||||
"""
|
||||
}
|
||||
environment {
|
||||
DOCKER_CACHE_ECR_ID = '492475357299'
|
||||
DOCKER_CACHE_ECR_REGION = 'us-west-2'
|
||||
}
|
||||
|
||||
// Setup common job properties
|
||||
options {
|
||||
ansiColor('xterm')
|
||||
timestamps()
|
||||
timeout(time: 240, unit: 'MINUTES')
|
||||
buildDiscarder(logRotator(numToKeepStr: '10'))
|
||||
preserveStashes()
|
||||
}
|
||||
|
||||
// Build stages
|
||||
stages {
|
||||
stage('Jenkins Linux: Get sources') {
|
||||
agent { label 'linux && cpu' }
|
||||
steps {
|
||||
script {
|
||||
checkoutSrcs()
|
||||
commit_id = "${GIT_COMMIT}"
|
||||
}
|
||||
stash name: 'srcs'
|
||||
milestone ordinal: 1
|
||||
}
|
||||
}
|
||||
stage('Jenkins Linux: Formatting Check') {
|
||||
agent none
|
||||
steps {
|
||||
script {
|
||||
parallel ([
|
||||
'clang-tidy': { ClangTidy() },
|
||||
'lint': { Lint() },
|
||||
'sphinx-doc': { SphinxDoc() },
|
||||
'doxygen': { Doxygen() }
|
||||
])
|
||||
}
|
||||
milestone ordinal: 2
|
||||
}
|
||||
}
|
||||
stage('Jenkins Linux: Build') {
|
||||
agent none
|
||||
steps {
|
||||
script {
|
||||
parallel ([
|
||||
'build-cpu': { BuildCPU() },
|
||||
'build-cpu-rabit-mock': { BuildCPUMock() },
|
||||
'build-cpu-non-omp': { BuildCPUNonOmp() },
|
||||
'build-gpu-cuda10.0': { BuildCUDA(cuda_version: '10.0') },
|
||||
'build-gpu-cuda10.1': { BuildCUDA(cuda_version: '10.1') },
|
||||
'build-jvm-packages': { BuildJVMPackages(spark_version: '2.4.3') },
|
||||
'build-jvm-doc': { BuildJVMDoc() }
|
||||
])
|
||||
}
|
||||
milestone ordinal: 3
|
||||
}
|
||||
}
|
||||
stage('Jenkins Linux: Test') {
|
||||
agent none
|
||||
steps {
|
||||
script {
|
||||
parallel ([
|
||||
'test-python-cpu': { TestPythonCPU() },
|
||||
'test-python-gpu-cuda9.0': { TestPythonGPU(cuda_version: '9.0') },
|
||||
'test-python-gpu-cuda10.0': { TestPythonGPU(cuda_version: '10.0') },
|
||||
'test-python-gpu-cuda10.1': { TestPythonGPU(cuda_version: '10.1') },
|
||||
'test-python-mgpu-cuda10.1': { TestPythonGPU(cuda_version: '10.1', multi_gpu: true) },
|
||||
'test-cpp-gpu': { TestCppGPU(cuda_version: '10.1') },
|
||||
'test-cpp-mgpu': { TestCppGPU(cuda_version: '10.1', multi_gpu: true) },
|
||||
'test-jvm-jdk8': { CrossTestJVMwithJDK(jdk_version: '8', spark_version: '2.4.3') },
|
||||
'test-jvm-jdk11': { CrossTestJVMwithJDK(jdk_version: '11') },
|
||||
'test-jvm-jdk12': { CrossTestJVMwithJDK(jdk_version: '12') },
|
||||
'test-r-3.5.3': { TestR(use_r35: true) }
|
||||
])
|
||||
}
|
||||
milestone ordinal: 4
|
||||
}
|
||||
}
|
||||
stage('Jenkins Linux: Deploy') {
|
||||
agent none
|
||||
steps {
|
||||
script {
|
||||
parallel ([
|
||||
'deploy-jvm-packages': { DeployJVMPackages(spark_version: '2.4.3') }
|
||||
])
|
||||
}
|
||||
milestone ordinal: 5
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// check out source code from git
def checkoutSrcs() {
  retry(5) {
    try {
      timeout(time: 2, unit: 'MINUTES') {
        checkout scm
        sh 'git submodule update --init'
      }
    } catch (exc) {
      deleteDir()
      error "Failed to fetch source codes"
    }
  }
}

def ClangTidy() {
  node('linux && cpu') {
    unstash name: 'srcs'
    echo "Running clang-tidy job..."
    def container_type = "clang_tidy"
    def docker_binary = "docker"
    def dockerArgs = "--build-arg CUDA_VERSION=10.1"
    sh """
    ${dockerRun} ${container_type} ${docker_binary} ${dockerArgs} python3 tests/ci_build/tidy.py
    """
    deleteDir()
  }
}

def Lint() {
  node('linux && cpu') {
    unstash name: 'srcs'
    echo "Running lint..."
    def container_type = "cpu"
    def docker_binary = "docker"
    sh """
    ${dockerRun} ${container_type} ${docker_binary} make lint
    """
    deleteDir()
  }
}

def SphinxDoc() {
  node('linux && cpu') {
    unstash name: 'srcs'
    echo "Running sphinx-doc..."
    def container_type = "cpu"
    def docker_binary = "docker"
    def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='-e SPHINX_GIT_BRANCH=${BRANCH_NAME}'"
    sh """#!/bin/bash
    ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} make -C doc html
    """
    deleteDir()
  }
}

def Doxygen() {
  node('linux && cpu') {
    unstash name: 'srcs'
    echo "Running doxygen..."
    def container_type = "cpu"
    def docker_binary = "docker"
    sh """
    ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/doxygen.sh ${BRANCH_NAME}
    """
    echo 'Uploading doc...'
    s3Upload file: "build/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "doxygen/${BRANCH_NAME}.tar.bz2"
    deleteDir()
  }
}

def BuildCPU() {
  node('linux && cpu') {
    unstash name: 'srcs'
    echo "Build CPU"
    def container_type = "cpu"
    def docker_binary = "docker"
    sh """
    ${dockerRun} ${container_type} ${docker_binary} rm -fv dmlc-core/include/dmlc/build_config_default.h
    # This step is not necessary, but here we include it, to ensure that DMLC_CORE_USE_CMAKE flag is correctly propagated
    # We want to make sure that we use the configured header build/dmlc/build_config.h instead of include/dmlc/build_config_default.h.
    # See discussion at https://github.com/dmlc/xgboost/issues/5510
    ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh
    ${dockerRun} ${container_type} ${docker_binary} build/testxgboost
    """
    // Sanitizer test
    def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='-e ASAN_SYMBOLIZER_PATH=/usr/bin/llvm-symbolizer -e ASAN_OPTIONS=symbolize=1 -e UBSAN_OPTIONS=print_stacktrace=1:log_path=ubsan_error.log --cap-add SYS_PTRACE'"
    sh """
    ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak;undefined" \
      -DCMAKE_BUILD_TYPE=Debug -DSANITIZER_PATH=/usr/lib/x86_64-linux-gnu/
    ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} build/testxgboost
    """

    stash name: 'xgboost_cli', includes: 'xgboost'
    deleteDir()
  }
}

def BuildCPUMock() {
  node('linux && cpu') {
    unstash name: 'srcs'
    echo "Build CPU with rabit mock"
    def container_type = "cpu"
    def docker_binary = "docker"
    sh """
    ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_mock_cmake.sh
    """
    echo 'Stashing rabit C++ test executable (xgboost)...'
    stash name: 'xgboost_rabit_tests', includes: 'xgboost'
    deleteDir()
  }
}

def BuildCPUNonOmp() {
  node('linux && cpu') {
    unstash name: 'srcs'
    echo "Build CPU without OpenMP"
    def container_type = "cpu"
    def docker_binary = "docker"
    sh """
    ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh -DUSE_OPENMP=OFF
    """
    echo "Running Non-OpenMP C++ test..."
    sh """
    ${dockerRun} ${container_type} ${docker_binary} build/testxgboost
    """
    deleteDir()
  }
}

def BuildCUDA(args) {
  node('linux && cpu') {
    unstash name: 'srcs'
    echo "Build with CUDA ${args.cuda_version}"
    def container_type = "gpu_build"
    def docker_binary = "docker"
    def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
    sh """
    ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_CUDA=ON -DUSE_NCCL=ON -DOPEN_MP:BOOL=ON -DHIDE_CXX_SYMBOLS=ON
    ${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal"
    ${dockerRun} ${container_type} ${docker_binary} ${docker_args} python3 tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} manylinux2010_x86_64
    """
    // Stash wheel for CUDA 10.0 target
    if (args.cuda_version == '10.0') {
      echo 'Stashing Python wheel...'
      stash name: 'xgboost_whl_cuda10', includes: 'python-package/dist/*.whl'
      path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
      s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
      echo 'Stashing C++ test executable (testxgboost)...'
      stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost'
    }
    deleteDir()
  }
}

def BuildJVMPackages(args) {
  node('linux && cpu') {
    unstash name: 'srcs'
    echo "Build XGBoost4J-Spark with Spark ${args.spark_version}"
    def container_type = "jvm"
    def docker_binary = "docker"
    // Use only 4 CPU cores
    def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='--cpuset-cpus 0-3'"
    sh """
    ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_packages.sh ${args.spark_version}
    """
    echo 'Stashing XGBoost4J JAR...'
    stash name: 'xgboost4j_jar', includes: "jvm-packages/xgboost4j/target/*.jar,jvm-packages/xgboost4j-spark/target/*.jar,jvm-packages/xgboost4j-example/target/*.jar"
    deleteDir()
  }
}

def BuildJVMDoc() {
  node('linux && cpu') {
    unstash name: 'srcs'
    echo "Building JVM doc..."
    def container_type = "jvm"
    def docker_binary = "docker"
    sh """
    ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_doc.sh ${BRANCH_NAME}
    """
    echo 'Uploading doc...'
    s3Upload file: "jvm-packages/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "${BRANCH_NAME}.tar.bz2"
    deleteDir()
  }
}

def TestPythonCPU() {
  node('linux && cpu') {
    unstash name: 'xgboost_whl_cuda10'
    unstash name: 'srcs'
    unstash name: 'xgboost_cli'
    echo "Test Python CPU"
    def container_type = "cpu"
    def docker_binary = "docker"
    sh """
    ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu
    ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu-py35
    """
    deleteDir()
  }
}

def TestPythonGPU(args) {
  nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu'
  node(nodeReq) {
    unstash name: 'xgboost_whl_cuda10'
    unstash name: 'srcs'
    echo "Test Python GPU: CUDA ${args.cuda_version}"
    def container_type = "gpu"
    def docker_binary = "nvidia-docker"
    def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
    if (args.multi_gpu) {
      echo "Using multiple GPUs"
      sh """
      ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh mgpu
      """
      if (args.cuda_version != '9.0') {
        echo "Running tests with cuDF..."
        sh """
        ${dockerRun} cudf ${docker_binary} ${docker_args} tests/ci_build/test_python.sh mgpu-cudf
        """
      }
    } else {
      echo "Using a single GPU"
      sh """
      ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh gpu
      """
      if (args.cuda_version != '9.0') {
        echo "Running tests with cuDF..."
        sh """
        ${dockerRun} cudf ${docker_binary} ${docker_args} tests/ci_build/test_python.sh cudf
        """
      }
    }
    // For CUDA 10.0 target, run cuDF tests too
    deleteDir()
  }
}

def TestCppRabit() {
  node(nodeReq) {
    unstash name: 'xgboost_rabit_tests'
    unstash name: 'srcs'
    echo "Test C++, rabit mock on"
    def container_type = "cpu"
    def docker_binary = "docker"
    sh """
    ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/runxgb.sh xgboost tests/ci_build/approx.conf.in
    """
    deleteDir()
  }
}

def TestCppGPU(args) {
  nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu'
  node(nodeReq) {
    unstash name: 'xgboost_cpp_tests'
    unstash name: 'srcs'
    echo "Test C++, CUDA ${args.cuda_version}"
    def container_type = "gpu"
    def docker_binary = "nvidia-docker"
    def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
    if (args.multi_gpu) {
      echo "Using multiple GPUs"
      sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost --gtest_filter=*.MGPU_*"
    } else {
      echo "Using a single GPU"
      sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost --gtest_filter=-*.MGPU_*"
    }
    deleteDir()
  }
}

def CrossTestJVMwithJDK(args) {
  node('linux && cpu') {
    unstash name: 'xgboost4j_jar'
    unstash name: 'srcs'
    if (args.spark_version != null) {
      echo "Test XGBoost4J on a machine with JDK ${args.jdk_version}, Spark ${args.spark_version}"
    } else {
      echo "Test XGBoost4J on a machine with JDK ${args.jdk_version}"
    }
    def container_type = "jvm_cross"
    def docker_binary = "docker"
    def spark_arg = (args.spark_version != null) ? "--build-arg SPARK_VERSION=${args.spark_version}" : ""
    def docker_args = "--build-arg JDK_VERSION=${args.jdk_version} ${spark_arg}"
    // Run integration tests only when spark_version is given
    def docker_extra_params = (args.spark_version != null) ? "CI_DOCKER_EXTRA_PARAMS_INIT='-e RUN_INTEGRATION_TEST=1'" : ""
    sh """
    ${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_jvm_cross.sh
    """
    deleteDir()
  }
}

def TestR(args) {
  node('linux && cpu') {
    unstash name: 'srcs'
    echo "Test R package"
    def container_type = "rproject"
    def docker_binary = "docker"
    def use_r35_flag = (args.use_r35) ? "1" : "0"
    def docker_args = "--build-arg USE_R35=${use_r35_flag}"
    sh """
    ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_test_rpkg.sh || tests/ci_build/print_r_stacktrace.sh
    """
    deleteDir()
  }
}

def DeployJVMPackages(args) {
  node('linux && cpu') {
    unstash name: 'srcs'
    if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) {
      echo 'Deploying to xgboost-maven-repo S3 repo...'
      def container_type = "jvm"
      def docker_binary = "docker"
      sh """
      ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/deploy_jvm_packages.sh ${args.spark_version}
      """
    }
    deleteDir()
  }
}

@@ -1,123 +0,0 @@
#!/usr/bin/groovy
// -*- mode: groovy -*-
// Jenkins pipeline
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/

import groovy.transform.Field

/* Restricted tasks: tasks generating artifacts, such as binary wheels and
   documentation */

// Command to run command inside a docker container
def dockerRun = 'tests/ci_build/ci_build.sh'
// Utility functions
@Field
def utils
@Field
def commit_id
@Field
def branch_name

def buildMatrix = [
    [ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "9.2" ],
    [ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
    [ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": false, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
]

pipeline {
  // Each stage specify its own agent
  agent none

  // Setup common job properties
  options {
    ansiColor('xterm')
    timestamps()
    timeout(time: 120, unit: 'MINUTES')
    buildDiscarder(logRotator(numToKeepStr: '10'))
  }

  // Build stages
  stages {
    stage('Jenkins: Get sources') {
      agent {
        label 'restricted'
      }
      steps {
        script {
          utils = load('tests/ci_build/jenkins_tools.Groovy')
          utils.checkoutSrcs()
          commit_id = "${GIT_COMMIT}"
          branch_name = "${GIT_LOCAL_BRANCH}"
        }
        stash name: 'srcs', excludes: '.git/'
        milestone label: 'Sources ready', ordinal: 1
      }
    }
    stage('Jenkins: Build doc') {
      steps {
        script {
          retry(1) {
            node('linux && cpu && restricted') {
              unstash name: 'srcs'
              echo 'Building doc...'
              dir ('jvm-packages') {
                sh "bash ./build_doc.sh ${commit_id}"
                archiveArtifacts artifacts: "${commit_id}.tar.bz2", allowEmptyArchive: true
                echo 'Deploying doc...'
                withAWS(credentials:'xgboost-doc-bucket') {
                  s3Upload file: "${commit_id}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "${branch_name}.tar.bz2"
                }
              }
            }
          }
        }
      }
    }

    stage('Jenkins: Build artifacts') {
      steps {
        script {
          parallel (buildMatrix.findAll{it['enabled']}.collectEntries{ c ->
            def buildName = utils.getBuildName(c)
            utils.buildFactory(buildName, c, true, this.&buildPlatformCmake)
          })
        }
      }
    }
  }
}

/**
 * Build platform and test it via cmake.
 */
def buildPlatformCmake(buildName, conf, nodeReq, dockerTarget) {
  def opts = utils.cmakeOptions(conf)
  // Destination dir for artifacts
  def distDir = "dist/${buildName}"
  def dockerArgs = ""
  if(conf["withGpu"]){
    dockerArgs = "--build-arg CUDA_VERSION=" + conf["cudaVersion"]
  }
  // Build node - this is returned result
  retry(1) {
    node(nodeReq) {
      unstash name: 'srcs'
      echo """
      |===== XGBoost CMake build =====
      | dockerTarget: ${dockerTarget}
      | cmakeOpts : ${opts}
      |=========================
      """.stripMargin('|')
      // Invoke command inside docker
      sh """
      ${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/build_via_cmake.sh ${opts}
      ${dockerRun} ${dockerTarget} ${dockerArgs} bash -c "cd python-package; rm -f dist/*; python setup.py bdist_wheel --universal"
      rm -rf "${distDir}"; mkdir -p "${distDir}/py"
      cp xgboost "${distDir}"
      cp -r lib "${distDir}"
      cp -r python-package/dist "${distDir}/py"
      """
      archiveArtifacts artifacts: "${distDir}/**/*.*", allowEmptyArchive: true
    }
  }
}

151 Jenkinsfile-win64 Normal file
@@ -0,0 +1,151 @@
#!/usr/bin/groovy
// -*- mode: groovy -*-

/* Jenkins pipeline for Windows AMD64 target */

import groovy.transform.Field

@Field
def commit_id  // necessary to pass a variable from one stage to another

pipeline {
  agent none
  // Build stages
  stages {
    stage('Jenkins Win64: Get sources') {
      agent { label 'win64 && build' }
      steps {
        script {
          checkoutSrcs()
          commit_id = "${GIT_COMMIT}"
        }
        stash name: 'srcs'
        milestone ordinal: 1
      }
    }
    stage('Jenkins Win64: Build') {
      agent none
      steps {
        script {
          parallel ([
            'build-win64-cuda9.0': { BuildWin64() }
          ])
        }
        milestone ordinal: 2
      }
    }
    stage('Jenkins Win64: Test') {
      agent none
      steps {
        script {
          parallel ([
            'test-win64-cpu': { TestWin64CPU() },
            'test-win64-gpu-cuda9.0': { TestWin64GPU(cuda_target: 'cuda9') },
            'test-win64-gpu-cuda10.0': { TestWin64GPU(cuda_target: 'cuda10_0') },
            'test-win64-gpu-cuda10.1': { TestWin64GPU(cuda_target: 'cuda10_1') }
          ])
        }
        milestone ordinal: 3
      }
    }
  }
}

// check out source code from git
def checkoutSrcs() {
  retry(5) {
    try {
      timeout(time: 2, unit: 'MINUTES') {
        checkout scm
        sh 'git submodule update --init'
      }
    } catch (exc) {
      deleteDir()
      error "Failed to fetch source codes"
    }
  }
}

def BuildWin64() {
  node('win64 && build') {
    unstash name: 'srcs'
    echo "Building XGBoost for Windows AMD64 target..."
    bat "nvcc --version"
    bat """
    mkdir build
    cd build
    cmake .. -G"Visual Studio 15 2017 Win64" -DUSE_CUDA=ON -DCMAKE_VERBOSE_MAKEFILE=ON -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON
    """
    bat """
    cd build
    "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\MSBuild\\15.0\\Bin\\MSBuild.exe" xgboost.sln /m /p:Configuration=Release /nodeReuse:false
    """
    bat """
    cd python-package
    conda activate && python setup.py bdist_wheel --universal && for /R %%i in (dist\\*.whl) DO python ../tests/ci_build/rename_whl.py "%%i" ${commit_id} win_amd64
    """
    echo "Insert vcomp140.dll (OpenMP runtime) into the wheel..."
    bat """
    cd python-package\\dist
    COPY /B ..\\..\\tests\\ci_build\\insert_vcomp140.py
    conda activate && python insert_vcomp140.py *.whl
    """
    echo 'Stashing Python wheel...'
    stash name: 'xgboost_whl', includes: 'python-package/dist/*.whl'
    path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
    s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
    echo 'Stashing C++ test executable (testxgboost)...'
    stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost.exe'
    stash name: 'xgboost_cli', includes: 'xgboost.exe'
    deleteDir()
  }
}

def TestWin64CPU() {
  node('win64 && cpu') {
    unstash name: 'srcs'
    unstash name: 'xgboost_whl'
    unstash name: 'xgboost_cli'
    echo "Test Win64 CPU"
    echo "Installing Python wheel..."
    bat "conda activate && (python -m pip uninstall -y xgboost || cd .)"
    bat """
    conda activate && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i"
    """
    echo "Installing Python dependencies..."
    bat """
    conda activate && conda upgrade scikit-learn pandas numpy
    """
    echo "Running Python tests..."
    bat "conda activate && python -m pytest -v -s --fulltrace tests\\python"
    bat "conda activate && python -m pip uninstall -y xgboost"
    deleteDir()
  }
}

def TestWin64GPU(args) {
  node("win64 && gpu && ${args.cuda_target}") {
    unstash name: 'srcs'
    unstash name: 'xgboost_whl'
    unstash name: 'xgboost_cpp_tests'
    echo "Test Win64 GPU (${args.cuda_target})"
    bat "nvcc --version"
    echo "Running C++ tests..."
    bat "build\\testxgboost.exe"
    echo "Installing Python wheel..."
    bat "conda activate && (python -m pip uninstall -y xgboost || cd .)"
    bat """
    conda activate && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i"
    """
    echo "Installing Python dependencies..."
    bat """
    conda activate && conda upgrade scikit-learn pandas numpy
    """
    echo "Running Python tests..."
    bat """
    conda activate && python -m pytest -v -s --fulltrace -m "(not slow) and (not mgpu)" tests\\python-gpu
    """
    bat "conda activate && python -m pip uninstall -y xgboost"
    deleteDir()
  }
}

2 LICENSE
@@ -186,7 +186,7 @@
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright (c) 2018 by Contributors
   Copyright (c) 2019 by Contributors

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.

166 Makefile
@@ -1,11 +1,3 @@
ifndef config
ifneq ("$(wildcard ./config.mk)","")
	config = config.mk
else
	config = make/config.mk
endif
endif

ifndef DMLC_CORE
	DMLC_CORE = dmlc-core
endif
@@ -30,23 +22,8 @@ ifndef MAKE_OK
endif
$(warning MAKE [$(MAKE)] - $(if $(MAKE_OK),checked OK,PROBLEM))

ifeq ($(OS), Windows_NT)
	UNAME="Windows"
else
	UNAME=$(shell uname)
endif

include $(config)
ifeq ($(USE_OPENMP), 0)
	export NO_OPENMP = 1
endif
include $(DMLC_CORE)/make/dmlc.mk

# include the plugins
ifdef XGB_PLUGINS
include $(XGB_PLUGINS)
endif

# set compiler defaults for OSX versus *nix
# let people override either
OS := $(shell uname)
@@ -67,126 +44,40 @@ export CXX = g++
endif
endif

export LDFLAGS= -pthread -lm $(ADD_LDFLAGS) $(DMLC_LDFLAGS) $(PLUGIN_LDFLAGS)
export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++11 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS) $(PLUGIN_CFLAGS)
export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++11 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS)
CFLAGS += -I$(DMLC_CORE)/include -I$(RABIT)/include -I$(GTEST_PATH)/include
#java include path
export JAVAINCFLAGS = -I${JAVA_HOME}/include -I./java

ifeq ($(TEST_COVER), 1)
	CFLAGS += -g -O0 -fprofile-arcs -ftest-coverage
else
	CFLAGS += -O3 -funroll-loops
ifeq ($(USE_SSE), 1)
	CFLAGS += -msse2
endif
endif

ifndef LINT_LANG
	LINT_LANG= "all"
endif

ifeq ($(UNAME), Windows)
	XGBOOST_DYLIB = lib/xgboost.dll
	JAVAINCFLAGS += -I${JAVA_HOME}/include/win32
else
ifeq ($(UNAME), Darwin)
	XGBOOST_DYLIB = lib/libxgboost.dylib
	CFLAGS += -fPIC
else
	XGBOOST_DYLIB = lib/libxgboost.so
	CFLAGS += -fPIC
endif
endif

ifeq ($(UNAME), Linux)
	LDFLAGS += -lrt
	JAVAINCFLAGS += -I${JAVA_HOME}/include/linux
endif

ifeq ($(UNAME), Darwin)
	JAVAINCFLAGS += -I${JAVA_HOME}/include/darwin
endif

OPENMP_FLAGS =
ifeq ($(USE_OPENMP), 1)
	OPENMP_FLAGS = -fopenmp
else
	OPENMP_FLAGS = -DDISABLE_OPENMP
endif
CFLAGS += $(OPENMP_FLAGS)

# specify tensor path
.PHONY: clean all lint clean_all doxygen rcpplint pypack Rpack Rbuild Rcheck java pylint

all: lib/libxgboost.a $(XGBOOST_DYLIB) xgboost

$(DMLC_CORE)/libdmlc.a: $(wildcard $(DMLC_CORE)/src/*.cc $(DMLC_CORE)/src/*/*.cc)
	+ cd $(DMLC_CORE); "$(MAKE)" libdmlc.a config=$(ROOTDIR)/$(config); cd $(ROOTDIR)

$(RABIT)/lib/$(LIB_RABIT): $(wildcard $(RABIT)/src/*.cc)
	+ cd $(RABIT); "$(MAKE)" lib/$(LIB_RABIT) USE_SSE=$(USE_SSE); cd $(ROOTDIR)

jvm: jvm-packages/lib/libxgboost4j.so

SRC = $(wildcard src/*.cc src/*/*.cc)
ALL_OBJ = $(patsubst src/%.cc, build/%.o, $(SRC)) $(PLUGIN_OBJS)
AMALGA_OBJ = amalgamation/xgboost-all0.o
LIB_DEP = $(DMLC_CORE)/libdmlc.a $(RABIT)/lib/$(LIB_RABIT)
ALL_DEP = $(filter-out build/cli_main.o, $(ALL_OBJ)) $(LIB_DEP)
CLI_OBJ = build/cli_main.o
include tests/cpp/xgboost_test.mk
.PHONY: clean all lint clean_all doxygen rcpplint pypack Rpack Rbuild Rcheck

build/%.o: src/%.cc
	@mkdir -p $(@D)
	$(CXX) $(CFLAGS) -MM -MT build/$*.o $< >build/$*.d
	$(CXX) -c $(CFLAGS) $< -o $@

build_plugin/%.o: plugin/%.cc
	@mkdir -p $(@D)
	$(CXX) $(CFLAGS) -MM -MT build_plugin/$*.o $< >build_plugin/$*.d
	$(CXX) -c $(CFLAGS) $< -o $@

# This should be equivalent to $(ALL_OBJ) except for build/cli_main.o
amalgamation/xgboost-all0.o: amalgamation/xgboost-all0.cc
	$(CXX) -c $(CFLAGS) $< -o $@

# Equivalent to lib/libxgboost_all.so
lib/libxgboost_all.so: $(AMALGA_OBJ) $(LIB_DEP)
	@mkdir -p $(@D)
	$(CXX) $(CFLAGS) -shared -o $@ $(filter %.o %.a, $^) $(LDFLAGS)

lib/libxgboost.a: $(ALL_DEP)
	@mkdir -p $(@D)
	ar crv $@ $(filter %.o, $?)

lib/xgboost.dll lib/libxgboost.so lib/libxgboost.dylib: $(ALL_DEP)
	@mkdir -p $(@D)
	$(CXX) $(CFLAGS) -shared -o $@ $(filter %.o %a, $^) $(LDFLAGS)

jvm-packages/lib/libxgboost4j.so: jvm-packages/xgboost4j/src/native/xgboost4j.cpp $(ALL_DEP)
	@mkdir -p $(@D)
	$(CXX) $(CFLAGS) $(JAVAINCFLAGS) -shared -o $@ $(filter %.cpp %.o %.a, $^) $(LDFLAGS)

xgboost: $(CLI_OBJ) $(ALL_DEP)
	$(CXX) $(CFLAGS) -o $@ $(filter %.o %.a, $^) $(LDFLAGS)

rcpplint:
	python2 dmlc-core/scripts/lint.py xgboost ${LINT_LANG} R-package/src
	python3 dmlc-core/scripts/lint.py xgboost ${LINT_LANG} R-package/src

lint: rcpplint
	python2 dmlc-core/scripts/lint.py xgboost ${LINT_LANG} include src plugin python-package

pylint:
	flake8 --ignore E501 python-package
	flake8 --ignore E501 tests/python

test: $(ALL_TEST)
	$(ALL_TEST)

check: test
	./tests/cpp/xgboost_test
	python3 dmlc-core/scripts/lint.py --exclude_path python-package/xgboost/dmlc-core \
	  python-package/xgboost/include python-package/xgboost/lib \
	  python-package/xgboost/make python-package/xgboost/rabit \
	  python-package/xgboost/src --pylint-rc ${PWD}/python-package/.pylintrc xgboost \
	  ${LINT_LANG} include src python-package

ifeq ($(TEST_COVER), 1)
cover: check
@@ -196,7 +87,7 @@ cover: check
endif

clean:
	$(RM) -rf build build_plugin lib bin *~ */*~ */*/*~ */*/*/*~ */*.o */*/*.o */*/*/*.o #xgboost
	$(RM) -rf build lib bin *~ */*~ */*/*~ */*/*/*~ */*.o */*/*.o */*/*/*.o #xgboost
	$(RM) -rf build_tests *.gcov tests/cpp/xgboost_test
	if [ -d "R-package/src" ]; then \
		cd R-package/src; \
@@ -208,36 +99,9 @@ clean_all: clean
	cd $(DMLC_CORE); "$(MAKE)" clean; cd $(ROOTDIR)
	cd $(RABIT); "$(MAKE)" clean; cd $(ROOTDIR)

doxygen:
	doxygen doc/Doxyfile

# create standalone python tar file.
pypack: ${XGBOOST_DYLIB}
	cp ${XGBOOST_DYLIB} python-package/xgboost
	cd python-package; tar cf xgboost.tar xgboost; cd ..

# create pip source dist (sdist) pack for PyPI
pippack: clean_all
	rm -rf xgboost-python
	# remove symlinked directories in python-package/xgboost
	rm -rf python-package/xgboost/lib
	rm -rf python-package/xgboost/dmlc-core
	rm -rf python-package/xgboost/include
	rm -rf python-package/xgboost/make
	rm -rf python-package/xgboost/rabit
	rm -rf python-package/xgboost/src
	cp -r python-package xgboost-python
	cp -r Makefile xgboost-python/xgboost/
	cp -r make xgboost-python/xgboost/
	cp -r src xgboost-python/xgboost/
	cp -r tests xgboost-python/xgboost/
	cp -r include xgboost-python/xgboost/
	cp -r dmlc-core xgboost-python/xgboost/
	cp -r rabit xgboost-python/xgboost/
	# Use setup_pip.py instead of setup.py
	mv xgboost-python/setup_pip.py xgboost-python/setup.py
	# Build sdist tarball
	cd xgboost-python; python setup.py sdist; mv dist/*.tar.gz ..; cd ..
	cd python-package; python setup.py sdist; mv dist/*.tar.gz ..; cd ..

# Script to make a clean installable R package.
Rpack: clean_all
@@ -258,10 +122,17 @@ Rpack: clean_all
	cp -r dmlc-core/include xgboost/src/dmlc-core/include
	cp -r dmlc-core/src xgboost/src/dmlc-core/src
	cp ./LICENSE xgboost
	cat R-package/src/Makevars.in|sed '2s/.*/PKGROOT=./' | sed '3s/.*/ENABLE_STD_THREAD=0/' > xgboost/src/Makevars.in
	# Modify PKGROOT in Makevars.in
	cat R-package/src/Makevars.in|sed '2s/.*/PKGROOT=./' > xgboost/src/Makevars.in
	# Configure Makevars.win (Windows-specific Makevars, likely using MinGW)
	cp xgboost/src/Makevars.in xgboost/src/Makevars.win
	cat xgboost/src/Makevars.in| sed '3s/.*/ENABLE_STD_THREAD=0/' > xgboost/src/Makevars.win
	sed -i -e 's/@OPENMP_CXXFLAGS@/$$\(SHLIB_OPENMP_CXXFLAGS\)/g' xgboost/src/Makevars.win
	sed -i -e 's/-pthread/$$\(SHLIB_PTHREAD_FLAGS\)/g' xgboost/src/Makevars.win
	sed -i -e 's/@ENDIAN_FLAG@/-DDMLC_CMAKE_LITTLE_ENDIAN=1/g' xgboost/src/Makevars.win
	sed -i -e 's/@BACKTRACE_LIB@//g' xgboost/src/Makevars.win
	sed -i -e 's/@OPENMP_LIB@//g' xgboost/src/Makevars.win
	rm -f xgboost/src/Makevars.win-e # OSX sed create this extra file; remove it
	bash R-package/remove_warning_suppression_pragma.sh
	rm xgboost/remove_warning_suppression_pragma.sh

@@ -274,4 +145,3 @@ Rcheck: Rbuild

-include build/*.d
-include build/*/*.d
-include build_plugin/*/*.d

444 NEWS.md
@@ -3,6 +3,450 @@ XGBoost Change Log

This file records the changes in xgboost library in reverse chronological order.

## v1.0.0 (2020.02.19)
This release marks a major milestone for the XGBoost project.

### Apache-style governance, contribution policy, and semantic versioning (#4646, #4659)
* Starting with the 1.0.0 release, the XGBoost Project is adopting Apache-style governance. The full community guideline is [available in the doc website](https://xgboost.readthedocs.io/en/release_1.0.0/contrib/community.html). Note that we now have a Project Management Committee (PMC), which stewards the project on a long-term basis. The PMC is also entrusted to run and fund the project's continuous integration (CI) infrastructure (https://xgboost-ci.net).
* We also adopt [semantic versioning](https://semver.org/). See [our release versioning policy](https://xgboost.readthedocs.io/en/release_1.0.0/contrib/release.html).

### Better performance scaling for multi-core CPUs (#4502, #4529, #4716, #4851, #5008, #5107, #5138, #5156)
* Poor performance scaling of the `hist` algorithm for multi-core CPUs has been under investigation (#3810). Previous effort #4529 was replaced with a series of pull requests (#5107, #5138, #5156) aimed at achieving the same performance benefits while keeping the C++ codebase legible. The latest performance benchmark results show [up to 5x speedup on Intel CPUs with many cores](https://github.com/dmlc/xgboost/pull/5156#issuecomment-580024413). Note: #5244, which concludes the effort, will become part of the upcoming release 1.1.0.

### Improved installation experience on Mac OSX (#4672, #5074, #5080, #5146, #5240)
* It used to be quite complicated to install XGBoost on Mac OSX. XGBoost uses OpenMP to distribute work among multiple CPU cores, and Mac's default C++ compiler (Apple Clang) does not come with OpenMP. The existing work-around (using another C++ compiler) was complex and prone to fail with cryptic diagnostics (#4933, #4949, #4969).
* Now it only takes two commands to install XGBoost: `brew install libomp` followed by `pip install xgboost`. The installed XGBoost will use all CPU cores.
* Even better, XGBoost is now available from Homebrew: `brew install xgboost`. See Homebrew/homebrew-core#50467.
* Previously, if you installed the XGBoost R package using the command `install.packages('xgboost')`, it could only use a single CPU core and you would experience slow training performance. With the 1.0.0 release, the R package will use all CPU cores out of the box.

### Distributed XGBoost now available on Kubernetes (#4621, #4939)
* Check out the [tutorial for setting up distributed XGBoost on a Kubernetes cluster](https://xgboost.readthedocs.io/en/release_1.0.0/tutorials/kubernetes.html).

### Ruby binding for XGBoost (#4856)

### New Native Dask interface for multi-GPU and multi-node scaling (#4473, #4507, #4617, #4819, #4907, #4914, #4941, #4942, #4951, #4973, #5048, #5077, #5144, #5270)
* XGBoost now integrates seamlessly with [Dask](https://dask.org/), a lightweight distributed framework for data processing. Together with the first-class support for cuDF data frames (see below), it is now easier than ever to create an end-to-end data pipeline running on one or more NVIDIA GPUs.
* Multi-GPU training with Dask is now up to 20% faster than the previous release (#4914, #4951).
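
  A minimal sketch of the Dask workflow from Python (illustrative only: the local cluster and the synthetic arrays are assumptions for the example, not part of the release notes):

  ```python
  import dask.array as da
  import xgboost as xgb
  from dask.distributed import Client, LocalCluster

  if __name__ == '__main__':
      with LocalCluster(n_workers=2) as cluster, Client(cluster) as client:
          # Synthetic data; in practice these would be real distributed arrays.
          X = da.random.random((100000, 20), chunks=(10000, 20))
          y = da.random.random(100000, chunks=(10000,))
          dtrain = xgb.dask.DaskDMatrix(client, X, y)  # data stays on the workers
          output = xgb.dask.train(
              client,
              {'tree_method': 'hist', 'objective': 'reg:squarederror'},
              dtrain, num_boost_round=10)
          booster = output['booster']  # the trained model
          history = output['history']  # per-iteration evaluation results
          preds = xgb.dask.predict(client, output, dtrain)
  ```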

### First-class support for cuDF data frames and cuPy arrays (#4737, #4745, #4794, #4850, #4891, #4902, #4918, #4927, #4928, #5053, #5189, #5194, #5206, #5219, #5225)
* [cuDF](https://github.com/rapidsai/cudf) is a data frame library for loading and processing tabular data on NVIDIA GPUs. It provides a Pandas-like API.
* [cuPy](https://github.com/cupy/cupy) implements a NumPy-compatible multi-dimensional array on NVIDIA GPUs.
* Now users can keep the data in GPU memory throughout the end-to-end data pipeline, obviating the need for copying data between the main memory and GPU memory.
* XGBoost can accept any data structure that exposes the `__array_interface__` signature, opening the way to supporting other columnar formats that are compatible with Apache Arrow.
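
  A hedged sketch of the cuDF path (assumes a machine with an NVIDIA GPU and the `cudf` package installed; the tiny frame is purely illustrative):

  ```python
  import cudf
  import xgboost as xgb

  df = cudf.DataFrame({'a': [1.0, 2.0, 3.0, 4.0],
                       'b': [0.5, 0.4, 0.3, 0.2]})
  label = cudf.Series([0, 1, 0, 1])
  dtrain = xgb.DMatrix(df, label=label)  # the data stays in GPU memory
  bst = xgb.train({'tree_method': 'gpu_hist'}, dtrain, num_boost_round=10)
  ```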

### [Feature interaction constraint](https://xgboost.readthedocs.io/en/release_1.0.0/tutorials/feature_interaction_constraint.html) is now available with `approx` and `gpu_hist` algorithms (#4534, #4587, #4596, #5034).
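
A sketch of passing a constraint through the native API (the feature groups here are arbitrary; the parameter takes a JSON-like string of feature-index lists):

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(512, 5)
y = np.random.rand(512)
dtrain = xgb.DMatrix(X, label=y)
params = {
    'tree_method': 'approx',  # per the notes above, 'gpu_hist' also works now
    # Features 0-1 may interact with each other, and 2-4 with each other,
    # but no branch may mix features from the two groups.
    'interaction_constraints': '[[0, 1], [2, 3, 4]]',
}
bst = xgb.train(params, dtrain, num_boost_round=10)
```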

### Learning to rank is now GPU accelerated (#4873, #5004, #5129)
* Supported ranking objectives: NDCG, Map, Pairwise.
* [Up to 2x improved training performance on GPUs](https://devblogs.nvidia.com/learning-to-rank-with-xgboost-and-gpu/).

### Enable `gamma` parameter for GPU training (#4874, #4953)
* The `gamma` parameter specifies the minimum loss reduction required to add a new split in a tree. A larger value for `gamma` has the effect of pre-pruning the tree, by making it harder to add splits.
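
  A small illustration of the parameter in GPU training (assumes a CUDA-capable machine; the data is synthetic):

  ```python
  import numpy as np
  import xgboost as xgb

  X = np.random.rand(1024, 10)
  y = np.random.rand(1024)
  dtrain = xgb.DMatrix(X, label=y)
  params = {
      'tree_method': 'gpu_hist',  # gamma is now honoured by GPU training too
      'gamma': 1.0,               # a split must reduce the loss by at least 1.0
      'max_depth': 6,
  }
  bst = xgb.train(params, dtrain, num_boost_round=50)
  ```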

### External memory for GPU training (#4486, #4526, #4747, #4833, #4879, #5014)
* It is now possible to use NVIDIA GPUs even when the size of training data exceeds the available GPU memory. Note that the external memory support for GPU is still experimental. #5093 will further improve performance and will become part of the upcoming release 1.1.0.
* RFC for enabling external memory with GPU algorithms: #4357
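
  As a sketch, external memory mode is still driven by the cache-file suffix on the data path (the LIBSVM file name below is hypothetical):

  ```python
  import xgboost as xgb

  # 'train.libsvm' is a hypothetical LIBSVM-format file; the '#dtrain.cache'
  # suffix asks XGBoost to page the data through an on-disk cache instead of
  # holding everything in (GPU) memory at once.
  dtrain = xgb.DMatrix('train.libsvm#dtrain.cache')
  bst = xgb.train({'tree_method': 'gpu_hist'}, dtrain, num_boost_round=10)
  ```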

### Improve Scikit-Learn interface (#4558, #4842, #4929, #5049, #5151, #5130, #5227)
* Many users of XGBoost enjoy the convenience and breadth of the Scikit-Learn ecosystem. In this release, we revise the Scikit-Learn API of XGBoost (`XGBRegressor`, `XGBClassifier`, and `XGBRanker`) to achieve feature parity with the traditional XGBoost interface (`xgboost.train()`).
* Insert check to validate data shapes.
* Produce an error message if `eval_set` is not a tuple. An error message is better than silently crashing.
* Allow using `numpy.RandomState` object.
* Add `n_jobs` as an alias of `nthread`.
* Roadmap: #5152
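
  A short sketch of the revised wrapper (synthetic data; the parameter choices are illustrative only):

  ```python
  import numpy as np
  from xgboost import XGBRegressor

  X = np.random.rand(256, 8)
  y = np.random.rand(256)
  model = XGBRegressor(
      n_estimators=50,
      n_jobs=4,                                  # new alias of nthread
      random_state=np.random.RandomState(1994),  # RandomState objects now accepted
  )
  # eval_set must be a list of (data, label) tuples; malformed input now errors out
  model.fit(X, y, eval_set=[(X, y)], verbose=False)
  pred = model.predict(X)
  ```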

### XGBoost4J-Spark: Redesigning checkpointing mechanism
* RFC is available at #4786
* Clean up checkpoint file after a successful training job (#4754): The current implementation in XGBoost4J-Spark does not clean up the checkpoint file after a successful training job. If the user runs another job with the same checkpointing directory, she will get a wrong model because the second job will re-use the checkpoint file left over from the first job. To prevent this scenario, we propose to always clean up the checkpoint file after every successful training job.
* Avoid Multiple Jobs for Checkpointing (#5082): The current method for checkpointing is to collect the booster produced at the last iteration of each checkpoint interval to the Driver and persist it in HDFS. The major issue with this approach is that it needs to re-perform the data preparation for training if the user did not choose to cache the training dataset. To avoid re-performing data prep, we build external-memory checkpointing in the XGBoost4J layer as well.
* Enable deterministic repartitioning when checkpoint is enabled (#4807): Distributed algorithms for gradient boosting assume a fixed partition of the training data between multiple iterations. In previous versions, there was no guarantee that the data partition would stay the same, especially when a worker went down and some data had to be recovered from a previous checkpoint. In this release, we make the data partition deterministic by using the hash value of each data row in computing the partition.

### XGBoost4J-Spark: handle errors thrown by the native code (#4560)
* All core logic of XGBoost is written in C++, so XGBoost4J-Spark internally uses the C++ code via the Java Native Interface (JNI). #4560 adds proper error handling for any errors or exceptions arising from the C++ code, so that the XGBoost Spark application can be torn down in an orderly fashion.

### XGBoost4J-Spark: Refine method to count the number of alive cores (#4858)
* The `SparkParallelismTracker` class ensures that a sufficient number of executor cores are alive. To that end, it is important to query the number of alive cores reliably.

### XGBoost4J: Add `BigDenseMatrix` to store more than `Integer.MAX_VALUE` elements (#4383)

### Robust model serialization with JSON (#4632, #4708, #4739, #4868, #4936, #4945, #4974, #5086, #5087, #5089, #5091, #5094, #5110, #5111, #5112, #5120, #5137, #5218, #5222, #5236, #5245, #5248, #5281)
* In this release, we introduce experimental support for using [JSON](https://www.json.org/json-en.html) for serializing (saving/loading) XGBoost models and related hyperparameters for training. We would like to eventually replace the old binary format with JSON, since it is an open format and parsers are available in many programming languages and platforms. See [the documentation for model I/O using JSON](https://xgboost.readthedocs.io/en/release_1.0.0/tutorials/saving_model.html). #3980 explains why JSON was chosen over other alternatives.
* To maximize interoperability and compatibility of the serialized models, we now split serialization into two parts (#4855):
  1. Model, e.g. decision trees and strictly related metadata like `num_features`.
  2. Internal configuration, consisting of training parameters and other configurable parameters. For example, `max_delta_step`, `tree_method`, `objective`, `predictor`, `gpu_id`.

  Previously, users often ran into issues where the model file produced by one machine could not load or run on another machine. For example, models trained using a machine with an NVIDIA GPU could not run on another machine without a GPU (#5291, #5234). The reason is that the old binary format saved some internal configuration that was not universally applicable to all machines, e.g. `predictor='gpu_predictor'`.

  Now, the model saving function (`Booster.save_model()` in Python) will save only the model, without internal configuration. This guarantees that your model file can be used anywhere. Internal configuration will be serialized in limited circumstances such as:
  * Multiple nodes in a distributed system exchanging model details over the network.
  * Model checkpointing, to recover from possible crashes.

  This work proved to be useful for parameter validation as well (see below).
* Starting with the 1.0.0 release, we will use semantic versioning to indicate whether the model produced by one version of XGBoost would be compatible with another version of XGBoost. Any change in the major version indicates a breaking change in the serialization format.
* We now provide a robust method to save and load scikit-learn related attributes (#5245). Previously, we used Python pickle to save Python attributes related to `XGBClassifier`, `XGBRegressor`, and `XGBRanker` objects. These attributes are necessary to properly interact with scikit-learn. The use of pickling hampered interoperability, as a pickle from one machine may not necessarily work on another machine. See #4639 for more details. Starting with this release, we use an alternative method to serialize the scikit-learn related attributes. The use of Python pickle is now discouraged (#5236, #5281).
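
  A minimal sketch of the new JSON round trip (the file name is arbitrary; the JSON format is selected by the `.json` extension):

  ```python
  import numpy as np
  import xgboost as xgb

  X = np.random.rand(128, 4)
  y = np.random.rand(128)
  bst = xgb.train({'objective': 'reg:squarederror'},
                  xgb.DMatrix(X, label=y), num_boost_round=5)

  # Only the model (trees plus essential metadata) is written,
  # not machine-specific internal configuration.
  bst.save_model('model.json')

  bst2 = xgb.Booster()
  bst2.load_model('model.json')
  ```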

### Parameter validation: detection of unused or incorrect parameters (#4553, #4577, #4738, #4801, #4961, #5101, #5157, #5167, #5256)
* Mis-spelled training parameters are a common user mistake. In previous versions of XGBoost, mis-spelled parameters were silently ignored. Starting with the 1.0.0 release, XGBoost will produce a warning message if there are any unused training parameters. Currently, parameter validation is available to R users and Python XGBoost API users. We are working to extend its support to scikit-learn users.
* Configuration steps now have well-defined semantics (#4542, #4738), so we know exactly where and how the internal configurable parameters are changed.
* The user can now use the `save_config()` function to inspect all (used) training parameters. This is helpful for debugging model performance.
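
  For example, a sketch of inspecting the resolved configuration (the exact layout of the returned JSON is version-dependent):

  ```python
  import json
  import numpy as np
  import xgboost as xgb

  X = np.random.rand(128, 4)
  y = np.random.rand(128)
  bst = xgb.train({'max_depth': 3, 'eta': 0.3},
                  xgb.DMatrix(X, label=y), num_boost_round=5)

  # save_config() returns a JSON string of all resolved (used) parameters.
  config = json.loads(bst.save_config())
  print(sorted(config))  # top-level sections of the configuration
  ```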

### Allow individual workers to recover from faults (#4808, #4966)
* Status quo: if a worker fails, all workers are shut down and restarted, and learning resumes from the last checkpoint. This involves requesting resources from the scheduler (e.g. Spark) and shuffling all the data again from scratch. Both of these operations can be quite costly and block training for extended periods of time, especially if the training data is big and the number of worker nodes is in the hundreds.
* The proposed solution is to recover the single node that failed, instead of shutting down all workers. The rest of the cluster waits until the single failed worker is bootstrapped and has caught up with the rest.
* See roadmap at #4753. Note that this is work in progress. In particular, the feature is not yet available from XGBoost4J-Spark.

### Accurate prediction for DART models
* Use DART tree weights when computing SHAPs (#5050)
* Don't drop trees during DART prediction by default (#5115)
* Fix DART prediction in R (#5204)

### Make external memory more robust
* Fix issues with training with external memory on CPU (#4487)
* Fix crash with approx tree method on CPU (#4510)
* Fix external memory race in `exact` (#4980). Note: `dmlc::ThreadedIter` is not actually thread-safe. We would like to re-design it in the long term.

### Major refactoring of the `DMatrix` class (#4686, #4744, #4748, #5044, #5092, #5108, #5188, #5198)
* Goal 1: improve performance and reduce memory consumption. Right now, if the user trains a model with a NumPy array as training data, the array gets copied 2-3 times before training begins. We'd like to reduce duplication of the data matrix.
* Goal 2: Expose a common interface to external data, unify the way DMatrix objects are constructed and simplify the process of adding new external data sources. This work is essential for ingesting cuPy arrays.
* Goal 3: Handle missing values consistently.
* RFC: #4354, Roadmap: #5143
* This work is also relevant to external memory support on GPUs.

### Breaking: XGBoost Python package now requires Python 3.5 or newer (#5021, #5274)
* Python 3.4 has reached its end-of-life on March 16, 2019, so we now require Python 3.5 or newer.

### Breaking: GPU algorithm now requires CUDA 9.0 and higher (#4527, #4580)

### Breaking: `n_gpus` parameter removed; multi-GPU training now requires a distributed framework (#4579, #4749, #4773, #4810, #4867, #4908)
* #4531 proposed removing support for single-process multi-GPU training. Contributors would focus on multi-GPU support through distributed frameworks such as Dask and Spark, where the framework would be expected to assign a worker process for each GPU independently. By delegating GPU management and data movement to the distributed framework, we can greatly simplify the core XGBoost codebase, make multi-GPU training more robust, and reduce the burden for future development.

### Breaking: Some deprecated features have been removed
* ``gpu_exact`` training method (#4527, #4742, #4777). Use ``gpu_hist`` instead.
* ``learning_rates`` parameter in Python (#5155). Use the callback API instead.
* ``num_roots`` (#5059, #5165), since the current training code always uses a single root node.
* GPU-specific objectives (#4690), such as `gpu:reg:linear`. Use objectives without the `gpu:` prefix; the GPU will be used automatically if your machine has one.

### Breaking: the C API function `XGBoosterPredict()` now asks for an extra parameter `training`.

### Breaking: We now use CMake exclusively to build XGBoost. `Makefile` is being sunset.
* Exception: the R package uses Autotools, as the CRAN ecosystem did not yet adopt CMake widely.

### Performance improvements
* Smarter choice of histogram construction for distributed `gpu_hist` (#4519)
* Optimizations for quantization on device (#4572)
* Introduce caching memory allocator to avoid latency associated with GPU memory allocation (#4554, #4615)
* Optimize the initialization stage of the CPU `hist` algorithm for sparse datasets (#4625)
* Prevent unnecessary data copies from GPU memory to the host (#4795)
* Improve operation efficiency for single prediction (#5016)
* Group builder modified for incremental building, to speed up building large `DMatrix` (#5098)

### Bug-fixes
* Eliminate `FutureWarning: Series.base is deprecated` (#4337)
* Ensure pandas DataFrame column names are treated as strings in type error message (#4481)
* [jvm-packages] Add back `reg:linear` for scala, as it is only deprecated and not meant to be removed yet (#4490)
* Fix library loading for Cygwin users (#4499)
* Fix prediction from loaded pickle (#4516)
* Enforce exclusion between `pred_interactions=True` and `pred_contribs=True` (#4522)
* Do not return dangling reference to local `std::string` (#4543)
* Set the appropriate device before freeing device memory (#4566)
* Mark `SparsePageDmatrix` destructor default. (#4568)
* Choose the appropriate tree method only when the tree method is 'auto' (#4571)
* Fix `benchmark_tree.py` (#4593)
* [jvm-packages] Fix silly bug in feature scoring (#4604)
* Fix GPU predictor when the test data matrix has different number of features than the training data matrix used to train the model (#4613)
* Fix external memory for get column batches. (#4622)
* [R] Use built-in label when xgb.DMatrix is given to xgb.cv() (#4631)
* Fix early stopping in the Python package (#4638)
* Fix AUC error in distributed mode caused by imbalanced dataset (#4645, #4798)
* [jvm-packages] Expose `setMissing` method in `XGBoostClassificationModel` / `XGBoostRegressionModel` (#4643)
* Remove initializing stringstream reference. (#4788)
* [R] `xgb.get.handle` now checks all classes listed of `object` (#4800)
* Do not use `gpu_predictor` unless data comes from GPU (#4836)
* Fix data loading (#4862)
* Workaround `isnan` across different environments. (#4883)
* [jvm-packages] Handle Long-type parameter (#4885)
* Don't `set_params` at the end of `set_state` (#4947). Ensure that the model does not change after pickling and unpickling multiple times.
* C++ exceptions should not crash OpenMP loops (#4960)
* Fix `usegpu` flag in DART. (#4984)
* Run training with empty `DMatrix` (#4990, #5159)
* Ensure that no two processes can use the same GPU (#4990)
* Fix repeated split and 0 cover nodes (#5010)
* Reset histogram hit counter between multiple data batches (#5035)
* Fix `feature_name` created from Int64Index dataframe. (#5081)
* Don't use 0 for "fresh leaf" (#5084)
* Throw error when user attempts to use multi-GPU training and XGBoost has not been compiled with NCCL (#5170)
* Fix metric name loading (#5122)
* Quick fix for memory leak in CPU `hist` algorithm (#5153)
* Fix wrapping GPU ID and prevent data copying (#5160)
* Fix signature of Span constructor (#5166)
* Lazy initialization of device vector, so that XGBoost compiled with CUDA can run on a machine without any GPU (#5173)
* Model loading should not change system locale (#5314)
* Distributed training jobs would sometimes hang; revert Rabit to fix this regression (dmlc/rabit#132, #5237)

### API changes
* Add support for cross-validation using query ID (#4474)
* Enable feature importance property for DART model (#4525)
* Add `rmsle` metric and `reg:squaredlogerror` objective (#4541)
* All objective and evaluation metrics are now exposed to JVM packages (#4560)
* `dump_model()` and `get_dump()` now support exporting in GraphViz language (#4602)
* Support metrics `ndcg-` and `map-` (#4635)
* [jvm-packages] Allow chaining prediction (transform) in XGBoost4J-Spark (#4667)
* [jvm-packages] Add option to bypass missing value check in the Spark layer (#4805). Only use this option if you know what you are doing.
* [jvm-packages] Add public group getter (#4838)
* `XGDMatrixSetGroup` C API is now deprecated (#4864). Use `XGDMatrixSetUIntInfo` instead.
* [R] Added new `train_folds` parameter to `xgb.cv()` (#5114)
* Ingest meta information from Pandas DataFrame, such as data weights (#5216)

### Maintenance: Refactor code for legibility and maintainability
* De-duplicate GPU parameters (#4454)
* Simplify INI-style config reader using C++11 STL (#4478, #4521)
* Refactor histogram building code for `gpu_hist` (#4528)
* Overload device memory allocator, to enable instrumentation for compiling memory usage statistics (#4532)
* Refactor out row partitioning logic from `gpu_hist` (#4554)
* Remove an unused variable (#4588)
* Implement tree model dump with code generator, to de-duplicate code for generating dumps in 3 different formats (#4602)
* Remove `RowSet` class which is no longer being used (#4697)
* Remove some unused functions as reported by cppcheck (#4743)
* Mimic CUDA assert output in Span check (#4762)
* [jvm-packages] Refactor `XGBoost.scala` to put all params processing in one place (#4815)
* Add some comments for GPU row partitioner (#4832)
* Span: use `size_t' for index_type, add `front' and `back'. (#4935)
* Remove dead code in `exact` algorithm (#5034, #5105)
* Unify integer types used for row and column indices (#5034)
* Extract feature interaction constraint from `SplitEvaluator` class. (#5034)
* [Breaking] De-duplicate parameters and docstrings in the constructors of Scikit-Learn models (#5130)
* Remove benchmark code from GPU tests (#5141)
* Clean up Python 2 compatibility code. (#5161)
* Extensible binary serialization format for `DMatrix::MetaInfo` (#5187). This will be useful for implementing censored labels for survival analysis applications.
* Cleanup clang-tidy warnings. (#5247)

### Maintenance: testing, continuous integration, build system
* Use `yaml.safe_load` instead of `yaml.load`. (#4537)
* Ensure GCC is at least 5.x (#4538)
* Remove all mention of `reg:linear` from tests (#4544)
* [jvm-packages] Upgrade to Scala 2.12 (#4574)
* [jvm-packages] Update kryo dependency to 2.22 (#4575)
* [CI] Specify account ID when logging into ECR Docker registry (#4584)
* Use Sphinx 2.1+ to compile documentation (#4609)
* Make Pandas optional for running Python unit tests (#4620)
* Fix spark tests on machines with many cores (#4634)
* [jvm-packages] Update local dev build process (#4640)
* Add optional dependencies to setup.py (#4655)
* [jvm-packages] Fix maven warnings (#4664)
* Remove extraneous files from the R package, to comply with CRAN policy (#4699)
* Remove VC-2013 support, since it is not C++11 compliant (#4701)
* [CI] Fix broken installation of Pandas (#4704, #4722)
* [jvm-packages] Clean up temporary files after running tests (#4706)
* Specify version macro in CMake. (#4730)
* Include dmlc-tracker into XGBoost Python package (#4731)
* [CI] Use long key ID for Ubuntu repository fingerprints. (#4783)
* Remove plugin, cuda related code in automake & autoconf files (#4789)
* Skip related tests when scikit-learn is not installed. (#4791)
* Ignore vscode and clion files (#4866)
* Use bundled Google Test by default (#4900)
* [CI] Raise timeout threshold in Jenkins (#4938)
* Copy CMake parameter from dmlc-core. (#4948)
* Set correct file permission. (#4964)
* [CI] Update lint configuration to support latest pylint convention (#4971)
* [CI] Upload nightly builds to S3 (#4976, #4979)
* Add asan.so.5 to cmake script. (#4999)
* [CI] Fix Travis tests. (#5062)
* [CI] Locate vcomp140.dll from System32 directory (#5078)
* Implement training observer to dump internal states of objects (#5088). This will be useful for debugging.
* Fix visual studio output library directories (#5119)
* [jvm-packages] Comply with scala style convention + fix broken unit test (#5134)
* [CI] Repair download URL for Maven 3.6.1 (#5139)
* Don't use modernize-use-trailing-return-type in clang-tidy. (#5169)
* Explicitly use UTF-8 codepage when using MSVC (#5197)
* Add CMake option to run Undefined Behavior Sanitizer (UBSan) (#5211)
* Make some GPU tests deterministic (#5229)
* [R] Robust endian detection in CRAN xgboost build (#5232)
* Support FreeBSD (#5233)
* Make `pip install xgboost*.tar.gz` work by fixing build-python.sh (#5241)
* Fix compilation error due to 64-bit integer narrowing to `size_t` (#5250)
* Remove use of `std::cout` from R package, to comply with CRAN policy (#5261)
* Update DMLC-Core submodule (#4674, #4688, #4726, #4924)
* Update Rabit submodule (#4560, #4667, #4718, #4808, #4966, #5237)

### Usability Improvements, Documentation

* Add Random Forest API to Python API doc (#4500)
* Fix Python demo and doc. (#4545)
* Remove doc about not supporting CUDA 10.1 (#4578)
* Address some sphinx warnings and errors, add doc for building doc. (#4589)
* Add instruction to run formatting checks locally (#4591)
* Fix docstring for `XGBModel.predict()` (#4592)
* Doc and demo for customized metric and objective (#4598, #4608); a minimal sketch follows this list.
* Add to documentation how to run tests locally (#4610)
* Empty evaluation list in early stopping should produce meaningful error message (#4633)
* Fixed year to 2019 in conf.py, helpers.h and LICENSE (#4661)
* Minor updates to links and grammar (#4673)
* Remove `silent` in doc (#4689)
* Remove old Python troubleshooting doc (#4729)
* Add `os.PathLike` support for file paths to DMatrix and Booster Python classes (#4757); a sketch follows this list.
* Update XGBoost4J-Spark doc (#4804)
* Regular formatting for evaluation metrics (#4803)
* [jvm-packages] Refine documentation for handling missing values in XGBoost4J-Spark (#4805)
* Monitor for distributed environment (#4829). This is useful for identifying performance bottlenecks.
* Add check for length of weights and produce a good error message (#4872)
* Fix DMatrix doc (#4884)
* Export C++ headers in CMake installation (#4897)
* Update license year in README.md to 2019 (#4940)
* Fix incorrectly displayed Note in the doc (#4943)
* Follow PEP 257 Docstring Conventions (#4959)
* Document minimum version required for Google Test (#5001)
* Add better error message for invalid feature names (#5024)
* Some guidelines on device memory usage (#5038)
* [doc] Some notes for external memory. (#5065)
* Update document for `tree_method` (#5106)
* Update demo for ranking. (#5154)
* Add new lines for Spark XGBoost missing values section (#5180)
* Fix simple typo: utilty -> utility (#5182)
* Update R doc by roxygen2 (#5201)
* [R] Direct user to use `set.seed()` instead of setting `seed` parameter (#5125)
* Add Optuna badge to `README.md` (#5208)
* Fix compilation error in `c-api-demo.c` (#5215)

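As referenced in the list above, the customized objective and metric demos (#4598, #4608) revolve around one pattern: the objective callback returns the gradient and hessian of the loss, and the metric callback returns a `(name, value)` pair. A minimal sketch of that pattern, with toy data that is not part of the demos themselves:

```python
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
dtrain = xgb.DMatrix(rng.rand(100, 10), label=rng.rand(100))

def squared_error(preds, dtrain):
    # Objective callback: gradient and hessian of 1/2 * (pred - label)^2.
    labels = dtrain.get_label()
    return preds - labels, np.ones_like(preds)

def mae(preds, dtrain):
    # Metric callback: returns a (name, value) pair.
    labels = dtrain.get_label()
    return 'mae', float(np.abs(preds - labels).mean())

bst = xgb.train({'tree_method': 'hist'}, dtrain, num_boost_round=10,
                evals=[(dtrain, 'train')], obj=squared_error, feval=mae)
```
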
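Likewise, the `os.PathLike` support from #4757 means any path-like object is accepted where a string path used to be required; a tiny sketch with hypothetical file names:

```python
import pathlib
import xgboost as xgb

train_path = pathlib.Path('train.libsvm')  # an os.PathLike object, not a str
dtrain = xgb.DMatrix(train_path)
bst = xgb.train({'objective': 'reg:squarederror'}, dtrain, num_boost_round=2)
bst.save_model(pathlib.Path('model.bin'))
```
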
### Acknowledgement
**Contributors**: Nan Zhu (@CodingCat), Crissman Loomis (@Crissman), Cyprien Ricque (@Cyprien-Ricque), Evan Kepner (@EvanKepner), K.O. (@Hi-king), KaiJin Ji (@KerryJi), Peter Badida (@KeyWeeUsr), Kodi Arfer (@Kodiologist), Rory Mitchell (@RAMitchell), Egor Smirnov (@SmirnovEgorRu), Jacob Kim (@TheJacobKim), Vibhu Jawa (@VibhuJawa), Marcos (@astrowonk), Andy Adinets (@canonizer), Chen Qin (@chenqin), Christopher Cowden (@cowden), @cpfarrell, @david-cortes, Liangcai Li (@firestarman), @fuhaoda, Philip Hyunsu Cho (@hcho3), @here-nagini, Tong He (@hetong007), Michal Kurka (@michalkurka), Honza Sterba (@honzasterba), @iblumin, @koertkuipers, mattn (@mattn), Mingjie Tang (@merlintang), OrdoAbChao (@mglowacki100), Matthew Jones (@mt-jones), mitama (@nigimitama), Nathan Moore (@nmoorenz), Daniel Stahl (@phillyfan1138), Michaël Benesty (@pommedeterresautee), Rong Ou (@rongou), Sebastian (@sfahnens), Xu Xiao (@sperlingxx), @sriramch, Sean Owen (@srowen), Stephanie Yang (@stpyang), Yuan Tang (@terrytangyuan), Mathew Wicks (@thesuperzapper), Tim Gates (@timgates42), TinkleG (@tinkle1129), Oleksandr Pryimak (@trams), Jiaming Yuan (@trivialfis), Matvey Turkov (@turk0v), Bobby Wang (@wbo4958), yage (@yage99), @yellowdolphin
**Reviewers**: Nan Zhu (@CodingCat), Crissman Loomis (@Crissman), Cyprien Ricque (@Cyprien-Ricque), Evan Kepner (@EvanKepner), John Zedlewski (@JohnZed), KOLANICH (@KOLANICH), KaiJin Ji (@KerryJi), Kodi Arfer (@Kodiologist), Rory Mitchell (@RAMitchell), Egor Smirnov (@SmirnovEgorRu), Nikita Titov (@StrikerRUS), Jacob Kim (@TheJacobKim), Vibhu Jawa (@VibhuJawa), Andrew Kane (@ankane), Arno Candel (@arnocandel), Marcos (@astrowonk), Bryan Woods (@bryan-woods), Andy Adinets (@canonizer), Chen Qin (@chenqin), Thomas Franke (@coding-komek), Peter (@codingforfun), @cpfarrell, Joshua Patterson (@datametrician), @fuhaoda, Philip Hyunsu Cho (@hcho3), Tong He (@hetong007), Honza Sterba (@honzasterba), @iblumin, @jakirkham, Vadim Khotilovich (@khotilov), Keith Kraus (@kkraus14), @koertkuipers, @melonki, Mingjie Tang (@merlintang), OrdoAbChao (@mglowacki100), Daniel Mahler (@mhlr), Matthew Rocklin (@mrocklin), Matthew Jones (@mt-jones), Michaël Benesty (@pommedeterresautee), PSEUDOTENSOR / Jonathan McKinney (@pseudotensor), Rong Ou (@rongou), Vladimir (@sh1ng), Scott Lundberg (@slundberg), Xu Xiao (@sperlingxx), @sriramch, Pasha Stetsenko (@st-pasha), Stephanie Yang (@stpyang), Yuan Tang (@terrytangyuan), Mathew Wicks (@thesuperzapper), Theodore Vasiloudis (@thvasilo), TinkleG (@tinkle1129), Oleksandr Pryimak (@trams), Jiaming Yuan (@trivialfis), Bobby Wang (@wbo4958), yage (@yage99), @yellowdolphin, Yin Lou (@yinlou)
## v0.90 (2019.05.18)
### XGBoost Python package drops Python 2.x (#4379, #4381)
Python 2.x is reaching its end-of-life at the end of this year. [Many scientific Python packages are now moving to drop Python 2.x](https://python3statement.org/).
### XGBoost4J-Spark now requires Spark 2.4.x (#4377)

* Spark 2.3 is reaching its end-of-life soon. See discussion at #4389.
* **Consistent handling of missing values** (#4309, #4349, #4411): Many users had reported issues with inconsistent predictions between XGBoost4J-Spark and the Python XGBoost package. The issue was caused by Spark mis-handling non-zero missing values (NaN, -1, 999, etc.). We now alert the user whenever Spark doesn't handle missing values correctly (#4309, #4349). See [the tutorial for dealing with missing values in XGBoost4J-Spark](https://xgboost.readthedocs.io/en/release_0.90/jvm/xgboost4j_spark_tutorial.html#dealing-with-missing-values). This fix also depends on the availability of Spark 2.4.x.

### Roadmap: better performance scaling for multi-core CPUs (#4310)

* Poor performance scaling of the `hist` algorithm for multi-core CPUs has been under investigation (#3810). #4310 optimizes quantile sketches and other pre-processing tasks. Special thanks to @SmirnovEgorRu.

### Roadmap: Harden distributed training (#4250)

* Make distributed training in XGBoost more robust by hardening [Rabit](https://github.com/dmlc/rabit), which implements [the AllReduce primitive](https://en.wikipedia.org/wiki/Reduce_%28parallel_pattern%29). In particular, improve test coverage on mechanisms for fault tolerance and recovery. Special thanks to @chenqin.

### New feature: Multi-class metric functions for GPUs (#4368)

* Metrics for multi-class classification have been ported to GPU: `merror`, `mlogloss`. Special thanks to @trivialfis.
* With supported metrics, XGBoost will select the correct devices based on your system and the `n_gpus` parameter (a parameter sketch follows).

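A sketch of a parameter dictionary from this era exercising the new GPU metrics (note that `n_gpus` was a 0.90-era parameter and was later removed in favor of selecting a single device):

```python
params = {
    'objective': 'multi:softprob',
    'num_class': 3,
    'tree_method': 'gpu_hist',
    'n_gpus': 1,                            # 0.90-era GPU selection parameter
    'eval_metric': ['merror', 'mlogloss'],  # both evaluated on the GPU
}
```
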
### New feature: Scikit-learn-like random forest API (#4148, #4255, #4258)

* XGBoost Python package now offers `XGBRFClassifier` and `XGBRFRegressor` API to train random forests. See [the tutorial](https://xgboost.readthedocs.io/en/release_0.90/tutorials/rf.html). Special thanks to @canonizer. A minimal sketch follows.

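A minimal sketch of the new estimators, using a synthetic dataset purely for illustration:

```python
import xgboost as xgb
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=500, n_features=20, random_state=0)
# A random forest of 100 trees, grown in a single boosting round.
clf = xgb.XGBRFClassifier(n_estimators=100, max_depth=6, random_state=0)
clf.fit(X, y)
print(clf.predict_proba(X[:5]))
```
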
### New feature: use external memory in GPU predictor (#4284, #4396, #4438, #4457)

* It is now possible to make predictions on GPU when the input is read from external memory. This is useful when you want to make predictions on a big dataset that does not fit into GPU memory. Special thanks to @rongou, @canonizer, @sriramch.

```python
dtest = xgboost.DMatrix('test_data.libsvm#dtest.cache')
bst.set_param('predictor', 'gpu_predictor')
bst.predict(dtest)
```

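In the snippet above, the `test_data.libsvm#dtest.cache` string follows XGBoost's external-memory convention: the part before `#` names the data file, and the part after it is the prefix for the on-disk cache files that the data is paged through.
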
* Coming soon: GPU training (`gpu_hist`) with external memory
### New feature: XGBoost can now handle comments in LIBSVM files (#4430)

* Special thanks to @trivialfis and @hcho3. A sketch of what this enables follows.

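A sketch of what this enables, assuming the LETOR-style convention where text after `#` on a data line is a comment (the file name is hypothetical):

```python
import xgboost as xgb

# Write a tiny LIBSVM file whose first line carries a trailing comment.
with open('tiny.libsvm', 'w') as f:
    f.write('1 0:1.0 2:2.5 # originally from document 42\n')
    f.write('0 1:0.5 2:1.0\n')

dtrain = xgb.DMatrix('tiny.libsvm')  # the comment is ignored by the parser
print(dtrain.num_row())  # prints 2
```
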
### New feature: Embed XGBoost in your C/C++ applications using CMake (#4323, #4333, #4453)

* It is now easier than ever to embed XGBoost in your C/C++ applications. In your CMakeLists.txt, add `xgboost::xgboost` as a linked library:

```cmake
find_package(xgboost REQUIRED)
add_executable(api-demo c-api-demo.c)
target_link_libraries(api-demo xgboost::xgboost)
```

[XGBoost C API documentation is available.](https://xgboost.readthedocs.io/en/release_0.90/dev) Special thanks to @trivialfis
### Performance improvements

* Use feature interaction constraints to narrow split search space (#4341, #4428)
* Additional optimizations for `gpu_hist` (#4248, #4283)
* Reduce OpenMP thread launches in `gpu_hist` (#4343)
* Additional optimizations for multi-node multi-GPU random forests. (#4238)
* Allocate unique prediction buffer for each input matrix, to avoid re-sizing GPU array (#4275)
* Remove various synchronisations from CUDA API calls (#4205)
* XGBoost4J-Spark
  - Allow the user to control whether to cache partitioned training data, to potentially reduce execution time (#4268)

### Bug-fixes

* Fix node reuse in `hist` (#4404)
* Fix GPU histogram allocation (#4347)
* Fix matrix attributes not sliced (#4311)
* Revise AUC and AUCPR metrics so that they work with the weighted ranking task (#4216, #4436)
* Fix timer invocation for InitDataOnce() in `gpu_hist` (#4206)
* Fix R-devel errors (#4251)
* Make gradient update in GPU linear updater thread-safe (#4259)
* Prevent out-of-range access in column matrix (#4231)
* Don't store DMatrix handle in Python object until it's initialized, to improve exception safety (#4317)
* XGBoost4J-Spark
  - Fix non-deterministic order within a zipped partition on prediction (#4388)
  - Remove race condition on tracker shutdown (#4224)
  - Allow setting the parameter `maxLeaves`. (#4226)
  - Allow partial evaluation of dataframe before prediction (#4407)
  - Automatically set `maximize_evaluation_metrics` if not explicitly given (#4446)

### API changes

* Deprecate `reg:linear` in favor of `reg:squarederror`. (#4267, #4427) A one-line sketch of the rename follows this list.
* Add attribute getter and setter to the Booster object in XGBoost4J (#4336)

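For most users the deprecation is a one-line rename in the parameter dictionary; the old alias still works for now but emits a deprecation warning:

```python
params = {'objective': 'reg:linear'}        # deprecated spelling (warns)
params = {'objective': 'reg:squarederror'}  # new canonical spelling
```
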
### Maintenance: Refactor C++ code for legibility and maintainability

* Fix clang-tidy warnings. (#4149)
* Remove deprecated C APIs. (#4266)
* Use Monitor class to time functions in `hist`. (#4273)
* Retire DVec class in favour of C++20-style span for device memory. (#4293)
* Improve HostDeviceVector exception safety (#4301)

### Maintenance: testing, continuous integration, build system

* **Major refactor of CMakeLists.txt** (#4323, #4333, #4453): adopt modern CMake and export XGBoost as a target
* **Major improvement in Jenkins CI pipeline** (#4234)
  - Migrate all Linux tests to Jenkins (#4401)
  - Builds and tests are now de-coupled, to test an artifact against multiple versions of CUDA, JDK, and other dependencies (#4401)
  - Add Windows GPU to Jenkins CI pipeline (#4463, #4469)
* Support CUDA 10.1 (#4223, #4232, #4265, #4468)
* Python wheels are now built with CUDA 9.0, so that JIT is not required on Volta architecture (#4459)
* Integrate with NVTX CUDA profiler (#4205)
* Add a test for CPU predictor using external memory (#4308)
* Refactor tests to get rid of duplication (#4358)
* Remove test dependency on `craigcitro/r-travis`, since it's deprecated (#4353)
* Add files from local R build to `.gitignore` (#4346)
* Make XGBoost4J compatible with Java 9+ by revising NativeLibLoader (#4351)
* Jenkins build for CUDA 10.0 (#4281)
* Remove remaining `silent` and `debug_verbose` in Python tests (#4299)
* Use all cores to build XGBoost4J lib on Linux (#4304)
* Upgrade Jenkins Linux build environment to GCC 5.3.1, CMake 3.6.0 (#4306)
* Make CMakeLists.txt compatible with CMake 3.3 (#4420)
* Add OpenMP option in CMakeLists.txt (#4339)
* Get rid of a few trivial compiler warnings (#4312)
* Add external Docker build cache, to speed up builds on Jenkins CI (#4331, #4334, #4458)
* Fix Windows tests (#4403)
* Fix a broken Python test (#4395)
* Use a fixed seed to split data in XGBoost4J-Spark tests, for reproducibility (#4417)
* Add additional Python tests to test training under constraints (#4426)
* Enable building with shared NCCL. (#4447)

### Usability Improvements, Documentation

* Document limitation of one-split-at-a-time greedy tree learning heuristic (#4233)
* Update build doc: PyPI wheels now support multi-GPU (#4219)
* Fix docs for `num_parallel_tree` (#4221)
* Fix document about `colsample_by*` parameter (#4340)
* Make the train and test inputs use the same column names. (#4329)
* Update R contribute link. (#4236)
* Fix Travis R tests (#4277)
* Log version number in crash log in XGBoost4J-Spark (#4271, #4303)
* Allow suppression of Rabit output in Booster::train in XGBoost4J (#4262)
* Add tutorial on handling missing values in XGBoost4J-Spark (#4425)
* Fix typos (#4345, #4393, #4432, #4435)
* Added language classifier in setup.py (#4327)
* Added Travis CI badge (#4344)
* Add BentoML to use case section (#4400)
* Remove subtly sexist remark (#4418)
* Add R vignette about parsing JSON dumps (#4439)

### Acknowledgement
**Contributors**: Nan Zhu (@CodingCat), Adam Pocock (@Craigacp), Daniel Hen (@Daniel8hen), Jiaxiang Li (@JiaxiangBU), Rory Mitchell (@RAMitchell), Egor Smirnov (@SmirnovEgorRu), Andy Adinets (@canonizer), Jonas (@elcombato), Harry Braviner (@harrybraviner), Philip Hyunsu Cho (@hcho3), Tong He (@hetong007), James Lamb (@jameslamb), Jean-Francois Zinque (@jeffzi), Yang Yang (@jokerkeny), Mayank Suman (@mayanksuman), jess (@monkeywithacupcake), Hajime Morrita (@omo), Ravi Kalia (@project-delphi), @ras44, Rong Ou (@rongou), Shaochen Shi (@shishaochen), Xu Xiao (@sperlingxx), @sriramch, Jiaming Yuan (@trivialfis), Christopher Suchanek (@wsuchy), Bozhao (@yubozhao)
**Reviewers**: Nan Zhu (@CodingCat), Adam Pocock (@Craigacp), Daniel Hen (@Daniel8hen), Jiaxiang Li (@JiaxiangBU), Laurae (@Laurae2), Rory Mitchell (@RAMitchell), Egor Smirnov (@SmirnovEgorRu), @alois-bissuel, Andy Adinets (@canonizer), Chen Qin (@chenqin), Harry Braviner (@harrybraviner), Philip Hyunsu Cho (@hcho3), Tong He (@hetong007), @jakirkham, James Lamb (@jameslamb), Julien Schueller (@jschueller), Mayank Suman (@mayanksuman), Hajime Morrita (@omo), Rong Ou (@rongou), Sara Robinson (@sararob), Shaochen Shi (@shishaochen), Xu Xiao (@sperlingxx), @sriramch, Sean Owen (@srowen), Sergei Lebedev (@superbobry), Yuan (Terry) Tang (@terrytangyuan), Theodore Vasiloudis (@thvasilo), Matthew Tovbin (@tovbinm), Jiaming Yuan (@trivialfis), Xin Yin (@xydrolase)
## v0.82 (2019.03.03)
This release is packed with many new features and bug fixes.

R-package/CMakeLists.txt (new file, 38 lines)
@@ -0,0 +1,38 @@
find_package(LibR REQUIRED)
message(STATUS "LIBR_CORE_LIBRARY " ${LIBR_CORE_LIBRARY})

file(GLOB_RECURSE R_SOURCES
  ${CMAKE_CURRENT_LIST_DIR}/src/*.cc
  ${CMAKE_CURRENT_LIST_DIR}/src/*.c)
# Use object library to expose symbols
add_library(xgboost-r OBJECT ${R_SOURCES})

set(R_DEFINITIONS
  -DXGBOOST_STRICT_R_MODE=1
  -DXGBOOST_CUSTOMIZE_GLOBAL_PRNG=1
  -DDMLC_LOG_BEFORE_THROW=0
  -DDMLC_DISABLE_STDIN=1
  -DDMLC_LOG_CUSTOMIZE=1
  -DRABIT_CUSTOMIZE_MSG_
  -DRABIT_STRICT_CXX98_)
target_compile_definitions(xgboost-r
  PRIVATE ${R_DEFINITIONS})
target_include_directories(xgboost-r
  PRIVATE
  ${LIBR_INCLUDE_DIRS}
  ${PROJECT_SOURCE_DIR}/include
  ${PROJECT_SOURCE_DIR}/dmlc-core/include
  ${PROJECT_SOURCE_DIR}/rabit/include)
set_target_properties(
  xgboost-r PROPERTIES
  CXX_STANDARD 11
  CXX_STANDARD_REQUIRED ON
  POSITION_INDEPENDENT_CODE ON)

set(XGBOOST_DEFINITIONS "${XGBOOST_DEFINITIONS};${R_DEFINITIONS}" PARENT_SCOPE)
set(XGBOOST_OBJ_SOURCES $<TARGET_OBJECTS:xgboost-r> PARENT_SCOPE)
set(LINKED_LIBRARIES_PRIVATE ${LINKED_LIBRARIES_PRIVATE} ${LIBR_CORE_LIBRARY} PARENT_SCOPE)

if (USE_OPENMP)
  target_link_libraries(xgboost-r PRIVATE OpenMP::OpenMP_CXX)
endif ()

R-package/DESCRIPTION
@@ -1,8 +1,8 @@
Package: xgboost
Type: Package
Title: Extreme Gradient Boosting
Version: 0.81.0.1
Date: 2018-08-13
Version: 1.1.0.1
Date: 2020-02-21
Authors@R: c(
  person("Tianqi", "Chen", role = c("aut"),
         email = "tianqi.tchen@gmail.com"),
@@ -52,7 +52,9 @@ Suggests:
    vcd (>= 1.3),
    testthat,
    lintr,
    igraph (>= 1.0.1)
    igraph (>= 1.0.1),
    jsonlite,
    float
Depends:
    R (>= 3.3.0)
Imports:
@@ -61,5 +63,5 @@ Imports:
    data.table (>= 1.9.6),
    magrittr (>= 1.5),
    stringi (>= 0.5.2)
RoxygenNote: 6.1.0
RoxygenNote: 7.1.0
SystemRequirements: GNU make, C++11

R-package/NAMESPACE
@@ -14,6 +14,7 @@ S3method(setinfo,xgb.DMatrix)
S3method(slice,xgb.DMatrix)
export("xgb.attr<-")
export("xgb.attributes<-")
export("xgb.config<-")
export("xgb.parameters<-")
export(cb.cv.predict)
export(cb.early.stop)
@@ -30,6 +31,7 @@ export(xgb.DMatrix)
export(xgb.DMatrix.save)
export(xgb.attr)
export(xgb.attributes)
export(xgb.config)
export(xgb.create.features)
export(xgb.cv)
export(xgb.dump)
@@ -38,6 +40,7 @@ export(xgb.ggplot.deepness)
export(xgb.ggplot.importance)
export(xgb.importance)
export(xgb.load)
export(xgb.load.raw)
export(xgb.model.dt.tree)
export(xgb.plot.deepness)
export(xgb.plot.importance)
@@ -46,7 +49,9 @@ export(xgb.plot.shap)
export(xgb.plot.tree)
export(xgb.save)
export(xgb.save.raw)
export(xgb.serialize)
export(xgb.train)
export(xgb.unserialize)
export(xgboost)
import(methods)
importClassesFrom(Matrix,dgCMatrix)

R-package/R/callbacks.R
@@ -1,26 +1,26 @@
#' Callback closures for booster training.
#'
#' These are used to perform various service tasks either during boosting iterations or at the end.
#' This approach helps to modularize many of such tasks without bloating the main training methods,
#' and it offers .
#'
#' @details
#' By default, a callback function is run after each boosting iteration.
#' An R-attribute \code{is_pre_iteration} could be set for a callback to define a pre-iteration function.
#'
#' When a callback function has \code{finalize} parameter, its finalizer part will also be run after
#' the boosting is completed.
#'
#' WARNING: side-effects!!! Be aware that these callback functions access and modify things in
#' the environment from which they are called from, which is a fairly uncommon thing to do in R.
#'
#' To write a custom callback closure, make sure you first understand the main concepts about R envoronments.
#' Check either R documentation on \code{\link[base]{environment}} or the
#' \href{http://adv-r.had.co.nz/Environments.html}{Environments chapter} from the "Advanced R"
#'
#' To write a custom callback closure, make sure you first understand the main concepts about R environments.
#' Check either R documentation on \code{\link[base]{environment}} or the
#' \href{http://adv-r.had.co.nz/Environments.html}{Environments chapter} from the "Advanced R"
#' book by Hadley Wickham. Further, the best option is to read the code of some of the existing callbacks -
#' choose ones that do something similar to what you want to achieve. Also, you would need to get familiar
#' with the objects available inside of the \code{xgb.train} and \code{xgb.cv} internal environments.
#'
#' @seealso
#' \code{\link{cb.print.evaluation}},
#' \code{\link{cb.evaluation.log}},
@@ -30,42 +30,42 @@
#' \code{\link{cb.cv.predict}},
#' \code{\link{xgb.train}},
#' \code{\link{xgb.cv}}
#'
#' @name callbacks
NULL

#
# Callbacks -------------------------------------------------------------------
#

#' Callback closure for printing the result of evaluation
#'
#' @param period results would be printed every number of periods
#' @param showsd whether standard deviations should be printed (when available)
#'
#' @details
#' The callback function prints the result of evaluation at every \code{period} iterations.
#' The initial and the last iteration's evaluations are always printed.
#'
#' Callback function expects the following values to be set in its calling frame:
#' \code{bst_evaluation} (also \code{bst_evaluation_err} when available),
#' \code{iteration},
#' \code{begin_iteration},
#' \code{end_iteration}.
#'
#' @seealso
#' \code{\link{callbacks}}
#'
#' @export
cb.print.evaluation <- function(period = 1, showsd = TRUE) {

  callback <- function(env = parent.frame()) {
    if (length(env$bst_evaluation) == 0 ||
        period == 0 ||
        NVL(env$rank, 0) != 0 )
      return()

    i <- env$iteration
    if ((i-1) %% period == 0 ||
        i == env$begin_iteration ||
        i == env$end_iteration) {
@@ -81,48 +81,48 @@ cb.print.evaluation <- function(period = 1, showsd = TRUE) {

#' Callback closure for logging the evaluation history
#'
#' @details
#' This callback function appends the current iteration evaluation results \code{bst_evaluation}
#' available in the calling parent frame to the \code{evaluation_log} list in a calling frame.
#'
#' The finalizer callback (called with \code{finalize = TURE} in the end) converts
#' the \code{evaluation_log} list into a final data.table.
#'
#' The iteration evaluation result \code{bst_evaluation} must be a named numeric vector.
#'
#' Note: in the column names of the final data.table, the dash '-' character is replaced with
#' the underscore '_' in order to make the column names more like regular R identifiers.
#'
#' Callback function expects the following values to be set in its calling frame:
#' \code{evaluation_log},
#' \code{bst_evaluation},
#' \code{iteration}.
#'
#' @seealso
#' \code{\link{callbacks}}
#'
#' @export
cb.evaluation.log <- function() {

  mnames <- NULL

  init <- function(env) {
    if (!is.list(env$evaluation_log))
      stop("'evaluation_log' has to be a list")
    mnames <<- names(env$bst_evaluation)
    if (is.null(mnames) || any(mnames == ""))
      stop("bst_evaluation must have non-empty names")

    mnames <<- gsub('-', '_', names(env$bst_evaluation))
    if(!is.null(env$bst_evaluation_err))
      mnames <<- c(paste0(mnames, '_mean'), paste0(mnames, '_std'))
  }

  finalizer <- function(env) {
    env$evaluation_log <- as.data.table(t(simplify2array(env$evaluation_log)))
    setnames(env$evaluation_log, c('iter', mnames))

    if(!is.null(env$bst_evaluation_err)) {
      # rearrange col order from _mean,_mean,...,_std,_std,...
      # to be _mean,_std,_mean,_std,...
@@ -135,18 +135,18 @@ cb.evaluation.log <- function() {
      env$evaluation_log <- env$evaluation_log[, c('iter', cnames), with = FALSE]
    }
  }

  callback <- function(env = parent.frame(), finalize = FALSE) {
    if (is.null(mnames))
      init(env)

    if (finalize)
      return(finalizer(env))

    ev <- env$bst_evaluation
    if(!is.null(env$bst_evaluation_err))
      ev <- c(ev, env$bst_evaluation_err)
    env$evaluation_log <- c(env$evaluation_log,
                            list(c(iter = env$iteration, ev)))
  }
  attr(callback, 'call') <- match.call()
@@ -154,21 +154,21 @@ cb.evaluation.log <- function() {
  callback
}

#' Callback closure for restetting the booster's parameters at each iteration.
#'
#' Callback closure for resetting the booster's parameters at each iteration.
#'
#' @param new_params a list where each element corresponds to a parameter that needs to be reset.
#'        Each element's value must be either a vector of values of length \code{nrounds}
#'        to be set at each iteration,
#'        or a function of two parameters \code{learning_rates(iteration, nrounds)}
#'        which returns a new parameter value by using the current iteration number
#'        and the total number of boosting rounds.
#'
#' @details
#' This is a "pre-iteration" callback function used to reset booster's parameters
#' at the beginning of each iteration.
#'
#' Note that when training is resumed from some previous model, and a function is used to
#' reset a parameter value, the \code{nrounds} argument in this function would be the
#' the number of boosting rounds in the current training.
#'
#' Callback function expects the following values to be set in its calling frame:
@@ -176,32 +176,32 @@ cb.evaluation.log <- function() {
#' \code{iteration},
#' \code{begin_iteration},
#' \code{end_iteration}.
#'
#' @seealso
#' \code{\link{callbacks}}
#'
#' @export
cb.reset.parameters <- function(new_params) {

  if (typeof(new_params) != "list")
    stop("'new_params' must be a list")
  pnames <- gsub("\\.", "_", names(new_params))
  nrounds <- NULL

  # run some checks in the begining
  init <- function(env) {
    nrounds <<- env$end_iteration - env$begin_iteration + 1

    if (is.null(env$bst) && is.null(env$bst_folds))
      stop("Parent frame has neither 'bst' nor 'bst_folds'")

    # Some parameters are not allowed to be changed,
    # since changing them would simply wreck some chaos
    not_allowed <- pnames %in%
      c('num_class', 'num_output_group', 'size_leaf_vector', 'updater_seq')
    if (any(not_allowed))
      stop('Parameters ', paste(pnames[not_allowed]), " cannot be changed during boosting.")

    for (n in pnames) {
      p <- new_params[[n]]
      if (is.function(p)) {
@@ -215,18 +215,18 @@ cb.reset.parameters <- function(new_params) {
      }
    }
  }

  callback <- function(env = parent.frame()) {
    if (is.null(nrounds))
      init(env)

    i <- env$iteration
    pars <- lapply(new_params, function(p) {
      if (is.function(p))
        return(p(i, nrounds))
      p[i]
    })

    if (!is.null(env$bst)) {
      xgb.parameters(env$bst$handle) <- pars
    } else {
@@ -242,23 +242,23 @@ cb.reset.parameters <- function(new_params) {

#' Callback closure to activate the early stopping.
#'
#' @param stopping_rounds The number of rounds with no improvement in
#'        the evaluation metric in order to stop the training.
#' @param maximize whether to maximize the evaluation metric
#' @param metric_name the name of an evaluation column to use as a criteria for early
#'        stopping. If not set, the last column would be used.
#'        Let's say the test data in \code{watchlist} was labelled as \code{dtest},
#'        and one wants to use the AUC in test data for early stopping regardless of where
#'        it is in the \code{watchlist}, then one of the following would need to be set:
#'        \code{metric_name='dtest-auc'} or \code{metric_name='dtest_auc'}.
#'        All dash '-' characters in metric names are considered equivalent to '_'.
#' @param verbose whether to print the early stopping information.
#'
#' @details
#' This callback function determines the condition for early stopping
#' by setting the \code{stop_condition = TRUE} flag in its calling frame.
#'
#' The following additional fields are assigned to the model's R object:
#' \itemize{
#' \item \code{best_score} the evaluation score at the best iteration
@@ -266,13 +266,13 @@ cb.reset.parameters <- function(new_params) {
#' \item \code{best_ntreelimit} to use with the \code{ntreelimit} parameter in \code{predict}.
#'       It differs from \code{best_iteration} in multiclass or random forest settings.
#' }
#'
#' The Same values are also stored as xgb-attributes:
#' \itemize{
#' \item \code{best_iteration} is stored as a 0-based iteration index (for interoperability of binary models)
#' \item \code{best_msg} message string is also stored.
#' }
#'
#' At least one data element is required in the evaluation watchlist for early stopping to work.
#'
#' Callback function expects the following values to be set in its calling frame:
@@ -284,13 +284,13 @@ cb.reset.parameters <- function(new_params) {
#' \code{begin_iteration},
#' \code{end_iteration},
#' \code{num_parallel_tree}.
#'
#' @seealso
#' \code{\link{callbacks}},
#' \code{\link{xgb.attr}}
#'
#' @export
cb.early.stop <- function(stopping_rounds, maximize = FALSE,
                          metric_name = NULL, verbose = TRUE) {
  # state variables
  best_iteration <- -1
@@ -298,11 +298,11 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
  best_score <- Inf
  best_msg <- NULL
  metric_idx <- 1

  init <- function(env) {
    if (length(env$bst_evaluation) == 0)
      stop("For early stopping, watchlist must have at least one element")

    eval_names <- gsub('-', '_', names(env$bst_evaluation))
    if (!is.null(metric_name)) {
      metric_idx <<- which(gsub('-', '_', metric_name) == eval_names)
@@ -314,25 +314,25 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
        length(env$bst_evaluation) > 1) {
      metric_idx <<- length(eval_names)
      if (verbose)
        cat('Multiple eval metrics are present. Will use ',
            eval_names[metric_idx], ' for early stopping.\n', sep = '')
    }

    metric_name <<- eval_names[metric_idx]

    # maximize is usually NULL when not set in xgb.train and built-in metrics
    if (is.null(maximize))
      maximize <<- grepl('(_auc|_map|_ndcg)', metric_name)

    if (verbose && NVL(env$rank, 0) == 0)
      cat("Will train until ", metric_name, " hasn't improved in ",
          stopping_rounds, " rounds.\n\n", sep = '')

    best_iteration <<- 1
    if (maximize) best_score <<- -Inf

    env$stop_condition <- FALSE

    if (!is.null(env$bst)) {
      if (!inherits(env$bst, 'xgb.Booster'))
        stop("'bst' in the parent frame must be an 'xgb.Booster'")
@@ -348,7 +348,7 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
      stop("Parent frame has neither 'bst' nor ('bst_folds' and 'basket')")
    }
  }

  finalizer <- function(env) {
    if (!is.null(env$bst)) {
      attr_best_score = as.numeric(xgb.attr(env$bst$handle, 'best_score'))
@@ -367,16 +367,16 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
  callback <- function(env = parent.frame(), finalize = FALSE) {
    if (best_iteration < 0)
      init(env)

    if (finalize)
      return(finalizer(env))

    i <- env$iteration
    score = env$bst_evaluation[metric_idx]

    if (( maximize && score > best_score) ||
        (!maximize && score < best_score)) {

      best_msg <<- format.eval.string(i, env$bst_evaluation, env$bst_evaluation_err)
      best_score <<- score
      best_iteration <<- i
@@ -403,37 +403,37 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,

#' Callback closure for saving a model file.
#'
#' @param save_period save the model to disk after every
#'        \code{save_period} iterations; 0 means save the model at the end.
#' @param save_name the name or path for the saved model file.
#'        It can contain a \code{\link[base]{sprintf}} formatting specifier
#'        to include the integer iteration number in the file name.
#'        E.g., with \code{save_name} = 'xgboost_%04d.model',
#'        the file saved at iteration 50 would be named "xgboost_0050.model".
#'
#' @details
#' This callback function allows to save an xgb-model file, either periodically after each \code{save_period}'s or at the end.
#'
#' Callback function expects the following values to be set in its calling frame:
#' \code{bst},
#' \code{iteration},
#' \code{begin_iteration},
#' \code{end_iteration}.
#'
#' @seealso
#' \code{\link{callbacks}}
#'
#' @export
cb.save.model <- function(save_period = 0, save_name = "xgboost.model") {

  if (save_period < 0)
    stop("'save_period' cannot be negative")

  callback <- function(env = parent.frame()) {
    if (is.null(env$bst))
      stop("'save_model' callback requires the 'bst' booster object in its calling frame")

    if ((save_period > 0 && (env$iteration - env$begin_iteration) %% save_period == 0) ||
        (save_period == 0 && env$iteration == env$end_iteration))
      xgb.save(env$bst, sprintf(save_name, env$iteration))
@@ -445,16 +445,16 @@ cb.save.model <- function(save_period = 0, save_name = "xgboost.model") {

#' Callback closure for returning cross-validation based predictions.
#'
#' @param save_models a flag for whether to save the folds' models.
#'
#' @details
#' This callback function saves predictions for all of the test folds,
#' and also allows to save the folds' models.
#'
#' It is a "finalizer" callback and it uses early stopping information whenever it is available,
#' thus it must be run after the early stopping callback if the early stopping is used.
#'
#' Callback function expects the following values to be set in its calling frame:
#' \code{bst_folds},
#' \code{basket},
@@ -463,36 +463,36 @@ cb.save.model <- function(save_period = 0, save_name = "xgboost.model") {
#' \code{params},
#' \code{num_parallel_tree},
#' \code{num_class}.
#'
#' @return
#' Predictions are returned inside of the \code{pred} element, which is either a vector or a matrix,
#' depending on the number of prediction outputs per data row. The order of predictions corresponds
#' to the order of rows in the original dataset. Note that when a custom \code{folds} list is
#' provided in \code{xgb.cv}, the predictions would only be returned properly when this list is a
#' non-overlapping list of k sets of indices, as in a standard k-fold CV. The predictions would not be
#' meaningful when user-profided folds have overlapping indices as in, e.g., random sampling splits.
#' depending on the number of prediction outputs per data row. The order of predictions corresponds
#' to the order of rows in the original dataset. Note that when a custom \code{folds} list is
#' provided in \code{xgb.cv}, the predictions would only be returned properly when this list is a
#' non-overlapping list of k sets of indices, as in a standard k-fold CV. The predictions would not be
#' meaningful when user-provided folds have overlapping indices as in, e.g., random sampling splits.
#' When some of the indices in the training dataset are not included into user-provided \code{folds},
#' their prediction value would be \code{NA}.
#'
#' @seealso
#' \code{\link{callbacks}}
#'
#' @export
cb.cv.predict <- function(save_models = FALSE) {

  finalizer <- function(env) {
    if (is.null(env$basket) || is.null(env$bst_folds))
      stop("'cb.cv.predict' callback requires 'basket' and 'bst_folds' lists in its calling frame")

    N <- nrow(env$data)
    pred <-
      if (env$num_class > 1) {
        matrix(NA_real_, N, env$num_class)
      } else {
        rep(NA_real_, N)
      }

    ntreelimit <- NVL(env$basket$best_ntreelimit,
                      env$end_iteration * env$num_parallel_tree)
    if (NVL(env$params[['booster']], '') == 'gblinear') {
      ntreelimit <- 0 # must be 0 for gblinear
@@ -569,7 +569,7 @@ cb.cv.predict <- function(save_models = FALSE) {
#' # Extract the coefficients' path and plot them vs boosting iteration number:
#' coef_path <- xgb.gblinear.history(bst)
#' matplot(coef_path, type = 'l')
#'
#' # With the deterministic coordinate descent updater, it is safer to use higher learning rates.
#' # Will try the classical componentwise boosting which selects a single best feature per round:
#' bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 200, eta = 0.8,
@@ -586,7 +586,7 @@ cb.cv.predict <- function(save_models = FALSE) {
#' # coefficients in the CV fold #3
#' xgb.gblinear.history(bst)[[3]] %>% matplot(type = 'l')
#'
#'
#' #### Multiclass classification:
#' #
#' dtrain <- xgb.DMatrix(scale(x), label = as.numeric(iris$Species) - 1)
@@ -681,9 +681,9 @@ cb.gblinear.history <- function(sparse=FALSE) {
#' using the \code{cb.gblinear.history()} callback.
#' @param class_index zero-based class index to extract the coefficients for only that
#'        specific class in a multinomial multiclass model. When it is NULL, all the
#'        coeffients are returned. Has no effect in non-multiclass models.
#'        coefficients are returned. Has no effect in non-multiclass models.
#'
#' @return
#' For an \code{xgb.train} result, a matrix (either dense or sparse) with the columns
#' corresponding to iteration's coefficients (in the order as \code{xgb.dump()} would
#' return) and the rows corresponding to boosting iterations.
@@ -731,7 +731,7 @@ xgb.gblinear.history <- function(model, class_index = NULL) {
  coef_path <- environment(model$callbacks$cb.gblinear.history)[["coefs"]]
  if (!is.null(class_index) && num_class > 1) {
    coef_path <- if (is.list(coef_path)) {
      lapply(coef_path,
             function(x) x[, seq(1 + class_index, by=num_class, length.out=num_feat)])
    } else {
      coef_path <- coef_path[, seq(1 + class_index, by=num_class, length.out=num_feat)]
@@ -743,7 +743,7 @@ xgb.gblinear.history <- function(model, class_index = NULL) {

#
# Internal utility functions for callbacks ------------------------------------
#

# Format the evaluation metric string
format.eval.string <- function(iter, eval_res, eval_err = NULL) {
@@ -773,7 +773,7 @@ callback.calls <- function(cb_list) {
  unlist(lapply(cb_list, function(x) attr(x, 'call')))
}

# Add a callback cb to the list and make sure that
# cb.early.stop and cb.cv.predict are at the end of the list
# with cb.cv.predict being the last (when present)
add.cb <- function(cb_list, cb) {
@@ -782,11 +782,11 @@ add.cb <- function(cb_list, cb) {
  if ('cb.early.stop' %in% names(cb_list)) {
    cb_list <- c(cb_list, cb_list['cb.early.stop'])
    # this removes only the first one
    cb_list['cb.early.stop'] <- NULL
  }
  if ('cb.cv.predict' %in% names(cb_list)) {
    cb_list <- c(cb_list, cb_list['cb.cv.predict'])
    cb_list['cb.cv.predict'] <- NULL
  }
  cb_list
}
@@ -796,7 +796,7 @@ categorize.callbacks <- function(cb_list) {
  list(
    pre_iter = Filter(function(x) {
      pre <- attr(x, 'is_pre_iteration')
      !is.null(pre) && pre
    }, cb_list),
    post_iter = Filter(function(x) {
      pre <- attr(x, 'is_pre_iteration')

R-package/R/utils.R
@@ -28,12 +28,12 @@ NVL <- function(x, val) {
# Merges booster params with whatever is provided in ...
# plus runs some checks
check.booster.params <- function(params, ...) {
  if (typeof(params) != "list")
  if (!identical(class(params), "list"))
    stop("params must be a list")

  # in R interface, allow for '.' instead of '_' in parameter names
  names(params) <- gsub("\\.", "_", names(params))

  # merge parameters from the params and the dots-expansion
  dot_params <- list(...)
  names(dot_params) <- gsub("\\.", "_", names(dot_params))
@@ -41,15 +41,15 @@ check.booster.params <- function(params, ...) {
              names(dot_params))) > 0)
    stop("Same parameters in 'params' and in the call are not allowed. Please check your 'params' list.")
  params <- c(params, dot_params)

  # providing a parameter multiple times makes sense only for 'eval_metric'
  name_freqs <- table(names(params))
  multi_names <- setdiff(names(name_freqs[name_freqs > 1]), 'eval_metric')
  if (length(multi_names) > 0) {
    warning("The following parameters were provided multiple times:\n\t",
            paste(multi_names, collapse = ', '), "\n  Only the last value for each of them will be used.\n")
    # While xgboost internals would choose the last value for a multiple-times parameter,
    # enforce it here in R as well (b/c multi-parameters might be used further in R code,
    # and R takes the 1st value when multiple elements with the same name are present in a list).
    for (n in multi_names) {
      del_idx <- which(n == names(params))
@@ -57,28 +57,28 @@ check.booster.params <- function(params, ...) {
      params[[del_idx]] <- NULL
    }
  }

  # for multiclass, expect num_class to be set
  if (typeof(params[['objective']]) == "character" &&
      substr(NVL(params[['objective']], 'x'), 1, 6) == 'multi:' &&
      as.numeric(NVL(params[['num_class']], 0)) < 2) {
    stop("'num_class' > 1 parameter must be set for multiclass classification")
  }

  # monotone_constraints parser

  if (!is.null(params[['monotone_constraints']]) &&
      typeof(params[['monotone_constraints']]) != "character") {
    vec2str = paste(params[['monotone_constraints']], collapse = ',')
    vec2str = paste0('(', vec2str, ')')
    params[['monotone_constraints']] = vec2str
  }

  # interaction constraints parser (convert from list of column indices to string)
  if (!is.null(params[['interaction_constraints']]) &&
      typeof(params[['interaction_constraints']]) != "character"){
    # check input class
    if (class(params[['interaction_constraints']]) != 'list') stop('interaction_constraints should be class list')
    if (!identical(class(params[['interaction_constraints']]),'list')) stop('interaction_constraints should be class list')
    if (!all(unique(sapply(params[['interaction_constraints']], class)) %in% c('numeric','integer'))) {
      stop('interaction_constraints should be a list of numeric/integer vectors')
    }
@@ -96,10 +96,10 @@ check.booster.params <- function(params, ...) {
check.custom.obj <- function(env = parent.frame()) {
  if (!is.null(env$params[['objective']]) && !is.null(env$obj))
    stop("Setting objectives in 'params' and 'obj' at the same time is not allowed")

  if (!is.null(env$obj) && typeof(env$obj) != 'closure')
    stop("'obj' must be a function")

  # handle the case when custom objective function was provided through params
  if (!is.null(env$params[['objective']]) &&
      typeof(env$params$objective) == 'closure') {
@@ -113,21 +113,21 @@ check.custom.obj <- function(env = parent.frame()) {
check.custom.eval <- function(env = parent.frame()) {
  if (!is.null(env$params[['eval_metric']]) && !is.null(env$feval))
    stop("Setting evaluation metrics in 'params' and 'feval' at the same time is not allowed")

  if (!is.null(env$feval) && typeof(env$feval) != 'closure')
    stop("'feval' must be a function")

  # handle a situation when custom eval function was provided through params
  if (!is.null(env$params[['eval_metric']]) &&
      typeof(env$params$eval_metric) == 'closure') {
    env$feval <- env$params$eval_metric
    env$params$eval_metric <- NULL
  }

  # require maximize to be set when custom feval and early stopping are used together
  if (!is.null(env$feval) &&
      is.null(env$maximize) && (
        !is.null(env$early_stopping_rounds) ||
        has.callbacks(env$callbacks, 'cb.early.stop')))
    stop("Please set 'maximize' to indicate whether the evaluation metric needs to be maximized or not")
}
@@ -145,7 +145,7 @@ xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {
  if (is.null(obj)) {
    .Call(XGBoosterUpdateOneIter_R, booster_handle, as.integer(iter), dtrain)
  } else {
    pred <- predict(booster_handle, dtrain)
    pred <- predict(booster_handle, dtrain, outputmargin = TRUE, training = TRUE)
    gpair <- obj(pred, dtrain)
    .Call(XGBoosterBoostOneIter_R, booster_handle, dtrain, gpair$grad, gpair$hess)
  }
@@ -154,15 +154,15 @@ xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {

# Evaluate one iteration.
# Returns a named vector of evaluation metrics
# with the names in a 'datasetname-metricname' format.
xgb.iter.eval <- function(booster_handle, watchlist, iter, feval = NULL) {
  if (!identical(class(booster_handle), "xgb.Booster.handle"))
    stop("class of booster_handle must be xgb.Booster.handle")

  if (length(watchlist) == 0)
    return(NULL)

  evnames <- names(watchlist)
  if (is.null(feval)) {
    msg <- .Call(XGBoosterEvalOneIter_R, booster_handle, as.integer(iter), watchlist, as.list(evnames))
@@ -189,7 +189,7 @@ xgb.iter.eval <- function(booster_handle, watchlist, iter, feval = NULL) {

# Generates random (stratified if needed) CV folds
generate.cv.folds <- function(nfold, nrows, stratified, label, params) {

  # cannot do it for rank
  if (exists('objective', where = params) &&
      is.character(params$objective) &&
@@ -209,13 +209,14 @@ generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
    if (exists('objective', where = params) &&
        is.character(params$objective)) {
      # If 'objective' provided in params, assume that y is a classification label
      # unless objective is reg:linear
      if (params$objective != 'reg:linear')
      # unless objective is reg:squarederror
      if (params$objective != 'reg:squarederror')
        y <- factor(y)
    } else {
      # If no 'objective' given in params, it means that user either wants to use
      # the default 'reg:linear' objective or has provided a custom obj function.
      # Here, assume classification setting when y has 5 or less unique values:
      # If no 'objective' given in params, it means that user either wants to
      # use the default 'reg:squarederror' objective or has provided a custom
      # obj function. Here, assume classification setting when y has 5 or less
      # unique values:
      if (length(unique(y)) <= 5)
        y <- factor(y)
    }
@@ -293,22 +294,22 @@ xgb.createFolds <- function(y, k = 10)
#

#' Deprecation notices.
#'
#' At this time, some of the parameter names were changed in order to make the code style more uniform.
#' The deprecated parameters would be removed in the next release.
#'
#' To see all the current deprecated and new parameters, check the \code{xgboost:::depr_par_lut} table.
#'
#' A deprecation warning is shown when any of the deprecated parameters is used in a call.
#' An additional warning is shown when there was a partial match to a deprecated parameter
#' (as R is able to partially match parameter names).
#'
#' @name xgboost-deprecated
NULL

# Lookup table for the deprecated parameters bookkeeping
depr_par_lut <- matrix(c(
  'print.every.n', 'print_every_n',
  'early.stop.round', 'early_stopping_rounds',
  'training.data', 'data',
  'with.stats', 'with_stats',

R-package/R/xgb.Booster.R
@@ -5,20 +5,34 @@ xgb.Booster.handle <- function(params = list(), cachelist = list(), modelfile =
|
||||
!all(vapply(cachelist, inherits, logical(1), what = 'xgb.DMatrix'))) {
|
||||
stop("cachelist must be a list of xgb.DMatrix objects")
|
||||
}
|
||||
|
||||
handle <- .Call(XGBoosterCreate_R, cachelist)
|
||||
## Load existing model, dispatch for on disk model file and in memory buffer
|
||||
if (!is.null(modelfile)) {
|
||||
if (typeof(modelfile) == "character") {
|
||||
## A filename
|
||||
handle <- .Call(XGBoosterCreate_R, cachelist)
|
||||
.Call(XGBoosterLoadModel_R, handle, modelfile[1])
|
||||
class(handle) <- "xgb.Booster.handle"
|
||||
if (length(params) > 0) {
|
||||
xgb.parameters(handle) <- params
|
||||
}
|
||||
return(handle)
|
||||
} else if (typeof(modelfile) == "raw") {
|
||||
.Call(XGBoosterLoadModelFromRaw_R, handle, modelfile)
|
||||
## A memory buffer
|
||||
bst <- xgb.unserialize(modelfile)
|
||||
xgb.parameters(bst) <- params
|
||||
return (bst)
|
||||
} else if (inherits(modelfile, "xgb.Booster")) {
|
||||
## A booster object
|
||||
bst <- xgb.Booster.complete(modelfile, saveraw = TRUE)
|
||||
.Call(XGBoosterLoadModelFromRaw_R, handle, bst$raw)
|
||||
bst <- xgb.unserialize(bst$raw)
|
||||
xgb.parameters(bst) <- params
|
||||
return (bst)
|
||||
} else {
|
||||
stop("modelfile must be either character filename, or raw booster dump, or xgb.Booster object")
|
||||
}
|
||||
}
|
||||
## Create new model
|
||||
handle <- .Call(XGBoosterCreate_R, cachelist)
|
||||
class(handle) <- "xgb.Booster.handle"
|
||||
if (length(params) > 0) {
|
||||
xgb.parameters(handle) <- params
|
||||
@@ -51,11 +65,13 @@ is.null.handle <- function(handle) {
|
||||
# Return a verified to be valid handle out of either xgb.Booster.handle or xgb.Booster
|
||||
# internal utility function
|
||||
xgb.get.handle <- function(object) {
|
||||
handle <- switch(class(object)[1],
|
||||
xgb.Booster = object$handle,
|
||||
xgb.Booster.handle = object,
|
||||
if (inherits(object, "xgb.Booster")) {
|
||||
handle <- object$handle
|
||||
} else if (inherits(object, "xgb.Booster.handle")) {
|
||||
handle <- object
|
||||
} else {
|
||||
stop("argument must be of either xgb.Booster or xgb.Booster.handle class")
|
||||
)
|
||||
}
|
||||
if (is.null.handle(handle)) {
|
||||
stop("invalid xgb.Booster.handle")
|
||||
}
|
||||
@@ -81,7 +97,7 @@ xgb.get.handle <- function(object) {
|
||||
#' its handle (pointer) to an internal xgboost model would be invalid. The majority of xgboost methods
|
||||
#' should still work for such a model object since those methods would be using
|
||||
#' \code{xgb.Booster.complete} internally. However, one might find it to be more efficient to call the
|
||||
#' \code{xgb.Booster.complete} function explicitely once after loading a model as an R-object.
|
||||
#' \code{xgb.Booster.complete} function explicitly once after loading a model as an R-object.
|
||||
#' That would prevent further repeated implicit reconstruction of an internal booster model.
|
||||
#'
|
||||
#' @return
|
||||
@@ -95,6 +111,7 @@ xgb.get.handle <- function(object) {
|
||||
#' saveRDS(bst, "xgb.model.rds")
|
||||
#'
|
||||
#' bst1 <- readRDS("xgb.model.rds")
|
||||
#' if (file.exists("xgb.model.rds")) file.remove("xgb.model.rds")
|
||||
#' # the handle is invalid:
|
||||
#' print(bst1$handle)
|
||||
#'
|
||||
@@ -110,9 +127,29 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
|
||||
if (is.null.handle(object$handle)) {
|
||||
object$handle <- xgb.Booster.handle(modelfile = object$raw)
|
||||
} else {
|
||||
if (is.null(object$raw) && saveraw)
|
||||
object$raw <- xgb.save.raw(object$handle)
|
||||
if (is.null(object$raw) && saveraw) {
|
||||
object$raw <- xgb.serialize(object$handle)
|
||||
}
|
||||
}
|
||||
|
||||
attrs <- xgb.attributes(object)
|
||||
if (!is.null(attrs$best_ntreelimit)) {
|
||||
object$best_ntreelimit <- as.integer(attrs$best_ntreelimit)
|
||||
}
|
||||
if (!is.null(attrs$best_iteration)) {
|
||||
## Convert from 0 based back to 1 based.
|
||||
object$best_iteration <- as.integer(attrs$best_iteration) + 1
|
||||
}
|
||||
if (!is.null(attrs$best_score)) {
|
||||
object$best_score <- as.numeric(attrs$best_score)
|
||||
}
|
||||
if (!is.null(attrs$best_msg)) {
|
||||
object$best_msg <- attrs$best_msg
|
||||
}
|
||||
if (!is.null(attrs$niter)) {
|
||||
object$niter <- as.integer(attrs$niter)
|
||||
}
|
||||
|
||||
return(object)
|
||||
}
|
||||
|
||||
@@ -136,6 +173,8 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
|
||||
#' @param reshape whether to reshape the vector of predictions to a matrix form when there are several
|
||||
#' prediction outputs per case. This option has no effect when either of predleaf, predcontrib,
|
||||
#' or predinteraction flags is TRUE.
|
||||
#' @param training whether is the prediction result used for training. For dart booster,
|
||||
#' training predicting will perform dropout.
|
||||
#' @param ... Parameters passed to \code{predict.xgb.Booster}
|
||||
#'
|
||||
#' @details
|
||||
@@ -162,7 +201,7 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
 #'
 #' With \code{predinteraction = TRUE}, SHAP values of contributions of interaction of each pair of features
 #' are computed. Note that this operation might be rather expensive in terms of compute and memory.
-#' Since it quadratically depends on the number of features, it is recommended to perfom selection
+#' Since it quadratically depends on the number of features, it is recommended to perform selection
 #' of the most important features first. See below about the format of the returned results.
 #'
 #' @return
@@ -190,7 +229,7 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
 #'
 #' @seealso
 #' \code{\link{xgb.train}}.
-#'
+#'
 #' @references
 #'
 #' Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions", NIPS Proceedings 2017, \url{https://arxiv.org/abs/1705.07874}
@@ -285,7 +324,7 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
 #' @export
 predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FALSE, ntreelimit = NULL,
                                 predleaf = FALSE, predcontrib = FALSE, approxcontrib = FALSE, predinteraction = FALSE,
-                                reshape = FALSE, ...) {
+                                reshape = FALSE, training = FALSE, ...) {

  object <- xgb.Booster.complete(object, saveraw = FALSE)
  if (!inherits(newdata, "xgb.DMatrix"))
@@ -304,7 +343,8 @@ predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FA
  option <- 0L + 1L * as.logical(outputmargin) + 2L * as.logical(predleaf) + 4L * as.logical(predcontrib) +
    8L * as.logical(approxcontrib) + 16L * as.logical(predinteraction)

-  ret <- .Call(XGBoosterPredict_R, object$handle, newdata, option[1], as.integer(ntreelimit))
+  ret <- .Call(XGBoosterPredict_R, object$handle, newdata, option[1],
+               as.integer(ntreelimit), as.integer(training))

  n_ret <- length(ret)
  n_row <- nrow(newdata)
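The option argument is a plain bitmask over the prediction flags, decoded on the C++ side; a small sketch of the encoding used above:

    # bit 1 = outputmargin, 2 = predleaf, 4 = predcontrib,
    # 8 = approxcontrib, 16 = predinteraction
    option <- 0L + 1L * TRUE + 4L * TRUE  # outputmargin + predcontrib
    option                                # 5L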
@@ -393,7 +433,7 @@ predict.xgb.Booster.handle <- function(object, ...) {
 #' That would only matter if attributes need to be set many times.
 #' Note, however, that when feeding a handle of an \code{xgb.Booster} object to the attribute setters,
 #' the raw model cache of an \code{xgb.Booster} object would not be automatically updated,
-#' and it would be user's responsibility to call \code{xgb.save.raw} to update it.
+#' and it would be user's responsibility to call \code{xgb.serialize} to update it.
 #'
 #' The \code{xgb.attributes<-} setter either updates the existing or adds one or several attributes,
 #' but it doesn't delete the other existing attributes.
@@ -418,6 +458,7 @@ predict.xgb.Booster.handle <- function(object, ...) {
 #'
 #' xgb.save(bst, 'xgb.model')
 #' bst1 <- xgb.load('xgb.model')
+#' if (file.exists('xgb.model')) file.remove('xgb.model')
 #' print(xgb.attr(bst1, "my_attribute"))
 #' print(xgb.attributes(bst1))
 #'
@@ -451,7 +492,7 @@ xgb.attr <- function(object, name) {
  }
  .Call(XGBoosterSetAttr_R, handle, as.character(name[1]), value)
  if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
-    object$raw <- xgb.save.raw(object$handle)
+    object$raw <- xgb.serialize(object$handle)
  }
  object
 }
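A hedged sketch of the behaviour described above: setting an attribute through the full object keeps the raw cache in sync, while setting it through the bare handle leaves the refresh to the caller:

    xgb.attr(bst, "my_attribute") <- "my attribute value"  # raw cache refreshed internally
    xgb.attr(bst$handle, "note") <- "set via handle"       # cache NOT updated here
    bst$raw <- xgb.serialize(bst$handle)                   # manual refresh, as the docs advise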
@@ -491,11 +532,41 @@ xgb.attributes <- function(object) {
    .Call(XGBoosterSetAttr_R, handle, names(a[i]), a[[i]])
  }
  if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
-    object$raw <- xgb.save.raw(object$handle)
+    object$raw <- xgb.serialize(object$handle)
  }
  object
 }

+#' Accessors for model parameters as JSON string.
+#'
+#' @param object Object of class \code{xgb.Booster}
+#' @param value A JSON string.
+#'
+#' @examples
+#' data(agaricus.train, package='xgboost')
+#' train <- agaricus.train
+#'
+#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
+#'                eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
+#' config <- xgb.config(bst)
+#'
+#' @rdname xgb.config
+#' @export
+xgb.config <- function(object) {
+  handle <- xgb.get.handle(object)
+  .Call(XGBoosterSaveJsonConfig_R, handle);
+}
+
+#' @rdname xgb.config
+#' @export
+`xgb.config<-` <- function(object, value) {
+  handle <- xgb.get.handle(object)
+  .Call(XGBoosterLoadJsonConfig_R, handle, value)
+  object$raw <- NULL # force renew the raw buffer
+  object <- xgb.Booster.complete(object)
+  object
+}

 #' Accessors for model parameters.
 #'
 #' Only the setter for xgboost parameters is currently implemented.
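A hedged round-trip sketch for the new JSON accessors above; the use of jsonlite and the exact field path are illustrative assumptions, not something this diff prescribes:

    cfg <- jsonlite::fromJSON(xgb.config(bst))
    cfg$learner$generic_param$nthread <- "1"  # hypothetical field path
    xgb.config(bst) <- as.character(jsonlite::toJSON(cfg, auto_unbox = TRUE))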
@@ -532,7 +603,7 @@ xgb.attributes <- function(object) {
    .Call(XGBoosterSetParam_R, handle, names(p[i]), p[[i]])
  }
  if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
-    object$raw <- xgb.save.raw(object$handle)
+    object$raw <- xgb.serialize(object$handle)
  }
  object
 }

@@ -1,24 +1,25 @@
 #' Construct xgb.DMatrix object
-#'
+#'
 #' Construct xgb.DMatrix object from either a dense matrix, a sparse matrix, or a local file.
 #' Supported input file formats are either a libsvm text file or a binary file that was created previously by
 #' \code{\link{xgb.DMatrix.save}}).
 #'
 #' @param data a \code{matrix} object (either numeric or integer), a \code{dgCMatrix} object, or a character
 #'
 #' @param data a \code{matrix} object (either numeric or integer), a \code{dgCMatrix} object, or a character
 #' string representing a filename.
 #' @param info a named list of additional information to store in the \code{xgb.DMatrix} object.
-#' See \code{\link{setinfo}} for the specific allowed kinds of
+#' See \code{\link{setinfo}} for the specific allowed kinds of
 #' @param missing a float value to represent missing values in data (used only when input is a dense matrix).
 #' It is useful when a 0 or some other extreme value represents missing values in data.
 #' @param silent whether to suppress printing an informational message after loading from a file.
 #' @param ... the \code{info} data could be passed directly as parameters, without creating an \code{info} list.
-#'
+#'
 #' @examples
 #' data(agaricus.train, package='xgboost')
 #' train <- agaricus.train
 #' dtrain <- xgb.DMatrix(train$data, label=train$label)
 #' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
 #' dtrain <- xgb.DMatrix('xgb.DMatrix.data')
+#' if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
 #' @export
 xgb.DMatrix <- function(data, info = list(), missing = NA, silent = FALSE, ...) {
  cnames <- NULL
@@ -78,23 +79,23 @@ xgb.get.DMatrix <- function(data, label = NULL, missing = NA, weight = NULL) {


 #' Dimensions of xgb.DMatrix
-#'
+#'
 #' Returns a vector of numbers of rows and of columns in an \code{xgb.DMatrix}.
 #' @param x Object of class \code{xgb.DMatrix}
-#'
+#'
 #' @details
-#' Note: since \code{nrow} and \code{ncol} internally use \code{dim}, they can also
+#' Note: since \code{nrow} and \code{ncol} internally use \code{dim}, they can also
 #' be directly used with an \code{xgb.DMatrix} object.
 #'
 #' @examples
 #' data(agaricus.train, package='xgboost')
 #' train <- agaricus.train
 #' dtrain <- xgb.DMatrix(train$data, label=train$label)
-#'
+#'
 #' stopifnot(nrow(dtrain) == nrow(train$data))
 #' stopifnot(ncol(dtrain) == ncol(train$data))
 #' stopifnot(all(dim(dtrain) == dim(train$data)))
-#'
+#'
 #' @export
 dim.xgb.DMatrix <- function(x) {
  c(.Call(XGDMatrixNumRow_R, x), .Call(XGDMatrixNumCol_R, x))
@@ -102,14 +103,14 @@ dim.xgb.DMatrix <- function(x) {


 #' Handling of column names of \code{xgb.DMatrix}
-#'
-#' Only column names are supported for \code{xgb.DMatrix}, thus setting of
-#' row names would have no effect and returnten row names would be NULL.
-#'
+#'
+#' Only column names are supported for \code{xgb.DMatrix}, thus setting of
+#' row names would have no effect and returned row names would be NULL.
+#'
 #' @param x object of class \code{xgb.DMatrix}
 #' @param value a list of two elements: the first one is ignored
-#' and the second one is column names
-#'
+#' and the second one is column names
+#'
 #' @details
 #' Generic \code{dimnames} methods are used by \code{colnames}.
 #' Since row names are irrelevant, it is recommended to use \code{colnames} directly.
@@ -122,7 +123,7 @@ dim.xgb.DMatrix <- function(x) {
 #' colnames(dtrain)
 #' colnames(dtrain) <- make.names(1:ncol(train$data))
 #' print(dtrain, verbose=TRUE)
-#'
+#'
 #' @rdname dimnames.xgb.DMatrix
 #' @export
 dimnames.xgb.DMatrix <- function(x) {
@@ -140,8 +141,8 @@ dimnames.xgb.DMatrix <- function(x) {
    attr(x, '.Dimnames') <- NULL
    return(x)
  }
-  if (ncol(x) != length(value[[2]]))
-    stop("can't assign ", length(value[[2]]), " colnames to a ",
+  if (ncol(x) != length(value[[2]]))
+    stop("can't assign ", length(value[[2]]), " colnames to a ",
         ncol(x), " column xgb.DMatrix")
  attr(x, '.Dimnames') <- value
  x
@@ -149,33 +150,33 @@ dimnames.xgb.DMatrix <- function(x) {


 #' Get information of an xgb.DMatrix object
-#'
+#'
 #' Get information of an xgb.DMatrix object
 #' @param object Object of class \code{xgb.DMatrix}
 #' @param name the name of the information field to get (see details)
 #' @param ... other parameters
-#'
+#'
 #' @details
 #' The \code{name} field can be one of the following:
-#'
+#'
 #' \itemize{
 #'     \item \code{label}: label Xgboost learn from ;
 #'     \item \code{weight}: to do a weight rescale ;
 #'     \item \code{base_margin}: base margin is the base prediction Xgboost will boost from ;
 #'     \item \code{nrow}: number of rows of the \code{xgb.DMatrix}.
-#'
+#'
 #' }
-#'
+#'
 #' \code{group} can be setup by \code{setinfo} but can't be retrieved by \code{getinfo}.
-#'
+#'
 #' @examples
 #' data(agaricus.train, package='xgboost')
 #' train <- agaricus.train
 #' dtrain <- xgb.DMatrix(train$data, label=train$label)
-#'
+#'
 #' labels <- getinfo(dtrain, 'label')
 #' setinfo(dtrain, 'label', 1-labels)
-#'
+#'
 #' labels2 <- getinfo(dtrain, 'label')
 #' stopifnot(all(labels2 == 1-labels))
 #' @rdname getinfo
@@ -187,9 +188,10 @@ getinfo <- function(object, ...) UseMethod("getinfo")
 getinfo.xgb.DMatrix <- function(object, name, ...) {
  if (typeof(name) != "character" ||
      length(name) != 1 ||
-      !name %in% c('label', 'weight', 'base_margin', 'nrow')) {
+      !name %in% c('label', 'weight', 'base_margin', 'nrow',
+                   'label_lower_bound', 'label_upper_bound')) {
    stop("getinfo: name must be one of the following\n",
-         "    'label', 'weight', 'base_margin', 'nrow'")
+         "    'label', 'weight', 'base_margin', 'nrow', 'label_lower_bound', 'label_upper_bound'")
  }
  if (name != "nrow"){
    ret <- .Call(XGDMatrixGetInfo_R, object, name)
@@ -202,9 +204,9 @@ getinfo.xgb.DMatrix <- function(object, name, ...) {


 #' Set information of an xgb.DMatrix object
-#'
+#'
 #' Set information of an xgb.DMatrix object
-#'
+#'
 #' @param object Object of class "xgb.DMatrix"
 #' @param name the name of the field to get
 #' @param info the specific field of information to set
@@ -212,19 +214,19 @@ getinfo.xgb.DMatrix <- function(object, name, ...) {
 #'
 #' @details
 #' The \code{name} field can be one of the following:
-#'
+#'
 #' \itemize{
 #'     \item \code{label}: label Xgboost learn from ;
 #'     \item \code{weight}: to do a weight rescale ;
 #'     \item \code{base_margin}: base margin is the base prediction Xgboost will boost from ;
 #'     \item \code{group}: number of rows in each group (to use with \code{rank:pairwise} objective).
 #' }
-#'
+#'
 #' @examples
 #' data(agaricus.train, package='xgboost')
 #' train <- agaricus.train
 #' dtrain <- xgb.DMatrix(train$data, label=train$label)
-#'
+#'
 #' labels <- getinfo(dtrain, 'label')
 #' setinfo(dtrain, 'label', 1-labels)
 #' labels2 <- getinfo(dtrain, 'label')
@@ -242,6 +244,18 @@ setinfo.xgb.DMatrix <- function(object, name, info, ...) {
    .Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
    return(TRUE)
  }
+  if (name == "label_lower_bound") {
+    if (length(info) != nrow(object))
+      stop("The length of lower-bound labels must equal to the number of rows in the input data")
+    .Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
+    return(TRUE)
+  }
+  if (name == "label_upper_bound") {
+    if (length(info) != nrow(object))
+      stop("The length of upper-bound labels must equal to the number of rows in the input data")
+    .Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
+    return(TRUE)
+  }
  if (name == "weight") {
    if (length(info) != nrow(object))
      stop("The length of weights must equal to the number of rows in the input data")
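These two new fields support ranged (e.g. interval-censored survival) labels; a hedged sketch of attaching them, where X, lower, and upper are assumed user data:

    dtrain <- xgb.DMatrix(X)
    setinfo(dtrain, 'label_lower_bound', lower)  # length(lower) == nrow(X)
    setinfo(dtrain, 'label_upper_bound', upper)  # Inf can mark right-censored rows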
@@ -266,27 +280,27 @@ setinfo.xgb.DMatrix <- function(object, name, info, ...) {


 #' Get a new DMatrix containing the specified rows of
-#' orginal xgb.DMatrix object
+#' original xgb.DMatrix object
 #'
 #' Get a new DMatrix containing the specified rows of
-#' orginal xgb.DMatrix object
-#'
+#' original xgb.DMatrix object
+#'
 #' @param object Object of class "xgb.DMatrix"
 #' @param idxset an integer vector of indices of rows needed
 #' @param colset currently not used (columns subsetting is not available)
 #' @param ... other parameters (currently not used)
-#'
+#'
 #' @examples
 #' data(agaricus.train, package='xgboost')
 #' train <- agaricus.train
 #' dtrain <- xgb.DMatrix(train$data, label=train$label)
-#'
+#'
 #' dsub <- slice(dtrain, 1:42)
 #' labels1 <- getinfo(dsub, 'label')
 #' dsub <- dtrain[1:42, ]
 #' labels2 <- getinfo(dsub, 'label')
 #' all.equal(labels1, labels2)
-#'
+#'
 #' @rdname slice.xgb.DMatrix
 #' @export
 slice <- function(object, ...) UseMethod("slice")
@@ -301,12 +315,17 @@ slice.xgb.DMatrix <- function(object, idxset, ...) {

  attr_list <- attributes(object)
  nr <- nrow(object)
-  len <- sapply(attr_list, length)
+  len <- sapply(attr_list, NROW)
  ind <- which(len == nr)
  if (length(ind) > 0) {
    nms <- names(attr_list)[ind]
    for (i in seq_along(ind)) {
-      attr(ret, nms[i]) <- attr(object, nms[i])[idxset]
+      obj_attr <- attr(object, nms[i])
+      if (NCOL(obj_attr) > 1) {
+        attr(ret, nms[i]) <- obj_attr[idxset,]
+      } else {
+        attr(ret, nms[i]) <- obj_attr[idxset]
+      }
    }
  }
  return(structure(ret, class = "xgb.DMatrix"))
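Informally, switching to NROW/NCOL means row-aligned matrix attributes now survive slicing as well as vectors do; a small hedged sketch:

    attr(dtrain, "extra") <- matrix(0, nrow(dtrain), 2)  # row-aligned matrix attribute
    dsub <- slice(dtrain, 1:42)
    dim(attr(dsub, "extra"))  # 42 2: subset along rows only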
@@ -320,22 +339,22 @@ slice.xgb.DMatrix <- function(object, idxset, ...) {


 #' Print xgb.DMatrix
-#'
-#' Print information about xgb.DMatrix.
+#'
+#' Print information about xgb.DMatrix.
 #' Currently it displays dimensions and presence of info-fields and colnames.
-#'
+#'
 #' @param x an xgb.DMatrix object
 #' @param verbose whether to print colnames (when present)
 #' @param ... not currently used
-#'
+#'
 #' @examples
 #' data(agaricus.train, package='xgboost')
 #' train <- agaricus.train
 #' dtrain <- xgb.DMatrix(train$data, label=train$label)
-#'
+#'
 #' dtrain
 #' print(dtrain, verbose=TRUE)
-#'
+#'
 #' @method print xgb.DMatrix
 #' @export
 print.xgb.DMatrix <- function(x, verbose = FALSE, ...) {

@@ -11,6 +11,7 @@
 #' dtrain <- xgb.DMatrix(train$data, label=train$label)
 #' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
 #' dtrain <- xgb.DMatrix('xgb.DMatrix.data')
+#' if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
 #' @export
 xgb.DMatrix.save <- function(dmatrix, fname) {
  if (typeof(fname) != "character")

@@ -1,12 +1,12 @@
 #' Cross Validation
-#'
+#'
 #' The cross validation function of xgboost
-#'
+#'
 #' @param params the list of parameters. Commonly used ones are:
 #' \itemize{
 #'   \item \code{objective} objective function, common ones are
 #'     \itemize{
-#'       \item \code{reg:linear} linear regression
+#'       \item \code{reg:squarederror} Regression with squared loss
 #'       \item \code{binary:logistic} logistic regression for classification
 #'     }
 #'   \item \code{eta} step size of each boosting step
@@ -18,12 +18,12 @@
 #' See also demo/ for walkthrough example in R.
 #' @param data takes an \code{xgb.DMatrix}, \code{matrix}, or \code{dgCMatrix} as the input.
 #' @param nrounds the max number of iterations
-#' @param nfold the original dataset is randomly partitioned into \code{nfold} equal size subsamples.
+#' @param nfold the original dataset is randomly partitioned into \code{nfold} equal size subsamples.
 #' @param label vector of response values. Should be provided only when data is an R-matrix.
-#' @param missing is only used when input is a dense matrix. By default is set to NA, which means
-#' that NA values should be considered as 'missing' by the algorithm.
+#' @param missing is only used when input is a dense matrix. By default is set to NA, which means
+#' that NA values should be considered as 'missing' by the algorithm.
 #' Sometimes, 0 or other extreme value might be used to represent missing values.
-#' @param prediction A logical value indicating whether to return the test fold predictions
+#' @param prediction A logical value indicating whether to return the test fold predictions
 #' from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callback.
 #' @param showsd \code{boolean}, whether to show standard deviation of cross validation
 #' @param metrics list of evaluation metrics to be used in cross validation,
@@ -37,22 +37,24 @@
 #'   \item \code{aucpr} Area under PR curve
 #'   \item \code{merror} Exact matching error, used to evaluate multi-class classification
 #' }
-#' @param obj customized objective function. Returns gradient and second order
+#' @param obj customized objective function. Returns gradient and second order
 #' gradient with given prediction and dtrain.
-#' @param feval custimized evaluation function. Returns
-#' \code{list(metric='metric-name', value='metric-value')} with given
+#' @param feval customized evaluation function. Returns
+#' \code{list(metric='metric-name', value='metric-value')} with given
 #' prediction and dtrain.
-#' @param stratified a \code{boolean} indicating whether sampling of folds should be stratified
+#' @param stratified a \code{boolean} indicating whether sampling of folds should be stratified
 #' by the values of outcome labels.
 #' @param folds \code{list} provides a possibility to use a list of pre-defined CV folds
-#' (each element must be a vector of test fold's indices). When folds are supplied,
+#' (each element must be a vector of test fold's indices). When folds are supplied,
 #' the \code{nfold} and \code{stratified} parameters are ignored.
+#' @param train_folds \code{list} list specifying which indices to use for training. If \code{NULL}
+#' (the default) all indices not specified in \code{folds} will be used for training.
 #' @param verbose \code{boolean}, print the statistics during the process
 #' @param print_every_n Print each n-th iteration evaluation messages when \code{verbose>0}.
-#' Default is 1 which means all messages are printed. This parameter is passed to the
+#' Default is 1 which means all messages are printed. This parameter is passed to the
 #' \code{\link{cb.print.evaluation}} callback.
-#' @param early_stopping_rounds If \code{NULL}, the early stopping function is not triggered.
-#' If set to an integer \code{k}, training with a validation set will stop if the performance
+#' @param early_stopping_rounds If \code{NULL}, the early stopping function is not triggered.
+#' If set to an integer \code{k}, training with a validation set will stop if the performance
 #' doesn't improve for \code{k} rounds.
 #' Setting this parameter engages the \code{\link{cb.early.stop}} callback.
 #' @param maximize If \code{feval} and \code{early_stopping_rounds} are set,
@@ -60,46 +62,46 @@
 #' When it is \code{TRUE}, it means the larger the evaluation score the better.
 #' This parameter is passed to the \code{\link{cb.early.stop}} callback.
 #' @param callbacks a list of callback functions to perform various tasks during boosting.
-#' See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
-#' parameters' values. User can provide either existing or their own callback methods in order
+#' See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
+#' parameters' values. User can provide either existing or their own callback methods in order
 #' to customize the training process.
 #' @param ... other parameters to pass to \code{params}.
-#'
-#' @details
-#' The original sample is randomly partitioned into \code{nfold} equal size subsamples.
-#'
-#' Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model, and the remaining \code{nfold - 1} subsamples are used as training data.
-#'
+#'
+#' @details
+#' The original sample is randomly partitioned into \code{nfold} equal size subsamples.
+#'
+#' Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model, and the remaining \code{nfold - 1} subsamples are used as training data.
+#'
 #' The cross-validation process is then repeated \code{nrounds} times, with each of the \code{nfold} subsamples used exactly once as the validation data.
-#'
+#'
 #' All observations are used for both training and validation.
-#'
+#'
 #' Adapted from \url{http://en.wikipedia.org/wiki/Cross-validation_\%28statistics\%29#k-fold_cross-validation}
 #'
-#' @return
+#' @return
 #' An object of class \code{xgb.cv.synchronous} with the following elements:
 #' \itemize{
 #'   \item \code{call} a function call.
-#'   \item \code{params} parameters that were passed to the xgboost library. Note that it does not
+#'   \item \code{params} parameters that were passed to the xgboost library. Note that it does not
 #'   capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
-#'   \item \code{callbacks} callback functions that were either automatically assigned or
+#'   \item \code{callbacks} callback functions that were either automatically assigned or
 #'   explicitly passed.
-#'   \item \code{evaluation_log} evaluation history storead as a \code{data.table} with the
-#'   first column corresponding to iteration number and the rest corresponding to the
+#'   \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
+#'   first column corresponding to iteration number and the rest corresponding to the
 #'   CV-based evaluation means and standard deviations for the training and test CV-sets.
 #'   It is created by the \code{\link{cb.evaluation.log}} callback.
 #'   \item \code{niter} number of boosting iterations.
 #'   \item \code{nfeatures} number of features in training data.
-#'   \item \code{folds} the list of CV folds' indices - either those passed through the \code{folds}
+#'   \item \code{folds} the list of CV folds' indices - either those passed through the \code{folds}
 #'   parameter or randomly generated.
 #'   \item \code{best_iteration} iteration number with the best evaluation metric value
 #'   (only available with early stopping).
-#'   \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
+#'   \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
 #'   which could further be used in \code{predict} method
 #'   (only available with early stopping).
-#'   \item \code{pred} CV prediction values available when \code{prediction} is set.
+#'   \item \code{pred} CV prediction values available when \code{prediction} is set.
 #'   It is either vector or matrix (see \code{\link{cb.cv.predict}}).
-#'   \item \code{models} a liost of the CV folds' models. It is only available with the explicit
+#'   \item \code{models} a list of the CV folds' models. It is only available with the explicit
 #'   setting of the \code{cb.cv.predict(save_models = TRUE)} callback.
 #' }
 #'
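A hedged usage sketch of the new train_folds parameter (it must be aligned with folds, one training-index vector per test fold; the index values here are made up):

    folds       <- list(1:200, 201:400)    # test indices, one element per fold
    train_folds <- list(401:600, 401:600)  # custom training indices per fold
    cv <- xgb.cv(data = dtrain, nrounds = 2, folds = folds, train_folds = train_folds,
                 objective = "binary:logistic")  # nfold is ignored when folds is given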
@@ -110,32 +112,39 @@
 #' max_depth = 3, eta = 1, objective = "binary:logistic")
 #' print(cv)
 #' print(cv, verbose=TRUE)
-#'
+#'
 #' @export
 xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing = NA,
                    prediction = FALSE, showsd = TRUE, metrics=list(),
-                   obj = NULL, feval = NULL, stratified = TRUE, folds = NULL,
+                   obj = NULL, feval = NULL, stratified = TRUE, folds = NULL, train_folds = NULL,
                    verbose = TRUE, print_every_n=1L,
                    early_stopping_rounds = NULL, maximize = NULL, callbacks = list(), ...) {

  check.deprecation(...)

  params <- check.booster.params(params, ...)
  # TODO: should we deprecate the redundant 'metrics' parameter?
  for (m in metrics)
    params <- c(params, list("eval_metric" = m))

  check.custom.obj()
  check.custom.eval()

  #if (is.null(params[['eval_metric']]) && is.null(feval))
  #  stop("Either 'eval_metric' or 'feval' must be provided for CV")

  # Check the labels
  if ( (inherits(data, 'xgb.DMatrix') && is.null(getinfo(data, 'label'))) ||
-       (!inherits(data, 'xgb.DMatrix') && is.null(label)))
+       (!inherits(data, 'xgb.DMatrix') && is.null(label))) {
    stop("Labels must be provided for CV either through xgb.DMatrix, or through 'label=' when 'data' is matrix")
+  } else if (inherits(data, 'xgb.DMatrix')) {
+    if (!is.null(label))
+      warning("xgb.cv: label will be ignored, since data is of type xgb.DMatrix")
+    cv_label = getinfo(data, 'label')
+  } else {
+    cv_label = label
+  }

  # CV folds
  if(!is.null(folds)) {
    if(!is.list(folds) || length(folds) < 2)
@@ -144,9 +153,9 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
  } else {
    if (nfold <= 1)
      stop("'nfold' must be > 1")
-    folds <- generate.cv.folds(nfold, nrow(data), stratified, label, params)
+    folds <- generate.cv.folds(nfold, nrow(data), stratified, cv_label, params)
  }

  # Potential TODO: sequential CV
  #if (strategy == 'sequential')
  #  stop('Sequential CV strategy is not yet implemented')
@@ -166,7 +175,7 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
  stop_condition <- FALSE
  if (!is.null(early_stopping_rounds) &&
      !has.callbacks(callbacks, 'cb.early.stop')) {
-    callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds,
+    callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds,
      maximize = maximize, verbose = verbose))
  }
  # CV-predictions callback
@@ -177,12 +186,17 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
  # Sort the callbacks into categories
  cb <- categorize.callbacks(callbacks)

  # create the booster-folds
+  # train_folds
  dall <- xgb.get.DMatrix(data, label, missing)
  bst_folds <- lapply(seq_along(folds), function(k) {
    dtest <- slice(dall, folds[[k]])
-    dtrain <- slice(dall, unlist(folds[-k]))
+    # code originally contributed by @RolandASc on stackoverflow
+    if(is.null(train_folds))
+      dtrain <- slice(dall, unlist(folds[-k]))
+    else
+      dtrain <- slice(dall, train_folds[[k]])
    handle <- xgb.Booster.handle(params, list(dtrain, dtest))
    list(dtrain = dtrain, bst = handle, watchlist = list(train = dtrain, test=dtest), index = folds[[k]])
  })
@@ -197,12 +211,12 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
  # those are fixed for CV (no training continuation)
  begin_iteration <- 1
  end_iteration <- nrounds

  # synchronous CV boosting: run CV folds' models within each iteration
  for (iteration in begin_iteration:end_iteration) {

    for (f in cb$pre_iter) f()

    msg <- lapply(bst_folds, function(fd) {
      xgb.iter.update(fd$bst, fd$dtrain, iteration - 1, obj)
      xgb.iter.eval(fd$bst, fd$watchlist, iteration - 1, feval)
@@ -210,9 +224,9 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
    msg <- simplify2array(msg)
    bst_evaluation <- rowMeans(msg)
    bst_evaluation_err <- sqrt(rowMeans(msg^2) - bst_evaluation^2)

    for (f in cb$post_iter) f()

    if (stop_condition) break
  }
  for (f in cb$finalize) f(finalize = TRUE)
@@ -236,17 +250,17 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =


 #' Print xgb.cv result
-#'
+#'
 #' Prints formatted results of \code{xgb.cv}.
-#'
+#'
 #' @param x an \code{xgb.cv.synchronous} object
 #' @param verbose whether to print detailed data
 #' @param ... passed to \code{data.table.print}
-#'
+#'
 #' @details
-#' When not verbose, it would only print the evaluation results,
+#' When not verbose, it would only print the evaluation results,
 #' including the best iteration (when available).
-#'
+#'
 #' @examples
 #' data(agaricus.train, package='xgboost')
 #' train <- agaricus.train
@@ -254,13 +268,13 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
 #' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
 #' print(cv)
 #' print(cv, verbose=TRUE)
-#'
+#'
 #' @rdname print.xgb.cv
 #' @method print xgb.cv.synchronous
 #' @export
 print.xgb.cv.synchronous <- function(x, verbose = FALSE, ...) {
  cat('##### xgb.cv ', length(x$folds), '-folds\n', sep = '')

  if (verbose) {
    if (!is.null(x$call)) {
      cat('call:\n  ')
@@ -268,8 +282,8 @@ print.xgb.cv.synchronous <- function(x, verbose = FALSE, ...) {
    }
    if (!is.null(x$params)) {
      cat('params (as set within xgb.cv):\n')
-      cat( '  ',
-           paste(names(x$params),
+      cat( '  ',
+           paste(names(x$params),
                 paste0('"', unlist(x$params), '"'),
                 sep = ' = ', collapse = ', '), '\n', sep = '')
    }
@@ -280,9 +294,9 @@ print.xgb.cv.synchronous <- function(x, verbose = FALSE, ...) {
      print(x)
    })
  }

  for (n in c('niter', 'best_iteration', 'best_ntreelimit')) {
-    if (is.null(x[[n]]))
+    if (is.null(x[[n]]))
      next
    cat(n, ': ', x[[n]], '\n', sep = '')
  }
@@ -293,10 +307,10 @@ print.xgb.cv.synchronous <- function(x, verbose = FALSE, ...) {
    }
  }

-  if (verbose)
+  if (verbose)
    cat('evaluation_log:\n')
  print(x$evaluation_log, row.names = FALSE, ...)

  if (!is.null(x$best_iteration)) {
    cat('Best iteration:\n')
    print(x$evaluation_log[x$best_iteration], row.names = FALSE, ...)

@@ -1,33 +1,34 @@
 #' Load xgboost model from binary file
-#'
-#' Load xgboost model from the binary model file.
+#'
+#' Load xgboost model from the binary model file.
 #'
 #' @param modelfile the name of the binary input file.
-#'
-#' @details
+#'
+#' @details
 #' The input file is expected to contain a model saved in an xgboost-internal binary format
-#' using either \code{\link{xgb.save}} or \code{\link{cb.save.model}} in R, or using some
-#' appropriate methods from other xgboost interfaces. E.g., a model trained in Python and
+#' using either \code{\link{xgb.save}} or \code{\link{cb.save.model}} in R, or using some
+#' appropriate methods from other xgboost interfaces. E.g., a model trained in Python and
 #' saved from there in xgboost format, could be loaded from R.
-#'
+#'
 #' Note: a model saved as an R-object, has to be loaded using corresponding R-methods,
 #' not \code{xgb.load}.
-#'
-#' @return
+#'
+#' @return
 #' An object of \code{xgb.Booster} class.
-#'
-#' @seealso
-#' \code{\link{xgb.save}}, \code{\link{xgb.Booster.complete}}.
-#'
+#'
+#' @seealso
+#' \code{\link{xgb.save}}, \code{\link{xgb.Booster.complete}}.
+#'
 #' @examples
 #' data(agaricus.train, package='xgboost')
 #' data(agaricus.test, package='xgboost')
 #' train <- agaricus.train
 #' test <- agaricus.test
-#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
+#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
 #'                eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
 #' xgb.save(bst, 'xgb.model')
 #' bst <- xgb.load('xgb.model')
+#' if (file.exists('xgb.model')) file.remove('xgb.model')
 #' pred <- predict(bst, test$data)
 #' @export
 xgb.load <- function(modelfile) {
R-package/R/xgb.load.raw.R (new file, 14 lines)
@@ -0,0 +1,14 @@
+#' Load serialised xgboost model from R's raw vector
+#'
+#' User can generate raw memory buffer by calling xgb.save.raw
+#'
+#' @param buffer the buffer returned by xgb.save.raw
+#'
+#' @export
+xgb.load.raw <- function(buffer) {
+  cachelist <- list()
+  handle <- .Call(XGBoosterCreate_R, cachelist)
+  .Call(XGBoosterLoadModelFromRaw_R, handle, buffer)
+  class(handle) <- "xgb.Booster.handle"
+  return (handle)
+}
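A short usage sketch for the new loader, reusing bst and test from the surrounding examples; note it returns a bare handle rather than a full xgb.Booster:

    raw  <- xgb.save.raw(bst)      # model-only raw vector
    h    <- xgb.load.raw(raw)      # an xgb.Booster.handle
    pred <- predict(h, test$data)  # handles have their own predict method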
@@ -5,16 +5,16 @@
 #'
 #' @param importance_matrix a \code{data.table} returned by \code{\link{xgb.importance}}.
 #' @param top_n maximal number of top features to include into the plot.
-#' @param measure the name of importance measure to plot.
+#' @param measure the name of importance measure to plot.
 #' When \code{NULL}, 'Gain' would be used for trees and 'Weight' would be used for gblinear.
 #' @param rel_to_first whether importance values should be represented as relative to the highest ranked feature.
 #' See Details.
 #' @param left_margin (base R barplot) allows to adjust the left margin size to fit feature names.
 #' When it is NULL, the existing \code{par('mar')} is used.
 #' @param cex (base R barplot) passed as \code{cex.names} parameter to \code{barplot}.
-#' @param plot (base R barplot) whether a barplot should be produced.
+#' @param plot (base R barplot) whether a barplot should be produced.
 #' If FALSE, only a data.table is returned.
-#' @param n_clusters (ggplot only) a \code{numeric} vector containing the min and the max range
+#' @param n_clusters (ggplot only) a \code{numeric} vector containing the min and the max range
 #' of the possible number of clusters of bars.
 #' @param ... other parameters passed to \code{barplot} (except horiz, border, cex.names, names.arg, and las).
 #'
@@ -22,27 +22,27 @@
 #' The graph represents each feature as a horizontal bar of length proportional to the importance of a feature.
 #' Features are shown ranked in a decreasing importance order.
 #' It works for importances from both \code{gblinear} and \code{gbtree} models.
-#'
+#'
 #' When \code{rel_to_first = FALSE}, the values would be plotted as they were in \code{importance_matrix}.
-#' For gbtree model, that would mean being normalized to the total of 1
+#' For gbtree model, that would mean being normalized to the total of 1
 #' ("what is feature's importance contribution relative to the whole model?").
 #' For linear models, \code{rel_to_first = FALSE} would show actual values of the coefficients.
-#' Setting \code{rel_to_first = TRUE} allows to see the picture from the perspective of
+#' Setting \code{rel_to_first = TRUE} allows to see the picture from the perspective of
 #' "what is feature's importance contribution relative to the most important feature?"
 #'
-#' The ggplot-backend method also performs 1-D custering of the importance values,
-#' with bar colors coresponding to different clusters that have somewhat similar importance values.
-#'
+#'
+#' The ggplot-backend method also performs 1-D clustering of the importance values,
+#' with bar colors corresponding to different clusters that have somewhat similar importance values.
+#'
 #' @return
 #' The \code{xgb.plot.importance} function creates a \code{barplot} (when \code{plot=TRUE})
 #' and silently returns a processed data.table with \code{n_top} features sorted by importance.
-#'
+#'
 #' The \code{xgb.ggplot.importance} function returns a ggplot graph which could be customized afterwards.
 #' E.g., to change the title of the graph, add \code{+ ggtitle("A GRAPH NAME")} to the result.
 #'
 #' @seealso
 #' \code{\link[graphics]{barplot}}.
-#'
+#'
 #' @examples
 #' data(agaricus.train)
 #'
@@ -50,15 +50,15 @@
 #' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
 #'
 #' importance_matrix <- xgb.importance(colnames(agaricus.train$data), model = bst)
-#'
+#'
 #' xgb.plot.importance(importance_matrix, rel_to_first = TRUE, xlab = "Relative importance")
-#'
+#'
 #' (gg <- xgb.ggplot.importance(importance_matrix, measure = "Frequency", rel_to_first = TRUE))
 #' gg + ggplot2::ylab("Frequency")
 #'
 #' @rdname xgb.plot.importance
 #' @export
-xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure = NULL,
+xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure = NULL,
                                 rel_to_first = FALSE, left_margin = 10, cex = NULL, plot = TRUE, ...) {
  check.deprecation(...)
  if (!is.data.table(importance_matrix)) {
@@ -80,13 +80,13 @@ xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure
    if (!"Feature" %in% imp_names)
      stop("Importance matrix column names are not as expected!")
  }

  # also aggregate, just in case when the values were not yet summed up by feature
  importance_matrix <- importance_matrix[, Importance := sum(get(measure)), by = Feature]

  # make sure it's ordered
  importance_matrix <- importance_matrix[order(-abs(Importance))]

  if (!is.null(top_n)) {
    top_n <- min(top_n, nrow(importance_matrix))
    importance_matrix <- head(importance_matrix, top_n)
@@ -97,14 +97,14 @@ xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure
  if (is.null(cex)) {
    cex <- 2.5/log2(1 + nrow(importance_matrix))
  }

  if (plot) {
    op <- par(no.readonly = TRUE)
    mar <- op$mar
    if (!is.null(left_margin))
      mar[2] <- left_margin
    par(mar = mar)

    # reverse the order of rows to have the highest ranked at the top
    importance_matrix[nrow(importance_matrix):1,
                      barplot(Importance, horiz = TRUE, border = NA, cex.names = cex,
@@ -115,7 +115,7 @@ xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure
                      barplot(Importance, horiz = TRUE, border = NA, add = TRUE)]
    par(op)
  }

  invisible(importance_matrix)
 }

@@ -1,9 +1,9 @@
 #' SHAP contribution dependency plots
 #'
 #' Visualizing the SHAP feature contribution to prediction dependencies on feature value.
-#'
+#'
 #' @param data data as a \code{matrix} or \code{dgCMatrix}.
-#' @param shap_contrib a matrix of SHAP contributions that was computed earlier for the above
+#' @param shap_contrib a matrix of SHAP contributions that was computed earlier for the above
 #' \code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.
 #' @param features a vector of either column indices or of feature names to plot. When it is NULL,
 #' feature importance is calculated, and \code{top_n} high ranked features are taken.
@@ -31,32 +31,32 @@
 #' @param plot_loess whether to plot loess-smoothed curves. The smoothing is only done for features with
 #' more than 5 distinct values.
 #' @param col_loess a color to use for the loess curves.
-#' @param span_loess the \code{span} paramerer in \code{\link[stats]{loess}}'s call.
+#' @param span_loess the \code{span} parameter in \code{\link[stats]{loess}}'s call.
 #' @param which whether to do univariate or bivariate plotting. NOTE: only 1D is implemented so far.
 #' @param plot whether a plot should be drawn. If FALSE, only a list of matrices is returned.
 #' @param ... other parameters passed to \code{plot}.
-#'
+#'
 #' @details
-#'
+#'
 #' These scatterplots represent how SHAP feature contributions depend on feature values.
 #' The similarity to partial dependency plots is that they also give an idea for how feature values
 #' affect predictions. However, in partial dependency plots, we usually see marginal dependencies
 #' of model prediction on feature value, while SHAP contribution dependency plots display the estimated
 #' contributions of a feature to model prediction for each individual case.
-#'
+#'
 #' When \code{plot_loess = TRUE} is set, feature values are rounded to 3 significant digits and
 #' weighted LOESS is computed and plotted, where weights are the numbers of data points
 #' at each rounded value.
-#'
+#'
 #' Note: SHAP contributions are shown on the scale of model margin. E.g., for a logistic binomial objective,
 #' the margin is prediction before a sigmoidal transform into probability-like values.
 #' Also, since SHAP stands for "SHapley Additive exPlanation" (model prediction = sum of SHAP
 #' contributions for all features + bias), depending on the objective used, transforming SHAP
 #' contributions for a feature from the marginal to the prediction space is not necessarily
 #' a meaningful thing to do.
-#'
+#'
 #' @return
-#'
+#'
 #' In addition to producing plots (when \code{plot=TRUE}), it silently returns a list of two matrices:
 #' \itemize{
 #'   \item \code{data} the values of selected features;
@@ -70,11 +70,11 @@
 #' Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles", \url{https://arxiv.org/abs/1706.06060}
 #'
 #' @examples
-#'
+#'
 #' data(agaricus.train, package='xgboost')
 #' data(agaricus.test, package='xgboost')
 #'
-#' bst <- xgboost(agaricus.train$data, agaricus.train$label, nrounds = 50,
+#' bst <- xgboost(agaricus.train$data, agaricus.train$label, nrounds = 50,
 #'                eta = 0.1, max_depth = 3, subsample = .5,
 #'                method = "hist", objective = "binary:logistic", nthread = 2, verbose = 0)
 #'
@@ -99,7 +99,7 @@
 #' n_col = 2, col = col, pch = 16, pch_NA = 17)
 #' xgb.plot.shap(x, model = mbst, trees = trees0 + 2, target_class = 2, top_n = 4,
 #'               n_col = 2, col = col, pch = 16, pch_NA = 17)
-#'
+#'
 #' @rdname xgb.plot.shap
 #' @export
 xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1, model = NULL,
@@ -109,7 +109,7 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
                          plot_NA = TRUE, col_NA = rgb(0.7, 0, 1, 0.6), pch_NA = '.', pos_NA = 1.07,
                          plot_loess = TRUE, col_loess = 2, span_loess = 0.5,
                          which = c("1d", "2d"), plot = TRUE, ...) {

  if (!is.matrix(data) && !inherits(data, "dgCMatrix"))
    stop("data: must be either matrix or dgCMatrix")

@@ -122,7 +122,7 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
  if (!is.null(shap_contrib) &&
      (!is.matrix(shap_contrib) || nrow(shap_contrib) != nrow(data) || ncol(shap_contrib) != ncol(data) + 1))
    stop("shap_contrib is not compatible with the provided data")

  nsample <- if (is.null(subsample)) min(100000, nrow(data)) else as.integer(subsample * nrow(data))
  idx <- sample(1:nrow(data), nsample)
  data <- data[idx,]
@@ -144,13 +144,13 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
    stop("top_n: must be an integer within [1, 100]")
    features <- imp$Feature[1:min(top_n, NROW(imp))]
  }

  if (is.character(features)) {
    if (is.null(colnames(data)))
      stop("Either provide `data` with column names or provide `features` as column indices")
    features <- match(features, colnames(data))
  }

  if (n_col > length(features)) n_col <- length(features)

  if (is.list(shap_contrib)) { # multiclass: either choose a class or merge
@@ -165,7 +165,7 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
  if (is.null(cols)) cols <- paste0('X', 1:ncol(data))
  colnames(data) <- cols
  colnames(shap_contrib) <- cols

  if (plot && which == "1d") {
    op <- par(mfrow = c(ceiling(length(features) / n_col), n_col),
              oma = c(0,0,0,0) + 0.2,
@@ -27,6 +27,7 @@
 #' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
 #' xgb.save(bst, 'xgb.model')
 #' bst <- xgb.load('xgb.model')
+#' if (file.exists('xgb.model')) file.remove('xgb.model')
 #' pred <- predict(bst, test$data)
 #' @export
 xgb.save <- function(model, fname) {

@@ -1,23 +1,23 @@
 #' Save xgboost model to R's raw vector,
-#' user can call xgb.load to load the model back from raw vector
-#'
+#' user can call xgb.load.raw to load the model back from raw vector
+#'
 #' Save xgboost model from xgboost or xgb.train
-#'
+#'
 #' @param model the model object.
-#'
+#'
 #' @examples
 #' data(agaricus.train, package='xgboost')
 #' data(agaricus.test, package='xgboost')
 #' train <- agaricus.train
 #' test <- agaricus.test
-#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
+#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
 #'                eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
 #' raw <- xgb.save.raw(bst)
-#' bst <- xgb.load(raw)
+#' bst <- xgb.load.raw(raw)
 #' pred <- predict(bst, test$data)
 #'
 #' @export
 xgb.save.raw <- function(model) {
-  model <- xgb.get.handle(model)
-  .Call(XGBoosterModelToRaw_R, model)
+  handle <- xgb.get.handle(model)
+  .Call(XGBoosterModelToRaw_R, handle)
 }

R-package/R/xgb.serialize.R (new file, 21 lines)
@@ -0,0 +1,21 @@
+#' Serialize the booster instance into R's raw vector. The serialization method differs
+#' from \code{\link{xgb.save.raw}} as the latter one saves only the model but not
+#' parameters. This serialization format is not stable across different xgboost versions.
+#'
+#' @param booster the booster instance
+#'
+#' @examples
+#' data(agaricus.train, package='xgboost')
+#' data(agaricus.test, package='xgboost')
+#' train <- agaricus.train
+#' test <- agaricus.test
+#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
+#'                eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
+#' raw <- xgb.serialize(bst)
+#' bst <- xgb.unserialize(raw)
+#'
+#' @export
+xgb.serialize <- function(booster) {
+  handle <- xgb.get.handle(booster)
+  .Call(XGBoosterSerializeToBuffer_R, handle)
+}
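To make the contrast concrete, a hedged sketch of the two raw formats side by side (bst as in the example above; the behavioural contrast in the comments restates the doc text):

    m1 <- xgb.save.raw(bst)      # model only; meant to be portable across versions
    m2 <- xgb.serialize(bst)     # model + parameters; same-version round trips only
    bst2 <- xgb.unserialize(m2)  # restores the complete booster state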
@@ -1,48 +1,48 @@
 #' eXtreme Gradient Boosting Training
-#'
+#'
 #' \code{xgb.train} is an advanced interface for training an xgboost model.
 #' The \code{xgboost} function is a simpler wrapper for \code{xgb.train}.
 #'
-#' @param params the list of parameters.
+#' @param params the list of parameters.
 #' The complete list of parameters is available at \url{http://xgboost.readthedocs.io/en/latest/parameter.html}.
 #' Below is a shorter summary:
-#'
+#'
 #' 1. General Parameters
-#'
+#'
 #' \itemize{
 #' \item \code{booster} which booster to use, can be \code{gbtree} or \code{gblinear}. Default: \code{gbtree}.
 #' }
-#'
+#'
 #' 2. Booster Parameters
-#'
+#'
 #' 2.1. Parameter for Tree Booster
-#'
+#'
 #' \itemize{
 #' \item \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model more robust to overfitting but slower to compute. Default: 0.3
-#' \item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. the larger, the more conservative the algorithm will be.
+#' \item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. the larger, the more conservative the algorithm will be.
 #' \item \code{max_depth} maximum depth of a tree. Default: 6
 #' \item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
-#' \item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
+#' \item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
 #' \item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
 #' \item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through Xgboost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
 #' \item \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length equals to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
 #' \item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave argument unspecified for no interaction constraints.
 #' }
-#'
+#'
 #' 2.2. Parameter for Linear Booster
-#'
+#'
 #' \itemize{
 #' \item \code{lambda} L2 regularization term on weights. Default: 0
 #' \item \code{lambda_bias} L2 regularization term on bias. Default: 0
 #' \item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
 #' }
-#'
-#' 3. Task Parameters
-#'
+#'
+#' 3. Task Parameters
+#'
 #' \itemize{
 #' \item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
 #'   \itemize{
-#'     \item \code{reg:linear} linear regression (Default).
+#'     \item \code{reg:squarederror} Regression with squared loss (Default).
 #'     \item \code{reg:logistic} logistic regression.
 #'     \item \code{binary:logistic} logistic regression for binary classification. Output probability.
 #'     \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
@@ -54,32 +54,32 @@
|
||||
#' \item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
|
||||
#' \item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: metric will be assigned according to objective(rmse for regression, and error for classification, mean average precision for ranking). List is provided in detail section.
|
||||
#' }
|
||||
#'
|
||||
#'
|
||||
#' @param data training dataset. \code{xgb.train} accepts only an \code{xgb.DMatrix} as the input.
#' \code{xgboost}, in addition, also accepts \code{matrix}, \code{dgCMatrix}, or name of a local data file.
#' @param nrounds max number of boosting iterations.
#' @param watchlist named list of xgb.DMatrix datasets to use for evaluating model performance.
#' Metrics specified in either \code{eval_metric} or \code{feval} will be computed for each
#' of these datasets during each boosting iteration, and stored in the end as a field named
#' \code{evaluation_log} in the resulting object. When either \code{verbose>=1} or the
#' \code{\link{cb.print.evaluation}} callback is engaged, the performance results are continuously
#' printed out during the training.
#' E.g., specifying \code{watchlist=list(validation1=mat1, validation2=mat2)} allows tracking
#' the performance of each round's model on mat1 and mat2.
#' @param obj customized objective function. Returns gradient and second order
#' gradient with given prediction and dtrain.
#' @param feval customized evaluation function. Returns
#' \code{list(metric='metric-name', value='metric-value')} with given
#' prediction and dtrain.
#' @param verbose If 0, xgboost will stay silent. If 1, it will print information about performance.
#' If 2, some additional information will be printed out.
#' Note that setting \code{verbose > 0} automatically engages the
#' \code{cb.print.evaluation(period=1)} callback function.
#' @param print_every_n Print each n-th iteration evaluation messages when \code{verbose>0}.
#' Default is 1 which means all messages are printed. This parameter is passed to the
#' \code{\link{cb.print.evaluation}} callback.
#' @param early_stopping_rounds If \code{NULL}, the early stopping function is not triggered.
#' If set to an integer \code{k}, training with a validation set will stop if the performance
#' doesn't improve for \code{k} rounds.
#' Setting this parameter engages the \code{\link{cb.early.stop}} callback.
#' @param maximize If \code{feval} and \code{early_stopping_rounds} are set,
@@ -90,35 +90,35 @@
#' 0 means save at the end. The saving is handled by the \code{\link{cb.save.model}} callback.
#' @param save_name the name or path for periodically saved model file.
#' @param xgb_model a previously built model to continue the training from.
#' Could be either an object of class \code{xgb.Booster}, or its raw data, or the name of a
#' file with a previously saved model.
#' @param callbacks a list of callback functions to perform various tasks during boosting.
#' See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
#' parameters' values. The user can provide either existing or their own callback methods in order
#' to customize the training process.
#' @param ... other parameters to pass to \code{params}.
#' @param label vector of response values. Should not be provided when data is
#' a local data file name or an \code{xgb.DMatrix}.
#' @param missing by default is set to NA, which means that NA values should be considered as 'missing'
#' by the algorithm. Sometimes, 0 or other extreme value might be used to represent missing values.
#' This parameter is only used when input is a dense matrix.
#' @param weight a vector indicating the weight for each row of the input.
#'
#' @details
#' These are the training functions for \code{xgboost}.
#'
#' The \code{xgb.train} interface supports advanced features such as \code{watchlist},
#' customized objective and evaluation metric functions, and is therefore more flexible
#' than the \code{xgboost} interface.
#'
#' Parallelization is automatically enabled if \code{OpenMP} is present.
#' The number of threads can also be manually specified via the \code{nthread} parameter.
#'
#' The evaluation metric is chosen automatically by Xgboost (according to the objective)
#' when the \code{eval_metric} parameter is not provided.
#' The user may set one or several \code{eval_metric} parameters.
#' Note that when using a customized metric, only this single metric can be used.
#' The following is the list of built-in metrics for which Xgboost provides optimized implementations:
#' \itemize{
#' \item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
#' \item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
@@ -131,7 +131,7 @@
#' \item \code{aucpr} Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation.
#' \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{http://en.wikipedia.org/wiki/NDCG}
#' }
#'
#' The following callbacks are automatically created when certain parameters are set:
#' \itemize{
#' \item \code{cb.print.evaluation} is turned on when \code{verbose > 0};
@@ -140,38 +140,38 @@
#' \item \code{cb.early.stop}: when \code{early_stopping_rounds} is set.
#' \item \code{cb.save.model}: when \code{save_period > 0} is set.
#' }
#'
#' @return
#' An object of class \code{xgb.Booster} with the following elements:
#' \itemize{
#' \item \code{handle} a handle (pointer) to the xgboost model in memory.
#' \item \code{raw} a cached memory dump of the xgboost model saved as R's \code{raw} type.
#' \item \code{niter} number of boosting iterations.
#' \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
#' first column corresponding to iteration number and the rest corresponding to evaluation
#' metrics' values. It is created by the \code{\link{cb.evaluation.log}} callback.
#' \item \code{call} a function call.
#' \item \code{params} parameters that were passed to the xgboost library. Note that it does not
#' capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
#' \item \code{callbacks} callback functions that were either automatically assigned or
#' explicitly passed.
#' \item \code{best_iteration} iteration number with the best evaluation metric value
#' (only available with early stopping).
#' \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
#' which could further be used in the \code{predict} method
#' (only available with early stopping).
#' \item \code{best_score} the best evaluation metric value during early stopping
#' (only available with early stopping).
#' \item \code{feature_names} names of the training dataset features
#' (only when column names were defined in training data).
#' \item \code{nfeatures} number of features in training data.
#' }
#'
#' @seealso
#' \code{\link{callbacks}},
#' \code{\link{predict.xgb.Booster}},
#' \code{\link{xgb.cv}}
#'
#' @references
#'
#' Tianqi Chen and Carlos Guestrin, "XGBoost: A Scalable Tree Boosting System",
@@ -180,17 +180,17 @@
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#'
#' dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
#' dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
#' watchlist <- list(train = dtrain, eval = dtest)
#'
#' ## A simple xgb.train example:
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
#'               objective = "binary:logistic", eval_metric = "auc")
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)
#'
#'
#' ## An xgb.train example where custom objective and evaluation metric are used:
#' logregobj <- function(preds, dtrain) {
#'    labels <- getinfo(dtrain, "label")
@@ -204,58 +204,58 @@
#'    err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
#'    return(list(metric = "error", value = err))
#' }
#'
#' # These functions could be used by passing them either:
#' #  as 'objective' and 'eval_metric' parameters in the params list:
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
#'               objective = logregobj, eval_metric = evalerror)
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)
#'
#' # or through the ... arguments:
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2)
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
#'                  objective = logregobj, eval_metric = evalerror)
#'
#' # or as dedicated 'obj' and 'feval' parameters of xgb.train:
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
#'                  obj = logregobj, feval = evalerror)
#'
#'
#' ## An xgb.train example of using variable learning rates at each iteration:
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
#'               objective = "binary:logistic", eval_metric = "auc")
#' my_etas <- list(eta = c(0.5, 0.1))
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
#'                  callbacks = list(cb.reset.parameters(my_etas)))
#'
#' ## Early stopping:
#' bst <- xgb.train(param, dtrain, nrounds = 25, watchlist,
#'                  early_stopping_rounds = 3)
#'
#' ## An 'xgboost' interface example:
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
#'                max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
#'                objective = "binary:logistic")
#' pred <- predict(bst, agaricus.test$data)
#'
#' @rdname xgb.train
#' @export
xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
                      obj = NULL, feval = NULL, verbose = 1, print_every_n = 1L,
                      early_stopping_rounds = NULL, maximize = NULL,
                      save_period = NULL, save_name = "xgboost.model",
                      xgb_model = NULL, callbacks = list(), ...) {

  check.deprecation(...)

  params <- check.booster.params(params, ...)

  check.custom.obj()
  check.custom.eval()

  # data & watchlist checks
  dtrain <- data
  if (!inherits(dtrain, "xgb.DMatrix"))
    stop("second argument dtrain must be xgb.DMatrix")
  if (length(watchlist) > 0) {
    if (typeof(watchlist) != "list" ||
@@ -267,7 +267,7 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
  }

  # evaluation printing callback
  params <- c(params)
  print_every_n <- max(as.integer(print_every_n), 1L)
  if (!has.callbacks(callbacks, 'cb.print.evaluation') &&
      verbose) {
@@ -288,11 +288,16 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
  stop_condition <- FALSE
  if (!is.null(early_stopping_rounds) &&
      !has.callbacks(callbacks, 'cb.early.stop')) {
    callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds,
                                                 maximize = maximize, verbose = verbose))
  }

  # Sort the callbacks into categories
  cb <- categorize.callbacks(callbacks)

  params['validate_parameters'] <- TRUE
  if (!is.null(params[['seed']])) {
    warning("xgb.train: `seed` is ignored in R package. Use `set.seed()` instead.")
  }

  # The tree updating process would need slightly different handling
  is_update <- NVL(params[['process_type']], '.') == 'update'
@@ -318,22 +323,22 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),

  # TODO: distributed code
  rank <- 0

  niter_skip <- ifelse(is_update, 0, niter_init)
  begin_iteration <- niter_skip + 1
  end_iteration <- niter_skip + nrounds

  # the main loop for boosting iterations
  for (iteration in begin_iteration:end_iteration) {

    for (f in cb$pre_iter) f()

    xgb.iter.update(bst$handle, dtrain, iteration - 1, obj)

    bst_evaluation <- numeric(0)
    if (length(watchlist) > 0)
      bst_evaluation <- xgb.iter.eval(bst$handle, watchlist, iteration - 1, feval)

    xgb.attr(bst$handle, 'niter') <- iteration - 1

    for (f in cb$post_iter) f()
@@ -341,9 +346,9 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
    if (stop_condition) break
  }
  for (f in cb$finalize) f(finalize = TRUE)

  bst <- xgb.Booster.complete(bst, saveraw = TRUE)

  # store the total number of boosting iterations
  bst$niter = end_iteration
R-package/R/xgb.unserialize.R (new file, 12 lines)
@@ -0,0 +1,12 @@
#' Load the instance back from \code{\link{xgb.serialize}}
#'
#' @param buffer the buffer containing booster instance saved by \code{\link{xgb.serialize}}
#'
#' @export
xgb.unserialize <- function(buffer) {
  cachelist <- list()
  handle <- .Call(XGBoosterCreate_R, cachelist)
  .Call(XGBoosterUnserializeFromBuffer_R, handle, buffer)
  class(handle) <- "xgb.Booster.handle"
  return(handle)
}
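A quick sketch of how this new function pairs with \code{xgb.serialize} (assuming a trained booster \code{bst} and a \code{dtest} DMatrix):

```r
buf <- xgb.serialize(bst)        # raw vector with the full booster state
handle <- xgb.unserialize(buf)   # restored as an xgb.Booster.handle
pred <- predict(handle, dtest)   # handles can be used with predict() directly
```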
@@ -5,8 +5,8 @@
#' @export
xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL,
                    params = list(), nrounds,
                    verbose = 1, print_every_n = 1L,
                    early_stopping_rounds = NULL, maximize = NULL,
                    save_period = NULL, save_name = "xgboost.model",
                    xgb_model = NULL, callbacks = list(), ...) {

@@ -18,16 +18,16 @@ xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL,
  early_stopping_rounds = early_stopping_rounds, maximize = maximize,
  save_period = save_period, save_name = save_name,
  xgb_model = xgb_model, callbacks = callbacks, ...)
  return(bst)
}
#' Training part from Mushroom Data Set
#'
#' This data set is originally from the Mushroom data set,
#' UCI Machine Learning Repository.
#'
#' This data set includes the following fields:
#'
#' \itemize{
#'  \item \code{label} the label for each record
#'  \item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns.
@@ -35,16 +35,16 @@ xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL,
#'
#' @references
#' https://archive.ics.uci.edu/ml/datasets/Mushroom
#'
#' Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
#' [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
#' School of Information and Computer Science.
#'
#' @docType data
#' @keywords datasets
#' @name agaricus.train
#' @usage data(agaricus.train)
#' @format A list containing a label vector, and a dgCMatrix object with 6513
#' rows and 127 variables
NULL
@@ -52,9 +52,9 @@ NULL
#'
#' This data set is originally from the Mushroom data set,
#' UCI Machine Learning Repository.
#'
#' This data set includes the following fields:
#'
#' \itemize{
#'  \item \code{label} the label for each record
#'  \item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns.
@@ -62,16 +62,16 @@ NULL
#'
#' @references
#' https://archive.ics.uci.edu/ml/datasets/Mushroom
#'
#' Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
#' [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
#' School of Information and Computer Science.
#'
#' @docType data
#' @keywords datasets
#' @name agaricus.test
#' @usage data(agaricus.test)
#' @format A list containing a label vector, and a dgCMatrix object with 1611
#' rows and 126 variables
NULL
@@ -107,7 +107,7 @@ NULL
#' @importFrom graphics par
#' @importFrom graphics title
#' @importFrom grDevices rgb
#'
#' @import methods
#' @useDynLib xgboost, .registration = TRUE
NULL
@@ -30,4 +30,4 @@ Examples
Development
-----------

* See the [R Package section](https://xgboost.readthedocs.io/en/latest/contribute.html#r-package) of the contributors guide.
@@ -1,3 +1,4 @@
#!/bin/sh

rm -f src/Makevars
rm -f CMakeLists.txt
R-package/configure (vendored; 1043 lines changed — diff suppressed because it is too large)
@@ -4,6 +4,21 @@ AC_PREREQ(2.62)

AC_INIT([xgboost],[0.6-3],[],[xgboost],[])

# Use this line to set CC variable to a C compiler
AC_PROG_CC

### Check whether backtrace() is part of libc or the external lib libexecinfo
AC_MSG_CHECKING([Backtrace lib])
AC_MSG_RESULT([])
AC_CHECK_LIB([execinfo], [backtrace], [BACKTRACE_LIB=-lexecinfo], [BACKTRACE_LIB=''])

### Endian detection
AC_MSG_CHECKING([endian])
AC_MSG_RESULT([])
AC_RUN_IFELSE([AC_LANG_PROGRAM([[#include <stdint.h>]], [[const uint16_t endianness = 256; return !!(*(const uint8_t *)&endianness);]])],
    [ENDIAN_FLAG="-DDMLC_CMAKE_LITTLE_ENDIAN=1"],
    [ENDIAN_FLAG="-DDMLC_CMAKE_LITTLE_ENDIAN=0"])

OPENMP_CXXFLAGS=""

if test `uname -s` = "Linux"
@@ -13,19 +28,28 @@ fi

if test `uname -s` = "Darwin"
then
  OPENMP_CXXFLAGS='-Xclang -fopenmp'
  OPENMP_LIB='/usr/local/lib/libomp.dylib'
  ac_pkg_openmp=no
  AC_MSG_CHECKING([whether OpenMP will work in a package])
  AC_LANG_CONFTEST([AC_LANG_PROGRAM([[#include <omp.h>]], [[ return (omp_get_max_threads() <= 1); ]])])
  ${CC} -o conftest conftest.c /usr/local/lib/libomp.dylib -Xclang -fopenmp 2>/dev/null && ./conftest && ac_pkg_openmp=yes
  AC_MSG_RESULT([${ac_pkg_openmp}])
  if test "${ac_pkg_openmp}" = no; then
    OPENMP_CXXFLAGS=''
    OPENMP_LIB=''
    echo '*****************************************************************************************'
    echo 'WARNING: OpenMP is unavailable on this Mac OSX system. Training speed may be suboptimal.'
    echo '         To use all CPU cores for training jobs, you should install OpenMP by running'
    echo '             brew install libomp'
    echo '*****************************************************************************************'
  fi
fi

AC_SUBST(OPENMP_CXXFLAGS)
AC_SUBST(OPENMP_LIB)
AC_SUBST(ENDIAN_FLAG)
AC_SUBST(BACKTRACE_LIB)
AC_CONFIG_FILES([src/Makevars])
AC_OUTPUT
@@ -30,7 +30,7 @@ wl <- list(train = dtrain, test = dtest)
# - similar to the 'hist'
# - the fastest option for moderately large datasets
# - current limitations: max_depth < 16, does not implement guided loss
# You can use tree_method = 'gpu_hist' for another GPU accelerated algorithm,
# which is slower, more memory-hungry, but does not use binning.
param <- list(objective = 'reg:logistic', eval_metric = 'auc', subsample = 0.5, nthread = 4,
              max_bin = 64, tree_method = 'gpu_hist')
@@ -38,6 +38,7 @@ create.new.tree.features <- function(model, original.features){
# Convert previous features to one hot encoding
new.features.train <- create.new.tree.features(bst, agaricus.train$data)
new.features.test <- create.new.tree.features(bst, agaricus.test$data)
colnames(new.features.test) <- colnames(new.features.train)

# learning with new features
new.dtrain <- xgb.DMatrix(data = new.features.train, label = agaricus.train$label)
@@ -4,8 +4,10 @@
\name{agaricus.test}
\alias{agaricus.test}
\title{Test part from Mushroom Data Set}
\format{
A list containing a label vector, and a dgCMatrix object with 1611
rows and 126 variables
}
\usage{
data(agaricus.test)
}
@@ -24,8 +26,8 @@ This data set includes the following fields:
\references{
https://archive.ics.uci.edu/ml/datasets/Mushroom

Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
School of Information and Computer Science.
}
\keyword{datasets}
@@ -4,8 +4,10 @@
\name{agaricus.train}
\alias{agaricus.train}
\title{Training part from Mushroom Data Set}
\format{
A list containing a label vector, and a dgCMatrix object with 6513
rows and 127 variables
}
\usage{
data(agaricus.train)
}
@@ -24,8 +26,8 @@ This data set includes the following fields:
\references{
https://archive.ics.uci.edu/ml/datasets/Mushroom

Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
School of Information and Computer Science.
}
\keyword{datasets}
@@ -5,24 +5,24 @@
\title{Callback closures for booster training.}
\description{
These are used to perform various service tasks either during boosting iterations or at the end.
This approach helps to modularize many of such tasks without bloating the main training methods,
and it offers a convenient way to customize the training process.
}
\details{
By default, a callback function is run after each boosting iteration.
An R-attribute \code{is_pre_iteration} could be set for a callback to define a pre-iteration function.

When a callback function has a \code{finalize} parameter, its finalizer part will also be run after
the boosting is completed.

WARNING: side-effects!!! Be aware that these callback functions access and modify things in
the environment from which they are called, which is a fairly uncommon thing to do in R.

To write a custom callback closure, make sure you first understand the main concepts about R environments.
Check either R documentation on \code{\link[base]{environment}} or the
\href{http://adv-r.had.co.nz/Environments.html}{Environments chapter} from the "Advanced R"
book by Hadley Wickham. Further, the best option is to read the code of some of the existing callbacks -
choose ones that do something similar to what you want to achieve. Also, you would need to get familiar
with the objects available inside of the \code{xgb.train} and \code{xgb.cv} internal environments.
}
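To make the closure mechanics described above concrete, here is a minimal sketch of a custom callback (the name \code{cb.print.iter} is hypothetical) that reads the \code{iteration} variable from its calling frame, in the style the existing callbacks use:

```r
cb.print.iter <- function() {
  callback <- function() {
    env <- parent.frame()  # the internal environment of xgb.train
    cat("finished iteration", env$iteration, "\n")
  }
  attr(callback, 'name') <- 'cb.print.iter'
  callback
}

# bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
#                  callbacks = list(cb.print.iter()))
```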
\seealso{
@@ -11,11 +11,11 @@ cb.cv.predict(save_models = FALSE)
}
\value{
Predictions are returned inside of the \code{pred} element, which is either a vector or a matrix,
depending on the number of prediction outputs per data row. The order of predictions corresponds
to the order of rows in the original dataset. Note that when a custom \code{folds} list is
provided in \code{xgb.cv}, the predictions would only be returned properly when this list is a
non-overlapping list of k sets of indices, as in a standard k-fold CV. The predictions would not be
meaningful when user-provided folds have overlapping indices as in, e.g., random sampling splits.
When some of the indices in the training dataset are not included into user-provided \code{folds},
their prediction value would be \code{NA}.
}
@@ -4,19 +4,23 @@
\alias{cb.early.stop}
\title{Callback closure to activate the early stopping.}
\usage{
cb.early.stop(
  stopping_rounds,
  maximize = FALSE,
  metric_name = NULL,
  verbose = TRUE
)
}
\arguments{
\item{stopping_rounds}{The number of rounds with no improvement in
the evaluation metric in order to stop the training.}

\item{maximize}{whether to maximize the evaluation metric}

\item{metric_name}{the name of an evaluation column to use as a criterion for early
stopping. If not set, the last column would be used.
Let's say the test data in \code{watchlist} was labelled as \code{dtest},
and one wants to use the AUC in test data for early stopping regardless of where
it is in the \code{watchlist}, then one of the following would need to be set:
\code{metric_name='dtest-auc'} or \code{metric_name='dtest_auc'}.
All dash '-' characters in metric names are considered equivalent to '_'.}
@@ -27,7 +31,7 @@ All dash '-' characters in metric names are considered equivalent to '_'.}
Callback closure to activate the early stopping.
}
\details{
This callback function determines the condition for early stopping
by setting the \code{stop_condition = TRUE} flag in its calling frame.
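A short sketch of the \code{metric_name} mechanism described above (assuming \code{dtrain}, \code{dtest} and a watchlist as in the \code{xgb.train} examples):

```r
# Stop on the test-set AUC specifically, regardless of watchlist order
bst <- xgb.train(list(objective = "binary:logistic", eval_metric = "auc"),
                 dtrain, nrounds = 50,
                 watchlist = list(dtest = dtest, train = dtrain),
                 callbacks = list(cb.early.stop(stopping_rounds = 3,
                                                maximize = TRUE,
                                                metric_name = "dtest-auc")))
```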
The following additional fields are assigned to the model's R object:
@@ -13,12 +13,12 @@ Callback closure for logging the evaluation history
This callback function appends the current iteration evaluation results \code{bst_evaluation}
available in the calling parent frame to the \code{evaluation_log} list in a calling frame.

The finalizer callback (called with \code{finalize = TRUE} in the end) converts
the \code{evaluation_log} list into a final data.table.

The iteration evaluation result \code{bst_evaluation} must be a named numeric vector.

Note: in the column names of the final data.table, the dash '-' character is replaced with
the underscore '_' in order to make the column names more like regular R identifiers.

Callback function expects the following values to be set in its calling frame:
@@ -2,27 +2,27 @@
% Please edit documentation in R/callbacks.R
\name{cb.reset.parameters}
\alias{cb.reset.parameters}
\title{Callback closure for resetting the booster's parameters at each iteration.}
\usage{
cb.reset.parameters(new_params)
}
\arguments{
\item{new_params}{a list where each element corresponds to a parameter that needs to be reset.
Each element's value must be either a vector of values of length \code{nrounds}
to be set at each iteration,
or a function of two parameters \code{learning_rates(iteration, nrounds)}
which returns a new parameter value by using the current iteration number
and the total number of boosting rounds.}
}
\description{
Callback closure for resetting the booster's parameters at each iteration.
}
\details{
This is a "pre-iteration" callback function used to reset booster's parameters
at the beginning of each iteration.

Note that when training is resumed from some previous model, and a function is used to
reset a parameter value, the \code{nrounds} argument in this function would be
the number of boosting rounds in the current training.
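As a sketch of the function form documented above, a decaying learning rate could be supplied like this (reusing \code{param}, \code{dtrain} and \code{watchlist} from the \code{xgb.train} examples):

```r
# eta shrinks by 10% each iteration; the function receives the current
# iteration number and the total number of boosting rounds
my_etas <- list(eta = function(iteration, nrounds) 0.5 * 0.9^(iteration - 1))
bst <- xgb.train(param, dtrain, nrounds = 10, watchlist,
                 callbacks = list(cb.reset.parameters(my_etas)))
```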
Callback function expects the following values to be set in its calling frame:
@@ -7,13 +7,13 @@
cb.save.model(save_period = 0, save_name = "xgboost.model")
}
\arguments{
\item{save_period}{save the model to disk after every
\code{save_period} iterations; 0 means save the model at the end.}

\item{save_name}{the name or path for the saved model file.
It can contain a \code{\link[base]{sprintf}} formatting specifier
to include the integer iteration number in the file name.
E.g., with \code{save_name} = 'xgboost_%04d.model',
the file saved at iteration 50 would be named "xgboost_0050.model".}
}
\description{
@@ -13,7 +13,7 @@
Returns a vector of numbers of rows and of columns in an \code{xgb.DMatrix}.
}
\details{
Note: since \code{nrow} and \code{ncol} internally use \code{dim}, they can also
be directly used with an \code{xgb.DMatrix} object.
}
\examples{
@@ -16,8 +16,8 @@
and the second one is column names}
}
\description{
Only column names are supported for \code{xgb.DMatrix}, thus setting of
row names would have no effect and returned row names would be NULL.
}
\details{
Generic \code{dimnames} methods are used by \code{colnames}.
@@ -27,7 +27,7 @@ The \code{name} field can be one of the following:
\item \code{weight}: to do a weight rescale ;
\item \code{base_margin}: base margin is the base prediction Xgboost will boost from ;
\item \code{nrow}: number of rows of the \code{xgb.DMatrix}.
}

\code{group} can be set up by \code{setinfo} but can't be retrieved by \code{getinfo}.
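A short sketch of setting the write-only \code{group} field for a ranking task (the \code{dtrain_rank} object and the group sizes are purely illustrative):

```r
# The first 10 rows form query group 1, the next 20 rows query group 2
setinfo(dtrain_rank, "group", c(10, 20))
```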
@@ -5,10 +5,20 @@
\alias{predict.xgb.Booster.handle}
\title{Predict method for eXtreme Gradient Boosting model}
\usage{
\method{predict}{xgb.Booster}(
  object,
  newdata,
  missing = NA,
  outputmargin = FALSE,
  ntreelimit = NULL,
  predleaf = FALSE,
  predcontrib = FALSE,
  approxcontrib = FALSE,
  predinteraction = FALSE,
  reshape = FALSE,
  training = FALSE,
  ...
)

\method{predict}{xgb.Booster.handle}(object, ...)
}
@@ -39,6 +49,9 @@ It will use all the trees by default (\code{NULL} value).}
prediction outputs per case. This option has no effect when either of predleaf, predcontrib,
or predinteraction flags is TRUE.}

\item{training}{whether the prediction result is used for training. For the dart booster,
predicting in training mode will perform dropout.}

\item{...}{Parameters passed to \code{predict.xgb.Booster}}
}
\value{
@@ -91,7 +104,7 @@ in \url{http://blog.datadive.net/interpreting-random-forests/}.

With \code{predinteraction = TRUE}, SHAP values of contributions of interaction of each pair of features
are computed. Note that this operation might be rather expensive in terms of compute and memory.
Since it quadratically depends on the number of features, it is recommended to perform selection
of the most important features first. See below about the format of the returned results.
}
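For instance, a SHAP-style contribution matrix can be requested as sketched below (assuming the agaricus \code{bst} model from the examples):

```r
# One row per observation; one column per feature plus a trailing BIAS column
contrib <- predict(bst, agaricus.test$data, predcontrib = TRUE)
dim(contrib)  # nrow(test) x (nfeatures + 1)
```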
\examples{
@@ -14,7 +14,7 @@
\item{...}{not currently used}
}
\description{
Print information about xgb.DMatrix.
Currently it displays dimensions and presence of info-fields and colnames.
}
\examples{
@@ -17,7 +17,7 @@
Prints formatted results of \code{xgb.cv}.
}
\details{
When not verbose, it would only print the evaluation results,
including the best iteration (when available).
}
\examples{
@@ -5,7 +5,7 @@
\alias{slice.xgb.DMatrix}
\alias{[.xgb.DMatrix}
\title{Get a new DMatrix containing the specified rows of
original xgb.DMatrix object}
\usage{
slice(object, ...)

@@ -24,7 +24,7 @@ slice(object, ...)
}
\description{
Get a new DMatrix containing the specified rows of
original xgb.DMatrix object
}
\examples{
data(agaricus.train, package='xgboost')
@@ -28,7 +28,7 @@ E.g., when an \code{xgb.Booster} model is saved as an R object and then is loade
its handle (pointer) to an internal xgboost model would be invalid. The majority of xgboost methods
should still work for such a model object since those methods would be using
\code{xgb.Booster.complete} internally. However, one might find it to be more efficient to call the
\code{xgb.Booster.complete} function explicitly once after loading a model as an R-object.
That would prevent further repeated implicit reconstruction of an internal booster model.
}
\examples{
@@ -39,6 +39,7 @@ bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_dep
saveRDS(bst, "xgb.model.rds")

bst1 <- readRDS("xgb.model.rds")
if (file.exists("xgb.model.rds")) file.remove("xgb.model.rds")
# the handle is invalid:
print(bst1$handle)
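The natural follow-up to the snippet above, per this page's own advice, is a sketch that restores the handle explicitly:

```r
bst1 <- xgb.Booster.complete(bst1)  # rebuild the internal handle from $raw
print(bst1$handle)                  # now a valid pointer
```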
@@ -7,7 +7,7 @@
xgb.DMatrix(data, info = list(), missing = NA, silent = FALSE, ...)
}
\arguments{
\item{data}{a \code{matrix} object (either numeric or integer), a \code{dgCMatrix} object, or a character
string representing a filename.}

\item{info}{a named list of additional information to store in the \code{xgb.DMatrix} object.
@@ -31,4 +31,5 @@ train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
dtrain <- xgb.DMatrix('xgb.DMatrix.data')
if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
}
@@ -20,4 +20,5 @@ train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
dtrain <- xgb.DMatrix('xgb.DMatrix.data')
if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
}
@@ -55,7 +55,7 @@ than for \code{xgb.Booster}, since only just a handle (pointer) would need to be
That would only matter if attributes need to be set many times.
Note, however, that when feeding a handle of an \code{xgb.Booster} object to the attribute setters,
the raw model cache of an \code{xgb.Booster} object would not be automatically updated,
and it would be the user's responsibility to call \code{xgb.serialize} to update it.

The \code{xgb.attributes<-} setter either updates the existing or adds one or several attributes,
but it doesn't delete the other existing attributes.
@@ -73,6 +73,7 @@ xgb.attributes(bst) <- list(a = 123, b = "abc")

xgb.save(bst, 'xgb.model')
bst1 <- xgb.load('xgb.model')
if (file.exists('xgb.model')) file.remove('xgb.model')
print(xgb.attr(bst1, "my_attribute"))
print(xgb.attributes(bst1))
R-package/man/xgb.config.Rd (new file, 28 lines)
@@ -0,0 +1,28 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.Booster.R
\name{xgb.config}
\alias{xgb.config}
\alias{xgb.config<-}
\title{Accessors for model parameters as JSON string.}
\usage{
xgb.config(object)

xgb.config(object) <- value
}
\arguments{
\item{object}{Object of class \code{xgb.Booster}}

\item{value}{A JSON string.}
}
\description{
Accessors for model parameters as JSON string.
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train

bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
               eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
config <- xgb.config(bst)

}
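A sketch of the accessor pair in action (continuing from the example above):

```r
cfg <- xgb.config(bst)   # JSON string with the learner configuration
cat(substr(cfg, 1, 80))  # peek at the beginning of the JSON
xgb.config(bst) <- cfg   # the setter accepts the (possibly edited) JSON back
```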
@@ -87,6 +87,6 @@ accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) /

# Here the accuracy was already good and is now perfect.
cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now",
          accuracy.after, "!\n"))

}
@@ -4,19 +4,35 @@
\alias{xgb.cv}
\title{Cross Validation}
\usage{
xgb.cv(
  params = list(),
  data,
  nrounds,
  nfold,
  label = NULL,
  missing = NA,
  prediction = FALSE,
  showsd = TRUE,
  metrics = list(),
  obj = NULL,
  feval = NULL,
  stratified = TRUE,
  folds = NULL,
  train_folds = NULL,
  verbose = TRUE,
  print_every_n = 1L,
  early_stopping_rounds = NULL,
  maximize = NULL,
  callbacks = list(),
  ...
)
}
\arguments{
\item{params}{the list of parameters. Commonly used ones are:
\itemize{
\item \code{objective} objective function, common ones are
\itemize{
\item \code{reg:squarederror} Regression with squared loss
\item \code{binary:logistic} logistic regression for classification
}
\item \code{eta} step size of each boosting step
@@ -35,11 +51,11 @@ xgb.cv(params = list(), data, nrounds, nfold, label = NULL,

\item{label}{vector of response values. Should be provided only when data is an R-matrix.}

\item{missing}{is only used when input is a dense matrix. By default is set to NA, which means
that NA values should be considered as 'missing' by the algorithm.
Sometimes, 0 or other extreme value might be used to represent missing values.}

\item{prediction}{A logical value indicating whether to return the test fold predictions
from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callback.}

\item{showsd}{\code{boolean}, whether to show standard deviation of cross validation}
@@ -56,28 +72,31 @@ from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callb
\item \code{merror} Exact matching error, used to evaluate multi-class classification
}}

\item{obj}{customized objective function. Returns gradient and second order
gradient with given prediction and dtrain.}

\item{feval}{customized evaluation function. Returns
\code{list(metric='metric-name', value='metric-value')} with given
prediction and dtrain.}

\item{stratified}{a \code{boolean} indicating whether sampling of folds should be stratified
by the values of outcome labels.}

\item{folds}{\code{list} provides a possibility to use a list of pre-defined CV folds
(each element must be a vector of test fold's indices). When folds are supplied,
the \code{nfold} and \code{stratified} parameters are ignored.}

\item{train_folds}{\code{list} specifying which indices to use for training. If \code{NULL}
(the default) all indices not specified in \code{folds} will be used for training.}

\item{verbose}{\code{boolean}, print the statistics during the process}

\item{print_every_n}{Print each n-th iteration evaluation messages when \code{verbose>0}.
Default is 1 which means all messages are printed. This parameter is passed to the
\code{\link{cb.print.evaluation}} callback.}

\item{early_stopping_rounds}{If \code{NULL}, the early stopping function is not triggered.
If set to an integer \code{k}, training with a validation set will stop if the performance
doesn't improve for \code{k} rounds.
Setting this parameter engages the \code{\link{cb.early.stop}} callback.}

@@ -87,8 +106,8 @@ When it is \code{TRUE}, it means the larger the evaluation score the better.
This parameter is passed to the \code{\link{cb.early.stop}} callback.}

\item{callbacks}{a list of callback functions to perform various tasks during boosting.
See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
parameters' values. The user can provide either existing or their own callback methods in order
to customize the training process.}

\item{...}{other parameters to pass to \code{params}.}
@@ -97,26 +116,26 @@ to customize the training process.}
An object of class \code{xgb.cv.synchronous} with the following elements:
\itemize{
\item \code{call} a function call.
\item \code{params} parameters that were passed to the xgboost library. Note that it does not
capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
\item \code{callbacks} callback functions that were either automatically assigned or
explicitly passed.
\item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
first column corresponding to iteration number and the rest corresponding to the
CV-based evaluation means and standard deviations for the training and test CV-sets.
It is created by the \code{\link{cb.evaluation.log}} callback.
\item \code{niter} number of boosting iterations.
\item \code{nfeatures} number of features in training data.
\item \code{folds} the list of CV folds' indices - either those passed through the \code{folds}
parameter or randomly generated.
\item \code{best_iteration} iteration number with the best evaluation metric value
(only available with early stopping).
\item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
which could further be used in the \code{predict} method
(only available with early stopping).
\item \code{pred} CV prediction values available when \code{prediction} is set.
It is either vector or matrix (see \code{\link{cb.cv.predict}}).
\item \code{models} a list of the CV folds' models. It is only available with the explicit
setting of the \code{cb.cv.predict(save_models = TRUE)} callback.
}
}
@@ -124,9 +143,9 @@ An object of class \code{xgb.cv.synchronous} with the following elements:
The cross validation function of xgboost
}
\details{
The original sample is randomly partitioned into \code{nfold} equal size subsamples.

Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model, and the remaining \code{nfold - 1} subsamples are used as training data.

The cross-validation process is then repeated \code{nrounds} times, with each of the \code{nfold} subsamples used exactly once as the validation data.
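A compact sketch of the cross-validation interface described above (assuming the agaricus \code{dtrain} from earlier):

```r
cv <- xgb.cv(params = list(objective = "binary:logistic", eta = 1, max_depth = 2),
             data = dtrain, nrounds = 5, nfold = 3,
             early_stopping_rounds = 2, prediction = TRUE)
print(cv$evaluation_log)  # mean/std of train and test metrics per iteration
```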
@@ -4,8 +4,14 @@
\alias{xgb.dump}
\title{Dump an xgboost model in text format.}
\usage{
xgb.dump(
  model,
  fname = NULL,
  fmap = "",
  with_stats = FALSE,
  dump_format = c("text", "json"),
  ...
)
}
\arguments{
\item{model}{the model object.}
@@ -12,7 +12,7 @@ using the \code{cb.gblinear.history()} callback.}

\item{class_index}{zero-based class index to extract the coefficients for only that
specific class in a multinomial multiclass model. When it is NULL, all the
coefficients are returned. Has no effect in non-multiclass models.}
}
\value{
For an \code{xgb.train} result, a matrix (either dense or sparse) with the columns
@@ -4,8 +4,14 @@
\alias{xgb.importance}
\title{Importance of features in a model.}
\usage{
xgb.importance(
  feature_names = NULL,
  model = NULL,
  trees = NULL,
  data = NULL,
  label = NULL,
  target = NULL
)
}
\arguments{
\item{feature_names}{character vector of feature names. If the model already
@@ -33,6 +33,7 @@ bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
               eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
xgb.save(bst, 'xgb.model')
bst <- xgb.load('xgb.model')
if (file.exists('xgb.model')) file.remove('xgb.model')
pred <- predict(bst, test$data)
}
\seealso{
R-package/man/xgb.load.raw.Rd (new file, 14 lines)
@@ -0,0 +1,14 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.load.raw.R
\name{xgb.load.raw}
\alias{xgb.load.raw}
\title{Load serialised xgboost model from R's raw vector}
\usage{
xgb.load.raw(buffer)
}
\arguments{
\item{buffer}{the buffer returned by xgb.save.raw}
}
\description{
The user can generate a raw memory buffer by calling xgb.save.raw
}
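For contrast with \code{xgb.unserialize} earlier in this diff, a sketch of the raw-vector round trip this page documents (assuming a trained \code{bst}):

```r
raw_model <- xgb.save.raw(bst)   # booster (without R attributes) as a raw vector
bst2 <- xgb.load.raw(raw_model)  # reconstruct a booster handle from the buffer
```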
@@ -4,8 +4,14 @@
\alias{xgb.model.dt.tree}
\title{Parse a boosted tree model text dump}
\usage{
xgb.model.dt.tree(
  feature_names = NULL,
  model = NULL,
  text = NULL,
  trees = NULL,
  use_int_id = FALSE,
  ...
)
}
\arguments{
\item{feature_names}{character vector of feature names. If the model already
@@ -5,11 +5,17 @@
\alias{xgb.plot.deepness}
\title{Plot model trees deepness}
\usage{
-xgb.ggplot.deepness(model = NULL, which = c("2x1", "max.depth",
-  "med.depth", "med.weight"))
+xgb.ggplot.deepness(
+  model = NULL,
+  which = c("2x1", "max.depth", "med.depth", "med.weight")
+)

-xgb.plot.deepness(model = NULL, which = c("2x1", "max.depth",
-  "med.depth", "med.weight"), plot = TRUE, ...)
+xgb.plot.deepness(
+  model = NULL,
+  which = c("2x1", "max.depth", "med.depth", "med.weight"),
+  plot = TRUE,
+  ...
+)
}
\arguments{
\item{model}{either an \code{xgb.Booster} model generated by the \code{xgb.train} function
@@ -5,25 +5,38 @@
\alias{xgb.plot.importance}
\title{Plot feature importance as a bar graph}
\usage{
-xgb.ggplot.importance(importance_matrix = NULL, top_n = NULL,
-  measure = NULL, rel_to_first = FALSE, n_clusters = c(1:10), ...)
+xgb.ggplot.importance(
+  importance_matrix = NULL,
+  top_n = NULL,
+  measure = NULL,
+  rel_to_first = FALSE,
+  n_clusters = c(1:10),
+  ...
+)

-xgb.plot.importance(importance_matrix = NULL, top_n = NULL,
-  measure = NULL, rel_to_first = FALSE, left_margin = 10,
-  cex = NULL, plot = TRUE, ...)
+xgb.plot.importance(
+  importance_matrix = NULL,
+  top_n = NULL,
+  measure = NULL,
+  rel_to_first = FALSE,
+  left_margin = 10,
+  cex = NULL,
+  plot = TRUE,
+  ...
+)
}
\arguments{
\item{importance_matrix}{a \code{data.table} returned by \code{\link{xgb.importance}}.}

\item{top_n}{maximal number of top features to include into the plot.}

\item{measure}{the name of importance measure to plot.
When \code{NULL}, 'Gain' would be used for trees and 'Weight' would be used for gblinear.}

\item{rel_to_first}{whether importance values should be represented as relative to the highest ranked feature.
See Details.}

\item{n_clusters}{(ggplot only) a \code{numeric} vector containing the min and the max range
of the possible number of clusters of bars.}

\item{...}{other parameters passed to \code{barplot} (except horiz, border, cex.names, names.arg, and las).}
@@ -33,7 +46,7 @@ When it is NULL, the existing \code{par('mar')} is used.}

\item{cex}{(base R barplot) passed as \code{cex.names} parameter to \code{barplot}.}

\item{plot}{(base R barplot) whether a barplot should be produced.
If FALSE, only a data.table is returned.}
}
\value{
@@ -53,14 +66,14 @@ Features are shown ranked in a decreasing importance order.
It works for importances from both \code{gblinear} and \code{gbtree} models.

When \code{rel_to_first = FALSE}, the values would be plotted as they were in \code{importance_matrix}.
For gbtree model, that would mean being normalized to the total of 1
("what is feature's importance contribution relative to the whole model?").
For linear models, \code{rel_to_first = FALSE} would show actual values of the coefficients.
Setting \code{rel_to_first = TRUE} allows to see the picture from the perspective of
"what is feature's importance contribution relative to the most important feature?"

-The ggplot-backend method also performs 1-D custering of the importance values,
-with bar colors coresponding to different clusters that have somewhat similar importance values.
+The ggplot-backend method also performs 1-D clustering of the importance values,
+with bar colors corresponding to different clusters that have somewhat similar importance values.
}
\examples{
data(agaricus.train)

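A short sketch of the rel_to_first behaviour explained above (assuming an importance matrix imp from xgb.importance):

  # bars are scaled so the top feature equals 1; others are shown relative to it
  xgb.plot.importance(imp, top_n = 10, rel_to_first = TRUE, xlab = "Relative importance")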
@@ -4,8 +4,15 @@
\alias{xgb.plot.multi.trees}
\title{Project all trees on one tree and plot it}
\usage{
-xgb.plot.multi.trees(model, feature_names = NULL, features_keep = 5,
-  plot_width = NULL, plot_height = NULL, render = TRUE, ...)
+xgb.plot.multi.trees(
+  model,
+  feature_names = NULL,
+  features_keep = 5,
+  plot_width = NULL,
+  plot_height = NULL,
+  render = TRUE,
+  ...
+)
}
\arguments{
\item{model}{produced by the \code{xgb.train} function.}
@@ -4,18 +4,38 @@
\alias{xgb.plot.shap}
\title{SHAP contribution dependency plots}
\usage{
-xgb.plot.shap(data, shap_contrib = NULL, features = NULL, top_n = 1,
-  model = NULL, trees = NULL, target_class = NULL,
-  approxcontrib = FALSE, subsample = NULL, n_col = 1, col = rgb(0,
-  0, 1, 0.2), pch = ".", discrete_n_uniq = 5, discrete_jitter = 0.01,
-  ylab = "SHAP", plot_NA = TRUE, col_NA = rgb(0.7, 0, 1, 0.6),
-  pch_NA = ".", pos_NA = 1.07, plot_loess = TRUE, col_loess = 2,
-  span_loess = 0.5, which = c("1d", "2d"), plot = TRUE, ...)
+xgb.plot.shap(
+  data,
+  shap_contrib = NULL,
+  features = NULL,
+  top_n = 1,
+  model = NULL,
+  trees = NULL,
+  target_class = NULL,
+  approxcontrib = FALSE,
+  subsample = NULL,
+  n_col = 1,
+  col = rgb(0, 0, 1, 0.2),
+  pch = ".",
+  discrete_n_uniq = 5,
+  discrete_jitter = 0.01,
+  ylab = "SHAP",
+  plot_NA = TRUE,
+  col_NA = rgb(0.7, 0, 1, 0.6),
+  pch_NA = ".",
+  pos_NA = 1.07,
+  plot_loess = TRUE,
+  col_loess = 2,
+  span_loess = 0.5,
+  which = c("1d", "2d"),
+  plot = TRUE,
+  ...
+)
}
\arguments{
\item{data}{data as a \code{matrix} or \code{dgCMatrix}.}

\item{shap_contrib}{a matrix of SHAP contributions that was computed earlier for the above
\code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.}

\item{features}{a vector of either column indices or of feature names to plot. When it is NULL,
@@ -63,7 +83,7 @@ more than 5 distinct values.}

\item{col_loess}{a color to use for the loess curves.}

-\item{span_loess}{the \code{span} paramerer in \code{\link[stats]{loess}}'s call.}
+\item{span_loess}{the \code{span} parameter in \code{\link[stats]{loess}}'s call.}

\item{which}{whether to do univariate or bivariate plotting. NOTE: only 1D is implemented so far.}

@@ -104,7 +124,7 @@ a meaningful thing to do.
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')

bst <- xgboost(agaricus.train$data, agaricus.train$label, nrounds = 50,
               eta = 0.1, max_depth = 3, subsample = .5,
               method = "hist", objective = "binary:logistic", nthread = 2, verbose = 0)

@@ -4,9 +4,16 @@
\alias{xgb.plot.tree}
\title{Plot a boosted tree model}
\usage{
-xgb.plot.tree(feature_names = NULL, model = NULL, trees = NULL,
-  plot_width = NULL, plot_height = NULL, render = TRUE,
-  show_node_id = FALSE, ...)
+xgb.plot.tree(
+  feature_names = NULL,
+  model = NULL,
+  trees = NULL,
+  plot_width = NULL,
+  plot_height = NULL,
+  render = TRUE,
+  show_node_id = FALSE,
+  ...
+)
}
\arguments{
\item{feature_names}{names of each feature as a \code{character} vector.}
@@ -33,6 +33,7 @@ bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
               eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
xgb.save(bst, 'xgb.model')
bst <- xgb.load('xgb.model')
+if (file.exists('xgb.model')) file.remove('xgb.model')
pred <- predict(bst, test$data)
}
\seealso{

@@ -3,7 +3,7 @@
\name{xgb.save.raw}
\alias{xgb.save.raw}
\title{Save xgboost model to R's raw vector,
-user can call xgb.load to load the model back from raw vector}
+user can call xgb.load.raw to load the model back from raw vector}
\usage{
xgb.save.raw(model)
}
@@ -18,10 +18,10 @@ data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
              eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
raw <- xgb.save.raw(bst)
-bst <- xgb.load(raw)
+bst <- xgb.load.raw(raw)
pred <- predict(bst, test$data)

}

29  R-package/man/xgb.serialize.Rd  Normal file
@@ -0,0 +1,29 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/xgb.serialize.R
+\name{xgb.serialize}
+\alias{xgb.serialize}
+\title{Serialize the booster instance into R's raw vector. The serialization method differs
+from \code{\link{xgb.save.raw}} as the latter one saves only the model but not
+parameters. This serialization format is not stable across different xgboost versions.}
+\usage{
+xgb.serialize(booster)
+}
+\arguments{
+\item{booster}{the booster instance}
+}
+\description{
+Serialize the booster instance into R's raw vector. The serialization method differs
+from \code{\link{xgb.save.raw}} as the latter one saves only the model but not
+parameters. This serialization format is not stable across different xgboost versions.
+}
+\examples{
+data(agaricus.train, package='xgboost')
+data(agaricus.test, package='xgboost')
+train <- agaricus.train
+test <- agaricus.test
+bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
+              eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
+raw <- xgb.serialize(bst)
+bst <- xgb.unserialize(raw)
+
+}

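Since xgb.serialize snapshots the complete internal state (model plus parameters), the raw vector can be persisted with base R and restored losslessly, which is exactly what the new serialization test later in this changeset checks:

  raw <- xgb.serialize(booster)
  saveRDS(raw, "model_state.rds")                 # persist the snapshot
  loaded <- xgb.unserialize(readRDS("model_state.rds"))
  identical(raw, xgb.serialize(loaded))           # round trip should be TRUE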
@@ -5,20 +5,44 @@
\alias{xgboost}
\title{eXtreme Gradient Boosting Training}
\usage{
-xgb.train(params = list(), data, nrounds, watchlist = list(),
-  obj = NULL, feval = NULL, verbose = 1, print_every_n = 1L,
-  early_stopping_rounds = NULL, maximize = NULL, save_period = NULL,
-  save_name = "xgboost.model", xgb_model = NULL, callbacks = list(),
-  ...)
+xgb.train(
+  params = list(),
+  data,
+  nrounds,
+  watchlist = list(),
+  obj = NULL,
+  feval = NULL,
+  verbose = 1,
+  print_every_n = 1L,
+  early_stopping_rounds = NULL,
+  maximize = NULL,
+  save_period = NULL,
+  save_name = "xgboost.model",
+  xgb_model = NULL,
+  callbacks = list(),
+  ...
+)

-xgboost(data = NULL, label = NULL, missing = NA, weight = NULL,
-  params = list(), nrounds, verbose = 1, print_every_n = 1L,
-  early_stopping_rounds = NULL, maximize = NULL, save_period = NULL,
-  save_name = "xgboost.model", xgb_model = NULL, callbacks = list(),
-  ...)
+xgboost(
+  data = NULL,
+  label = NULL,
+  missing = NA,
+  weight = NULL,
+  params = list(),
+  nrounds,
+  verbose = 1,
+  print_every_n = 1L,
+  early_stopping_rounds = NULL,
+  maximize = NULL,
+  save_period = NULL,
+  save_name = "xgboost.model",
+  xgb_model = NULL,
+  callbacks = list(),
+  ...
+)
}
\arguments{
\item{params}{the list of parameters.
The complete list of parameters is available at \url{http://xgboost.readthedocs.io/en/latest/parameter.html}.
Below is a shorter summary:

@@ -27,36 +51,37 @@ xgboost(data = NULL, label = NULL, missing = NA, weight = NULL,
\itemize{
\item \code{booster} which booster to use, can be \code{gbtree} or \code{gblinear}. Default: \code{gbtree}.
}

2. Booster Parameters

2.1. Parameter for Tree Booster

\itemize{
\item \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model more robust to overfitting but slower to compute. Default: 0.3
\item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. the larger, the more conservative the algorithm will be.
\item \code{max_depth} maximum depth of a tree. Default: 6
\item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
\item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
\item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
\item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through Xgboost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
\item \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length equals to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
\item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave argument unspecified for no interaction constraints.
}

2.2. Parameter for Linear Booster

\itemize{
\item \code{lambda} L2 regularization term on weights. Default: 0
\item \code{lambda_bias} L2 regularization term on bias. Default: 0
\item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
}

3. Task Parameters

\itemize{
\item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
\itemize{
-\item \code{reg:linear} linear regression (Default).
+\item \code{reg:squarederror} Regression with squared loss (Default).
\item \code{reg:logistic} logistic regression.
\item \code{binary:logistic} logistic regression for binary classification. Output probability.
\item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
@@ -76,31 +101,31 @@ xgboost(data = NULL, label = NULL, missing = NA, weight = NULL,

\item{watchlist}{named list of xgb.DMatrix datasets to use for evaluating model performance.
Metrics specified in either \code{eval_metric} or \code{feval} will be computed for each
of these datasets during each boosting iteration, and stored in the end as a field named
\code{evaluation_log} in the resulting object. When either \code{verbose>=1} or
\code{\link{cb.print.evaluation}} callback is engaged, the performance results are continuously
printed out during the training.
E.g., specifying \code{watchlist=list(validation1=mat1, validation2=mat2)} allows to track
the performance of each round's model on mat1 and mat2.}

\item{obj}{customized objective function. Returns gradient and second order
gradient with given prediction and dtrain.}

-\item{feval}{custimized evaluation function. Returns
+\item{feval}{customized evaluation function. Returns
\code{list(metric='metric-name', value='metric-value')} with given
prediction and dtrain.}

\item{verbose}{If 0, xgboost will stay silent. If 1, it will print information about performance.
If 2, some additional information will be printed out.
Note that setting \code{verbose > 0} automatically engages the
\code{cb.print.evaluation(period=1)} callback function.}

\item{print_every_n}{Print each n-th iteration evaluation messages when \code{verbose>0}.
Default is 1 which means all messages are printed. This parameter is passed to the
\code{\link{cb.print.evaluation}} callback.}

\item{early_stopping_rounds}{If \code{NULL}, the early stopping function is not triggered.
If set to an integer \code{k}, training with a validation set will stop if the performance
doesn't improve for \code{k} rounds.
Setting this parameter engages the \code{\link{cb.early.stop}} callback.}

@@ -115,17 +140,17 @@ This parameter is passed to the \code{\link{cb.early.stop}} callback.}
\item{save_name}{the name or path for periodically saved model file.}

\item{xgb_model}{a previously built model to continue the training from.
Could be either an object of class \code{xgb.Booster}, or its raw data, or the name of a
file with a previously saved model.}

\item{callbacks}{a list of callback functions to perform various task during boosting.
See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
parameters' values. User can provide either existing or their own callback methods in order
to customize the training process.}

\item{...}{other parameters to pass to \code{params}.}

\item{label}{vector of response values. Should not be provided when data is
a local data file name or an \code{xgb.DMatrix}.}

\item{missing}{by default is set to NA, which means that NA values should be considered as 'missing'
@@ -140,23 +165,23 @@ An object of class \code{xgb.Booster} with the following elements:
\item \code{handle} a handle (pointer) to the xgboost model in memory.
\item \code{raw} a cached memory dump of the xgboost model saved as R's \code{raw} type.
\item \code{niter} number of boosting iterations.
-\item \code{evaluation_log} evaluation history storead as a \code{data.table} with the
+\item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
first column corresponding to iteration number and the rest corresponding to evaluation
metrics' values. It is created by the \code{\link{cb.evaluation.log}} callback.
\item \code{call} a function call.
\item \code{params} parameters that were passed to the xgboost library. Note that it does not
capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
-\item \code{callbacks} callback functions that were either automatically assigned or
-explicitely passed.
+\item \code{callbacks} callback functions that were either automatically assigned or
+explicitly passed.
\item \code{best_iteration} iteration number with the best evaluation metric value
(only available with early stopping).
\item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
which could further be used in \code{predict} method
(only available with early stopping).
\item \code{best_score} the best evaluation metric value during early stopping.
(only available with early stopping).
\item \code{feature_names} names of the training dataset features
-(only when comun names were defined in training data).
+(only when column names were defined in training data).
\item \code{nfeatures} number of features in training data.
}
}
@@ -165,20 +190,20 @@ An object of class \code{xgb.Booster} with the following elements:
The \code{xgboost} function is a simpler wrapper for \code{xgb.train}.
}
\details{
These are the training functions for \code{xgboost}.

The \code{xgb.train} interface supports advanced features such as \code{watchlist},
customized objective and evaluation metric functions, therefore it is more flexible
than the \code{xgboost} interface.

Parallelization is automatically enabled if \code{OpenMP} is present.
Number of threads can also be manually specified via \code{nthread} parameter.

The evaluation metric is chosen automatically by Xgboost (according to the objective)
when the \code{eval_metric} parameter is not provided.
User may set one or several \code{eval_metric} parameters.
Note that when using a customized metric, only this single metric can be used.
-The folloiwing is the list of built-in metrics for which Xgboost provides optimized implementation:
+The following is the list of built-in metrics for which Xgboost provides optimized implementation:
\itemize{
\item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
\item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
@@ -210,7 +235,7 @@ dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
watchlist <- list(train = dtrain, eval = dtest)

## A simple xgb.train example:
-param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2,
+param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
              objective = "binary:logistic", eval_metric = "auc")
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)

@@ -231,12 +256,12 @@ evalerror <- function(preds, dtrain) {

# These functions could be used by passing them either:
# as 'objective' and 'eval_metric' parameters in the params list:
-param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2,
+param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
              objective = logregobj, eval_metric = evalerror)
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)

# or through the ... arguments:
-param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2)
+param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2)
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
                 objective = logregobj, eval_metric = evalerror)

@@ -246,7 +271,7 @@ bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,

## An xgb.train example of using variable learning rates at each iteration:
-param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2,
+param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
              objective = "binary:logistic", eval_metric = "auc")
my_etas <- list(eta = c(0.5, 0.1))
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
@@ -257,8 +282,8 @@ bst <- xgb.train(param, dtrain, nrounds = 25, watchlist,
                early_stopping_rounds = 3)

## An 'xgboost' interface example:
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
               max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
               objective = "binary:logistic")
pred <- predict(bst, agaricus.test$data)

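As a hedged illustration of the monotone_constraints parameter documented above (made-up data; assumes the vector form given in the parameter description is accepted by the R interface):

  x <- matrix(rnorm(300), ncol = 3, dimnames = list(NULL, c("f1", "f2", "f3")))
  y <- x[, 1] - x[, 2] + rnorm(100)
  # one constraint per feature: increasing in f1, decreasing in f2, none on f3
  bst <- xgboost(data = x, label = y, nrounds = 10, verbose = 0,
                 objective = "reg:squarederror",
                 monotone_constraints = c(1, -1, 0))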
14  R-package/man/xgb.unserialize.Rd  Normal file
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/xgb.unserialize.R
+\name{xgb.unserialize}
+\alias{xgb.unserialize}
+\title{Load the instance back from \code{\link{xgb.serialize}}}
+\usage{
+xgb.unserialize(buffer)
+}
+\arguments{
+\item{buffer}{the buffer containing booster instance saved by \code{\link{xgb.serialize}}}
+}
+\description{
+Load the instance back from \code{\link{xgb.serialize}}
+}

@@ -10,7 +10,7 @@ The deprecated parameters would be removed in the next release.
\details{
To see all the current deprecated and new parameters, check the \code{xgboost:::depr_par_lut} table.

A deprecation warning is shown when any of the deprecated parameters is used in a call.
An additional warning is shown when there was a partial match to a deprecated parameter
(as R is able to partially match parameter names).
}

@@ -17,8 +17,8 @@ endif
$(foreach v, $(XGB_RFLAGS), $(warning $(v)))

PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
-PKG_CXXFLAGS= @OPENMP_CXXFLAGS@ -pthread
-PKG_LIBS = @OPENMP_CXXFLAGS@ -pthread
+PKG_CXXFLAGS= @OPENMP_CXXFLAGS@ @ENDIAN_FLAG@ -pthread
+PKG_LIBS = @OPENMP_CXXFLAGS@ @OPENMP_LIB@ @ENDIAN_FLAG@ @BACKTRACE_LIB@ -pthread
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o\
$(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\
$(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o

@@ -23,8 +23,12 @@ extern SEXP XGBoosterGetAttrNames_R(SEXP);
extern SEXP XGBoosterGetAttr_R(SEXP, SEXP);
extern SEXP XGBoosterLoadModelFromRaw_R(SEXP, SEXP);
extern SEXP XGBoosterLoadModel_R(SEXP, SEXP);
+extern SEXP XGBoosterSaveJsonConfig_R(SEXP handle);
+extern SEXP XGBoosterLoadJsonConfig_R(SEXP handle, SEXP value);
+extern SEXP XGBoosterSerializeToBuffer_R(SEXP handle);
+extern SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw);
extern SEXP XGBoosterModelToRaw_R(SEXP);
-extern SEXP XGBoosterPredict_R(SEXP, SEXP, SEXP, SEXP);
+extern SEXP XGBoosterPredict_R(SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterSaveModel_R(SEXP, SEXP);
extern SEXP XGBoosterSetAttr_R(SEXP, SEXP, SEXP);
extern SEXP XGBoosterSetParam_R(SEXP, SEXP, SEXP);
@@ -49,8 +53,12 @@ static const R_CallMethodDef CallEntries[] = {
{"XGBoosterGetAttr_R", (DL_FUNC) &XGBoosterGetAttr_R, 2},
{"XGBoosterLoadModelFromRaw_R", (DL_FUNC) &XGBoosterLoadModelFromRaw_R, 2},
{"XGBoosterLoadModel_R", (DL_FUNC) &XGBoosterLoadModel_R, 2},
+{"XGBoosterSaveJsonConfig_R", (DL_FUNC) &XGBoosterSaveJsonConfig_R, 1},
+{"XGBoosterLoadJsonConfig_R", (DL_FUNC) &XGBoosterLoadJsonConfig_R, 2},
+{"XGBoosterSerializeToBuffer_R", (DL_FUNC) &XGBoosterSerializeToBuffer_R, 1},
+{"XGBoosterUnserializeFromBuffer_R", (DL_FUNC) &XGBoosterUnserializeFromBuffer_R, 2},
{"XGBoosterModelToRaw_R", (DL_FUNC) &XGBoosterModelToRaw_R, 1},
-{"XGBoosterPredict_R", (DL_FUNC) &XGBoosterPredict_R, 4},
+{"XGBoosterPredict_R", (DL_FUNC) &XGBoosterPredict_R, 5},
{"XGBoosterSaveModel_R", (DL_FUNC) &XGBoosterSaveModel_R, 2},
{"XGBoosterSetAttr_R", (DL_FUNC) &XGBoosterSetAttr_R, 3},
{"XGBoosterSetParam_R", (DL_FUNC) &XGBoosterSetParam_R, 3},

@@ -136,9 +136,10 @@ SEXP XGDMatrixSliceDMatrix_R(SEXP handle, SEXP idxset) {
    idxvec[i] = INTEGER(idxset)[i] - 1;
  }
  DMatrixHandle res;
-  CHECK_CALL(XGDMatrixSliceDMatrix(R_ExternalPtrAddr(handle),
-                                   BeginPtr(idxvec), len,
-                                   &res));
+  CHECK_CALL(XGDMatrixSliceDMatrixEx(R_ExternalPtrAddr(handle),
+                                     BeginPtr(idxvec), len,
+                                     &res,
+                                     0));
  ret = PROTECT(R_MakeExternalPtr(res, R_NilValue, R_NilValue));
  R_RegisterCFinalizerEx(ret, _DMatrixFinalizer, TRUE);
  R_API_END();

@@ -165,7 +166,9 @@ SEXP XGDMatrixSetInfo_R(SEXP handle, SEXP field, SEXP array) {
    for (int i = 0; i < len; ++i) {
      vec[i] = static_cast<unsigned>(INTEGER(array)[i]);
    }
-    CHECK_CALL(XGDMatrixSetGroup(R_ExternalPtrAddr(handle), BeginPtr(vec), len));
+    CHECK_CALL(XGDMatrixSetUIntInfo(R_ExternalPtrAddr(handle),
+                                    CHAR(asChar(field)),
+                                    BeginPtr(vec), len));
  } else {
    std::vector<float> vec(len);
    #pragma omp parallel for schedule(static)

@@ -173,8 +176,8 @@ SEXP XGDMatrixSetInfo_R(SEXP handle, SEXP field, SEXP array) {
      vec[i] = REAL(array)[i];
    }
    CHECK_CALL(XGDMatrixSetFloatInfo(R_ExternalPtrAddr(handle),
                                     CHAR(asChar(field)),
                                     BeginPtr(vec), len));
  }
  R_API_END();
  return R_NilValue;

@@ -292,24 +295,26 @@ SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evnames) {
    vec_sptr.push_back(vec_names[i].c_str());
  }
  CHECK_CALL(XGBoosterEvalOneIter(R_ExternalPtrAddr(handle),
                                  asInteger(iter),
                                  BeginPtr(vec_dmats),
                                  BeginPtr(vec_sptr),
                                  len, &ret));
  R_API_END();
  return mkString(ret);
}

-SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask, SEXP ntree_limit) {
+SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask,
+                        SEXP ntree_limit, SEXP training) {
  SEXP ret;
  R_API_BEGIN();
  bst_ulong olen;
  const float *res;
  CHECK_CALL(XGBoosterPredict(R_ExternalPtrAddr(handle),
                              R_ExternalPtrAddr(dmat),
                              asInteger(option_mask),
                              asInteger(ntree_limit),
+                             asInteger(training),
                              &olen, &res));
  ret = PROTECT(allocVector(REALSXP, olen));
  for (size_t i = 0; i < olen; ++i) {
    REAL(ret)[i] = res[i];

@@ -333,15 +338,6 @@ SEXP XGBoosterSaveModel_R(SEXP handle, SEXP fname) {
  return R_NilValue;
}

-SEXP XGBoosterLoadModelFromRaw_R(SEXP handle, SEXP raw) {
-  R_API_BEGIN();
-  CHECK_CALL(XGBoosterLoadModelFromBuffer(R_ExternalPtrAddr(handle),
-                                          RAW(raw),
-                                          length(raw)));
-  R_API_END();
-  return R_NilValue;
-}
-
SEXP XGBoosterModelToRaw_R(SEXP handle) {
  SEXP ret;
  R_API_BEGIN();

@@ -357,6 +353,57 @@ SEXP XGBoosterModelToRaw_R(SEXP handle) {
  return ret;
}

+SEXP XGBoosterLoadModelFromRaw_R(SEXP handle, SEXP raw) {
+  R_API_BEGIN();
+  CHECK_CALL(XGBoosterLoadModelFromBuffer(R_ExternalPtrAddr(handle),
+                                          RAW(raw),
+                                          length(raw)));
+  R_API_END();
+  return R_NilValue;
+}
+
+SEXP XGBoosterSaveJsonConfig_R(SEXP handle) {
+  const char* ret;
+  R_API_BEGIN();
+  bst_ulong len {0};
+  CHECK_CALL(XGBoosterSaveJsonConfig(R_ExternalPtrAddr(handle),
+                                     &len,
+                                     &ret));
+  R_API_END();
+  return mkString(ret);
+}
+
+SEXP XGBoosterLoadJsonConfig_R(SEXP handle, SEXP value) {
+  R_API_BEGIN();
+  XGBoosterLoadJsonConfig(R_ExternalPtrAddr(handle), CHAR(asChar(value)));
+  R_API_END();
+  return R_NilValue;
+}
+
+SEXP XGBoosterSerializeToBuffer_R(SEXP handle) {
+  SEXP ret;
+  R_API_BEGIN();
+  bst_ulong out_len;
+  const char *raw;
+  CHECK_CALL(XGBoosterSerializeToBuffer(R_ExternalPtrAddr(handle), &out_len, &raw));
+  ret = PROTECT(allocVector(RAWSXP, out_len));
+  if (out_len != 0) {
+    memcpy(RAW(ret), raw, out_len);
+  }
+  R_API_END();
+  UNPROTECT(1);
+  return ret;
+}
+
+SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw) {
+  R_API_BEGIN();
+  XGBoosterUnserializeFromBuffer(R_ExternalPtrAddr(handle),
+                                 RAW(raw),
+                                 length(raw));
+  R_API_END();
+  return R_NilValue;
+}
+
SEXP XGBoosterDumpModel_R(SEXP handle, SEXP fmap, SEXP with_stats, SEXP dump_format) {
  SEXP out;
  R_API_BEGIN();

@@ -1,6 +1,6 @@
/*!
 * Copyright 2014 (c) by Contributors
- * \file xgboost_wrapper_R.h
+ * \file xgboost_R.h
 * \author Tianqi Chen
 * \brief R wrapper of xgboost
 */

@@ -148,8 +148,10 @@ XGB_DLL SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evnames);
 * \param dmat data matrix
 * \param option_mask output_margin:1 predict_leaf:2
 * \param ntree_limit limit number of trees used in prediction
+ * \param training Whether the prediction value is used for training.
 */
-XGB_DLL SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask, SEXP ntree_limit);
+XGB_DLL SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask,
+                                SEXP ntree_limit, SEXP training);
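At the R level this new argument surfaces as a training flag on predict(); a sketch consistent with the DART test added later in this changeset (bst is assumed to be a fitted DART booster, d its training matrix):

  p_infer <- predict(bst, newdata = d)                  # deterministic inference
  p_train <- predict(bst, newdata = d, training = TRUE) # prediction as used during training;
                                                        # for DART this can differ (dropout)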
/*!
 * \brief load model from existing file
 * \param handle handle
@@ -177,9 +179,39 @@ XGB_DLL SEXP XGBoosterLoadModelFromRaw_R(SEXP handle, SEXP raw);
 * \brief save model into R's raw array
 * \param handle handle
 * \return raw array
 */
XGB_DLL SEXP XGBoosterModelToRaw_R(SEXP handle);
+
+/*!
+ * \brief Save internal parameters as a JSON string
+ * \param handle handle
+ * \return JSON string
+ */
+XGB_DLL SEXP XGBoosterSaveJsonConfig_R(SEXP handle);
+
+/*!
+ * \brief Load the JSON string returned by XGBoosterSaveJsonConfig_R
+ * \param handle handle
+ * \param value JSON string
+ * \return R_NilValue
+ */
+XGB_DLL SEXP XGBoosterLoadJsonConfig_R(SEXP handle, SEXP value);
+
+/*!
+ * \brief Memory snapshot based serialization method. Saves everything states
+ * into buffer.
+ * \param handle handle to booster
+ */
+XGB_DLL SEXP XGBoosterSerializeToBuffer_R(SEXP handle);
+
+/*!
+ * \brief Memory snapshot based serialization method. Loads the buffer returned
+ * from `XGBoosterSerializeToBuffer'.
+ * \param handle handle to booster
+ * \return raw byte array
+ */
+XGB_DLL SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw);
+
/*!
 * \brief dump model into a string
 * \param handle handle

@@ -3,7 +3,7 @@
// to change behavior of libxgboost

#include <xgboost/logging.h>
-#include "src/common/random.h"
+#include "../../src/common/random.h"
#include "./xgboost_R.h"

// redirect the messages to R's console.

@@ -27,7 +27,7 @@ test_that("train and predict binary classification", {
pred <- predict(bst, test$data)
expect_length(pred, 1611)

pred1 <- predict(bst, train$data, ntreelimit = 1)
expect_length(pred1, 6513)
err_pred1 <- sum((pred1 > 0.5) != train$label)/length(train$label)
@@ -35,6 +35,87 @@ test_that("train and predict binary classification", {
expect_lt(abs(err_pred1 - err_log), 10e-6)
})

test_that("parameter validation works", {
|
||||
p <- list(foo = "bar")
|
||||
nrounds = 1
|
||||
set.seed(1994)
|
||||
|
||||
d <- cbind(
|
||||
x1 = rnorm(10),
|
||||
x2 = rnorm(10),
|
||||
x3 = rnorm(10))
|
||||
y <- d[,"x1"] + d[,"x2"]^2 +
|
||||
ifelse(d[,"x3"] > .5, d[,"x3"]^2, 2^d[,"x3"]) +
|
||||
rnorm(10)
|
||||
dtrain <- xgb.DMatrix(data=d, info = list(label=y))
|
||||
|
||||
correct <- function() {
|
||||
params <- list(max_depth = 2, booster = "dart",
|
||||
rate_drop = 0.5, one_drop = TRUE,
|
||||
objective = "reg:squarederror")
|
||||
xgb.train(params = params, data = dtrain, nrounds = nrounds)
|
||||
}
|
||||
expect_silent(correct())
|
||||
incorrect <- function() {
|
||||
params <- list(max_depth = 2, booster = "dart",
|
||||
rate_drop = 0.5, one_drop = TRUE,
|
||||
objective = "reg:squarederror",
|
||||
foo = "bar", bar = "foo")
|
||||
output <- capture.output(
|
||||
xgb.train(params = params, data = dtrain, nrounds = nrounds))
|
||||
print(output)
|
||||
}
|
||||
expect_output(incorrect(), "bar, foo")
|
||||
})
|
||||
|
||||
|
||||
test_that("dart prediction works", {
|
||||
nrounds = 32
|
||||
set.seed(1994)
|
||||
|
||||
d <- cbind(
|
||||
x1 = rnorm(100),
|
||||
x2 = rnorm(100),
|
||||
x3 = rnorm(100))
|
||||
y <- d[,"x1"] + d[,"x2"]^2 +
|
||||
ifelse(d[,"x3"] > .5, d[,"x3"]^2, 2^d[,"x3"]) +
|
||||
rnorm(100)
|
||||
|
||||
set.seed(1994)
|
||||
booster_by_xgboost <- xgboost(data = d, label = y, max_depth = 2, booster = "dart",
|
||||
rate_drop = 0.5, one_drop = TRUE,
|
||||
eta = 1, nthread = 2, nrounds = nrounds, objective = "reg:squarederror")
|
||||
pred_by_xgboost_0 <- predict(booster_by_xgboost, newdata = d, ntreelimit = 0)
|
||||
pred_by_xgboost_1 <- predict(booster_by_xgboost, newdata = d, ntreelimit = nrounds)
|
||||
expect_true(all(matrix(pred_by_xgboost_0, byrow=TRUE) == matrix(pred_by_xgboost_1, byrow=TRUE)))
|
||||
|
||||
pred_by_xgboost_2 <- predict(booster_by_xgboost, newdata = d, training = TRUE)
|
||||
expect_false(all(matrix(pred_by_xgboost_0, byrow=TRUE) == matrix(pred_by_xgboost_2, byrow=TRUE)))
|
||||
|
||||
set.seed(1994)
|
||||
dtrain <- xgb.DMatrix(data=d, info = list(label=y))
|
||||
booster_by_train <- xgb.train( params = list(
|
||||
booster = "dart",
|
||||
max_depth = 2,
|
||||
eta = 1,
|
||||
rate_drop = 0.5,
|
||||
one_drop = TRUE,
|
||||
nthread = 1,
|
||||
tree_method= "exact",
|
||||
objective = "reg:squarederror"
|
||||
),
|
||||
data = dtrain,
|
||||
nrounds = nrounds
|
||||
)
|
||||
pred_by_train_0 <- predict(booster_by_train, newdata = dtrain, ntreelimit = 0)
|
||||
pred_by_train_1 <- predict(booster_by_train, newdata = dtrain, ntreelimit = nrounds)
|
||||
pred_by_train_2 <- predict(booster_by_train, newdata = dtrain, training = TRUE)
|
||||
|
||||
expect_true(all(matrix(pred_by_train_0, byrow=TRUE) == matrix(pred_by_xgboost_0, byrow=TRUE)))
|
||||
expect_true(all(matrix(pred_by_train_1, byrow=TRUE) == matrix(pred_by_xgboost_1, byrow=TRUE)))
|
||||
expect_true(all(matrix(pred_by_train_2, byrow=TRUE) == matrix(pred_by_xgboost_2, byrow=TRUE)))
|
||||
})
|
||||
|
||||
test_that("train and predict softprob", {
|
||||
lb <- as.numeric(iris$Species) - 1
|
||||
set.seed(11)
|
||||
@@ -74,7 +155,7 @@ test_that("train and predict softmax", {
expect_false(is.null(bst$evaluation_log))
expect_lt(bst$evaluation_log[, min(train_merror)], 0.025)
expect_equal(bst$niter * 3, xgb.ntree(bst))

pred <- predict(bst, as.matrix(iris[, -5]))
expect_length(pred, nrow(iris))
err <- sum(pred != lb)/length(lb)
@@ -90,12 +171,12 @@ test_that("train and predict RF", {
              num_parallel_tree = 20, subsample = 0.6, colsample_bytree = 0.1)
expect_equal(bst$niter, 1)
expect_equal(xgb.ntree(bst), 20)

pred <- predict(bst, train$data)
pred_err <- sum((pred > 0.5) != lb)/length(lb)
expect_lt(abs(bst$evaluation_log[1, train_error] - pred_err), 10e-6)
#expect_lt(pred_err, 0.03)

pred <- predict(bst, train$data, ntreelimit = 20)
pred_err_20 <- sum((pred > 0.5) != lb)/length(lb)
expect_equal(pred_err_20, pred_err)
@@ -171,6 +252,21 @@ test_that("training continuation works", {
expect_equal(dim(bst2$evaluation_log), c(2, 2))
})

+test_that("model serialization works", {
+  out_path <- "model_serialization"
+  dtrain <- xgb.DMatrix(train$data, label = train$label)
+  watchlist = list(train=dtrain)
+  param <- list(objective = "binary:logistic")
+  booster <- xgb.train(param, dtrain, nrounds = 4, watchlist)
+  raw <- xgb.serialize(booster)
+  saveRDS(raw, out_path)
+  raw <- readRDS(out_path)
+
+  loaded <- xgb.unserialize(raw)
+  raw_from_loaded <- xgb.serialize(loaded)
+  expect_equal(raw, raw_from_loaded)
+  file.remove(out_path)
+})
+
test_that("xgb.cv works", {
|
||||
set.seed(11)
|
||||
@@ -182,7 +278,7 @@ test_that("xgb.cv works", {
expect_is(cv, 'xgb.cv.synchronous')
expect_false(is.null(cv$evaluation_log))
expect_lt(cv$evaluation_log[, min(test_error_mean)], 0.03)
-expect_lt(cv$evaluation_log[, min(test_error_std)], 0.004)
+expect_lt(cv$evaluation_log[, min(test_error_std)], 0.008)
expect_equal(cv$niter, 2)
expect_false(is.null(cv$folds) && is.list(cv$folds))
expect_length(cv$folds, 5)
@@ -191,13 +287,27 @@ test_that("xgb.cv works", {
expect_false(is.null(cv$call))
})

test_that("xgb.cv works with stratified folds", {
|
||||
dtrain <- xgb.DMatrix(train$data, label = train$label)
|
||||
set.seed(314159)
|
||||
cv <- xgb.cv(data = dtrain, max_depth = 2, nfold = 5,
|
||||
eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic",
|
||||
verbose=TRUE, stratified = FALSE)
|
||||
set.seed(314159)
|
||||
cv2 <- xgb.cv(data = dtrain, max_depth = 2, nfold = 5,
|
||||
eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic",
|
||||
verbose=TRUE, stratified = TRUE)
|
||||
# Stratified folds should result in a different evaluation logs
|
||||
expect_true(all(cv$evaluation_log[, test_error_mean] != cv2$evaluation_log[, test_error_mean]))
|
||||
})
|
||||
|
||||
test_that("train and predict with non-strict classes", {
|
||||
# standard dense matrix input
|
||||
train_dense <- as.matrix(train$data)
|
||||
bst <- xgboost(data = train_dense, label = train$label, max_depth = 2,
|
||||
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 0)
|
||||
pr0 <- predict(bst, train_dense)
|
||||
|
||||
|
||||
# dense matrix-like input of non-matrix class
|
||||
class(train_dense) <- 'shmatrix'
|
||||
expect_true(is.matrix(train_dense))
|
||||
@@ -207,7 +317,7 @@ test_that("train and predict with non-strict classes", {
  , regexp = NA)
expect_error(pr <- predict(bst, train_dense), regexp = NA)
expect_equal(pr0, pr)

# dense matrix-like input of non-matrix class with some inheritance
class(train_dense) <- c('pphmatrix','shmatrix')
expect_true(is.matrix(train_dense))
@@ -217,7 +327,7 @@ test_that("train and predict with non-strict classes", {
  , regexp = NA)
expect_error(pr <- predict(bst, train_dense), regexp = NA)
expect_equal(pr0, pr)

# when someone inherits from xgb.Booster, it should still be possible to use it as xgb.Booster
class(bst) <- c('super.Booster', 'xgb.Booster')
expect_error(pr <- predict(bst, train_dense), regexp = NA)
@@ -247,18 +357,28 @@ test_that("colsample_bytree works", {
test_y <- as.numeric(rowSums(test_x) > 0)
colnames(train_x) <- paste0("Feature_", sprintf("%03d", 1:100))
colnames(test_x) <- paste0("Feature_", sprintf("%03d", 1:100))
dtrain <- xgb.DMatrix(train_x, label = train_y)
dtest <- xgb.DMatrix(test_x, label = test_y)
watchlist <- list(train = dtrain, eval = dtest)
-# Use colsample_bytree = 0.01, so that roughly one out of 100 features is
-# chosen for each tree
-param <- list(max_depth = 2, eta = 0, silent = 1, nthread = 2,
+## Use colsample_bytree = 0.01, so that roughly one out of 100 features is chosen for
+## each tree
+param <- list(max_depth = 2, eta = 0, nthread = 2,
              colsample_bytree = 0.01, objective = "binary:logistic",
              eval_metric = "auc")
set.seed(2)
bst <- xgb.train(param, dtrain, nrounds = 100, watchlist, verbose = 0)
xgb.importance(model = bst)
+# If colsample_bytree works properly, a variety of features should be used
+# in the 100 trees
+expect_gte(nrow(xgb.importance(model = bst)), 30)
})

+test_that("Configuration works", {
+  bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
+                 eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic",
+                 eval_metric = 'error', eval_metric = 'auc', eval_metric = "logloss")
+  config <- xgb.config(bst)
+  xgb.config(bst) <- config
+  reloaded_config <- xgb.config(bst)
+  expect_equal(config, reloaded_config);
+})

@@ -30,16 +30,16 @@ param <- list(objective = "binary:logistic", max_depth = 2, nthread = 2)

test_that("cb.print.evaluation works as expected", {

  bst_evaluation <- c('train-auc'=0.9, 'test-auc'=0.8)
  bst_evaluation_err <- NULL
  begin_iteration <- 1
  end_iteration <- 7

  f0 <- cb.print.evaluation(period=0)
  f1 <- cb.print.evaluation(period=1)
  f5 <- cb.print.evaluation(period=5)

  expect_false(is.null(attr(f1, 'call')))
  expect_equal(attr(f1, 'name'), 'cb.print.evaluation')
@@ -48,15 +48,15 @@ test_that("cb.print.evaluation works as expected", {
  expect_output(f1(), "\\[1\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
  expect_output(f5(), "\\[1\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
  expect_null(f1())

  iteration <- 2
  expect_output(f1(), "\\[2\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
  expect_silent(f5())

  iteration <- 7
  expect_output(f1(), "\\[7\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
  expect_output(f5(), "\\[7\\]\ttrain-auc:0.900000\ttest-auc:0.800000")

  bst_evaluation_err <- c('train-auc'=0.1, 'test-auc'=0.2)
  expect_output(f1(), "\\[7\\]\ttrain-auc:0.900000\\+0.100000\ttest-auc:0.800000\\+0.200000")
})
@@ -65,40 +65,40 @@ test_that("cb.evaluation.log works as expected", {
  bst_evaluation <- c('train-auc'=0.9, 'test-auc'=0.8)
  bst_evaluation_err <- NULL

  evaluation_log <- list()
  f <- cb.evaluation.log()

  expect_false(is.null(attr(f, 'call')))
  expect_equal(attr(f, 'name'), 'cb.evaluation.log')

  iteration <- 1
  expect_silent(f())
  expect_equal(evaluation_log,
               list(c(iter=1, bst_evaluation)))
  iteration <- 2
  expect_silent(f())
  expect_equal(evaluation_log,
               list(c(iter=1, bst_evaluation), c(iter=2, bst_evaluation)))
  expect_silent(f(finalize = TRUE))
  expect_equal(evaluation_log,
               data.table(iter=1:2, train_auc=c(0.9,0.9), test_auc=c(0.8,0.8)))

  bst_evaluation_err <- c('train-auc'=0.1, 'test-auc'=0.2)
  evaluation_log <- list()
  f <- cb.evaluation.log()

  iteration <- 1
  expect_silent(f())
  expect_equal(evaluation_log,
               list(c(iter=1, c(bst_evaluation, bst_evaluation_err))))
  iteration <- 2
  expect_silent(f())
  expect_equal(evaluation_log,
               list(c(iter=1, c(bst_evaluation, bst_evaluation_err)),
                    c(iter=2, c(bst_evaluation, bst_evaluation_err))))
  expect_silent(f(finalize = TRUE))
  expect_equal(evaluation_log,
               data.table(iter=1:2,
                          train_auc_mean=c(0.9,0.9), train_auc_std=c(0.1,0.1),
                          test_auc_mean=c(0.8,0.8), test_auc_std=c(0.2,0.2)))
@@ -130,18 +130,18 @@ test_that("cb.reset.parameters works as expected", {
  bst1 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0,
                    callbacks = list(cb.reset.parameters(my_par)))
  expect_false(is.null(bst1$evaluation_log$train_error))
  expect_equal(bst0$evaluation_log$train_error,
               bst1$evaluation_log$train_error)

  # same eta but re-set via a function in the callback
  set.seed(111)
  my_par <- list(eta = function(itr, itr_end) 0.9)
  bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0,
                    callbacks = list(cb.reset.parameters(my_par)))
  expect_false(is.null(bst2$evaluation_log$train_error))
  expect_equal(bst0$evaluation_log$train_error,
               bst2$evaluation_log$train_error)

  # different eta re-set as a vector parameter in the callback
  set.seed(111)
  my_par <- list(eta = c(0.6, 0.5))
@@ -149,7 +149,7 @@ test_that("cb.reset.parameters works as expected", {
                    callbacks = list(cb.reset.parameters(my_par)))
  expect_false(is.null(bst3$evaluation_log$train_error))
  expect_false(all(bst0$evaluation_log$train_error == bst3$evaluation_log$train_error))

  # resetting multiple parameters at the same time runs with no error
  my_par <- list(eta = c(1., 0.5), gamma = c(1, 2), max_depth = c(4, 8))
  expect_error(
@@ -175,7 +175,7 @@ test_that("cb.reset.parameters works as expected", {
test_that("cb.save.model works as expected", {
  files <- c('xgboost_01.model', 'xgboost_02.model', 'xgboost.model')
  for (f in files) if (file.exists(f)) file.remove(f)

  bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 1, verbose = 0,
                   save_period = 1, save_name = "xgboost_%02d.model")
  expect_true(file.exists('xgboost_01.model'))
@@ -184,6 +184,9 @@ test_that("cb.save.model works as expected", {
  expect_equal(xgb.ntree(b1), 1)
  b2 <- xgb.load('xgboost_02.model')
  expect_equal(xgb.ntree(b2), 2)
+
+  xgb.config(b2) <- xgb.config(bst)
+  expect_equal(xgb.config(bst), xgb.config(b2))
  expect_equal(bst$raw, b2$raw)

  # save_period = 0 saves the last iteration's model
@@ -191,8 +194,9 @@ test_that("cb.save.model works as expected", {
                   save_period = 0)
  expect_true(file.exists('xgboost.model'))
  b2 <- xgb.load('xgboost.model')
+  xgb.config(b2) <- xgb.config(bst)
  expect_equal(bst$raw, b2$raw)

  for (f in files) if (file.exists(f)) file.remove(f)
})
@@ -211,13 +215,22 @@ test_that("early stopping xgb.train works", {
  err_pred <- err(ltest, pred)
  err_log <- bst$evaluation_log[bst$best_iteration, test_error]
  expect_equal(err_log, err_pred, tolerance = 5e-6)

  set.seed(11)
  expect_silent(
    bst0 <- xgb.train(param, dtrain, nrounds = 20, watchlist, eta = 0.3,
                      early_stopping_rounds = 3, maximize = FALSE, verbose = 0)
  )
  expect_equal(bst$evaluation_log, bst0$evaluation_log)
+
+  xgb.save(bst, "model.bin")
+  loaded <- xgb.load("model.bin")
+
+  expect_false(is.null(loaded$best_iteration))
+  expect_equal(loaded$best_iteration, bst$best_ntreelimit)
+  expect_equal(loaded$best_ntreelimit, bst$best_ntreelimit)
+
+  file.remove("model.bin")
})
test_that("early stopping using a specific metric works", {
|
||||
@@ -236,7 +249,7 @@ test_that("early stopping using a specific metric works", {
|
||||
expect_equal(length(pred), 1611)
|
||||
logloss_pred <- sum(-ltest * log(pred) - (1 - ltest) * log(1 - pred)) / length(ltest)
|
||||
logloss_log <- bst$evaluation_log[bst$best_iteration, test_logloss]
|
||||
expect_equal(logloss_log, logloss_pred, tolerance = 5e-6)
|
||||
expect_equal(logloss_log, logloss_pred, tolerance = 1e-5)
|
||||
})
|
||||
|
||||
test_that("early stopping xgb.cv works", {
|
||||
@@ -282,18 +295,19 @@ test_that("prediction in xgb.cv works for gblinear too", {
|
||||
})
|
||||
|
||||
test_that("prediction in early-stopping xgb.cv works", {
|
||||
set.seed(1)
|
||||
set.seed(11)
|
||||
expect_output(
|
||||
cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.1, nrounds = 20,
|
||||
early_stopping_rounds = 5, maximize = FALSE, prediction = TRUE)
|
||||
early_stopping_rounds = 5, maximize = FALSE, stratified = FALSE,
|
||||
prediction = TRUE)
|
||||
, "Stopping. Best iteration")
|
||||
|
||||
|
||||
expect_false(is.null(cv$best_iteration))
|
||||
expect_lt(cv$best_iteration, 19)
|
||||
expect_false(is.null(cv$evaluation_log))
|
||||
expect_false(is.null(cv$pred))
|
||||
expect_length(cv$pred, nrow(train$data))
|
||||
|
||||
|
||||
err_pred <- mean( sapply(cv$folds, function(f) mean(err(ltrain[f], cv$pred[f]))) )
|
||||
err_log <- cv$evaluation_log[cv$best_iteration, test_error_mean]
|
||||
expect_equal(err_pred, err_log, tolerance = 1e-6)
|
||||
|
||||
@@ -31,7 +31,6 @@ num_round <- 2
test_that("custom objective works", {
  bst <- xgb.train(param, dtrain, num_round, watchlist)
  expect_equal(class(bst), "xgb.Booster")
-  expect_equal(length(bst$raw), 1094)
  expect_false(is.null(bst$evaluation_log))
  expect_false(is.null(bst$evaluation_log$eval_error))
  expect_lt(bst$evaluation_log[num_round, eval_error], 0.03)
@@ -45,7 +44,7 @@ test_that("custom objective in CV works", {
})

test_that("custom objective using DMatrix attr works", {

  attr(dtrain, 'label') <- getinfo(dtrain, 'label')

  logregobjattr <- function(preds, dtrain) {
@@ -58,5 +57,21 @@ test_that("custom objective using DMatrix attr works", {
  param$objective = logregobjattr
  bst <- xgb.train(param, dtrain, num_round, watchlist)
  expect_equal(class(bst), "xgb.Booster")
-  expect_equal(length(bst$raw), 1094)
})
+
+test_that("custom objective with multi-class works", {
+  data = as.matrix(iris[, -5])
+  label = as.numeric(iris$Species) - 1
+  dtrain <- xgb.DMatrix(data = data, label = label)
+  nclasses <- 3
+
+  fake_softprob <- function(preds, dtrain) {
+    expect_true(all(matrix(preds) == 0.5))
+    grad <- rnorm(dim(as.matrix(preds))[1])
+    expect_equal(dim(data)[1] * nclasses, dim(as.matrix(preds))[1])
+    hess <- rnorm(dim(as.matrix(preds))[1])
+    return (list(grad = grad, hess = hess))
+  }
+  param$objective = fake_softprob
+  bst <- xgb.train(param, dtrain, 1, num_class=nclasses)
+})

@@ -10,12 +10,12 @@ test_label <- agaricus.test$label[1:100]
test_that("xgb.DMatrix: basic construction", {
# from sparse matrix
dtest1 <- xgb.DMatrix(test_data, label=test_label)

# from dense matrix

# from dense matrix
dtest2 <- xgb.DMatrix(as.matrix(test_data), label=test_label)
expect_equal(getinfo(dtest1, 'label'), getinfo(dtest2, 'label'))
expect_equal(dim(dtest1), dim(dtest2))

#from dense integer matrix
int_data <- as.matrix(test_data)
storage.mode(int_data) <- "integer"
@@ -33,7 +33,7 @@ test_that("xgb.DMatrix: saving, loading", {
expect_output(dtest3 <- xgb.DMatrix(tmp_file, silent = TRUE), NA)
unlink(tmp_file)
expect_equal(getinfo(dtest1, 'label'), getinfo(dtest3, 'label'))

# from a libsvm text file
tmp <- c("0 1:1 2:1","1 3:1","0 1:1")
tmp_file <- 'tmp.libsvm'
@@ -49,7 +49,13 @@ test_that("xgb.DMatrix: getinfo & setinfo", {
expect_true(setinfo(dtest, 'label', test_label))
labels <- getinfo(dtest, 'label')
expect_equal(test_label, getinfo(dtest, 'label'))

expect_true(setinfo(dtest, 'label_lower_bound', test_label))
expect_equal(test_label, getinfo(dtest, 'label_lower_bound'))

expect_true(setinfo(dtest, 'label_upper_bound', test_label))
expect_equal(test_label, getinfo(dtest, 'label_upper_bound'))

expect_true(length(getinfo(dtest, 'weight')) == 0)
expect_true(length(getinfo(dtest, 'base_margin')) == 0)

@@ -57,10 +63,10 @@ test_that("xgb.DMatrix: getinfo & setinfo", {
expect_true(setinfo(dtest, 'base_margin', test_label))
expect_true(setinfo(dtest, 'group', c(50,50)))
expect_error(setinfo(dtest, 'group', test_label))

# providing character values will give a warning
expect_warning( setinfo(dtest, 'weight', rep('a', nrow(test_data))) )
expect_warning(setinfo(dtest, 'weight', rep('a', nrow(test_data))))

# any other label should error
expect_error(setinfo(dtest, 'asdf', test_label))
})
@@ -71,7 +77,7 @@ test_that("xgb.DMatrix: slice, dim", {
dsub1 <- slice(dtest, 1:42)
expect_equal(nrow(dsub1), 42)
expect_equal(ncol(dsub1), ncol(test_data))

dsub2 <- dtest[1:42,]
expect_equal(dim(dtest), dim(test_data))
expect_equal(getinfo(dsub1, 'label'), getinfo(dsub2, 'label'))

@@ -40,7 +40,7 @@ test_that("gblinear works", {
expect_lt(bst$evaluation_log$eval_error[2], ERR_UL)

bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'thrifty',
                 top_n = 50, callbacks = list(cb.gblinear.history(sparse = TRUE)))
                 top_k = 50, callbacks = list(cb.gblinear.history(sparse = TRUE)))
expect_lt(bst$evaluation_log$eval_error[n], ERR_UL)
h <- xgb.gblinear.history(bst)
expect_equal(dim(h), c(n, ncol(dtrain) + 1))

@@ -7,8 +7,8 @@ require(vcd, quietly = TRUE)

float_tolerance = 5e-6

# disable some tests for Win32
win32_flag = .Platform$OS.type == "windows" && .Machine$sizeof.pointer != 8
# disable some tests for 32-bit environment
flag_32bit = .Machine$sizeof.pointer != 8

set.seed(1982)
data(Arthritis)
@@ -44,7 +44,7 @@ mbst.GLM <- xgboost(data = as.matrix(iris[, -5]), label = mlabel, verbose = 0,

test_that("xgb.dump works", {
if (!win32_flag)
if (!flag_32bit)
  expect_length(xgb.dump(bst.Tree), 200)
dump_file = file.path(tempdir(), 'xgb.model.dump')
expect_true(xgb.dump(bst.Tree, dump_file, with_stats = T))
@@ -54,7 +54,7 @@ test_that("xgb.dump works", {
# JSON format
dmp <- xgb.dump(bst.Tree, dump_format = "json")
expect_length(dmp, 1)
if (!win32_flag)
if (!flag_32bit)
  expect_length(grep('nodeid', strsplit(dmp, '\n')[[1]]), 188)
})

@@ -142,6 +142,44 @@ test_that("predict feature contributions works", {
}
})

test_that("SHAPs sum to predictions, with or without DART", {
d <- cbind(
  x1 = rnorm(100),
  x2 = rnorm(100),
  x3 = rnorm(100))
y <- d[,"x1"] + d[,"x2"]^2 +
  ifelse(d[,"x3"] > .5, d[,"x3"]^2, 2^d[,"x3"]) +
  rnorm(100)
nrounds <- 30

for (booster in list("gbtree", "dart")) {
  fit <- xgboost(
    params = c(
      list(
        booster = booster,
        objective = "reg:squarederror",
        eval_metric = "rmse"),
      if (booster == "dart")
        list(rate_drop = .01, one_drop = T)),
    data = d,
    label = y,
    nrounds = nrounds)

  pr <- function(...)
    predict(fit, newdata = d, ...)
  pred <- pr()
  shap <- pr(predcontrib = T)
  shapi <- pr(predinteraction = T)
  tol = 1e-5

  expect_equal(rowSums(shap), pred, tol = tol)
  expect_equal(apply(shapi, 1, sum), pred, tol = tol)
  for (i in 1 : nrow(d))
    for (f in list(rowSums, colSums))
      expect_equal(f(shapi[i,,]), shap[i,], tol = tol)
}
})

test_that("xgb-attribute functionality", {
val <- "my attribute value"
list.val <- list(my_attr=val, a=123, b='ok')
@@ -163,6 +201,7 @@ test_that("xgb-attribute functionality", {
# serializing:
xgb.save(bst.Tree, 'xgb.model')
bst <- xgb.load('xgb.model')
if (file.exists('xgb.model')) file.remove('xgb.model')
expect_equal(xgb.attr(bst, "my_attr"), val)
expect_equal(xgb.attributes(bst), list.ch)
# deletion:
@@ -199,10 +238,12 @@ if (grepl('Windows', Sys.info()[['sysname']]) ||
test_that("xgb.Booster serializing as R object works", {
saveRDS(bst.Tree, 'xgb.model.rds')
bst <- readRDS('xgb.model.rds')
if (file.exists('xgb.model.rds')) file.remove('xgb.model.rds')
dtrain <- xgb.DMatrix(sparse_matrix, label = label)
expect_equal(predict(bst.Tree, dtrain), predict(bst, dtrain), tolerance = float_tolerance)
expect_equal(xgb.dump(bst.Tree), xgb.dump(bst))
xgb.save(bst, 'xgb.model')
if (file.exists('xgb.model')) file.remove('xgb.model')
nil_ptr <- new("externalptr")
class(nil_ptr) <- "xgb.Booster.handle"
expect_true(identical(bst$handle, nil_ptr))
@@ -215,7 +256,7 @@ test_that("xgb.model.dt.tree works with and without feature names", {
names.dt.trees <- c("Tree", "Node", "ID", "Feature", "Split", "Yes", "No", "Missing", "Quality", "Cover")
dt.tree <- xgb.model.dt.tree(feature_names = feature.names, model = bst.Tree)
expect_equal(names.dt.trees, names(dt.tree))
if (!win32_flag)
if (!flag_32bit)
  expect_equal(dim(dt.tree), c(188, 10))
expect_output(str(dt.tree), 'Feature.*\\"Age\\"')

@@ -242,7 +283,7 @@ test_that("xgb.model.dt.tree throws error for gblinear", {

test_that("xgb.importance works with and without feature names", {
importance.Tree <- xgb.importance(feature_names = feature.names, model = bst.Tree)
if (!win32_flag)
if (!flag_32bit)
  expect_equal(dim(importance.Tree), c(7, 4))
expect_equal(colnames(importance.Tree), c("Feature", "Gain", "Cover", "Frequency"))
expect_output(str(importance.Tree), 'Feature.*\\"Age\\"')

@@ -14,25 +14,42 @@ test_that("interaction constraints for regression", {
bst <- xgboost(data = train, label = y, max_depth = 3,
               eta = 0.1, nthread = 2, nrounds = 100, verbose = 0,
               interaction_constraints = list(c(0,1)))

# Set all observations to have the same x3 values then increment
# by the same amount
preds <- lapply(c(1,2,3), function(x){
  tmat <- matrix(c(x1,x2,rep(x,1000)), ncol=3)
  return(predict(bst, tmat))
})

# Check incrementing x3 has the same effect on all observations
# since x3 is constrained to be independent of x1 and x2
# and all observations start off from the same x3 value
diff1 <- preds[[2]] - preds[[1]]
test1 <- all(abs(diff1 - diff1[1]) < 1e-4)

diff2 <- preds[[3]] - preds[[2]]
test2 <- all(abs(diff2 - diff2[1]) < 1e-4)

expect_true({
  test1 & test2
}, "Interaction Contraint Satisfied")

})

test_that("interaction constraints scientific representation", {
rows <- 10
## When number exceeds 1e5, R paste function uses scientific representation.
## See: https://github.com/dmlc/xgboost/issues/5179
cols <- 1e5+10

d <- matrix(rexp(rows, rate=.1), nrow=rows, ncol=cols)
y <- rnorm(rows)

dtrain <- xgb.DMatrix(data=d, info = list(label=y))
inc <- list(c(seq.int(from = 0, to = cols, by = 1)))

with_inc <- xgb.train(data=dtrain, tree_method='hist',
                      interaction_constraints=inc, nrounds=10)
without_inc <- xgb.train(data=dtrain, tree_method='hist', nrounds=10)
expect_equal(xgb.save.raw(with_inc), xgb.save.raw(without_inc))
})

@@ -98,7 +98,7 @@ test_that("SHAP contribution values are not NAN", {
fit <- xgboost(
  verbose = 0,
  params = list(
    objective = "reg:linear",
    objective = "reg:squarederror",
    eval_metric = "rmse"),
  data = as.matrix(subset(d, fold == 2)[, ivs]),
  label = subset(d, fold == 2)$y,

@@ -138,7 +138,7 @@ levels(df[,Treatment])

Next step, we will transform the categorical data to dummy variables.
Several encoding methods exist, e.g., [one-hot encoding](http://en.wikipedia.org/wiki/One-hot) is a common approach.
We will use the [dummy contrast coding](http://www.ats.ucla.edu/stat/r/library/contrast_coding.htm#dummy) which is popular because it producess "full rank" encoding (also see [this blog post by Max Kuhn](http://appliedpredictivemodeling.com/blog/2013/10/23/the-basics-of-encoding-categorical-data-for-predictive-models)).
We will use the [dummy contrast coding](http://www.ats.ucla.edu/stat/r/library/contrast_coding.htm#dummy) which is popular because it produces "full rank" encoding (also see [this blog post by Max Kuhn](http://appliedpredictivemodeling.com/blog/2013/10/23/the-basics-of-encoding-categorical-data-for-predictive-models)).

The purpose is to transform each value of each *categorical* feature into a *binary* feature `{0, 1}`.
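
As a minimal sketch of what this encoding produces (a toy data frame for illustration, not the vignette's Arthritis data; `sparse.model.matrix()` from the Matrix package is the same helper the vignette relies on):

```{r}
require(Matrix)
df_toy <- data.frame(Treatment = factor(c("Placebo", "Treated", "Placebo")),
                     Age = c(32, 57, 46))
# "- 1" drops the intercept, so the factor expands into one 0/1 column per level
sparse.model.matrix(~ . - 1, data = df_toy)
```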
@@ -268,7 +268,7 @@ c2 <- chisq.test(df$Age, output_vector)
print(c2)
```

Pearson correlation between Age and illness disapearing is **`r round(c2$statistic, 2 )`**.
Pearson correlation between Age and illness disappearing is **`r round(c2$statistic, 2 )`**.

```{r, warning=FALSE, message=FALSE}
c2 <- chisq.test(df$AgeDiscret, output_vector)

@@ -313,7 +313,7 @@ Until now, all the learnings we have performed were based on boosting trees. **X
bst <- xgb.train(data=dtrain, booster = "gblinear", max_depth=2, nthread = 2, nrounds=2, watchlist=watchlist, eval_metric = "error", eval_metric = "logloss", objective = "binary:logistic")
```

In this specific case, *linear boosting* gets sligtly better performance metrics than decision trees based algorithm.
In this specific case, *linear boosting* gets slightly better performance metrics than decision trees based algorithm.

In simple cases, it will happen because there is nothing better than a linear algorithm to catch a linear link. However, decision trees are much better to catch a non linear link between predictors and outcome. Because there is no silver bullet, we advise you to check both algorithms with your own datasets to have an idea of what to use.

@@ -410,7 +410,7 @@ In some very specific cases, like when you want to pilot **XGBoost** from `caret`
```{r saveLoadRBinVectorModel, message=F, warning=F}
# save model to R's raw vector
rawVec <- xgb.save.raw(bst)
rawVec <- xgb.serialize(bst)

# print class
print(class(rawVec))

189  R-package/vignettes/xgboostfromJSON.Rmd  Normal file
@@ -0,0 +1,189 @@
---
title: "XGBoost from JSON"
output:
  rmarkdown::html_vignette:
    number_sections: yes
    toc: yes
author: Roland Stevenson
vignette: >
  %\VignetteIndexEntry{XGBoost from JSON}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
---

XGBoost from JSON
=================

## Introduction

The purpose of this vignette is to show you how to correctly load and work with an **XGBoost** model that has been dumped to JSON. **XGBoost** internally converts all data to [32-bit floats](https://en.wikipedia.org/wiki/Single-precision_floating-point_format), and the values dumped to JSON are decimal representations of these values. When working with a model that has been parsed from a JSON file, care must be taken to correctly treat:

- the input data, which should be converted to 32-bit floats
- any 32-bit floats that were stored in JSON as decimal representations
- any calculations, which must be done with 32-bit mathematical operators

## Setup

For the purpose of this tutorial we will load the xgboost, jsonlite, and float packages. We'll also set `digits=22` in our options in case we want to inspect many digits of our results.

```{r}
require(xgboost)
require(jsonlite)
require(float)
options(digits=22)
```

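As a quick illustration of why this matters (a minimal sketch using the float package's `fl()` cast, which the lessons below rely on), casting a double that has no exact 32-bit representation silently changes its value:

```{r}
# 20180131 cannot be represented exactly in 32 bits; fl() rounds it up
fl(20180131)
as.numeric(fl(20180131))
```
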
We will create a toy binary logistic model based on the example first provided [here](https://github.com/dmlc/xgboost/issues/3960), so that we can easily understand the structure of the dumped JSON model object. This will allow us to understand where discrepancies can occur and how they should be handled.

```{r}
dates <- c(20180130, 20180130, 20180130,
           20180130, 20180130, 20180130,
           20180131, 20180131, 20180131,
           20180131, 20180131, 20180131,
           20180131, 20180131, 20180131,
           20180134, 20180134, 20180134)

labels <- c(1, 1, 1,
            1, 1, 1,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0)

data <- data.frame(dates = dates, labels=labels)

bst <- xgboost(
  data = as.matrix(data$dates),
  label = labels,
  nthread = 2,
  nrounds = 1,
  objective = "binary:logistic",
  missing = NA,
  max_depth = 1
)
```

## Comparing results
We will now dump the model to JSON and attempt to illustrate a variety of issues that can arise, and how to properly deal with them.

First let's dump the model to JSON:

```{r}
bst_json <- xgb.dump(bst, with_stats = FALSE, dump_format='json')
bst_from_json <- fromJSON(bst_json, simplifyDataFrame = FALSE)
node <- bst_from_json[[1]]
cat(bst_json)
```

The tree JSON shown by the above code-chunk tells us that if the data is less than 20180132, the tree will output the value in the first leaf. Otherwise it will output the value in the second leaf. Let's try to reproduce this manually with the data we have and confirm that it matches the model predictions we've already calculated.

```{r}
bst_preds_logodds <- predict(bst,as.matrix(data$dates), outputmargin = TRUE)

# calculate the logodds values using the JSON representation
bst_from_json_logodds <- ifelse(data$dates<node$split_condition,
                                node$children[[1]]$leaf,
                                node$children[[2]]$leaf)

bst_preds_logodds
bst_from_json_logodds

# test that values are equal
bst_preds_logodds == bst_from_json_logodds
```

None are equal. What happened?

At this stage two things happened:

- input data was not converted to 32-bit floats
- the JSON variables were not converted to 32-bit floats

### Lesson 1: All data is 32-bit floats

> When working with imported JSON, all data must be converted to 32-bit floats

To explain this, let's repeat the comparison and round to two decimals:

```{r}
round(bst_preds_logodds,2) == round(bst_from_json_logodds,2)
```

If we round to two decimals, we see that only the elements related to data values of `20180131` don't agree. If we convert the data to floats, they agree:

```{r}
# now convert the dates to floats first
bst_from_json_logodds <- ifelse(fl(data$dates)<node$split_condition,
                                node$children[[1]]$leaf,
                                node$children[[2]]$leaf)

# test that values are equal
round(bst_preds_logodds,2) == round(bst_from_json_logodds,2)
```

What's the lesson? If we are going to work with an imported JSON model, any data must be converted to floats first. In this case, since '20180131' cannot be represented as a 32-bit float, it is rounded up to 20180132, as shown here:

```{r}
fl(20180131)
```

### Lesson 2: JSON parameters are 32-bit floats

> All JSON parameters stored as floats must be converted to floats.

Let's now say we do care about numbers past the first two decimals.

```{r}
# test that values are equal
bst_preds_logodds == bst_from_json_logodds
```

None are exactly equal. What happened? Although we've converted the data to 32-bit floats, we also need to convert the JSON parameters to 32-bit floats. Let's do this:

```{r}
# now convert the dates to floats first
bst_from_json_logodds <- ifelse(fl(data$dates)<fl(node$split_condition),
                                as.numeric(fl(node$children[[1]]$leaf)),
                                as.numeric(fl(node$children[[2]]$leaf)))

# test that values are equal
bst_preds_logodds == bst_from_json_logodds
```

All equal. What's the lesson? If we are going to work with an imported JSON model, any JSON parameters that were stored as floats must also be converted to floats first.

### Lesson 3: Use 32-bit math

> Always use 32-bit numbers and operators

We were able to get the log-odds to agree, so now let's manually calculate the sigmoid of the log-odds. This should agree with the xgboost predictions.

```{r}
bst_preds <- predict(bst,as.matrix(data$dates))

# calculate the predictions casting doubles to floats
bst_from_json_preds <- ifelse(fl(data$dates)<fl(node$split_condition),
                              as.numeric(1/(1+exp(-1*fl(node$children[[1]]$leaf)))),
                              as.numeric(1/(1+exp(-1*fl(node$children[[2]]$leaf))))
)

# test that values are equal
bst_preds == bst_from_json_preds
```

None are exactly equal again. What is going on here? Well, since we are using the value `1` in the calculations, we have introduced a double into the calculation. Because of this, all float values are promoted to 64-bit doubles and the 64-bit version of the exponentiation operator `exp` is also used. On the other hand, xgboost uses the 32-bit version of the exponentiation operator in its [sigmoid function](https://github.com/dmlc/xgboost/blob/54980b8959680a0da06a3fc0ec776e47c8cbb0a1/src/common/math.h#L25-L27).

How do we fix this? We have to ensure we use the correct datatypes everywhere and the correct operators. If we use only floats, the float library that we have loaded will ensure the 32-bit float exponentiation operator is applied.

```{r}
# calculate the predictions casting doubles to floats
bst_from_json_preds <- ifelse(fl(data$dates)<fl(node$split_condition),
                              as.numeric(fl(1)/(fl(1)+exp(fl(-1)*fl(node$children[[1]]$leaf)))),
                              as.numeric(fl(1)/(fl(1)+exp(fl(-1)*fl(node$children[[2]]$leaf))))
)

# test that values are equal
bst_preds == bst_from_json_preds
```

All equal. What's the lesson? We have to ensure that all calculations are done with 32-bit floating point operators if we want to reproduce the results that we see with xgboost.
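
Putting the three lessons together, here is a minimal sketch of a scorer for this single-tree `binary:logistic` model (an editor's illustration, not part of the original vignette; it assumes `node`, `data`, and `bst` as defined above):

```{r}
# hypothetical helper combining Lessons 1-3 in one place
score_json_tree <- function(x, node) {
  # Lesson 1: cast the data; Lesson 2: cast the JSON parameters;
  # Lesson 3: keep every operand a float so exp() stays 32-bit
  ifelse(fl(x) < fl(node$split_condition),
         as.numeric(fl(1)/(fl(1)+exp(fl(-1)*fl(node$children[[1]]$leaf)))),
         as.numeric(fl(1)/(fl(1)+exp(fl(-1)*fl(node$children[[2]]$leaf)))))
}

# should be TRUE: matches xgboost's own predictions bit-for-bit
all(score_json_tree(data$dates, node) == predict(bst, as.matrix(data$dates)))
```
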
41  README.md
@@ -1,12 +1,13 @@
<img src=https://raw.githubusercontent.com/dmlc/dmlc.github.io/master/img/logo-m/xgboost.png width=135/> eXtreme Gradient Boosting
===========
[](https://xgboost-ci.net/blue/organizations/jenkins/xgboost/activity)
[](https://travis-ci.org/dmlc/xgboost)
[](https://ci.appveyor.com/project/tqchen/xgboost)
[](https://xgboost.readthedocs.org)
[](./LICENSE)
[](http://cran.r-project.org/web/packages/xgboost)
[](https://pypi.python.org/pypi/xgboost/)
[](https://optuna.org)

[Community](https://xgboost.ai/community) |
[Documentation](https://xgboost.readthedocs.org) |
@@ -17,18 +18,50 @@
XGBoost is an optimized distributed gradient boosting library designed to be highly ***efficient***, ***flexible*** and ***portable***.
It implements machine learning algorithms under the [Gradient Boosting](https://en.wikipedia.org/wiki/Gradient_boosting) framework.
XGBoost provides a parallel tree boosting (also known as GBDT, GBM) that solve many data science problems in a fast and accurate way.
The same code runs on major distributed environment (Hadoop, SGE, MPI) and can solve problems beyond billions of examples.
The same code runs on major distributed environment (Kubernetes, Hadoop, SGE, MPI, Dask) and can solve problems beyond billions of examples.

License
-------
© Contributors, 2016. Licensed under an [Apache-2](https://github.com/dmlc/xgboost/blob/master/LICENSE) license.
© Contributors, 2019. Licensed under an [Apache-2](https://github.com/dmlc/xgboost/blob/master/LICENSE) license.

Contribute to XGBoost
---------------------
XGBoost has been developed and used by a group of active community members. Your help is very valuable to make the package better for everyone.
Checkout the [Community Page](https://xgboost.ai/community)
Checkout the [Community Page](https://xgboost.ai/community).

Reference
---------
- Tianqi Chen and Carlos Guestrin. [XGBoost: A Scalable Tree Boosting System](http://arxiv.org/abs/1603.02754). In 22nd SIGKDD Conference on Knowledge Discovery and Data Mining, 2016
- XGBoost originates from research project at University of Washington.

Sponsors
--------
Become a sponsor and get a logo here. See details at [Sponsoring the XGBoost Project](https://xgboost.ai/sponsors). The funds are used to defray the cost of continuous integration and testing infrastructure (https://xgboost-ci.net).

## Open Source Collective sponsors
[](#backers) [](#sponsors)

### Sponsors
[[Become a sponsor](https://opencollective.com/xgboost#sponsor)]

<!--<a href="https://opencollective.com/xgboost/sponsor/0/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/0/avatar.svg"></a>-->
<a href="https://www.nvidia.com/en-us/" target="_blank"><img src="https://raw.githubusercontent.com/xgboost-ai/xgboost-ai.github.io/master/images/sponsors/nvidia.jpg" alt="NVIDIA" width="72" height="72"></a>
<a href="https://opencollective.com/xgboost/sponsor/1/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/xgboost/sponsor/2/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/xgboost/sponsor/3/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/xgboost/sponsor/4/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/xgboost/sponsor/5/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/xgboost/sponsor/6/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/xgboost/sponsor/7/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/xgboost/sponsor/8/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/xgboost/sponsor/9/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/9/avatar.svg"></a>

### Backers
[[Become a backer](https://opencollective.com/xgboost#backer)]

<a href="https://opencollective.com/xgboost#backers" target="_blank"><img src="https://opencollective.com/xgboost/backers.svg?width=890"></a>

## Other sponsors
The sponsors in this list are donating cloud hours in lieu of cash donation.

<a href="https://aws.amazon.com/" target="_blank"><img src="https://raw.githubusercontent.com/xgboost-ai/xgboost-ai.github.io/master/images/sponsors/aws.png" alt="Amazon Web Services" width="72" height="72"></a>

@@ -1,5 +1,5 @@
/*!
 * Copyright 2015 by Contributors.
 * Copyright 2015-2019 by Contributors.
 * \brief XGBoost Amalgamation.
 * This offers an alternative way to compile the entire library from this single file.
 *
@@ -14,6 +14,7 @@
#include "../src/metric/elementwise_metric.cc"
#include "../src/metric/multiclass_metric.cc"
#include "../src/metric/rank_metric.cc"
#include "../src/metric/survival_metric.cc"

// objectives
#include "../src/objective/objective.cc"
@@ -21,29 +22,32 @@
#include "../src/objective/multiclass_obj.cc"
#include "../src/objective/rank_obj.cc"
#include "../src/objective/hinge.cc"
#include "../src/objective/aft_obj.cc"

// gbms
#include "../src/gbm/gbm.cc"
#include "../src/gbm/gbtree.cc"
#include "../src/gbm/gbtree_model.cc"
#include "../src/gbm/gblinear.cc"
#include "../src/gbm/gblinear_model.cc"

// data
#include "../src/data/data.cc"
#include "../src/data/simple_csr_source.cc"
#include "../src/data/simple_dmatrix.cc"
#include "../src/data/sparse_page_raw_format.cc"
#include "../src/data/ellpack_page.cc"
#include "../src/data/ellpack_page_source.cc"

// prediction
#include "../src/predictor/predictor.cc"
#include "../src/predictor/cpu_predictor.cc"

#if DMLC_ENABLE_STD_THREAD
#include "../src/data/sparse_page_source.cc"
#include "../src/data/sparse_page_dmatrix.cc"
#include "../src/data/sparse_page_writer.cc"
#endif

// tress
// trees
#include "../src/tree/param.cc"
#include "../src/tree/split_evaluator.cc"
#include "../src/tree/tree_model.cc"
#include "../src/tree/tree_updater.cc"
@@ -54,6 +58,7 @@
#include "../src/tree/updater_sync.cc"
#include "../src/tree/updater_histmaker.cc"
#include "../src/tree/updater_skmaker.cc"
#include "../src/tree/constraints.cc"

// linear
#include "../src/linear/linear_updater.cc"
@@ -64,8 +69,14 @@
#include "../src/learner.cc"
#include "../src/logging.cc"
#include "../src/common/common.cc"
#include "../src/common/timer.cc"
#include "../src/common/host_device_vector.cc"
#include "../src/common/hist_util.cc"
#include "../src/common/json.cc"
#include "../src/common/io.cc"
#include "../src/common/survival_util.cc"
#include "../src/common/probability_distribution.cc"
#include "../src/common/version.cc"

// c_api
#include "../src/c_api/c_api.cc"

23  appveyor.yml
@@ -2,10 +2,6 @@ environment:
  R_ARCH: x64
  USE_RTOOLS: true
matrix:
  - target: msvc
    ver: 2013
    generator: "Visual Studio 12 2013 Win64"
    configuration: Release
  - target: msvc
    ver: 2015
    generator: "Visual Studio 14 2015 Win64"
@@ -36,15 +32,21 @@ install:
  - set PATH=C:\msys64\mingw64\bin;C:\msys64\usr\bin;%PATH%
  - gcc -v
  - ls -l C:\
  # Miniconda2
  - set PATH=;C:\Miniconda-x64;C:\Miniconda-x64\Scripts;%PATH%
  # Miniconda3
  - call C:\Miniconda3-x64\Scripts\activate.bat
  - conda info
  - where python
  - python --version
  # do python build for mingw and one of the msvc jobs
  - set DO_PYTHON=off
  - if /i "%target%" == "mingw" set DO_PYTHON=on
  - if /i "%target%_%ver%_%configuration%" == "msvc_2015_Release" set DO_PYTHON=on
  - if /i "%DO_PYTHON%" == "on" conda install -y numpy scipy pandas matplotlib pytest scikit-learn graphviz python-graphviz
  - if /i "%DO_PYTHON%" == "on" (
      conda config --set always_yes true &&
      conda update -q conda &&
      conda install -y numpy scipy pandas matplotlib pytest scikit-learn graphviz python-graphviz
    )
  - set PATH=C:\Miniconda3-x64\Library\bin\graphviz;%PATH%
  # R: based on https://github.com/krlmlr/r-appveyor
  - ps: |
      if($env:target -eq 'rmingw' -or $env:target -eq 'rmsvc') {
@@ -52,10 +54,10 @@ install:
        Invoke-WebRequest https://raw.githubusercontent.com/krlmlr/r-appveyor/master/scripts/appveyor-tool.ps1 -OutFile "$Env:TEMP\appveyor-tool.ps1"
        Import-Module "$Env:TEMP\appveyor-tool.ps1"
        Bootstrap
        $DEPS = "c('data.table','magrittr','stringi','ggplot2','DiagrammeR','Ckmeans.1d.dp','vcd','testthat','lintr','knitr','rmarkdown')"
        cmd.exe /c "R.exe -q -e ""install.packages($DEPS, repos='$CRAN', type='both')"" 2>&1"
        $BINARY_DEPS = "c('XML','igraph')"
        cmd.exe /c "R.exe -q -e ""install.packages($BINARY_DEPS, repos='$CRAN', type='win.binary')"" 2>&1"
        $DEPS = "c('data.table','magrittr','stringi','ggplot2','DiagrammeR','Ckmeans.1d.dp','vcd','testthat','lintr','knitr','rmarkdown')"
        cmd.exe /c "R.exe -q -e ""install.packages($DEPS, repos='$CRAN', type='both')"" 2>&1"
      }

build_script:
@@ -92,7 +94,7 @@ build_script:
      cmake .. -G"%generator%" -DCMAKE_CONFIGURATION_TYPES="Release" -DR_LIB=ON &&
      cmake --build . --target install --config Release
    )
  - if /i "%target%" == "jvm" cd jvm-packages && mvn test -pl :xgboost4j
  - if /i "%target%" == "jvm" cd jvm-packages && mvn test -pl :xgboost4j_2.12

test_script:
  - cd %APPVEYOR_BUILD_FOLDER%
@@ -100,6 +102,7 @@ test_script:
  # mingw R package: run the R check (which includes unit tests), and also keep the built binary package
  - if /i "%target%" == "rmingw" (
      set _R_CHECK_CRAN_INCOMING_=FALSE&&
      set _R_CHECK_FORCE_SUGGESTS_=FALSE&&
      R.exe CMD check xgboost*.tar.gz --no-manual --no-build-vignettes --as-cran --install-args=--build
    )
  # MSVC R package: run only the unit tests

51  build.sh
@@ -1,51 +0,0 @@
#!/bin/bash
# This is a simple script to make xgboost in MAC and Linux
# Basically, it first try to make with OpenMP, if fails, disable OpenMP and make it again.
# This will automatically make xgboost for MAC users who don't have OpenMP support.
# In most cases, type make will give what you want.

# See additional instruction in doc/build.md
set -e

if make; then
    echo "Successfully build multi-thread xgboost"
else

    not_ready=0

    if [[ ! -e ./rabit/Makefile ]]; then
        echo ""
        echo "Please init the rabit submodule:"
        echo "git submodule update --init --recursive -- rabit"
        not_ready=1
    fi

    if [[ ! -e ./dmlc-core/Makefile ]]; then
        echo ""
        echo "Please init the dmlc-core submodule:"
        echo "git submodule update --init --recursive -- dmlc-core"
        not_ready=1
    fi

    if [[ "${not_ready}" == "1" ]]; then
        echo ""
        echo "Please fix the errors above and retry the build, or reclone the repository with:"
        echo "git clone --recursive https://github.com/dmlc/xgboost.git"
        echo ""
        exit 1
    fi

    echo "-----------------------------"
    echo "Building multi-thread xgboost failed"
    echo "Start to build single-thread xgboost"
    make clean_all
    make config=make/minimum.mk
    if [ $? -eq 0 ] ;then
        echo "Successfully build single-thread xgboost"
        echo "If you want multi-threaded version"
        echo "See additional instructions in doc/build.md"
    else
        echo "Failed to build single-thread xgboost"
    fi
fi
16  cmake/Doc.cmake  Normal file
@@ -0,0 +1,16 @@
function (run_doxygen)
  find_package(Doxygen REQUIRED)

  if (NOT DOXYGEN_DOT_FOUND)
    message(FATAL_ERROR "Command `dot` not found. Please install graphviz.")
  endif (NOT DOXYGEN_DOT_FOUND)

  configure_file(
    ${xgboost_SOURCE_DIR}/doc/Doxyfile.in
    ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY)
  add_custom_target( doc_doxygen ALL
    COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
    COMMENT "Generate C APIs documentation."
    VERBATIM)
endfunction (run_doxygen)
22  cmake/FindPrefetchIntrinsics.cmake  Normal file
@@ -0,0 +1,22 @@
function (find_prefetch_intrinsics)
  include(CheckCXXSourceCompiles)
  check_cxx_source_compiles("
    #include <xmmintrin.h>
    int main() {
      char data = 0;
      const char* address = &data;
      _mm_prefetch(address, _MM_HINT_NTA);
      return 0;
    }
  " XGBOOST_MM_PREFETCH_PRESENT)
  check_cxx_source_compiles("
    int main() {
      char data = 0;
      const char* address = &data;
      __builtin_prefetch(address, 0, 0);
      return 0;
    }
  " XGBOOST_BUILTIN_PREFETCH_PRESENT)
  set(XGBOOST_MM_PREFETCH_PRESENT ${XGBOOST_MM_PREFETCH_PRESENT} PARENT_SCOPE)
  set(XGBOOST_BUILTIN_PREFETCH_PRESENT ${XGBOOST_BUILTIN_PREFETCH_PRESENT} PARENT_SCOPE)
endfunction (find_prefetch_intrinsics)
1  cmake/Python_version.in  Normal file
@@ -0,0 +1 @@
@xgboost_VERSION_MAJOR@.@xgboost_VERSION_MINOR@.@xgboost_VERSION_PATCH@
@@ -4,24 +4,29 @@
# enable_sanitizers("address;leak")

# Add flags
macro(enable_sanitizer santizer)
  if(${santizer} MATCHES "address")
macro(enable_sanitizer sanitizer)
  if(${sanitizer} MATCHES "address")
    find_package(ASan REQUIRED)
    set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=address")
    link_libraries(${ASan_LIBRARY})

  elseif(${santizer} MATCHES "thread")
  elseif(${sanitizer} MATCHES "thread")
    find_package(TSan REQUIRED)
    set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=thread")
    link_libraries(${TSan_LIBRARY})

  elseif(${santizer} MATCHES "leak")
  elseif(${sanitizer} MATCHES "leak")
    find_package(LSan REQUIRED)
    set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=leak")
    link_libraries(${LSan_LIBRARY})

  elseif(${sanitizer} MATCHES "undefined")
    find_package(UBSan REQUIRED)
    set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=undefined -fno-sanitize-recover=undefined")
    link_libraries(${UBSan_LIBRARY})

  else()
    message(FATAL_ERROR "Santizer ${santizer} not supported.")
    message(FATAL_ERROR "Santizer ${sanitizer} not supported.")
  endif()
endmacro()