Compare commits
532 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7f542d2198 | ||
|
|
c8d32102fb | ||
|
|
fe8d72b50b | ||
|
|
adc795929a | ||
|
|
472ded549d | ||
|
|
c67163250e | ||
|
|
4240daed4e | ||
|
|
cb3ed404cf | ||
|
|
f7105fa44f | ||
|
|
43974939f4 | ||
|
|
b513dcd352 | ||
|
|
ef19480eda | ||
|
|
0c7455276d | ||
|
|
1b3947d929 | ||
|
|
3eb1279bbf | ||
|
|
40680368cf | ||
|
|
44469a0ca9 | ||
|
|
b4f952bd22 | ||
|
|
aa9a68010b | ||
|
|
1891cc766d | ||
|
|
5d4c24a1fc | ||
|
|
9c56480c61 | ||
|
|
2a071cebc5 | ||
|
|
ff1342b252 | ||
|
|
e526871f0a | ||
|
|
5199b86126 | ||
|
|
808f61081b | ||
|
|
0184f2e9f7 | ||
|
|
a73e25e15f | ||
|
|
f100b8d878 | ||
|
|
7b65698187 | ||
|
|
8cbcc53ccb | ||
|
|
87ebfc1315 | ||
|
|
9559f81377 | ||
|
|
9049c7c653 | ||
|
|
ee287808fb | ||
|
|
77cfbff5a7 | ||
|
|
ebc86a3afa | ||
|
|
2b9a62a806 | ||
|
|
2d95b9a4b6 | ||
|
|
7b17e76c5b | ||
|
|
04db125699 | ||
|
|
018df6004e | ||
|
|
139ccc9902 | ||
|
|
d55489af14 | ||
|
|
6848d0426f | ||
|
|
61286c6e8f | ||
|
|
ee81ba8e1f | ||
|
|
9b0af6e882 | ||
|
|
f3d7877802 | ||
|
|
ced3660f60 | ||
|
|
298ebe68ac | ||
|
|
73b1bd2789 | ||
|
|
0202e04a8e | ||
|
|
1d0ca49761 | ||
|
|
a4b929385e | ||
|
|
c8bdb652c4 | ||
|
|
3d04a8cc97 | ||
|
|
b915788708 | ||
|
|
74f545bde3 | ||
|
|
e521bb6f83 | ||
|
|
37fdfa03f8 | ||
|
|
bc9d88259f | ||
|
|
27b3646d29 | ||
|
|
63ffd2f686 | ||
|
|
2fdb34ed2e | ||
|
|
3136185bc5 | ||
|
|
5aa007d7b2 | ||
|
|
ad4a1c732c | ||
|
|
208ab3b1ff | ||
|
|
c7cc657a4d | ||
|
|
e089e16e3d | ||
|
|
979f74d51a | ||
|
|
1cb6bcc382 | ||
|
|
b1789b0346 | ||
|
|
38763aa4fa | ||
|
|
608ebbe444 | ||
|
|
7ef5b78003 | ||
|
|
2dcb62ddfb | ||
|
|
64af1ecf86 | ||
|
|
f3d8536702 | ||
|
|
df9bdbbcb9 | ||
|
|
f5e13dcb9b | ||
|
|
f0ca53d9ec | ||
|
|
dcde433402 | ||
|
|
e3c34c79be | ||
|
|
f2277e7106 | ||
|
|
64f4361b47 | ||
|
|
761e938dbe | ||
|
|
b9dbfe0931 | ||
|
|
9f52e834dc | ||
|
|
d667ea9335 | ||
|
|
04c640f562 | ||
|
|
a4f5c86276 | ||
|
|
4d2779663e | ||
|
|
98b051269b | ||
|
|
e67388fb8f | ||
|
|
0afcc55d98 | ||
|
|
97abcc7ee2 | ||
|
|
886bf93ba4 | ||
|
|
2abe69d774 | ||
|
|
f4e7b707c9 | ||
|
|
1733c9e8f7 | ||
|
|
374648c21a | ||
|
|
7663de956c | ||
|
|
807a244517 | ||
|
|
b29b8c2f34 | ||
|
|
a37691428f | ||
|
|
6fac40cfb4 | ||
|
|
755a606201 | ||
|
|
6ec7e300bd | ||
|
|
96cd7ec2bb | ||
|
|
da6e74f7bb | ||
|
|
ac457c56a2 | ||
|
|
f24be2efb4 | ||
|
|
5b1715d97c | ||
|
|
310fe60b35 | ||
|
|
5620322a48 | ||
|
|
7e477a2adb | ||
|
|
95295ce026 | ||
|
|
741fbf47c4 | ||
|
|
4771bb0d41 | ||
|
|
010b8f1428 | ||
|
|
86ed01c4bb | ||
|
|
31030a8d3a | ||
|
|
ae536756ae | ||
|
|
9fc681001a | ||
|
|
a78d4e7aa8 | ||
|
|
60748b2071 | ||
|
|
185e3f1916 | ||
|
|
7e72a12871 | ||
|
|
2ebdec8aa6 | ||
|
|
b61d534472 | ||
|
|
a9053aff83 | ||
|
|
0e0849fa1e | ||
|
|
3d46bd0fa5 | ||
|
|
05d4751540 | ||
|
|
08ff510e48 | ||
|
|
f7487e4c2a | ||
|
|
5b4f28cc46 | ||
|
|
4bbf062ed3 | ||
|
|
c2cce4fac3 | ||
|
|
6c9b6f11da | ||
|
|
aefb1e5c2f | ||
|
|
80977182c5 | ||
|
|
095de3bf5f | ||
|
|
4ab1df5fe6 | ||
|
|
7e24a8d245 | ||
|
|
d30e63a0a5 | ||
|
|
2fa8b359e0 | ||
|
|
82ee2317e8 | ||
|
|
b8433c455a | ||
|
|
562bb0ae31 | ||
|
|
0b89cd1dfa | ||
|
|
a40b72d127 | ||
|
|
c7416002e9 | ||
|
|
fc8c9b0521 | ||
|
|
277e25797b | ||
|
|
22209b7b95 | ||
|
|
57106a3459 | ||
|
|
006eb80578 | ||
|
|
d669ea1eaa | ||
|
|
0e0955a6d8 | ||
|
|
5374f52531 | ||
|
|
125bcec62e | ||
|
|
512f037e55 | ||
|
|
c89bcc4de5 | ||
|
|
6a5e805886 | ||
|
|
0fc7dcfe6c | ||
|
|
f90e7f9aa8 | ||
|
|
a5f232feb8 | ||
|
|
52d44e07fe | ||
|
|
c0fbeff0ab | ||
|
|
2aed0ae230 | ||
|
|
733ed24dd9 | ||
|
|
0184eb5d02 | ||
|
|
830e73901d | ||
|
|
516955564b | ||
|
|
38ab79f889 | ||
|
|
41227d1933 | ||
|
|
6e6216ad67 | ||
|
|
fba298fecb | ||
|
|
3fa2ceb193 | ||
|
|
9700776597 | ||
|
|
ab357dd41c | ||
|
|
c358d95c44 | ||
|
|
c81238b5c4 | ||
|
|
b9b57f2289 | ||
|
|
53d4272c2a | ||
|
|
7b5cbcc846 | ||
|
|
ef9af33a00 | ||
|
|
c0ffe65f5c | ||
|
|
c5b229632d | ||
|
|
198f3a6c4a | ||
|
|
19f9fd5de9 | ||
|
|
f22b1c0348 | ||
|
|
602484e19f | ||
|
|
3e2c472944 | ||
|
|
851b5b3808 | ||
|
|
2a4df8e29f | ||
|
|
9c469b3844 | ||
|
|
97eece6ea0 | ||
|
|
b68de018b8 | ||
|
|
4fe0d8203e | ||
|
|
6edddd7966 | ||
|
|
e930a8e54f | ||
|
|
cb9a80ca90 | ||
|
|
166def9f75 | ||
|
|
b43f08bea5 | ||
|
|
d2e1e4d5b4 | ||
|
|
9b9e298ff2 | ||
|
|
7b74b1b64d | ||
|
|
59bc1ef330 | ||
|
|
2758c5acea | ||
|
|
d5c386ae24 | ||
|
|
001aaaee5f | ||
|
|
d4e0a30582 | ||
|
|
f0064c07ab | ||
|
|
ad1192e8a3 | ||
|
|
b45258ce66 | ||
|
|
4ef6d216b9 | ||
|
|
8ac8fbef29 | ||
|
|
1595e3f57b | ||
|
|
01b0c9047c | ||
|
|
3c506b076e | ||
|
|
5544a730f1 | ||
|
|
6323ef94ad | ||
|
|
9975c533c7 | ||
|
|
2973416f2e | ||
|
|
61f764946f | ||
|
|
beb7b295a8 | ||
|
|
3e339d9557 | ||
|
|
7a388cbf8b | ||
|
|
cd1526d3b1 | ||
|
|
30204b50fe | ||
|
|
d333918f5e | ||
|
|
1aaf4a679d | ||
|
|
562d9ae963 | ||
|
|
d9a47794a5 | ||
|
|
b7a1f22d24 | ||
|
|
4df246191f | ||
|
|
96bf91725b | ||
|
|
4e9fad74eb | ||
|
|
986fee6022 | ||
|
|
45876bf41b | ||
|
|
a30176907f | ||
|
|
923e6c86ba | ||
|
|
63ec95623d | ||
|
|
4d6590be3c | ||
|
|
abffbe014e | ||
|
|
dd01f7c4f5 | ||
|
|
cd3a3f99da | ||
|
|
5b2f805e74 | ||
|
|
8bdf15120a | ||
|
|
fe2de6f415 | ||
|
|
1f98f18cb8 | ||
|
|
2cff735126 | ||
|
|
9fa29ad753 | ||
|
|
30e1cb4e9e | ||
|
|
77fc28427d | ||
|
|
9494950ee7 | ||
|
|
6125521caf | ||
|
|
fdf27a5b82 | ||
|
|
221e163185 | ||
|
|
0c50f8417a | ||
|
|
ae05948e32 | ||
|
|
570374effe | ||
|
|
e94f85f0e4 | ||
|
|
6757654337 | ||
|
|
ba1d848767 | ||
|
|
7ae11c9284 | ||
|
|
90f683b25b | ||
|
|
a22368d210 | ||
|
|
66f9951d70 | ||
|
|
c5719cc457 | ||
|
|
a2042b685a | ||
|
|
4591039eba | ||
|
|
4e9965cb9d | ||
|
|
2f1319f273 | ||
|
|
9683fd433e | ||
|
|
da21ac0cc2 | ||
|
|
59ae42a179 | ||
|
|
afa99e6d9d | ||
|
|
3f2fe25a32 | ||
|
|
23a10c8339 | ||
|
|
399fabed49 | ||
|
|
c2a3902ba3 | ||
|
|
ea44417754 | ||
|
|
fbbae3386a | ||
|
|
dd60fc23e6 | ||
|
|
b48f895027 | ||
|
|
fed665ae8a | ||
|
|
6e16900711 | ||
|
|
c589eff941 | ||
|
|
a3fedbeaa8 | ||
|
|
972f693eaf | ||
|
|
3f7e5d9c47 | ||
|
|
09b90d9329 | ||
|
|
55e645c5f5 | ||
|
|
8ddd2715ee | ||
|
|
0ce300e73a | ||
|
|
278562db13 | ||
|
|
5a567ec249 | ||
|
|
515f5f5c47 | ||
|
|
adcd8ea7c6 | ||
|
|
cf2400036e | ||
|
|
3e930e4f2d | ||
|
|
a9ec2dd295 | ||
|
|
df2cdaca50 | ||
|
|
c6f2a7e186 | ||
|
|
e7d17ec4f4 | ||
|
|
b5f7cbfadf | ||
|
|
be0f346ec9 | ||
|
|
d16d9a9988 | ||
|
|
6ff994126a | ||
|
|
18e4fc3690 | ||
|
|
8da4907e89 | ||
|
|
ade3f30237 | ||
|
|
b511638ca1 | ||
|
|
eabcc0e210 | ||
|
|
5de7e12704 | ||
|
|
8d1098a983 | ||
|
|
9252b686ae | ||
|
|
2be85fc62a | ||
|
|
feb6ae3e18 | ||
|
|
54980b8959 | ||
|
|
c1e4a0f2c6 | ||
|
|
bfddc2c42c | ||
|
|
17df5fd296 | ||
|
|
4c74336384 | ||
|
|
ba98e0cdf2 | ||
|
|
eaab364a63 | ||
|
|
797ba8e72d | ||
|
|
253fdd8a42 | ||
|
|
91c513a0c1 | ||
|
|
5e582b0fa7 | ||
|
|
146e83f3b3 | ||
|
|
5dfb27fb2d | ||
|
|
77c03538b0 | ||
|
|
37dc82c3ff | ||
|
|
ea850ecd20 | ||
|
|
995698b0cb | ||
|
|
2d875ec019 | ||
|
|
503cc42f48 | ||
|
|
2c61f02add | ||
|
|
bbe0dbd7ec | ||
|
|
5e97de6a41 | ||
|
|
65db8d0626 | ||
|
|
711397d645 | ||
|
|
207f058711 | ||
|
|
84d992babc | ||
|
|
be7bc07ca3 | ||
|
|
edae664afb | ||
|
|
f4521bf6aa | ||
|
|
3078b5944d | ||
|
|
a448a8320c | ||
|
|
956e73f183 | ||
|
|
5c2575535f | ||
|
|
81c1cd40ca | ||
|
|
b72eab3e07 | ||
|
|
360f25ec27 | ||
|
|
c7bc739ed2 | ||
|
|
60a9af567c | ||
|
|
9080bba815 | ||
|
|
2e052e74b6 | ||
|
|
1ca5698221 | ||
|
|
70be1e38c2 | ||
|
|
37c75aac41 | ||
|
|
82dca3c108 | ||
|
|
2f7087eba1 | ||
|
|
680a1b36f3 | ||
|
|
ad4de0d718 | ||
|
|
7ea5b772fb | ||
|
|
7aed8f3d48 | ||
|
|
8c8021dfa7 | ||
|
|
3f312e30db | ||
|
|
c85181dd8a | ||
|
|
6d5b34d824 | ||
|
|
5aa42b5f11 | ||
|
|
263e2038e9 | ||
|
|
b374e0a7ab | ||
|
|
45c89a6792 | ||
|
|
8eab966998 | ||
|
|
09bd9e68cf | ||
|
|
00465d243d | ||
|
|
7814183199 | ||
|
|
359ed9c5bc | ||
|
|
29a1356669 | ||
|
|
cf8d5b9b76 | ||
|
|
fdcae024e7 | ||
|
|
7b1b11390a | ||
|
|
5465b73e7c | ||
|
|
4352fcdb15 | ||
|
|
b833b642ec | ||
|
|
99a714be64 | ||
|
|
7b9043cf71 | ||
|
|
259fb809e9 | ||
|
|
a36c3ed4f4 | ||
|
|
6fb4c5efef | ||
|
|
4eeeded7d1 | ||
|
|
f83e62dca5 | ||
|
|
331cd3e4f7 | ||
|
|
617f572c0f | ||
|
|
20845e8ccf | ||
|
|
224786f67f | ||
|
|
9837b09b20 | ||
|
|
0944360416 | ||
|
|
ac3d03089b | ||
|
|
28bd6cde22 | ||
|
|
00ea7b83c9 | ||
|
|
67c38805a1 | ||
|
|
5f34078fba | ||
|
|
3f83dcd502 | ||
|
|
0c1d5f1120 | ||
|
|
92b7577c62 | ||
|
|
9fefa2128d | ||
|
|
7ea5675679 | ||
|
|
74009afcac | ||
|
|
1b7405f688 | ||
|
|
dc2add96c5 | ||
|
|
8e0a08fbcf | ||
|
|
54793544a2 | ||
|
|
2aaae2e7bb | ||
|
|
cecbe0cf71 | ||
|
|
c8c472f39a | ||
|
|
1dac5e2410 | ||
|
|
a985a99cf0 | ||
|
|
0ff84d950e | ||
|
|
60f05352c5 | ||
|
|
549c8d6ae9 | ||
|
|
e1240413c9 | ||
|
|
2e618af743 | ||
|
|
71a604fae3 | ||
|
|
1fe874e58a | ||
|
|
ff2d4c99fa | ||
|
|
754fe8142b | ||
|
|
37ddfd7d6e | ||
|
|
d506a8bc63 | ||
|
|
c18a3660fa | ||
|
|
3be1b9ae30 | ||
|
|
9b917cda4f | ||
|
|
99a290489c | ||
|
|
3320a52192 | ||
|
|
ba584e5e9f | ||
|
|
2a9b085bc8 | ||
|
|
f8ca2960fc | ||
|
|
05243642bb | ||
|
|
017c97b8ce | ||
|
|
325b16bccd | ||
|
|
ae3bb9c2d5 | ||
|
|
8905df4a18 | ||
|
|
1088dff42c | ||
|
|
7a652a8c64 | ||
|
|
59f868bc60 | ||
|
|
0d0ce32908 | ||
|
|
a60e224484 | ||
|
|
e0094d996e | ||
|
|
a1c35cadf0 | ||
|
|
4fac9874e0 | ||
|
|
301cef4638 | ||
|
|
1fc37e4749 | ||
|
|
0f8af85f64 | ||
|
|
5f151c5cf3 | ||
|
|
dade7c3aff | ||
|
|
773ddbcfcb | ||
|
|
e290ec9a80 | ||
|
|
6a569b8cd9 | ||
|
|
55bc149efb | ||
|
|
431c850c03 | ||
|
|
1f022929f4 | ||
|
|
f368d0de2b | ||
|
|
15fe2f1e7c | ||
|
|
be948df23f | ||
|
|
9897b5042f | ||
|
|
7735252925 | ||
|
|
85939c6a6e | ||
|
|
f75a21af25 | ||
|
|
84c99f86f4 | ||
|
|
c055a32609 | ||
|
|
c8c7b9649c | ||
|
|
a2dc929598 | ||
|
|
42bf90eb8f | ||
|
|
e0a279114e | ||
|
|
fd722d60cd | ||
|
|
53f695acf2 | ||
|
|
3d81c48d3f | ||
|
|
84a3af8dc0 | ||
|
|
4be5edaf92 | ||
|
|
93f9ce9ef9 | ||
|
|
9af6b689d6 | ||
|
|
4f26053b09 | ||
|
|
48dddfd635 | ||
|
|
a9d684db18 | ||
|
|
c5f92df475 | ||
|
|
c5130e487a | ||
|
|
9c4ff50e83 | ||
|
|
42cac4a30b | ||
|
|
f9302a56fb | ||
|
|
7d3149a21f | ||
|
|
86aac98e54 | ||
|
|
e9ab4a1c6c | ||
|
|
dc2bfbfde1 | ||
|
|
7ebe8dcf5b | ||
|
|
973fc8b1ff | ||
|
|
93f63324e6 | ||
|
|
aa48b7e903 | ||
|
|
0cd326c1bc | ||
|
|
3a150742c7 | ||
|
|
0a0d4239d3 | ||
|
|
fe999bf968 | ||
|
|
2ea0f887c1 | ||
|
|
c76d993681 | ||
|
|
a2a8954659 | ||
|
|
7af0946ac1 | ||
|
|
143475b27b | ||
|
|
926eb651fe | ||
|
|
daf77ca7b7 | ||
|
|
97984f4890 | ||
|
|
0ddb8a7661 | ||
|
|
d810e6dec9 | ||
|
|
be0bb7dd90 | ||
|
|
e38d5a6831 | ||
|
|
828d75714d | ||
|
|
ad6e0d55f1 | ||
|
|
19ee0a3579 | ||
|
|
2b045aa805 | ||
|
|
d9642cf757 | ||
|
|
1bf4083dc6 | ||
|
|
20d5abf919 | ||
|
|
f1275f52c1 | ||
|
|
1698fe64bb | ||
|
|
91cc14ea70 |
@@ -1,4 +1,4 @@
|
|||||||
Checks: 'modernize-*,-modernize-make-*,-modernize-raw-string-literal,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
|
Checks: 'modernize-*,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,-modernize-avoid-c-arrays,-modernize-use-trailing-return-type,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
|
||||||
CheckOptions:
|
CheckOptions:
|
||||||
- { key: readability-identifier-naming.ClassCase, value: CamelCase }
|
- { key: readability-identifier-naming.ClassCase, value: CamelCase }
|
||||||
- { key: readability-identifier-naming.StructCase, value: CamelCase }
|
- { key: readability-identifier-naming.StructCase, value: CamelCase }
|
||||||
@@ -6,8 +6,8 @@ CheckOptions:
|
|||||||
- { key: readability-identifier-naming.TypedefCase, value: CamelCase }
|
- { key: readability-identifier-naming.TypedefCase, value: CamelCase }
|
||||||
- { key: readability-identifier-naming.TypeTemplateParameterCase, value: CamelCase }
|
- { key: readability-identifier-naming.TypeTemplateParameterCase, value: CamelCase }
|
||||||
- { key: readability-identifier-naming.MemberCase, value: lower_case }
|
- { key: readability-identifier-naming.MemberCase, value: lower_case }
|
||||||
- { key: readability-identifier-naming.PrivateMemberSuffix, value: '_' }
|
- { key: readability-identifier-naming.PrivateMemberSuffix, value: '_' }
|
||||||
- { key: readability-identifier-naming.ProtectedMemberSuffix, value: '_' }
|
- { key: readability-identifier-naming.ProtectedMemberSuffix, value: '_' }
|
||||||
- { key: readability-identifier-naming.EnumCase, value: CamelCase }
|
- { key: readability-identifier-naming.EnumCase, value: CamelCase }
|
||||||
- { key: readability-identifier-naming.EnumConstant, value: CamelCase }
|
- { key: readability-identifier-naming.EnumConstant, value: CamelCase }
|
||||||
- { key: readability-identifier-naming.EnumConstantPrefix, value: k }
|
- { key: readability-identifier-naming.EnumConstantPrefix, value: k }
|
||||||
|
|||||||
18
.gitignore
vendored
18
.gitignore
vendored
@@ -17,7 +17,7 @@
|
|||||||
*.tar.gz
|
*.tar.gz
|
||||||
*conf
|
*conf
|
||||||
*buffer
|
*buffer
|
||||||
*model
|
*.model
|
||||||
*pyc
|
*pyc
|
||||||
*.train
|
*.train
|
||||||
*.test
|
*.test
|
||||||
@@ -69,10 +69,8 @@ config.mk
|
|||||||
/xgboost
|
/xgboost
|
||||||
*.data
|
*.data
|
||||||
build_plugin
|
build_plugin
|
||||||
.idea
|
|
||||||
recommonmark/
|
recommonmark/
|
||||||
tags
|
tags
|
||||||
*.iml
|
|
||||||
*.class
|
*.class
|
||||||
target
|
target
|
||||||
*.swp
|
*.swp
|
||||||
@@ -90,4 +88,16 @@ lib/
|
|||||||
# spark
|
# spark
|
||||||
metastore_db
|
metastore_db
|
||||||
|
|
||||||
plugin/updater_gpu/test/cpp/data
|
/include/xgboost/build_config.h
|
||||||
|
|
||||||
|
# files from R-package source install
|
||||||
|
**/config.status
|
||||||
|
R-package/src/Makevars
|
||||||
|
|
||||||
|
# Visual Studio Code
|
||||||
|
/.vscode/
|
||||||
|
|
||||||
|
# IntelliJ/CLion
|
||||||
|
.idea
|
||||||
|
*.iml
|
||||||
|
/cmake-build-debug/
|
||||||
|
|||||||
66
.travis.yml
66
.travis.yml
@@ -1,77 +1,51 @@
|
|||||||
# disable sudo for container build.
|
# disable sudo for container build.
|
||||||
sudo: required
|
sudo: required
|
||||||
|
|
||||||
# Enabling test on Linux and OS X
|
# Enabling test OS X
|
||||||
os:
|
os:
|
||||||
- linux
|
- linux
|
||||||
- osx
|
- osx
|
||||||
|
|
||||||
osx_image: xcode8
|
osx_image: xcode10.3
|
||||||
|
dist: bionic
|
||||||
group: deprecated-2017Q4
|
|
||||||
|
|
||||||
# Use Build Matrix to do lint and build seperately
|
# Use Build Matrix to do lint and build seperately
|
||||||
env:
|
env:
|
||||||
matrix:
|
matrix:
|
||||||
# code lint
|
|
||||||
- TASK=lint
|
|
||||||
# r package test
|
|
||||||
- TASK=r_test
|
|
||||||
# python package test
|
# python package test
|
||||||
- TASK=python_test
|
- TASK=python_test
|
||||||
- TASK=python_lightweight_test
|
# test installation of Python source distribution
|
||||||
|
- TASK=python_sdist_test
|
||||||
# java package test
|
# java package test
|
||||||
- TASK=java_test
|
- TASK=java_test
|
||||||
# cmake test
|
# cmake test
|
||||||
- TASK=cmake_test
|
- TASK=cmake_test
|
||||||
# c++ test
|
|
||||||
- TASK=cpp_test
|
|
||||||
# distributed test
|
|
||||||
- TASK=distributed_test
|
|
||||||
# address sanitizer test
|
|
||||||
- TASK=sanitizer_test
|
|
||||||
|
|
||||||
matrix:
|
matrix:
|
||||||
exclude:
|
exclude:
|
||||||
- os: osx
|
|
||||||
env: TASK=lint
|
|
||||||
- os: osx
|
|
||||||
env: TASK=cmake_test
|
|
||||||
- os: linux
|
- os: linux
|
||||||
env: TASK=r_test
|
env: TASK=python_test
|
||||||
- os: osx
|
- os: linux
|
||||||
env: TASK=python_lightweight_test
|
env: TASK=java_test
|
||||||
- os: osx
|
- os: linux
|
||||||
env: TASK=cpp_test
|
env: TASK=cmake_test
|
||||||
- os: osx
|
|
||||||
env: TASK=distributed_test
|
|
||||||
- os: osx
|
|
||||||
env: TASK=sanitizer_test
|
|
||||||
|
|
||||||
# dependent apt packages
|
# dependent brew packages
|
||||||
addons:
|
addons:
|
||||||
apt:
|
homebrew:
|
||||||
sources:
|
|
||||||
- llvm-toolchain-trusty-5.0
|
|
||||||
- ubuntu-toolchain-r-test
|
|
||||||
- george-edison55-precise-backports
|
|
||||||
packages:
|
packages:
|
||||||
- clang
|
- cmake
|
||||||
- clang-tidy-5.0
|
- libomp
|
||||||
- cmake-data
|
|
||||||
- doxygen
|
|
||||||
- wget
|
|
||||||
- libcurl4-openssl-dev
|
|
||||||
- unzip
|
|
||||||
- graphviz
|
- graphviz
|
||||||
- gcc-4.8
|
- openssl
|
||||||
- g++-4.8
|
- libgit2
|
||||||
- gcc-7
|
- wget
|
||||||
- g++-7
|
- r
|
||||||
|
update: true
|
||||||
|
|
||||||
before_install:
|
before_install:
|
||||||
- source dmlc-core/scripts/travis/travis_setup_env.sh
|
- source dmlc-core/scripts/travis/travis_setup_env.sh
|
||||||
- export PYTHONPATH=${PYTHONPATH}:${PWD}/python-package
|
- if [ "${TASK}" != "python_sdist_test" ]; then export PYTHONPATH=${PYTHONPATH}:${PWD}/python-package; fi
|
||||||
- echo "MAVEN_OPTS='-Xmx2g -XX:MaxPermSize=1024m -XX:ReservedCodeCacheSize=512m -Dorg.slf4j.simpleLogger.defaultLogLevel=error'" > ~/.mavenrc
|
- echo "MAVEN_OPTS='-Xmx2g -XX:MaxPermSize=1024m -XX:ReservedCodeCacheSize=512m -Dorg.slf4j.simpleLogger.defaultLogLevel=error'" > ~/.mavenrc
|
||||||
|
|
||||||
install:
|
install:
|
||||||
|
|||||||
425
CMakeLists.txt
425
CMakeLists.txt
@@ -1,264 +1,247 @@
|
|||||||
cmake_minimum_required (VERSION 3.2)
|
cmake_minimum_required(VERSION 3.12)
|
||||||
project(xgboost)
|
project(xgboost LANGUAGES CXX C VERSION 1.0.0)
|
||||||
include(cmake/Utils.cmake)
|
include(cmake/Utils.cmake)
|
||||||
list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake/modules")
|
list(APPEND CMAKE_MODULE_PATH "${xgboost_SOURCE_DIR}/cmake/modules")
|
||||||
find_package(OpenMP)
|
cmake_policy(SET CMP0022 NEW)
|
||||||
|
|
||||||
|
if ((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUAL 3.13))
|
||||||
|
cmake_policy(SET CMP0077 NEW)
|
||||||
|
endif ((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUAL 3.13))
|
||||||
|
|
||||||
|
message(STATUS "CMake version ${CMAKE_VERSION}")
|
||||||
|
|
||||||
|
if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0)
|
||||||
|
message(FATAL_ERROR "GCC version must be at least 5.0!")
|
||||||
|
endif()
|
||||||
|
|
||||||
|
include(${xgboost_SOURCE_DIR}/cmake/FindPrefetchIntrinsics.cmake)
|
||||||
|
find_prefetch_intrinsics()
|
||||||
|
include(${xgboost_SOURCE_DIR}/cmake/Version.cmake)
|
||||||
|
write_version()
|
||||||
set_default_configuration_release()
|
set_default_configuration_release()
|
||||||
msvc_use_static_runtime()
|
|
||||||
|
|
||||||
# Options
|
#-- Options
|
||||||
option(USE_CUDA "Build with GPU acceleration")
|
option(BUILD_C_DOC "Build documentation for C APIs using Doxygen." OFF)
|
||||||
|
option(USE_OPENMP "Build with OpenMP support." ON)
|
||||||
|
## Bindings
|
||||||
option(JVM_BINDINGS "Build JVM bindings" OFF)
|
option(JVM_BINDINGS "Build JVM bindings" OFF)
|
||||||
option(GOOGLE_TEST "Build google tests" OFF)
|
|
||||||
option(R_LIB "Build shared library for R package" OFF)
|
option(R_LIB "Build shared library for R package" OFF)
|
||||||
|
## Dev
|
||||||
|
option(USE_DEBUG_OUTPUT "Dump internal training results like gradients and predictions to stdout.
|
||||||
|
Should only be used for debugging." OFF)
|
||||||
|
option(GOOGLE_TEST "Build google tests" OFF)
|
||||||
|
option(USE_DMLC_GTEST "Use google tests bundled with dmlc-core submodule" OFF)
|
||||||
|
option(USE_NVTX "Build with cuda profiling annotations. Developers only." OFF)
|
||||||
|
set(NVTX_HEADER_DIR "" CACHE PATH "Path to the stand-alone nvtx header")
|
||||||
|
option(RABIT_MOCK "Build rabit with mock" OFF)
|
||||||
|
## CUDA
|
||||||
|
option(USE_CUDA "Build with GPU acceleration" OFF)
|
||||||
|
option(USE_NCCL "Build with NCCL to enable distributed GPU support." OFF)
|
||||||
|
option(BUILD_WITH_SHARED_NCCL "Build with shared NCCL library." OFF)
|
||||||
set(GPU_COMPUTE_VER "" CACHE STRING
|
set(GPU_COMPUTE_VER "" CACHE STRING
|
||||||
"Space separated list of compute versions to be built against, e.g. '35 61'")
|
"Semicolon separated list of compute versions to be built against, e.g. '35;61'")
|
||||||
|
## Copied From dmlc
|
||||||
|
option(USE_HDFS "Build with HDFS support" OFF)
|
||||||
|
option(USE_AZURE "Build with AZURE support" OFF)
|
||||||
|
option(USE_S3 "Build with S3 support" OFF)
|
||||||
|
## Sanitizers
|
||||||
option(USE_SANITIZER "Use santizer flags" OFF)
|
option(USE_SANITIZER "Use santizer flags" OFF)
|
||||||
option(SANITIZER_PATH "Path to sanitizes.")
|
option(SANITIZER_PATH "Path to sanitizes.")
|
||||||
set(ENABLED_SANITIZERS "address" "leak" CACHE STRING
|
set(ENABLED_SANITIZERS "address" "leak" CACHE STRING
|
||||||
"Semicolon separated list of sanitizer names. E.g 'address;leak'. Supported sanitizers are
|
"Semicolon separated list of sanitizer names. E.g 'address;leak'. Supported sanitizers are
|
||||||
address, leak and thread.")
|
address, leak and thread.")
|
||||||
|
## Plugins
|
||||||
# Plugins
|
|
||||||
option(PLUGIN_LZ4 "Build lz4 plugin" OFF)
|
option(PLUGIN_LZ4 "Build lz4 plugin" OFF)
|
||||||
option(PLUGIN_DENSE_PARSER "Build dense parser plugin" OFF)
|
option(PLUGIN_DENSE_PARSER "Build dense parser plugin" OFF)
|
||||||
|
|
||||||
# Deprecation warning
|
#-- Checks for building XGBoost
|
||||||
if(USE_AVX)
|
if (USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug)))
|
||||||
message(WARNING "The option 'USE_AVX' is deprecated as experimental AVX features have been removed from xgboost.")
|
message(SEND_ERROR "Do not enable `USE_DEBUG_OUTPUT' with release build.")
|
||||||
endif()
|
endif (USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug)))
|
||||||
|
if (USE_NCCL AND NOT (USE_CUDA))
|
||||||
|
message(SEND_ERROR "`USE_NCCL` must be enabled with `USE_CUDA` flag.")
|
||||||
|
endif (USE_NCCL AND NOT (USE_CUDA))
|
||||||
|
if (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
|
||||||
|
message(SEND_ERROR "Build XGBoost with -DUSE_NCCL=ON to enable BUILD_WITH_SHARED_NCCL.")
|
||||||
|
endif (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
|
||||||
|
if (JVM_BINDINGS AND R_LIB)
|
||||||
|
message(SEND_ERROR "`R_LIB' is not compatible with `JVM_BINDINGS' as they both have customized configurations.")
|
||||||
|
endif (JVM_BINDINGS AND R_LIB)
|
||||||
|
if (R_LIB AND GOOGLE_TEST)
|
||||||
|
message(WARNING "Some C++ unittests will fail with `R_LIB` enabled,
|
||||||
|
as R package redirects some functions to R runtime implementation.")
|
||||||
|
endif (R_LIB AND GOOGLE_TEST)
|
||||||
|
if (USE_AVX)
|
||||||
|
message(SEND_ERROR "The option 'USE_AVX' is deprecated as experimental AVX features have been removed from XGBoost.")
|
||||||
|
endif (USE_AVX)
|
||||||
|
|
||||||
# Compiler flags
|
#-- Sanitizer
|
||||||
set(CMAKE_CXX_STANDARD 11)
|
if (USE_SANITIZER)
|
||||||
set(CMAKE_CXX_STANDARD_REQUIRED ON)
|
|
||||||
if(OpenMP_CXX_FOUND OR OPENMP_FOUND)
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
|
|
||||||
endif()
|
|
||||||
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
|
|
||||||
if(MSVC)
|
|
||||||
# Multithreaded compilation
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP")
|
|
||||||
else()
|
|
||||||
# Correct error for GCC 5 and cuda
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_MWAITXINTRIN_H_INCLUDED -D_FORCE_INLINES")
|
|
||||||
# Performance
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -funroll-loops")
|
|
||||||
endif()
|
|
||||||
if(WIN32 AND MINGW)
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -static-libstdc++")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
# Sanitizer
|
|
||||||
if(USE_SANITIZER)
|
|
||||||
include(cmake/Sanitizer.cmake)
|
include(cmake/Sanitizer.cmake)
|
||||||
enable_sanitizers("${ENABLED_SANITIZERS}")
|
enable_sanitizers("${ENABLED_SANITIZERS}")
|
||||||
endif(USE_SANITIZER)
|
endif (USE_SANITIZER)
|
||||||
|
|
||||||
|
if (USE_CUDA)
|
||||||
|
SET(USE_OPENMP ON CACHE BOOL "CUDA requires OpenMP" FORCE)
|
||||||
|
# `export CXX=' is ignored by CMake CUDA.
|
||||||
|
set(CMAKE_CUDA_HOST_COMPILER ${CMAKE_CXX_COMPILER})
|
||||||
|
message(STATUS "Configured CUDA host compiler: ${CMAKE_CUDA_HOST_COMPILER}")
|
||||||
|
|
||||||
|
enable_language(CUDA)
|
||||||
|
set(GEN_CODE "")
|
||||||
|
format_gencode_flags("${GPU_COMPUTE_VER}" GEN_CODE)
|
||||||
|
message(STATUS "CUDA GEN_CODE: ${GEN_CODE}")
|
||||||
|
endif (USE_CUDA)
|
||||||
|
|
||||||
|
if (USE_OPENMP)
|
||||||
|
if (APPLE)
|
||||||
|
# Require CMake 3.16+ on Mac OSX, as previous versions of CMake had trouble locating
|
||||||
|
# OpenMP on Mac. See https://github.com/dmlc/xgboost/pull/5146#issuecomment-568312706
|
||||||
|
cmake_minimum_required(VERSION 3.16)
|
||||||
|
endif (APPLE)
|
||||||
|
find_package(OpenMP REQUIRED)
|
||||||
|
endif (USE_OPENMP)
|
||||||
|
|
||||||
# dmlc-core
|
# dmlc-core
|
||||||
add_subdirectory(dmlc-core)
|
msvc_use_static_runtime()
|
||||||
set(LINK_LIBRARIES dmlc rabit)
|
add_subdirectory(${xgboost_SOURCE_DIR}/dmlc-core)
|
||||||
|
set_target_properties(dmlc PROPERTIES
|
||||||
# enable custom logging
|
CXX_STANDARD 11
|
||||||
add_definitions(-DDMLC_LOG_CUSTOMIZE=1)
|
CXX_STANDARD_REQUIRED ON
|
||||||
|
POSITION_INDEPENDENT_CODE ON)
|
||||||
# compiled code customizations for R package
|
list(APPEND LINKED_LIBRARIES_PRIVATE dmlc)
|
||||||
if(R_LIB)
|
|
||||||
add_definitions(
|
|
||||||
-DXGBOOST_STRICT_R_MODE=1
|
|
||||||
-DXGBOOST_CUSTOMIZE_GLOBAL_PRNG=1
|
|
||||||
-DDMLC_LOG_BEFORE_THROW=0
|
|
||||||
-DDMLC_DISABLE_STDIN=1
|
|
||||||
-DDMLC_LOG_CUSTOMIZE=1
|
|
||||||
-DRABIT_CUSTOMIZE_MSG_
|
|
||||||
-DRABIT_STRICT_CXX98_
|
|
||||||
)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
# Gather source files
|
|
||||||
include_directories (
|
|
||||||
${PROJECT_SOURCE_DIR}/include
|
|
||||||
${PROJECT_SOURCE_DIR}/dmlc-core/include
|
|
||||||
${PROJECT_SOURCE_DIR}/rabit/include
|
|
||||||
)
|
|
||||||
|
|
||||||
file(GLOB_RECURSE SOURCES
|
|
||||||
src/*.cc
|
|
||||||
src/*.h
|
|
||||||
include/*.h
|
|
||||||
)
|
|
||||||
|
|
||||||
# Only add main function for executable target
|
|
||||||
list(REMOVE_ITEM SOURCES ${PROJECT_SOURCE_DIR}/src/cli_main.cc)
|
|
||||||
|
|
||||||
file(GLOB_RECURSE TEST_SOURCES "tests/cpp/*.cc")
|
|
||||||
|
|
||||||
file(GLOB_RECURSE CUDA_SOURCES
|
|
||||||
src/*.cu
|
|
||||||
src/*.cuh
|
|
||||||
)
|
|
||||||
|
|
||||||
# Add plugins to source files
|
|
||||||
if(PLUGIN_LZ4)
|
|
||||||
list(APPEND SOURCES plugin/lz4/sparse_page_lz4_format.cc)
|
|
||||||
link_libraries(lz4)
|
|
||||||
endif()
|
|
||||||
if(PLUGIN_DENSE_PARSER)
|
|
||||||
list(APPEND SOURCES plugin/dense_parser/dense_libsvm.cc)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
# rabit
|
# rabit
|
||||||
# TODO: Create rabit cmakelists.txt
|
set(RABIT_BUILD_DMLC OFF)
|
||||||
set(RABIT_SOURCES
|
set(DMLC_ROOT ${xgboost_SOURCE_DIR}/dmlc-core)
|
||||||
rabit/src/allreduce_base.cc
|
set(RABIT_WITH_R_LIB ${R_LIB})
|
||||||
rabit/src/allreduce_robust.cc
|
add_subdirectory(rabit)
|
||||||
rabit/src/engine.cc
|
|
||||||
rabit/src/c_api.cc
|
if (RABIT_MOCK)
|
||||||
)
|
list(APPEND LINKED_LIBRARIES_PRIVATE rabit_mock_static)
|
||||||
set(RABIT_EMPTY_SOURCES
|
|
||||||
rabit/src/engine_empty.cc
|
|
||||||
rabit/src/c_api.cc
|
|
||||||
)
|
|
||||||
if(MINGW OR R_LIB)
|
|
||||||
# build a dummy rabit library
|
|
||||||
add_library(rabit STATIC ${RABIT_EMPTY_SOURCES})
|
|
||||||
else()
|
else()
|
||||||
add_library(rabit STATIC ${RABIT_SOURCES})
|
list(APPEND LINKED_LIBRARIES_PRIVATE rabit)
|
||||||
endif()
|
endif(RABIT_MOCK)
|
||||||
|
|
||||||
if(USE_CUDA)
|
# Exports some R specific definitions and objects
|
||||||
find_package(CUDA 8.0 REQUIRED)
|
if (R_LIB)
|
||||||
cmake_minimum_required(VERSION 3.5)
|
add_subdirectory(${xgboost_SOURCE_DIR}/R-package)
|
||||||
|
endif (R_LIB)
|
||||||
|
|
||||||
add_definitions(-DXGBOOST_USE_CUDA)
|
# core xgboost
|
||||||
|
add_subdirectory(${xgboost_SOURCE_DIR}/plugin)
|
||||||
|
add_subdirectory(${xgboost_SOURCE_DIR}/src)
|
||||||
|
set(XGBOOST_OBJ_SOURCES "${XGBOOST_OBJ_SOURCES};$<TARGET_OBJECTS:objxgboost>")
|
||||||
|
|
||||||
include_directories(cub)
|
#-- Shared library
|
||||||
|
add_library(xgboost SHARED ${XGBOOST_OBJ_SOURCES})
|
||||||
|
target_include_directories(xgboost
|
||||||
|
INTERFACE
|
||||||
|
$<INSTALL_INTERFACE:${CMAKE_INSTALL_PREFIX}/include>
|
||||||
|
$<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}/include>)
|
||||||
|
target_link_libraries(xgboost PRIVATE ${LINKED_LIBRARIES_PRIVATE})
|
||||||
|
|
||||||
if(USE_NCCL)
|
# This creates its own shared library `xgboost4j'.
|
||||||
find_package(Nccl REQUIRED)
|
if (JVM_BINDINGS)
|
||||||
include_directories(${NCCL_INCLUDE_DIR})
|
add_subdirectory(${xgboost_SOURCE_DIR}/jvm-packages)
|
||||||
add_definitions(-DXGBOOST_USE_NCCL)
|
endif (JVM_BINDINGS)
|
||||||
endif()
|
#-- End shared library
|
||||||
|
|
||||||
set(GENCODE_FLAGS "")
|
#-- CLI for xgboost
|
||||||
format_gencode_flags("${GPU_COMPUTE_VER}" GENCODE_FLAGS)
|
add_executable(runxgboost ${xgboost_SOURCE_DIR}/src/cli_main.cc ${XGBOOST_OBJ_SOURCES})
|
||||||
message("cuda architecture flags: ${GENCODE_FLAGS}")
|
|
||||||
|
|
||||||
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS};--expt-extended-lambda;--expt-relaxed-constexpr;${GENCODE_FLAGS};-lineinfo;")
|
target_include_directories(runxgboost
|
||||||
if(NOT MSVC)
|
PRIVATE
|
||||||
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS};-Xcompiler -fPIC; -Xcompiler -Werror; -std=c++11")
|
${xgboost_SOURCE_DIR}/include
|
||||||
endif()
|
${xgboost_SOURCE_DIR}/dmlc-core/include
|
||||||
|
${xgboost_SOURCE_DIR}/rabit/include)
|
||||||
|
target_link_libraries(runxgboost PRIVATE ${LINKED_LIBRARIES_PRIVATE})
|
||||||
|
set_target_properties(
|
||||||
|
runxgboost PROPERTIES
|
||||||
|
OUTPUT_NAME xgboost
|
||||||
|
CXX_STANDARD 11
|
||||||
|
CXX_STANDARD_REQUIRED ON)
|
||||||
|
#-- End CLI for xgboost
|
||||||
|
|
||||||
cuda_add_library(gpuxgboost ${CUDA_SOURCES} STATIC)
|
set_output_directory(runxgboost ${xgboost_SOURCE_DIR})
|
||||||
|
set_output_directory(xgboost ${xgboost_SOURCE_DIR}/lib)
|
||||||
|
# Ensure these two targets do not build simultaneously, as they produce outputs with conflicting names
|
||||||
|
add_dependencies(xgboost runxgboost)
|
||||||
|
|
||||||
if(USE_NCCL)
|
#-- Installing XGBoost
|
||||||
link_directories(${NCCL_LIBRARY})
|
if (R_LIB)
|
||||||
target_link_libraries(gpuxgboost ${NCCL_LIB_NAME})
|
|
||||||
endif()
|
|
||||||
list(APPEND LINK_LIBRARIES gpuxgboost)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
|
|
||||||
# flags and sources for R-package
|
|
||||||
if(R_LIB)
|
|
||||||
file(GLOB_RECURSE R_SOURCES
|
|
||||||
R-package/src/*.h
|
|
||||||
R-package/src/*.c
|
|
||||||
R-package/src/*.cc
|
|
||||||
)
|
|
||||||
list(APPEND SOURCES ${R_SOURCES})
|
|
||||||
endif()
|
|
||||||
|
|
||||||
add_library(objxgboost OBJECT ${SOURCES})
|
|
||||||
|
|
||||||
|
|
||||||
# building shared library for R package
|
|
||||||
if(R_LIB)
|
|
||||||
find_package(LibR REQUIRED)
|
|
||||||
|
|
||||||
list(APPEND LINK_LIBRARIES "${LIBR_CORE_LIBRARY}")
|
|
||||||
MESSAGE(STATUS "LIBR_CORE_LIBRARY " ${LIBR_CORE_LIBRARY})
|
|
||||||
|
|
||||||
include_directories(
|
|
||||||
"${LIBR_INCLUDE_DIRS}"
|
|
||||||
"${PROJECT_SOURCE_DIR}"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Shared library target for the R package
|
|
||||||
add_library(xgboost SHARED $<TARGET_OBJECTS:objxgboost>)
|
|
||||||
target_link_libraries(xgboost ${LINK_LIBRARIES})
|
|
||||||
# R uses no lib prefix in shared library names of its packages
|
|
||||||
set_target_properties(xgboost PROPERTIES PREFIX "")
|
set_target_properties(xgboost PROPERTIES PREFIX "")
|
||||||
if(APPLE)
|
if (APPLE)
|
||||||
set_target_properties(xgboost PROPERTIES SUFFIX ".so")
|
set_target_properties(xgboost PROPERTIES SUFFIX ".so")
|
||||||
endif()
|
endif (APPLE)
|
||||||
|
|
||||||
setup_rpackage_install_target(xgboost ${CMAKE_CURRENT_BINARY_DIR})
|
setup_rpackage_install_target(xgboost ${CMAKE_CURRENT_BINARY_DIR})
|
||||||
# use a dummy location for any other remaining installs
|
|
||||||
set(CMAKE_INSTALL_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/dummy_inst")
|
set(CMAKE_INSTALL_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/dummy_inst")
|
||||||
|
endif (R_LIB)
|
||||||
|
if (MINGW)
|
||||||
|
set_target_properties(xgboost PROPERTIES PREFIX "")
|
||||||
|
endif (MINGW)
|
||||||
|
|
||||||
# main targets: shared library & exe
|
if (BUILD_C_DOC)
|
||||||
else()
|
include(cmake/Doc.cmake)
|
||||||
# Executable
|
run_doxygen()
|
||||||
add_executable(runxgboost $<TARGET_OBJECTS:objxgboost> src/cli_main.cc)
|
endif (BUILD_C_DOC)
|
||||||
set_target_properties(runxgboost PROPERTIES
|
|
||||||
OUTPUT_NAME xgboost
|
|
||||||
)
|
|
||||||
set_output_directory(runxgboost ${PROJECT_SOURCE_DIR})
|
|
||||||
target_link_libraries(runxgboost ${LINK_LIBRARIES})
|
|
||||||
|
|
||||||
# Shared library
|
include(GNUInstallDirs)
|
||||||
add_library(xgboost SHARED $<TARGET_OBJECTS:objxgboost>)
|
# Install all headers. Please note that currently the C++ headers does not form an "API".
|
||||||
target_link_libraries(xgboost ${LINK_LIBRARIES})
|
install(DIRECTORY ${xgboost_SOURCE_DIR}/include/xgboost
|
||||||
set_output_directory(xgboost ${PROJECT_SOURCE_DIR}/lib)
|
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
|
||||||
if(MINGW)
|
|
||||||
# remove the 'lib' prefix to conform to windows convention for shared library names
|
|
||||||
set_target_properties(xgboost PROPERTIES PREFIX "")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
#Ensure these two targets do not build simultaneously, as they produce outputs with conflicting names
|
install(TARGETS xgboost runxgboost
|
||||||
add_dependencies(xgboost runxgboost)
|
EXPORT XGBoostTargets
|
||||||
endif()
|
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
|
||||||
|
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
|
||||||
|
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
|
||||||
|
INCLUDES DESTINATION ${LIBLEGACY_INCLUDE_DIRS})
|
||||||
|
install(EXPORT XGBoostTargets
|
||||||
|
FILE XGBoostTargets.cmake
|
||||||
|
NAMESPACE xgboost::
|
||||||
|
DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/xgboost)
|
||||||
|
|
||||||
|
include(CMakePackageConfigHelpers)
|
||||||
|
configure_package_config_file(
|
||||||
|
${CMAKE_CURRENT_LIST_DIR}/cmake/xgboost-config.cmake.in
|
||||||
|
${CMAKE_CURRENT_BINARY_DIR}/cmake/xgboost-config.cmake
|
||||||
|
INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/xgboost)
|
||||||
|
write_basic_package_version_file(
|
||||||
|
${CMAKE_BINARY_DIR}/cmake/xgboost-config-version.cmake
|
||||||
|
VERSION ${XGBOOST_VERSION}
|
||||||
|
COMPATIBILITY AnyNewerVersion)
|
||||||
|
install(
|
||||||
|
FILES
|
||||||
|
${CMAKE_BINARY_DIR}/cmake/xgboost-config.cmake
|
||||||
|
${CMAKE_BINARY_DIR}/cmake/xgboost-config-version.cmake
|
||||||
|
DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/xgboost)
|
||||||
|
|
||||||
# JVM
|
#-- Test
|
||||||
if(JVM_BINDINGS)
|
if (GOOGLE_TEST)
|
||||||
find_package(JNI QUIET REQUIRED)
|
|
||||||
|
|
||||||
include_directories(${JNI_INCLUDE_DIRS} jvm-packages/xgboost4j/src/native)
|
|
||||||
|
|
||||||
add_library(xgboost4j SHARED
|
|
||||||
$<TARGET_OBJECTS:objxgboost>
|
|
||||||
jvm-packages/xgboost4j/src/native/xgboost4j.cpp)
|
|
||||||
set_output_directory(xgboost4j ${PROJECT_SOURCE_DIR}/lib)
|
|
||||||
target_link_libraries(xgboost4j
|
|
||||||
${LINK_LIBRARIES}
|
|
||||||
${JAVA_JVM_LIBRARY})
|
|
||||||
endif()
|
|
||||||
|
|
||||||
|
|
||||||
# Test
|
|
||||||
if(GOOGLE_TEST)
|
|
||||||
enable_testing()
|
enable_testing()
|
||||||
find_package(GTest REQUIRED)
|
# Unittests.
|
||||||
|
add_subdirectory(${xgboost_SOURCE_DIR}/tests/cpp)
|
||||||
|
add_test(
|
||||||
|
NAME TestXGBoostLib
|
||||||
|
COMMAND testxgboost
|
||||||
|
WORKING_DIRECTORY ${xgboost_BINARY_DIR})
|
||||||
|
|
||||||
auto_source_group("${TEST_SOURCES}")
|
# CLI tests
|
||||||
include_directories(${GTEST_INCLUDE_DIRS})
|
configure_file(
|
||||||
|
${xgboost_SOURCE_DIR}/tests/cli/machine.conf.in
|
||||||
|
${xgboost_BINARY_DIR}/tests/cli/machine.conf
|
||||||
|
@ONLY)
|
||||||
|
add_test(
|
||||||
|
NAME TestXGBoostCLI
|
||||||
|
COMMAND runxgboost ${xgboost_BINARY_DIR}/tests/cli/machine.conf
|
||||||
|
WORKING_DIRECTORY ${xgboost_BINARY_DIR})
|
||||||
|
set_tests_properties(TestXGBoostCLI
|
||||||
|
PROPERTIES
|
||||||
|
PASS_REGULAR_EXPRESSION ".*test-rmse:0.087.*")
|
||||||
|
endif (GOOGLE_TEST)
|
||||||
|
|
||||||
if(USE_CUDA)
|
# For MSVC: Call msvc_use_static_runtime() once again to completely
|
||||||
file(GLOB_RECURSE CUDA_TEST_SOURCES "tests/cpp/*.cu")
|
# replace /MD with /MT. See https://github.com/dmlc/xgboost/issues/4462
|
||||||
cuda_compile(CUDA_TEST_OBJS ${CUDA_TEST_SOURCES})
|
# for issues caused by mixing of /MD and /MT flags
|
||||||
else()
|
msvc_use_static_runtime()
|
||||||
set(CUDA_TEST_OBJS "")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
add_executable(testxgboost ${TEST_SOURCES} ${CUDA_TEST_OBJS} $<TARGET_OBJECTS:objxgboost>)
|
|
||||||
set_output_directory(testxgboost ${PROJECT_SOURCE_DIR})
|
|
||||||
target_link_libraries(testxgboost ${GTEST_LIBRARIES} ${LINK_LIBRARIES})
|
|
||||||
|
|
||||||
add_test(TestXGBoost testxgboost)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
|
|
||||||
# Group sources
|
|
||||||
auto_source_group("${SOURCES}")
|
|
||||||
|
|||||||
@@ -2,34 +2,42 @@ Contributors of DMLC/XGBoost
|
|||||||
============================
|
============================
|
||||||
XGBoost has been developed and used by a group of active community. Everyone is more than welcomed to is a great way to make the project better and more accessible to more users.
|
XGBoost has been developed and used by a group of active community. Everyone is more than welcomed to is a great way to make the project better and more accessible to more users.
|
||||||
|
|
||||||
Committers
|
Project Management Committee(PMC)
|
||||||
----------
|
----------
|
||||||
Committers are people who have made substantial contribution to the project and granted write access to the project.
|
The Project Management Committee(PMC) consists group of active committers that moderate the discussion, manage the project release, and proposes new committer/PMC members.
|
||||||
|
|
||||||
* [Tianqi Chen](https://github.com/tqchen), University of Washington
|
* [Tianqi Chen](https://github.com/tqchen), University of Washington
|
||||||
- Tianqi is a Ph.D. student working on large-scale machine learning. He is the creator of the project.
|
- Tianqi is a Ph.D. student working on large-scale machine learning. He is the creator of the project.
|
||||||
* [Tong He](https://github.com/hetong007), Amazon AI
|
|
||||||
- Tong is an applied scientist in Amazon AI. He is the maintainer of XGBoost R package.
|
|
||||||
* [Vadim Khotilovich](https://github.com/khotilov)
|
|
||||||
- Vadim contributes many improvements in R and core packages.
|
|
||||||
* [Bing Xu](https://github.com/antinucleon)
|
|
||||||
- Bing is the original creator of XGBoost Python package and currently the maintainer of [XGBoost.jl](https://github.com/antinucleon/XGBoost.jl).
|
|
||||||
* [Michael Benesty](https://github.com/pommedeterresautee)
|
* [Michael Benesty](https://github.com/pommedeterresautee)
|
||||||
- Michael is a lawyer and data scientist in France. He is the creator of XGBoost interactive analysis module in R.
|
- Michael is a lawyer and data scientist in France. He is the creator of XGBoost interactive analysis module in R.
|
||||||
* [Yuan Tang](https://github.com/terrytangyuan), Ant Financial
|
* [Yuan Tang](https://github.com/terrytangyuan), Ant Financial
|
||||||
- Yuan is a software engineer in Ant Financial. He contributed mostly in R and Python packages.
|
- Yuan is a software engineer in Ant Financial. He contributed mostly in R and Python packages.
|
||||||
* [Nan Zhu](https://github.com/CodingCat), Uber
|
* [Nan Zhu](https://github.com/CodingCat), Uber
|
||||||
- Nan is a software engineer in Uber. He contributed mostly in JVM packages.
|
- Nan is a software engineer in Uber. He contributed mostly in JVM packages.
|
||||||
* [Sergei Lebedev](https://github.com/superbobry), Criteo
|
* [Jiaming Yuan](https://github.com/trivialfis)
|
||||||
- Sergei is a software engineer in Criteo. He contributed mostly in JVM packages.
|
- Jiaming contributed to the GPU algorithms. He has also introduced new abstractions to improve the quality of the C++ codebase.
|
||||||
* [Hongliang Liu](https://github.com/phunterlau)
|
* [Hyunsu Cho](http://hyunsu-cho.io/), Amazon AI
|
||||||
* [Scott Lundberg](http://scottlundberg.com/), University of Washington
|
- Hyunsu is an applied scientist in Amazon AI. He is the maintainer of the XGBoost Python package. He also manages the Jenkins continuous integration system (https://xgboost-ci.net/). He is the initial author of the CPU 'hist' updater.
|
||||||
- Scott is a Ph.D. student at University of Washington. He is the creator of SHAP, a unified approach to explain the output of machine learning models such as decision tree ensembles. He also helps maintain the XGBoost Julia package.
|
|
||||||
* [Rory Mitchell](https://github.com/RAMitchell), University of Waikato
|
* [Rory Mitchell](https://github.com/RAMitchell), University of Waikato
|
||||||
- Rory is a Ph.D. student at University of Waikato. He is the original creator of the GPU training algorithms. He improved the CMake build system and continuous integration.
|
- Rory is a Ph.D. student at University of Waikato. He is the original creator of the GPU training algorithms. He improved the CMake build system and continuous integration.
|
||||||
* [Hyunsu Cho](http://hyunsu-cho.io/), Amazon AI
|
* [Hongliang Liu](https://github.com/phunterlau)
|
||||||
- Hyunsu is an applied scientist in Amazon AI. He is the maintainer of the XGBoost Python package. He also manages the Jenkins continuous integration system (https://xgboost-ci.net/). He is the initial author of the CPU 'hist' updater.
|
|
||||||
* [Jiaming](https://github.com/trivialfis)
|
|
||||||
- Jiaming contributed to the GPU algorithms. He has also introduced new abstractions to improve the quality of the C++ codebase.
|
Committers
|
||||||
|
----------
|
||||||
|
Committers are people who have made substantial contribution to the project and granted write access to the project.
|
||||||
|
|
||||||
|
* [Tong He](https://github.com/hetong007), Amazon AI
|
||||||
|
- Tong is an applied scientist in Amazon AI. He is the maintainer of XGBoost R package.
|
||||||
|
* [Vadim Khotilovich](https://github.com/khotilov)
|
||||||
|
- Vadim contributes many improvements in R and core packages.
|
||||||
|
* [Bing Xu](https://github.com/antinucleon)
|
||||||
|
- Bing is the original creator of XGBoost Python package and currently the maintainer of [XGBoost.jl](https://github.com/antinucleon/XGBoost.jl).
|
||||||
|
* [Sergei Lebedev](https://github.com/superbobry), Criteo
|
||||||
|
- Sergei is a software engineer in Criteo. He contributed mostly in JVM packages.
|
||||||
|
* [Scott Lundberg](http://scottlundberg.com/), University of Washington
|
||||||
|
- Scott is a Ph.D. student at University of Washington. He is the creator of SHAP, a unified approach to explain the output of machine learning models such as decision tree ensembles. He also helps maintain the XGBoost Julia package.
|
||||||
|
|
||||||
|
|
||||||
Become a Committer
|
Become a Committer
|
||||||
------------------
|
------------------
|
||||||
@@ -85,4 +93,12 @@ List of Contributors
|
|||||||
* [Andrew Thia](https://github.com/BlueTea88)
|
* [Andrew Thia](https://github.com/BlueTea88)
|
||||||
- Andrew Thia implemented feature interaction constraints
|
- Andrew Thia implemented feature interaction constraints
|
||||||
* [Wei Tian](https://github.com/weitian)
|
* [Wei Tian](https://github.com/weitian)
|
||||||
* [Chen Qin] (https://github.com/chenqin)
|
* [Chen Qin](https://github.com/chenqin)
|
||||||
|
* [Sam Wilkinson](https://samwilkinson.io)
|
||||||
|
* [Matthew Jones](https://github.com/mt-jones)
|
||||||
|
* [Jiaxiang Li](https://github.com/JiaxiangBU)
|
||||||
|
* [Bryan Woods](https://github.com/bryan-woods)
|
||||||
|
- Bryan added support for cross-validation for the ranking objective
|
||||||
|
* [Haoda Fu](https://github.com/fuhaoda)
|
||||||
|
* [Evan Kepner](https://github.com/EvanKepner)
|
||||||
|
- Evan Kepner added support for os.PathLike file paths in Python
|
||||||
|
|||||||
453
Jenkinsfile
vendored
453
Jenkinsfile
vendored
@@ -3,106 +3,379 @@
|
|||||||
// Jenkins pipeline
|
// Jenkins pipeline
|
||||||
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
|
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
|
||||||
|
|
||||||
|
// Command to run command inside a docker container
|
||||||
|
dockerRun = 'tests/ci_build/ci_build.sh'
|
||||||
|
|
||||||
import groovy.transform.Field
|
import groovy.transform.Field
|
||||||
|
|
||||||
/* Unrestricted tasks: tasks that do NOT generate artifacts */
|
|
||||||
|
|
||||||
// Command to run command inside a docker container
|
|
||||||
def dockerRun = 'tests/ci_build/ci_build.sh'
|
|
||||||
// Utility functions
|
|
||||||
@Field
|
@Field
|
||||||
def utils
|
def commit_id // necessary to pass a variable from one stage to another
|
||||||
|
|
||||||
def buildMatrix = [
|
|
||||||
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "9.2", "multiGpu": true],
|
|
||||||
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "9.2" ],
|
|
||||||
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
|
|
||||||
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": false, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
|
|
||||||
]
|
|
||||||
|
|
||||||
pipeline {
|
pipeline {
|
||||||
// Each stage specify its own agent
|
// Each stage specify its own agent
|
||||||
agent none
|
agent none
|
||||||
|
|
||||||
// Setup common job properties
|
environment {
|
||||||
options {
|
DOCKER_CACHE_ECR_ID = '492475357299'
|
||||||
ansiColor('xterm')
|
DOCKER_CACHE_ECR_REGION = 'us-west-2'
|
||||||
timestamps()
|
}
|
||||||
timeout(time: 120, unit: 'MINUTES')
|
|
||||||
buildDiscarder(logRotator(numToKeepStr: '10'))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build stages
|
// Setup common job properties
|
||||||
stages {
|
options {
|
||||||
stage('Jenkins: Get sources') {
|
ansiColor('xterm')
|
||||||
agent {
|
timestamps()
|
||||||
label 'unrestricted'
|
timeout(time: 240, unit: 'MINUTES')
|
||||||
}
|
buildDiscarder(logRotator(numToKeepStr: '10'))
|
||||||
steps {
|
preserveStashes()
|
||||||
script {
|
}
|
||||||
utils = load('tests/ci_build/jenkins_tools.Groovy')
|
|
||||||
utils.checkoutSrcs()
|
// Build stages
|
||||||
}
|
stages {
|
||||||
stash name: 'srcs', excludes: '.git/'
|
stage('Jenkins Linux: Get sources') {
|
||||||
milestone label: 'Sources ready', ordinal: 1
|
agent { label 'linux && cpu' }
|
||||||
}
|
steps {
|
||||||
}
|
script {
|
||||||
stage('Jenkins: Build & Test') {
|
checkoutSrcs()
|
||||||
steps {
|
commit_id = "${GIT_COMMIT}"
|
||||||
script {
|
|
||||||
parallel (buildMatrix.findAll{it['enabled']}.collectEntries{ c ->
|
|
||||||
def buildName = utils.getBuildName(c)
|
|
||||||
utils.buildFactory(buildName, c, false, this.&buildPlatformCmake)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
stash name: 'srcs'
|
||||||
|
milestone ordinal: 1
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
stage('Jenkins Linux: Formatting Check') {
|
||||||
|
agent none
|
||||||
|
steps {
|
||||||
|
script {
|
||||||
|
parallel ([
|
||||||
|
'clang-tidy': { ClangTidy() },
|
||||||
|
'lint': { Lint() },
|
||||||
|
'sphinx-doc': { SphinxDoc() },
|
||||||
|
'doxygen': { Doxygen() }
|
||||||
|
])
|
||||||
|
}
|
||||||
|
milestone ordinal: 2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage('Jenkins Linux: Build') {
|
||||||
|
agent none
|
||||||
|
steps {
|
||||||
|
script {
|
||||||
|
parallel ([
|
||||||
|
'build-cpu': { BuildCPU() },
|
||||||
|
'build-cpu-rabit-mock': { BuildCPUMock() },
|
||||||
|
'build-gpu-cuda9.0': { BuildCUDA(cuda_version: '9.0') },
|
||||||
|
'build-gpu-cuda10.0': { BuildCUDA(cuda_version: '10.0') },
|
||||||
|
'build-gpu-cuda10.1': { BuildCUDA(cuda_version: '10.1') },
|
||||||
|
'build-jvm-packages': { BuildJVMPackages(spark_version: '2.4.3') },
|
||||||
|
'build-jvm-doc': { BuildJVMDoc() }
|
||||||
|
])
|
||||||
|
}
|
||||||
|
milestone ordinal: 3
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage('Jenkins Linux: Test') {
|
||||||
|
agent none
|
||||||
|
steps {
|
||||||
|
script {
|
||||||
|
parallel ([
|
||||||
|
'test-python-cpu': { TestPythonCPU() },
|
||||||
|
'test-python-gpu-cuda9.0': { TestPythonGPU(cuda_version: '9.0') },
|
||||||
|
'test-python-gpu-cuda10.0': { TestPythonGPU(cuda_version: '10.0') },
|
||||||
|
'test-python-gpu-cuda10.1': { TestPythonGPU(cuda_version: '10.1') },
|
||||||
|
'test-python-mgpu-cuda10.1': { TestPythonGPU(cuda_version: '10.1', multi_gpu: true) },
|
||||||
|
'test-cpp-gpu': { TestCppGPU(cuda_version: '10.1') },
|
||||||
|
'test-cpp-mgpu': { TestCppGPU(cuda_version: '10.1', multi_gpu: true) },
|
||||||
|
'test-jvm-jdk8': { CrossTestJVMwithJDK(jdk_version: '8', spark_version: '2.4.3') },
|
||||||
|
'test-jvm-jdk11': { CrossTestJVMwithJDK(jdk_version: '11') },
|
||||||
|
'test-jvm-jdk12': { CrossTestJVMwithJDK(jdk_version: '12') },
|
||||||
|
'test-r-3.4.4': { TestR(use_r35: false) },
|
||||||
|
'test-r-3.5.3': { TestR(use_r35: true) }
|
||||||
|
])
|
||||||
|
}
|
||||||
|
milestone ordinal: 4
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
// check out source code from git
|
||||||
* Build platform and test it via cmake.
|
def checkoutSrcs() {
|
||||||
*/
|
retry(5) {
|
||||||
def buildPlatformCmake(buildName, conf, nodeReq, dockerTarget) {
|
try {
|
||||||
def opts = utils.cmakeOptions(conf)
|
timeout(time: 2, unit: 'MINUTES') {
|
||||||
// Destination dir for artifacts
|
checkout scm
|
||||||
def distDir = "dist/${buildName}"
|
sh 'git submodule update --init'
|
||||||
def dockerArgs = ""
|
}
|
||||||
if (conf["withGpu"]) {
|
} catch (exc) {
|
||||||
dockerArgs = "--build-arg CUDA_VERSION=" + conf["cudaVersion"]
|
deleteDir()
|
||||||
}
|
error "Failed to fetch source codes"
|
||||||
def test_suite = conf["withGpu"] ? (conf["multiGpu"] ? "mgpu" : "gpu") : "cpu"
|
|
||||||
// Build node - this is returned result
|
|
||||||
retry(3) {
|
|
||||||
node(nodeReq) {
|
|
||||||
unstash name: 'srcs'
|
|
||||||
echo """
|
|
||||||
|===== XGBoost CMake build =====
|
|
||||||
| dockerTarget: ${dockerTarget}
|
|
||||||
| cmakeOpts : ${opts}
|
|
||||||
|=========================
|
|
||||||
""".stripMargin('|')
|
|
||||||
// Invoke command inside docker
|
|
||||||
sh """
|
|
||||||
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/build_via_cmake.sh ${opts}
|
|
||||||
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/test_${test_suite}.sh
|
|
||||||
"""
|
|
||||||
if (!conf["multiGpu"]) {
|
|
||||||
sh """
|
|
||||||
${dockerRun} ${dockerTarget} ${dockerArgs} bash -c "cd python-package; rm -f dist/*; python setup.py bdist_wheel --universal"
|
|
||||||
rm -rf "${distDir}"; mkdir -p "${distDir}/py"
|
|
||||||
cp xgboost "${distDir}"
|
|
||||||
cp -r python-package/dist "${distDir}/py"
|
|
||||||
# Test the wheel for compatibility on a barebones CPU container
|
|
||||||
${dockerRun} release ${dockerArgs} bash -c " \
|
|
||||||
pip install --user python-package/dist/xgboost-*-none-any.whl && \
|
|
||||||
python -m nose -v tests/python"
|
|
||||||
# Test the wheel for compatibility on CUDA 10.0 container
|
|
||||||
${dockerRun} gpu --build-arg CUDA_VERSION=10.0 bash -c " \
|
|
||||||
pip install --user python-package/dist/xgboost-*-none-any.whl && \
|
|
||||||
python -m nose -v --eval-attr='(not slow) and (not mgpu)' tests/python-gpu"
|
|
||||||
"""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def ClangTidy() {
|
||||||
|
node('linux && cpu') {
|
||||||
|
unstash name: 'srcs'
|
||||||
|
echo "Running clang-tidy job..."
|
||||||
|
def container_type = "clang_tidy"
|
||||||
|
def docker_binary = "docker"
|
||||||
|
def dockerArgs = "--build-arg CUDA_VERSION=9.2"
|
||||||
|
sh """
|
||||||
|
${dockerRun} ${container_type} ${docker_binary} ${dockerArgs} python3 tests/ci_build/tidy.py
|
||||||
|
"""
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def Lint() {
|
||||||
|
node('linux && cpu') {
|
||||||
|
unstash name: 'srcs'
|
||||||
|
echo "Running lint..."
|
||||||
|
def container_type = "cpu"
|
||||||
|
def docker_binary = "docker"
|
||||||
|
sh """
|
||||||
|
${dockerRun} ${container_type} ${docker_binary} make lint
|
||||||
|
"""
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def SphinxDoc() {
|
||||||
|
node('linux && cpu') {
|
||||||
|
unstash name: 'srcs'
|
||||||
|
echo "Running sphinx-doc..."
|
||||||
|
def container_type = "cpu"
|
||||||
|
def docker_binary = "docker"
|
||||||
|
def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='-e SPHINX_GIT_BRANCH=${BRANCH_NAME}'"
|
||||||
|
sh """#!/bin/bash
|
||||||
|
${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} make -C doc html
|
||||||
|
"""
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def Doxygen() {
|
||||||
|
node('linux && cpu') {
|
||||||
|
unstash name: 'srcs'
|
||||||
|
echo "Running doxygen..."
|
||||||
|
def container_type = "cpu"
|
||||||
|
def docker_binary = "docker"
|
||||||
|
sh """
|
||||||
|
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/doxygen.sh ${BRANCH_NAME}
|
||||||
|
"""
|
||||||
|
echo 'Uploading doc...'
|
||||||
|
s3Upload file: "build/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "doxygen/${BRANCH_NAME}.tar.bz2"
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def BuildCPU() {
|
||||||
|
node('linux && cpu') {
|
||||||
|
unstash name: 'srcs'
|
||||||
|
echo "Build CPU"
|
||||||
|
def container_type = "cpu"
|
||||||
|
def docker_binary = "docker"
|
||||||
|
sh """
|
||||||
|
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh
|
||||||
|
${dockerRun} ${container_type} ${docker_binary} build/testxgboost
|
||||||
|
"""
|
||||||
|
// Sanitizer test
|
||||||
|
def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='-e ASAN_SYMBOLIZER_PATH=/usr/bin/llvm-symbolizer -e ASAN_OPTIONS=symbolize=1 -e UBSAN_OPTIONS=print_stacktrace=1:log_path=ubsan_error.log --cap-add SYS_PTRACE'"
|
||||||
|
def docker_args = "--build-arg CMAKE_VERSION=3.12"
|
||||||
|
sh """
|
||||||
|
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak;undefined" \
|
||||||
|
-DCMAKE_BUILD_TYPE=Debug -DSANITIZER_PATH=/usr/lib/x86_64-linux-gnu/
|
||||||
|
${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} build/testxgboost
|
||||||
|
"""
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def BuildCPUMock() {
|
||||||
|
node('linux && cpu') {
|
||||||
|
unstash name: 'srcs'
|
||||||
|
echo "Build CPU with rabit mock"
|
||||||
|
def container_type = "cpu"
|
||||||
|
def docker_binary = "docker"
|
||||||
|
sh """
|
||||||
|
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_mock_cmake.sh
|
||||||
|
"""
|
||||||
|
echo 'Stashing rabit C++ test executable (xgboost)...'
|
||||||
|
stash name: 'xgboost_rabit_tests', includes: 'xgboost'
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def BuildCUDA(args) {
|
||||||
|
node('linux && cpu') {
|
||||||
|
unstash name: 'srcs'
|
||||||
|
echo "Build with CUDA ${args.cuda_version}"
|
||||||
|
def container_type = "gpu_build"
|
||||||
|
def docker_binary = "docker"
|
||||||
|
def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
|
||||||
|
sh """
|
||||||
|
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_CUDA=ON -DUSE_NCCL=ON -DOPEN_MP:BOOL=ON
|
||||||
|
${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal"
|
||||||
|
${dockerRun} ${container_type} ${docker_binary} ${docker_args} python3 tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} manylinux1_x86_64
|
||||||
|
"""
|
||||||
|
// Stash wheel for CUDA 9.0 target
|
||||||
|
if (args.cuda_version == '9.0') {
|
||||||
|
echo 'Stashing Python wheel...'
|
||||||
|
stash name: 'xgboost_whl_cuda9', includes: 'python-package/dist/*.whl'
|
||||||
|
path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
|
||||||
|
s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
|
||||||
|
echo 'Stashing C++ test executable (testxgboost)...'
|
||||||
|
stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost'
|
||||||
|
}
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def BuildJVMPackages(args) {
|
||||||
|
node('linux && cpu') {
|
||||||
|
unstash name: 'srcs'
|
||||||
|
echo "Build XGBoost4J-Spark with Spark ${args.spark_version}"
|
||||||
|
def container_type = "jvm"
|
||||||
|
def docker_binary = "docker"
|
||||||
|
// Use only 4 CPU cores
|
||||||
|
def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='--cpuset-cpus 0-3'"
|
||||||
|
sh """
|
||||||
|
${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_packages.sh ${args.spark_version}
|
||||||
|
"""
|
||||||
|
echo 'Stashing XGBoost4J JAR...'
|
||||||
|
stash name: 'xgboost4j_jar', includes: 'jvm-packages/xgboost4j/target/*.jar,jvm-packages/xgboost4j-spark/target/*.jar,jvm-packages/xgboost4j-example/target/*.jar'
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def BuildJVMDoc() {
|
||||||
|
node('linux && cpu') {
|
||||||
|
unstash name: 'srcs'
|
||||||
|
echo "Building JVM doc..."
|
||||||
|
def container_type = "jvm"
|
||||||
|
def docker_binary = "docker"
|
||||||
|
sh """
|
||||||
|
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_doc.sh ${BRANCH_NAME}
|
||||||
|
"""
|
||||||
|
echo 'Uploading doc...'
|
||||||
|
s3Upload file: "jvm-packages/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "${BRANCH_NAME}.tar.bz2"
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def TestPythonCPU() {
|
||||||
|
node('linux && cpu') {
|
||||||
|
unstash name: 'xgboost_whl_cuda9'
|
||||||
|
unstash name: 'srcs'
|
||||||
|
echo "Test Python CPU"
|
||||||
|
def container_type = "cpu"
|
||||||
|
def docker_binary = "docker"
|
||||||
|
sh """
|
||||||
|
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu
|
||||||
|
"""
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def TestPythonGPU(args) {
|
||||||
|
nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu'
|
||||||
|
node(nodeReq) {
|
||||||
|
unstash name: 'xgboost_whl_cuda9'
|
||||||
|
unstash name: 'srcs'
|
||||||
|
echo "Test Python GPU: CUDA ${args.cuda_version}"
|
||||||
|
def container_type = "gpu"
|
||||||
|
def docker_binary = "nvidia-docker"
|
||||||
|
def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
|
||||||
|
if (args.multi_gpu) {
|
||||||
|
echo "Using multiple GPUs"
|
||||||
|
sh """
|
||||||
|
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh mgpu
|
||||||
|
"""
|
||||||
|
} else {
|
||||||
|
echo "Using a single GPU"
|
||||||
|
sh """
|
||||||
|
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh gpu
|
||||||
|
"""
|
||||||
|
}
|
||||||
|
// For CUDA 10.0 target, run cuDF tests too
|
||||||
|
if (args.cuda_version == '10.0') {
|
||||||
|
echo "Running tests with cuDF..."
|
||||||
|
sh """
|
||||||
|
${dockerRun} cudf ${docker_binary} ${docker_args} tests/ci_build/test_python.sh cudf
|
||||||
|
"""
|
||||||
|
}
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def TestCppRabit() {
|
||||||
|
node(nodeReq) {
|
||||||
|
unstash name: 'xgboost_rabit_tests'
|
||||||
|
unstash name: 'srcs'
|
||||||
|
echo "Test C++, rabit mock on"
|
||||||
|
def container_type = "cpu"
|
||||||
|
def docker_binary = "docker"
|
||||||
|
sh """
|
||||||
|
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/runxgb.sh xgboost tests/ci_build/approx.conf.in
|
||||||
|
"""
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def TestCppGPU(args) {
|
||||||
|
nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu'
|
||||||
|
node(nodeReq) {
|
||||||
|
unstash name: 'xgboost_cpp_tests'
|
||||||
|
unstash name: 'srcs'
|
||||||
|
echo "Test C++, CUDA ${args.cuda_version}"
|
||||||
|
def container_type = "gpu"
|
||||||
|
def docker_binary = "nvidia-docker"
|
||||||
|
def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
|
||||||
|
if (args.multi_gpu) {
|
||||||
|
echo "Using multiple GPUs"
|
||||||
|
sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost --gtest_filter=*.MGPU_*"
|
||||||
|
} else {
|
||||||
|
echo "Using a single GPU"
|
||||||
|
sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost --gtest_filter=-*.MGPU_*"
|
||||||
|
}
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def CrossTestJVMwithJDK(args) {
|
||||||
|
node('linux && cpu') {
|
||||||
|
unstash name: 'xgboost4j_jar'
|
||||||
|
unstash name: 'srcs'
|
||||||
|
if (args.spark_version != null) {
|
||||||
|
echo "Test XGBoost4J on a machine with JDK ${args.jdk_version}, Spark ${args.spark_version}"
|
||||||
|
} else {
|
||||||
|
echo "Test XGBoost4J on a machine with JDK ${args.jdk_version}"
|
||||||
|
}
|
||||||
|
def container_type = "jvm_cross"
|
||||||
|
def docker_binary = "docker"
|
||||||
|
def spark_arg = (args.spark_version != null) ? "--build-arg SPARK_VERSION=${args.spark_version}" : ""
|
||||||
|
def docker_args = "--build-arg JDK_VERSION=${args.jdk_version} ${spark_arg}"
|
||||||
|
// Run integration tests only when spark_version is given
|
||||||
|
def docker_extra_params = (args.spark_version != null) ? "CI_DOCKER_EXTRA_PARAMS_INIT='-e RUN_INTEGRATION_TEST=1'" : ""
|
||||||
|
sh """
|
||||||
|
${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_jvm_cross.sh
|
||||||
|
"""
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def TestR(args) {
|
||||||
|
node('linux && cpu') {
|
||||||
|
unstash name: 'srcs'
|
||||||
|
echo "Test R package"
|
||||||
|
def container_type = "rproject"
|
||||||
|
def docker_binary = "docker"
|
||||||
|
def use_r35_flag = (args.use_r35) ? "1" : "0"
|
||||||
|
def docker_args = "--build-arg USE_R35=${use_r35_flag}"
|
||||||
|
sh """
|
||||||
|
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_test_rpkg.sh || tests/ci_build/print_r_stacktrace.sh
|
||||||
|
"""
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,123 +0,0 @@
|
|||||||
#!/usr/bin/groovy
|
|
||||||
// -*- mode: groovy -*-
|
|
||||||
// Jenkins pipeline
|
|
||||||
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
|
|
||||||
|
|
||||||
import groovy.transform.Field
|
|
||||||
|
|
||||||
/* Restricted tasks: tasks generating artifacts, such as binary wheels and
|
|
||||||
documentation */
|
|
||||||
|
|
||||||
// Command to run command inside a docker container
|
|
||||||
def dockerRun = 'tests/ci_build/ci_build.sh'
|
|
||||||
// Utility functions
|
|
||||||
@Field
|
|
||||||
def utils
|
|
||||||
@Field
|
|
||||||
def commit_id
|
|
||||||
@Field
|
|
||||||
def branch_name
|
|
||||||
|
|
||||||
def buildMatrix = [
|
|
||||||
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "9.2" ],
|
|
||||||
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
|
|
||||||
[ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": false, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
|
|
||||||
]
|
|
||||||
|
|
||||||
pipeline {
|
|
||||||
// Each stage specify its own agent
|
|
||||||
agent none
|
|
||||||
|
|
||||||
// Setup common job properties
|
|
||||||
options {
|
|
||||||
ansiColor('xterm')
|
|
||||||
timestamps()
|
|
||||||
timeout(time: 120, unit: 'MINUTES')
|
|
||||||
buildDiscarder(logRotator(numToKeepStr: '10'))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build stages
|
|
||||||
stages {
|
|
||||||
stage('Jenkins: Get sources') {
|
|
||||||
agent {
|
|
||||||
label 'restricted'
|
|
||||||
}
|
|
||||||
steps {
|
|
||||||
script {
|
|
||||||
utils = load('tests/ci_build/jenkins_tools.Groovy')
|
|
||||||
utils.checkoutSrcs()
|
|
||||||
commit_id = "${GIT_COMMIT}"
|
|
||||||
branch_name = "${GIT_LOCAL_BRANCH}"
|
|
||||||
}
|
|
||||||
stash name: 'srcs', excludes: '.git/'
|
|
||||||
milestone label: 'Sources ready', ordinal: 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('Jenkins: Build doc') {
|
|
||||||
steps {
|
|
||||||
script {
|
|
||||||
retry(3) {
|
|
||||||
node('linux && cpu && restricted') {
|
|
||||||
unstash name: 'srcs'
|
|
||||||
echo 'Building doc...'
|
|
||||||
dir ('jvm-packages') {
|
|
||||||
sh "bash ./build_doc.sh ${commit_id}"
|
|
||||||
archiveArtifacts artifacts: "${commit_id}.tar.bz2", allowEmptyArchive: true
|
|
||||||
echo 'Deploying doc...'
|
|
||||||
withAWS(credentials:'xgboost-doc-bucket') {
|
|
||||||
s3Upload file: "${commit_id}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "${branch_name}.tar.bz2"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
stage('Jenkins: Build artifacts') {
|
|
||||||
steps {
|
|
||||||
script {
|
|
||||||
parallel (buildMatrix.findAll{it['enabled']}.collectEntries{ c ->
|
|
||||||
def buildName = utils.getBuildName(c)
|
|
||||||
utils.buildFactory(buildName, c, true, this.&buildPlatformCmake)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Build platform and test it via cmake.
|
|
||||||
*/
|
|
||||||
def buildPlatformCmake(buildName, conf, nodeReq, dockerTarget) {
|
|
||||||
def opts = utils.cmakeOptions(conf)
|
|
||||||
// Destination dir for artifacts
|
|
||||||
def distDir = "dist/${buildName}"
|
|
||||||
def dockerArgs = ""
|
|
||||||
if(conf["withGpu"]){
|
|
||||||
dockerArgs = "--build-arg CUDA_VERSION=" + conf["cudaVersion"]
|
|
||||||
}
|
|
||||||
// Build node - this is returned result
|
|
||||||
retry(3) {
|
|
||||||
node(nodeReq) {
|
|
||||||
unstash name: 'srcs'
|
|
||||||
echo """
|
|
||||||
|===== XGBoost CMake build =====
|
|
||||||
| dockerTarget: ${dockerTarget}
|
|
||||||
| cmakeOpts : ${opts}
|
|
||||||
|=========================
|
|
||||||
""".stripMargin('|')
|
|
||||||
// Invoke command inside docker
|
|
||||||
sh """
|
|
||||||
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/build_via_cmake.sh ${opts}
|
|
||||||
${dockerRun} ${dockerTarget} ${dockerArgs} bash -c "cd python-package; rm -f dist/*; python setup.py bdist_wheel --universal"
|
|
||||||
rm -rf "${distDir}"; mkdir -p "${distDir}/py"
|
|
||||||
cp xgboost "${distDir}"
|
|
||||||
cp -r lib "${distDir}"
|
|
||||||
cp -r python-package/dist "${distDir}/py"
|
|
||||||
"""
|
|
||||||
archiveArtifacts artifacts: "${distDir}/**/*.*", allowEmptyArchive: true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
141
Jenkinsfile-win64
Normal file
141
Jenkinsfile-win64
Normal file
@@ -0,0 +1,141 @@
|
|||||||
|
#!/usr/bin/groovy
|
||||||
|
// -*- mode: groovy -*-
|
||||||
|
|
||||||
|
/* Jenkins pipeline for Windows AMD64 target */
|
||||||
|
|
||||||
|
import groovy.transform.Field
|
||||||
|
|
||||||
|
@Field
|
||||||
|
def commit_id // necessary to pass a variable from one stage to another
|
||||||
|
|
||||||
|
pipeline {
|
||||||
|
agent none
|
||||||
|
// Build stages
|
||||||
|
stages {
|
||||||
|
stage('Jenkins Win64: Get sources') {
|
||||||
|
agent { label 'win64 && build' }
|
||||||
|
steps {
|
||||||
|
script {
|
||||||
|
checkoutSrcs()
|
||||||
|
commit_id = "${GIT_COMMIT}"
|
||||||
|
}
|
||||||
|
stash name: 'srcs'
|
||||||
|
milestone ordinal: 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage('Jenkins Win64: Build') {
|
||||||
|
agent none
|
||||||
|
steps {
|
||||||
|
script {
|
||||||
|
parallel ([
|
||||||
|
'build-win64-cuda9.0': { BuildWin64() }
|
||||||
|
])
|
||||||
|
}
|
||||||
|
milestone ordinal: 2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stage('Jenkins Win64: Test') {
|
||||||
|
agent none
|
||||||
|
steps {
|
||||||
|
script {
|
||||||
|
parallel ([
|
||||||
|
'test-win64-cpu': { TestWin64CPU() },
|
||||||
|
'test-win64-gpu-cuda9.0': { TestWin64GPU(cuda_target: 'cuda9') },
|
||||||
|
'test-win64-gpu-cuda10.0': { TestWin64GPU(cuda_target: 'cuda10_0') },
|
||||||
|
'test-win64-gpu-cuda10.1': { TestWin64GPU(cuda_target: 'cuda10_1') }
|
||||||
|
])
|
||||||
|
}
|
||||||
|
milestone ordinal: 3
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check out source code from git
|
||||||
|
def checkoutSrcs() {
|
||||||
|
retry(5) {
|
||||||
|
try {
|
||||||
|
timeout(time: 2, unit: 'MINUTES') {
|
||||||
|
checkout scm
|
||||||
|
sh 'git submodule update --init'
|
||||||
|
}
|
||||||
|
} catch (exc) {
|
||||||
|
deleteDir()
|
||||||
|
error "Failed to fetch source codes"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def BuildWin64() {
|
||||||
|
node('win64 && build') {
|
||||||
|
unstash name: 'srcs'
|
||||||
|
echo "Building XGBoost for Windows AMD64 target..."
|
||||||
|
bat "nvcc --version"
|
||||||
|
bat """
|
||||||
|
mkdir build
|
||||||
|
cd build
|
||||||
|
cmake .. -G"Visual Studio 15 2017 Win64" -DUSE_CUDA=ON -DCMAKE_VERBOSE_MAKEFILE=ON -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON
|
||||||
|
"""
|
||||||
|
bat """
|
||||||
|
cd build
|
||||||
|
"C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\MSBuild\\15.0\\Bin\\MSBuild.exe" xgboost.sln /m /p:Configuration=Release /nodeReuse:false
|
||||||
|
"""
|
||||||
|
bat """
|
||||||
|
cd python-package
|
||||||
|
conda activate && python setup.py bdist_wheel --universal && for /R %%i in (dist\\*.whl) DO python ../tests/ci_build/rename_whl.py "%%i" ${commit_id} win_amd64
|
||||||
|
"""
|
||||||
|
echo "Insert vcomp140.dll (OpenMP runtime) into the wheel..."
|
||||||
|
bat """
|
||||||
|
cd python-package\\dist
|
||||||
|
COPY /B ..\\..\\tests\\ci_build\\insert_vcomp140.py
|
||||||
|
conda activate && python insert_vcomp140.py *.whl
|
||||||
|
"""
|
||||||
|
echo 'Stashing Python wheel...'
|
||||||
|
stash name: 'xgboost_whl', includes: 'python-package/dist/*.whl'
|
||||||
|
path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
|
||||||
|
s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
|
||||||
|
echo 'Stashing C++ test executable (testxgboost)...'
|
||||||
|
stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost.exe'
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def TestWin64CPU() {
|
||||||
|
node('win64 && cpu') {
|
||||||
|
unstash name: 'srcs'
|
||||||
|
unstash name: 'xgboost_whl'
|
||||||
|
echo "Test Win64 CPU"
|
||||||
|
echo "Installing Python wheel..."
|
||||||
|
bat "conda activate && (python -m pip uninstall -y xgboost || cd .)"
|
||||||
|
bat """
|
||||||
|
conda activate && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i"
|
||||||
|
"""
|
||||||
|
echo "Running Python tests..."
|
||||||
|
bat "conda activate && python -m pytest -v -s --fulltrace tests\\python"
|
||||||
|
bat "conda activate && python -m pip uninstall -y xgboost"
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def TestWin64GPU(args) {
|
||||||
|
node("win64 && gpu && ${args.cuda_target}") {
|
||||||
|
unstash name: 'srcs'
|
||||||
|
unstash name: 'xgboost_whl'
|
||||||
|
unstash name: 'xgboost_cpp_tests'
|
||||||
|
echo "Test Win64 GPU (${args.cuda_target})"
|
||||||
|
bat "nvcc --version"
|
||||||
|
echo "Running C++ tests..."
|
||||||
|
bat "build\\testxgboost.exe"
|
||||||
|
echo "Installing Python wheel..."
|
||||||
|
bat "conda activate && (python -m pip uninstall -y xgboost || cd .)"
|
||||||
|
bat """
|
||||||
|
conda activate && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i"
|
||||||
|
"""
|
||||||
|
echo "Running Python tests..."
|
||||||
|
bat """
|
||||||
|
conda activate && python -m pytest -v -s --fulltrace -m "(not slow) and (not mgpu)" tests\\python-gpu
|
||||||
|
"""
|
||||||
|
bat "conda activate && python -m pip uninstall -y xgboost"
|
||||||
|
deleteDir()
|
||||||
|
}
|
||||||
|
}
|
||||||
2
LICENSE
2
LICENSE
@@ -186,7 +186,7 @@
|
|||||||
same "printed page" as the copyright notice for easier
|
same "printed page" as the copyright notice for easier
|
||||||
identification within third-party archives.
|
identification within third-party archives.
|
||||||
|
|
||||||
Copyright (c) 2018 by Contributors
|
Copyright (c) 2019 by Contributors
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
you may not use this file except in compliance with the License.
|
you may not use this file except in compliance with the License.
|
||||||
|
|||||||
43
Makefile
43
Makefile
@@ -42,11 +42,6 @@ ifeq ($(USE_OPENMP), 0)
|
|||||||
endif
|
endif
|
||||||
include $(DMLC_CORE)/make/dmlc.mk
|
include $(DMLC_CORE)/make/dmlc.mk
|
||||||
|
|
||||||
# include the plugins
|
|
||||||
ifdef XGB_PLUGINS
|
|
||||||
include $(XGB_PLUGINS)
|
|
||||||
endif
|
|
||||||
|
|
||||||
# set compiler defaults for OSX versus *nix
|
# set compiler defaults for OSX versus *nix
|
||||||
# let people override either
|
# let people override either
|
||||||
OS := $(shell uname)
|
OS := $(shell uname)
|
||||||
@@ -67,8 +62,8 @@ export CXX = g++
|
|||||||
endif
|
endif
|
||||||
endif
|
endif
|
||||||
|
|
||||||
export LDFLAGS= -pthread -lm $(ADD_LDFLAGS) $(DMLC_LDFLAGS) $(PLUGIN_LDFLAGS)
|
export LDFLAGS= -pthread -lm $(ADD_LDFLAGS) $(DMLC_LDFLAGS)
|
||||||
export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++11 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS) $(PLUGIN_CFLAGS)
|
export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++11 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS)
|
||||||
CFLAGS += -I$(DMLC_CORE)/include -I$(RABIT)/include -I$(GTEST_PATH)/include
|
CFLAGS += -I$(DMLC_CORE)/include -I$(RABIT)/include -I$(GTEST_PATH)/include
|
||||||
#java include path
|
#java include path
|
||||||
export JAVAINCFLAGS = -I${JAVA_HOME}/include -I./java
|
export JAVAINCFLAGS = -I${JAVA_HOME}/include -I./java
|
||||||
@@ -130,7 +125,7 @@ $(RABIT)/lib/$(LIB_RABIT): $(wildcard $(RABIT)/src/*.cc)
|
|||||||
jvm: jvm-packages/lib/libxgboost4j.so
|
jvm: jvm-packages/lib/libxgboost4j.so
|
||||||
|
|
||||||
SRC = $(wildcard src/*.cc src/*/*.cc)
|
SRC = $(wildcard src/*.cc src/*/*.cc)
|
||||||
ALL_OBJ = $(patsubst src/%.cc, build/%.o, $(SRC)) $(PLUGIN_OBJS)
|
ALL_OBJ = $(patsubst src/%.cc, build/%.o, $(SRC))
|
||||||
AMALGA_OBJ = amalgamation/xgboost-all0.o
|
AMALGA_OBJ = amalgamation/xgboost-all0.o
|
||||||
LIB_DEP = $(DMLC_CORE)/libdmlc.a $(RABIT)/lib/$(LIB_RABIT)
|
LIB_DEP = $(DMLC_CORE)/libdmlc.a $(RABIT)/lib/$(LIB_RABIT)
|
||||||
ALL_DEP = $(filter-out build/cli_main.o, $(ALL_OBJ)) $(LIB_DEP)
|
ALL_DEP = $(filter-out build/cli_main.o, $(ALL_OBJ)) $(LIB_DEP)
|
||||||
@@ -142,11 +137,6 @@ build/%.o: src/%.cc
|
|||||||
$(CXX) $(CFLAGS) -MM -MT build/$*.o $< >build/$*.d
|
$(CXX) $(CFLAGS) -MM -MT build/$*.o $< >build/$*.d
|
||||||
$(CXX) -c $(CFLAGS) $< -o $@
|
$(CXX) -c $(CFLAGS) $< -o $@
|
||||||
|
|
||||||
build_plugin/%.o: plugin/%.cc
|
|
||||||
@mkdir -p $(@D)
|
|
||||||
$(CXX) $(CFLAGS) -MM -MT build_plugin/$*.o $< >build_plugin/$*.d
|
|
||||||
$(CXX) -c $(CFLAGS) $< -o $@
|
|
||||||
|
|
||||||
# The should be equivalent to $(ALL_OBJ) except for build/cli_main.o
|
# The should be equivalent to $(ALL_OBJ) except for build/cli_main.o
|
||||||
amalgamation/xgboost-all0.o: amalgamation/xgboost-all0.cc
|
amalgamation/xgboost-all0.o: amalgamation/xgboost-all0.cc
|
||||||
$(CXX) -c $(CFLAGS) $< -o $@
|
$(CXX) -c $(CFLAGS) $< -o $@
|
||||||
@@ -173,10 +163,14 @@ xgboost: $(CLI_OBJ) $(ALL_DEP)
|
|||||||
$(CXX) $(CFLAGS) -o $@ $(filter %.o %.a, $^) $(LDFLAGS)
|
$(CXX) $(CFLAGS) -o $@ $(filter %.o %.a, $^) $(LDFLAGS)
|
||||||
|
|
||||||
rcpplint:
|
rcpplint:
|
||||||
python2 dmlc-core/scripts/lint.py xgboost ${LINT_LANG} R-package/src
|
python3 dmlc-core/scripts/lint.py xgboost ${LINT_LANG} R-package/src
|
||||||
|
|
||||||
lint: rcpplint
|
lint: rcpplint
|
||||||
python2 dmlc-core/scripts/lint.py xgboost ${LINT_LANG} include src plugin python-package
|
python3 dmlc-core/scripts/lint.py --exclude_path python-package/xgboost/dmlc-core \
|
||||||
|
python-package/xgboost/include python-package/xgboost/lib \
|
||||||
|
python-package/xgboost/make python-package/xgboost/rabit \
|
||||||
|
python-package/xgboost/src --pylint-rc ${PWD}/python-package/.pylintrc xgboost \
|
||||||
|
${LINT_LANG} include src python-package
|
||||||
|
|
||||||
pylint:
|
pylint:
|
||||||
flake8 --ignore E501 python-package
|
flake8 --ignore E501 python-package
|
||||||
@@ -196,7 +190,7 @@ cover: check
|
|||||||
endif
|
endif
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
$(RM) -rf build build_plugin lib bin *~ */*~ */*/*~ */*/*/*~ */*.o */*/*.o */*/*/*.o #xgboost
|
$(RM) -rf build lib bin *~ */*~ */*/*~ */*/*/*~ */*.o */*/*.o */*/*/*.o #xgboost
|
||||||
$(RM) -rf build_tests *.gcov tests/cpp/xgboost_test
|
$(RM) -rf build_tests *.gcov tests/cpp/xgboost_test
|
||||||
if [ -d "R-package/src" ]; then \
|
if [ -d "R-package/src" ]; then \
|
||||||
cd R-package/src; \
|
cd R-package/src; \
|
||||||
@@ -227,7 +221,9 @@ pippack: clean_all
|
|||||||
rm -rf python-package/xgboost/rabit
|
rm -rf python-package/xgboost/rabit
|
||||||
rm -rf python-package/xgboost/src
|
rm -rf python-package/xgboost/src
|
||||||
cp -r python-package xgboost-python
|
cp -r python-package xgboost-python
|
||||||
cp -r Makefile xgboost-python/xgboost/
|
cp -r CMakeLists.txt xgboost-python/xgboost/
|
||||||
|
cp -r cmake xgboost-python/xgboost/
|
||||||
|
cp -r plugin xgboost-python/xgboost/
|
||||||
cp -r make xgboost-python/xgboost/
|
cp -r make xgboost-python/xgboost/
|
||||||
cp -r src xgboost-python/xgboost/
|
cp -r src xgboost-python/xgboost/
|
||||||
cp -r tests xgboost-python/xgboost/
|
cp -r tests xgboost-python/xgboost/
|
||||||
@@ -258,9 +254,17 @@ Rpack: clean_all
|
|||||||
cp -r dmlc-core/include xgboost/src/dmlc-core/include
|
cp -r dmlc-core/include xgboost/src/dmlc-core/include
|
||||||
cp -r dmlc-core/src xgboost/src/dmlc-core/src
|
cp -r dmlc-core/src xgboost/src/dmlc-core/src
|
||||||
cp ./LICENSE xgboost
|
cp ./LICENSE xgboost
|
||||||
cat R-package/src/Makevars.in|sed '2s/.*/PKGROOT=./' | sed '3s/.*/ENABLE_STD_THREAD=0/' > xgboost/src/Makevars.in
|
# Modify PKGROOT in Makevars.in
|
||||||
|
cat R-package/src/Makevars.in|sed '2s/.*/PKGROOT=./' > xgboost/src/Makevars.in
|
||||||
|
# Configure Makevars.win (Windows-specific Makevars, likely using MinGW)
|
||||||
cp xgboost/src/Makevars.in xgboost/src/Makevars.win
|
cp xgboost/src/Makevars.in xgboost/src/Makevars.win
|
||||||
sed -i -e 's/@OPENMP_CXXFLAGS@/$$\(SHLIB_OPENMP_CFLAGS\)/g' xgboost/src/Makevars.win
|
cat xgboost/src/Makevars.in| sed '3s/.*/ENABLE_STD_THREAD=0/' > xgboost/src/Makevars.win
|
||||||
|
sed -i -e 's/@OPENMP_CXXFLAGS@/$$\(SHLIB_OPENMP_CXXFLAGS\)/g' xgboost/src/Makevars.win
|
||||||
|
sed -i -e 's/-pthread/$$\(SHLIB_PTHREAD_FLAGS\)/g' xgboost/src/Makevars.win
|
||||||
|
sed -i -e 's/@ENDIAN_FLAG@/-DDMLC_CMAKE_LITTLE_ENDIAN=1/g' xgboost/src/Makevars.win
|
||||||
|
sed -i -e 's/@BACKTRACE_LIB@//g' xgboost/src/Makevars.win
|
||||||
|
sed -i -e 's/@OPENMP_LIB@//g' xgboost/src/Makevars.win
|
||||||
|
rm -f xgboost/src/Makevars.win-e # OSX sed create this extra file; remove it
|
||||||
bash R-package/remove_warning_suppression_pragma.sh
|
bash R-package/remove_warning_suppression_pragma.sh
|
||||||
rm xgboost/remove_warning_suppression_pragma.sh
|
rm xgboost/remove_warning_suppression_pragma.sh
|
||||||
|
|
||||||
@@ -273,4 +277,3 @@ Rcheck: Rbuild
|
|||||||
|
|
||||||
-include build/*.d
|
-include build/*.d
|
||||||
-include build/*/*.d
|
-include build/*/*.d
|
||||||
-include build_plugin/*/*.d
|
|
||||||
|
|||||||
308
NEWS.md
308
NEWS.md
@@ -3,6 +3,301 @@ XGBoost Change Log
|
|||||||
|
|
||||||
This file records the changes in xgboost library in reverse chronological order.
|
This file records the changes in xgboost library in reverse chronological order.
|
||||||
|
|
||||||
|
## v0.90 (2019.05.18)
|
||||||
|
|
||||||
|
### XGBoost Python package drops Python 2.x (#4379, #4381)
|
||||||
|
Python 2.x is reaching its end-of-life at the end of this year. [Many scientific Python packages are now moving to drop Python 2.x](https://python3statement.org/).
|
||||||
|
|
||||||
|
### XGBoost4J-Spark now requires Spark 2.4.x (#4377)
|
||||||
|
* Spark 2.3 is reaching its end-of-life soon. See discussion at #4389.
|
||||||
|
* **Consistent handling of missing values** (#4309, #4349, #4411): Many users had reported issue with inconsistent predictions between XGBoost4J-Spark and the Python XGBoost package. The issue was caused by Spark mis-handling non-zero missing values (NaN, -1, 999 etc). We now alert the user whenever Spark doesn't handle missing values correctly (#4309, #4349). See [the tutorial for dealing with missing values in XGBoost4J-Spark](https://xgboost.readthedocs.io/en/release_0.90/jvm/xgboost4j_spark_tutorial.html#dealing-with-missing-values). This fix also depends on the availability of Spark 2.4.x.
|
||||||
|
|
||||||
|
### Roadmap: better performance scaling for multi-core CPUs (#4310)
|
||||||
|
* Poor performance scaling of the `hist` algorithm for multi-core CPUs has been under investigation (#3810). #4310 optimizes quantile sketches and other pre-processing tasks. Special thanks to @SmirnovEgorRu.
|
||||||
|
|
||||||
|
### Roadmap: Harden distributed training (#4250)
|
||||||
|
* Make distributed training in XGBoost more robust by hardening [Rabit](https://github.com/dmlc/rabit), which implements [the AllReduce primitive](https://en.wikipedia.org/wiki/Reduce_%28parallel_pattern%29). In particular, improve test coverage on mechanisms for fault tolerance and recovery. Special thanks to @chenqin.
|
||||||
|
|
||||||
|
### New feature: Multi-class metric functions for GPUs (#4368)
|
||||||
|
* Metrics for multi-class classification have been ported to GPU: `merror`, `mlogloss`. Special thanks to @trivialfis.
|
||||||
|
* With supported metrics, XGBoost will select the correct devices based on your system and `n_gpus` parameter.
|
||||||
|
|
||||||
|
### New feature: Scikit-learn-like random forest API (#4148, #4255, #4258)
|
||||||
|
* XGBoost Python package now offers `XGBRFClassifier` and `XGBRFRegressor` API to train random forests. See [the tutorial](https://xgboost.readthedocs.io/en/release_0.90/tutorials/rf.html). Special thanks to @canonizer
|
||||||
|
|
||||||
|
### New feature: use external memory in GPU predictor (#4284, #4396, #4438, #4457)
|
||||||
|
* It is now possible to make predictions on GPU when the input is read from external memory. This is useful when you want to make predictions with big dataset that does not fit into the GPU memory. Special thanks to @rongou, @canonizer, @sriramch.
|
||||||
|
|
||||||
|
```python
|
||||||
|
dtest = xgboost.DMatrix('test_data.libsvm#dtest.cache')
|
||||||
|
bst.set_param('predictor', 'gpu_predictor')
|
||||||
|
bst.predict(dtest)
|
||||||
|
```
|
||||||
|
|
||||||
|
* Coming soon: GPU training (`gpu_hist`) with external memory
|
||||||
|
|
||||||
|
### New feature: XGBoost can now handle comments in LIBSVM files (#4430)
|
||||||
|
* Special thanks to @trivialfis and @hcho3
|
||||||
|
|
||||||
|
### New feature: Embed XGBoost in your C/C++ applications using CMake (#4323, #4333, #4453)
|
||||||
|
* It is now easier than ever to embed XGBoost in your C/C++ applications. In your CMakeLists.txt, add `xgboost::xgboost` as a linked library:
|
||||||
|
|
||||||
|
```cmake
|
||||||
|
find_package(xgboost REQUIRED)
|
||||||
|
add_executable(api-demo c-api-demo.c)
|
||||||
|
target_link_libraries(api-demo xgboost::xgboost)
|
||||||
|
```
|
||||||
|
|
||||||
|
[XGBoost C API documentation is available.](https://xgboost.readthedocs.io/en/release_0.90/dev) Special thanks to @trivialfis
|
||||||
|
|
||||||
|
### Performance improvements
|
||||||
|
* Use feature interaction constraints to narrow split search space (#4341, #4428)
|
||||||
|
* Additional optimizations for `gpu_hist` (#4248, #4283)
|
||||||
|
* Reduce OpenMP thread launches in `gpu_hist` (#4343)
|
||||||
|
* Additional optimizations for multi-node multi-GPU random forests. (#4238)
|
||||||
|
* Allocate unique prediction buffer for each input matrix, to avoid re-sizing GPU array (#4275)
|
||||||
|
* Remove various synchronisations from CUDA API calls (#4205)
|
||||||
|
* XGBoost4J-Spark
|
||||||
|
- Allow the user to control whether to cache partitioned training data, to potentially reduce execution time (#4268)
|
||||||
|
|
||||||
|
### Bug-fixes
|
||||||
|
* Fix node reuse in `hist` (#4404)
|
||||||
|
* Fix GPU histogram allocation (#4347)
|
||||||
|
* Fix matrix attributes not sliced (#4311)
|
||||||
|
* Revise AUC and AUCPR metrics now work with weighted ranking task (#4216, #4436)
|
||||||
|
* Fix timer invocation for InitDataOnce() in `gpu_hist` (#4206)
|
||||||
|
* Fix R-devel errors (#4251)
|
||||||
|
* Make gradient update in GPU linear updater thread-safe (#4259)
|
||||||
|
* Prevent out-of-range access in column matrix (#4231)
|
||||||
|
* Don't store DMatrix handle in Python object until it's initialized, to improve exception safety (#4317)
|
||||||
|
* XGBoost4J-Spark
|
||||||
|
- Fix non-deterministic order within a zipped partition on prediction (#4388)
|
||||||
|
- Remove race condition on tracker shutdown (#4224)
|
||||||
|
- Allow set the parameter `maxLeaves`. (#4226)
|
||||||
|
- Allow partial evaluation of dataframe before prediction (#4407)
|
||||||
|
- Automatically set `maximize_evaluation_metrics` if not explicitly given (#4446)
|
||||||
|
|
||||||
|
### API changes
|
||||||
|
* Deprecate `reg:linear` in favor of `reg:squarederror`. (#4267, #4427)
|
||||||
|
* Add attribute getter and setter to the Booster object in XGBoost4J (#4336)
|
||||||
|
|
||||||
|
### Maintenance: Refactor C++ code for legibility and maintainability
|
||||||
|
* Fix clang-tidy warnings. (#4149)
|
||||||
|
* Remove deprecated C APIs. (#4266)
|
||||||
|
* Use Monitor class to time functions in `hist`. (#4273)
|
||||||
|
* Retire DVec class in favour of c++20 style span for device memory. (#4293)
|
||||||
|
* Improve HostDeviceVector exception safety (#4301)
|
||||||
|
|
||||||
|
### Maintenance: testing, continuous integration, build system
|
||||||
|
* **Major refactor of CMakeLists.txt** (#4323, #4333, #4453): adopt modern CMake and export XGBoost as a target
|
||||||
|
* **Major improvement in Jenkins CI pipeline** (#4234)
|
||||||
|
- Migrate all Linux tests to Jenkins (#4401)
|
||||||
|
- Builds and tests are now de-coupled, to test an artifact against multiple versions of CUDA, JDK, and other dependencies (#4401)
|
||||||
|
- Add Windows GPU to Jenkins CI pipeline (#4463, #4469)
|
||||||
|
* Support CUDA 10.1 (#4223, #4232, #4265, #4468)
|
||||||
|
* Python wheels are now built with CUDA 9.0, so that JIT is not required on Volta architecture (#4459)
|
||||||
|
* Integrate with NVTX CUDA profiler (#4205)
|
||||||
|
* Add a test for cpu predictor using external memory (#4308)
|
||||||
|
* Refactor tests to get rid of duplication (#4358)
|
||||||
|
* Remove test dependency on `craigcitro/r-travis`, since it's deprecated (#4353)
|
||||||
|
* Add files from local R build to `.gitignore` (#4346)
|
||||||
|
* Make XGBoost4J compatible with Java 9+ by revising NativeLibLoader (#4351)
|
||||||
|
* Jenkins build for CUDA 10.0 (#4281)
|
||||||
|
* Remove remaining `silent` and `debug_verbose` in Python tests (#4299)
|
||||||
|
* Use all cores to build XGBoost4J lib on linux (#4304)
|
||||||
|
* Upgrade Jenkins Linux build environment to GCC 5.3.1, CMake 3.6.0 (#4306)
|
||||||
|
* Make CMakeLists.txt compatible with CMake 3.3 (#4420)
|
||||||
|
* Add OpenMP option in CMakeLists.txt (#4339)
|
||||||
|
* Get rid of a few trivial compiler warnings (#4312)
|
||||||
|
* Add external Docker build cache, to speed up builds on Jenkins CI (#4331, #4334, #4458)
|
||||||
|
* Fix Windows tests (#4403)
|
||||||
|
* Fix a broken python test (#4395)
|
||||||
|
* Use a fixed seed to split data in XGBoost4J-Spark tests, for reproducibility (#4417)
|
||||||
|
* Add additional Python tests to test training under constraints (#4426)
|
||||||
|
* Enable building with shared NCCL. (#4447)
|
||||||
|
|
||||||
|
### Usability Improvements, Documentation
|
||||||
|
* Document limitation of one-split-at-a-time Greedy tree learning heuristic (#4233)
|
||||||
|
* Update build doc: PyPI wheel now support multi-GPU (#4219)
|
||||||
|
* Fix docs for `num_parallel_tree` (#4221)
|
||||||
|
* Fix document about `colsample_by*` parameter (#4340)
|
||||||
|
* Make the train and test input with same colnames. (#4329)
|
||||||
|
* Update R contribute link. (#4236)
|
||||||
|
* Fix travis R tests (#4277)
|
||||||
|
* Log version number in crash log in XGBoost4J-Spark (#4271, #4303)
|
||||||
|
* Allow supression of Rabit output in Booster::train in XGBoost4J (#4262)
|
||||||
|
* Add tutorial on handling missing values in XGBoost4J-Spark (#4425)
|
||||||
|
* Fix typos (#4345, #4393, #4432, #4435)
|
||||||
|
* Added language classifier in setup.py (#4327)
|
||||||
|
* Added Travis CI badge (#4344)
|
||||||
|
* Add BentoML to use case section (#4400)
|
||||||
|
* Remove subtly sexist remark (#4418)
|
||||||
|
* Add R vignette about parsing JSON dumps (#4439)
|
||||||
|
|
||||||
|
### Acknowledgement
|
||||||
|
**Contributors**: Nan Zhu (@CodingCat), Adam Pocock (@Craigacp), Daniel Hen (@Daniel8hen), Jiaxiang Li (@JiaxiangBU), Rory Mitchell (@RAMitchell), Egor Smirnov (@SmirnovEgorRu), Andy Adinets (@canonizer), Jonas (@elcombato), Harry Braviner (@harrybraviner), Philip Hyunsu Cho (@hcho3), Tong He (@hetong007), James Lamb (@jameslamb), Jean-Francois Zinque (@jeffzi), Yang Yang (@jokerkeny), Mayank Suman (@mayanksuman), jess (@monkeywithacupcake), Hajime Morrita (@omo), Ravi Kalia (@project-delphi), @ras44, Rong Ou (@rongou), Shaochen Shi (@shishaochen), Xu Xiao (@sperlingxx), @sriramch, Jiaming Yuan (@trivialfis), Christopher Suchanek (@wsuchy), Bozhao (@yubozhao)
|
||||||
|
|
||||||
|
**Reviewers**: Nan Zhu (@CodingCat), Adam Pocock (@Craigacp), Daniel Hen (@Daniel8hen), Jiaxiang Li (@JiaxiangBU), Laurae (@Laurae2), Rory Mitchell (@RAMitchell), Egor Smirnov (@SmirnovEgorRu), @alois-bissuel, Andy Adinets (@canonizer), Chen Qin (@chenqin), Harry Braviner (@harrybraviner), Philip Hyunsu Cho (@hcho3), Tong He (@hetong007), @jakirkham, James Lamb (@jameslamb), Julien Schueller (@jschueller), Mayank Suman (@mayanksuman), Hajime Morrita (@omo), Rong Ou (@rongou), Sara Robinson (@sararob), Shaochen Shi (@shishaochen), Xu Xiao (@sperlingxx), @sriramch, Sean Owen (@srowen), Sergei Lebedev (@superbobry), Yuan (Terry) Tang (@terrytangyuan), Theodore Vasiloudis (@thvasilo), Matthew Tovbin (@tovbinm), Jiaming Yuan (@trivialfis), Xin Yin (@xydrolase)
|
||||||
|
|
||||||
|
## v0.82 (2019.03.03)
|
||||||
|
This release is packed with many new features and bug fixes.
|
||||||
|
|
||||||
|
### Roadmap: better performance scaling for multi-core CPUs (#3957)
|
||||||
|
* Poor performance scaling of the `hist` algorithm for multi-core CPUs has been under investigation (#3810). #3957 marks an important step toward better performance scaling, by using software pre-fetching and replacing STL vectors with C-style arrays. Special thanks to @Laurae2 and @SmirnovEgorRu.
|
||||||
|
* See #3810 for latest progress on this roadmap.
|
||||||
|
|
||||||
|
### New feature: Distributed Fast Histogram Algorithm (`hist`) (#4011, #4102, #4140, #4128)
|
||||||
|
* It is now possible to run the `hist` algorithm in distributed setting. Special thanks to @CodingCat. The benefits include:
|
||||||
|
1. Faster local computation via feature binning
|
||||||
|
2. Support for monotonic constraints and feature interaction constraints
|
||||||
|
3. Simpler codebase than `approx`, allowing for future improvement
|
||||||
|
* Depth-wise tree growing is now performed in a separate code path, so that cross-node syncronization is performed only once per level.
|
||||||
|
|
||||||
|
### New feature: Multi-Node, Multi-GPU training (#4095)
|
||||||
|
* Distributed training is now able to utilize clusters equipped with NVIDIA GPUs. In particular, the rabit AllReduce layer will communicate GPU device information. Special thanks to @mt-jones, @RAMitchell, @rongou, @trivialfis, @canonizer, and @jeffdk.
|
||||||
|
* Resource management systems will be able to assign a rank for each GPU in the cluster.
|
||||||
|
* In Dask, users will be able to construct a collection of XGBoost processes over an inhomogeneous device cluster (i.e. workers with different number and/or kinds of GPUs).
|
||||||
|
|
||||||
|
### New feature: Multiple validation datasets in XGBoost4J-Spark (#3904, #3910)
|
||||||
|
* You can now track the performance of the model during training with multiple evaluation datasets. By specifying `eval_sets` or call `setEvalSets` over a `XGBoostClassifier` or `XGBoostRegressor`, you can pass in multiple evaluation datasets typed as a `Map` from `String` to `DataFrame`. Special thanks to @CodingCat.
|
||||||
|
* See the usage of multiple validation datasets [here](https://github.com/dmlc/xgboost/blob/0c1d5f1120c0a159f2567b267f0ec4ffadee00d0/jvm-packages/xgboost4j-example/src/main/scala/ml/dmlc/xgboost4j/scala/example/spark/SparkTraining.scala#L66-L78)
|
||||||
|
|
||||||
|
### New feature: Additional metric functions for GPUs (#3952)
|
||||||
|
* Element-wise metrics have been ported to GPU: `rmse`, `mae`, `logloss`, `poisson-nloglik`, `gamma-deviance`, `gamma-nloglik`, `error`, `tweedie-nloglik`. Special thanks to @trivialfis and @RAMitchell.
|
||||||
|
* With supported metrics, XGBoost will select the correct devices based on your system and `n_gpus` parameter.
|
||||||
|
|
||||||
|
### New feature: Column sampling at individual nodes (splits) (#3971)
|
||||||
|
* Columns (features) can now be sampled at individual tree nodes, in addition to per-tree and per-level sampling. To enable per-node sampling, set `colsample_bynode` parameter, which represents the fraction of columns sampled at each node. This parameter is set to 1.0 by default (i.e. no sampling per node). Special thanks to @canonizer.
|
||||||
|
* The `colsample_bynode` parameter works cumulatively with other `colsample_by*` parameters: for example, `{'colsample_bynode':0.5, 'colsample_bytree':0.5}` with 100 columns will give 25 features to choose from at each split.
|
||||||
|
|
||||||
|
### Major API change: consistent logging level via `verbosity` (#3982, #4002, #4138)
|
||||||
|
* XGBoost now allows fine-grained control over logging. You can set `verbosity` to 0 (silent), 1 (warning), 2 (info), and 3 (debug). This is useful for controlling the amount of logging outputs. Special thanks to @trivialfis.
|
||||||
|
* Parameters `silent` and `debug_verbose` are now deprecated.
|
||||||
|
* Note: Sometimes XGBoost tries to change configurations based on heuristics, which is displayed as warning message. If there's unexpected behaviour, please try to increase value of verbosity.
|
||||||
|
|
||||||
|
### Major bug fix: external memory (#4040, #4193)
|
||||||
|
* Clarify object ownership in multi-threaded prefetcher, to avoid memory error.
|
||||||
|
* Correctly merge two column batches (which uses [CSC layout](https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_column_(CSC_or_CCS))).
|
||||||
|
* Add unit tests for external memory.
|
||||||
|
* Special thanks to @trivialfis and @hcho3.
|
||||||
|
|
||||||
|
### Major bug fix: early stopping fixed in XGBoost4J and XGBoost4J-Spark (#3928, #4176)
|
||||||
|
* Early stopping in XGBoost4J and XGBoost4J-Spark is now consistent with its counterpart in the Python package. Training stops if the current iteration is `earlyStoppingSteps` away from the best iteration. If there are multiple evaluation sets, only the last one is used to determinate early stop.
|
||||||
|
* See the updated documentation [here](https://xgboost.readthedocs.io/en/release_0.82/jvm/xgboost4j_spark_tutorial.html#early-stopping)
|
||||||
|
* Special thanks to @CodingCat, @yanboliang, and @mingyang.
|
||||||
|
|
||||||
|
### Major bug fix: infrequent features should not crash distributed training (#4045)
|
||||||
|
* For infrequently occuring features, some partitions may not get any instance. This scenario used to crash distributed training due to mal-formed ranges. The problem has now been fixed.
|
||||||
|
* In practice, one-hot-encoded categorical variables tend to produce rare features, particularly when the cardinality is high.
|
||||||
|
* Special thanks to @CodingCat.
|
||||||
|
|
||||||
|
### Performance improvements
|
||||||
|
* Faster, more space-efficient radix sorting in `gpu_hist` (#3895)
|
||||||
|
* Subtraction trick in histogram calculation in `gpu_hist` (#3945)
|
||||||
|
* More performant re-partition in XGBoost4J-Spark (#4049)
|
||||||
|
|
||||||
|
### Bug-fixes
|
||||||
|
* Fix semantics of `gpu_id` when running multiple XGBoost processes on a multi-GPU machine (#3851)
|
||||||
|
* Fix page storage path for external memory on Windows (#3869)
|
||||||
|
* Fix configuration setup so that DART utilizes GPU (#4024)
|
||||||
|
* Eliminate NAN values from SHAP prediction (#3943)
|
||||||
|
* Prevent empty quantile sketches in `hist` (#4155)
|
||||||
|
* Enable running objectives with 0 GPU (#3878)
|
||||||
|
* Parameters are no longer dependent on system locale (#3891, #3907)
|
||||||
|
* Use consistent data type in the GPU coordinate descent code (#3917)
|
||||||
|
* Remove undefined behavior in the CLI config parser on the ARM platform (#3976)
|
||||||
|
* Initialize counters in GPU AllReduce (#3987)
|
||||||
|
* Prevent deadlocks in GPU AllReduce (#4113)
|
||||||
|
* Load correct values from sliced NumPy arrays (#4147, #4165)
|
||||||
|
* Fix incorrect GPU device selection (#4161)
|
||||||
|
* Make feature binning logic in `hist` aware of query groups when running a ranking task (#4115). For ranking task, query groups are weighted, not individual instances.
|
||||||
|
* Generate correct C++ exception type for `LOG(FATAL)` macro (#4159)
|
||||||
|
* Python package
|
||||||
|
- Python package should run on system without `PATH` environment variable (#3845)
|
||||||
|
- Fix `coef_` and `intercept_` signature to be compatible with `sklearn.RFECV` (#3873)
|
||||||
|
- Use UTF-8 encoding in Python package README, to support non-English locale (#3867)
|
||||||
|
- Add AUC-PR to list of metrics to maximize for early stopping (#3936)
|
||||||
|
- Allow loading pickles without `self.booster` attribute, for backward compatibility (#3938, #3944)
|
||||||
|
- White-list DART for feature importances (#4073)
|
||||||
|
- Update usage of [h2oai/datatable](https://github.com/h2oai/datatable) (#4123)
|
||||||
|
* XGBoost4J-Spark
|
||||||
|
- Address scalability issue in prediction (#4033)
|
||||||
|
- Enforce the use of per-group weights for ranking task (#4118)
|
||||||
|
- Fix vector size of `rawPredictionCol` in `XGBoostClassificationModel` (#3932)
|
||||||
|
- More robust error handling in Spark tracker (#4046, #4108)
|
||||||
|
- Fix return type of `setEvalSets` (#4105)
|
||||||
|
- Return correct value of `getMaxLeaves` (#4114)
|
||||||
|
|
||||||
|
### API changes
|
||||||
|
* Add experimental parameter `single_precision_histogram` to use single-precision histograms for the `gpu_hist` algorithm (#3965)
|
||||||
|
* Python package
|
||||||
|
  - Add option to select type of feature importances in the scikit-learn interface (#3876)
|
||||||
|
- Add `trees_to_df()` method to dump decision trees as Pandas data frame (#4153)
|
||||||
|
- Add options to control node shapes in the GraphViz plotting function (#3859)
|
||||||
|
- Add `xgb_model` option to `XGBClassifier`, to load previously saved model (#4092)
|
||||||
|
- Passing lists into `DMatrix` is now deprecated (#3970)
|
||||||
|
* XGBoost4J
|
||||||
|
- Support multiple feature importance features (#3801)
|
||||||
|
|
||||||
|
### Maintenance: Refactor C++ code for legibility and maintainability
|
||||||
|
* Refactor `hist` algorithm code and add unit tests (#3836)
|
||||||
|
* Minor refactoring of split evaluator in `gpu_hist` (#3889)
|
||||||
|
* Removed unused leaf vector field in the tree model (#3989)
|
||||||
|
* Simplify the tree representation by combining `TreeModel` and `RegTree` classes (#3995)
|
||||||
|
* Simplify and harden tree expansion code (#4008, #4015)
|
||||||
|
* De-duplicate parameter classes in the linear model algorithms (#4013)
|
||||||
|
* Robust handling of ranges with C++20 span in `gpu_exact` and `gpu_coord_descent` (#4020, #4029)
|
||||||
|
* Simplify tree training code (#3825). Also use Span class for robust handling of ranges.
|
||||||
|
|
||||||
|
### Maintenance: testing, continuous integration, build system
|
||||||
|
* Disallow `std::regex` since it's not supported by GCC 4.8.x (#3870)
|
||||||
|
* Add multi-GPU tests for coordinate descent algorithm for linear models (#3893, #3974)
|
||||||
|
* Enforce naming style in Python lint (#3896)
|
||||||
|
* Refactor Python tests (#3897, #3901): Use pytest exclusively, display full trace upon failure
|
||||||
|
* Address `DeprecationWarning` when using Python collections (#3909)
|
||||||
|
* Use correct group for maven site plugin (#3937)
|
||||||
|
* Jenkins CI is now using on-demand EC2 instances exclusively, due to unreliability of Spot instances (#3948)
|
||||||
|
* Better GPU performance logging (#3945)
|
||||||
|
* Fix GPU tests on machines with only 1 GPU (#4053)
|
||||||
|
* Eliminate CRAN check warnings and notes (#3988)
|
||||||
|
* Add unit tests for tree serialization (#3989)
|
||||||
|
* Add unit tests for tree fitting functions in `hist` (#4155)
|
||||||
|
* Add a unit test for `gpu_exact` algorithm (#4020)
|
||||||
|
* Correct JVM CMake GPU flag (#4071)
|
||||||
|
* Fix failing Travis CI on Mac (#4086)
|
||||||
|
* Speed up Jenkins by not compiling CMake (#4099)
|
||||||
|
* Analyze C++ and CUDA code using clang-tidy, as part of Jenkins CI pipeline (#4034)
|
||||||
|
* Fix broken R test: Install Homebrew GCC (#4142)
|
||||||
|
* Check for empty datasets in GPU unit tests (#4151)
|
||||||
|
* Fix Windows compilation (#4139)
|
||||||
|
* Comply with latest convention of cpplint (#4157)
|
||||||
|
* Fix a unit test in `gpu_hist` (#4158)
|
||||||
|
* Speed up data generation in Python tests (#4164)
|
||||||
|
|
||||||
|
### Usability Improvements
|
||||||
|
* Add link to [InfoWorld 2019 Technology of the Year Award](https://www.infoworld.com/article/3336072/application-development/infoworlds-2019-technology-of-the-year-award-winners.html) (#4116)
|
||||||
|
* Remove outdated AWS YARN tutorial (#3885)
|
||||||
|
* Document current limitation in number of features (#3886)
|
||||||
|
* Remove unnecessary warning when `gblinear` is selected (#3888)
|
||||||
|
* Document limitation of CSV parser: header not supported (#3934)
|
||||||
|
* Log training parameters in XGBoost4J-Spark (#4091)
|
||||||
|
* Clarify early stopping behavior in the scikit-learn interface (#3967)
|
||||||
|
* Clarify behavior of `max_depth` parameter (#4078)
|
||||||
|
* Revise Python docstrings for ranking task (#4121). In particular, weights must be per-group in learning-to-rank setting.
|
||||||
|
* Document parameter `num_parallel_tree` (#4022)
|
||||||
|
* Add Jenkins status badge (#4090)
|
||||||
|
* Warn users against using internal functions of `Booster` object (#4066)
|
||||||
|
* Reformat `benchmark_tree.py` to comply with Python style convention (#4126)
|
||||||
|
* Clarify a comment in `objectiveTrait` (#4174)
|
||||||
|
* Fix typos and broken links in documentation (#3890, #3872, #3902, #3919, #3975, #4027, #4156, #4167)
|
||||||
|
|
||||||
|
### Acknowledgement
|
||||||
|
**Contributors** (in no particular order): Jiaming Yuan (@trivialfis), Hyunsu Cho (@hcho3), Nan Zhu (@CodingCat), Rory Mitchell (@RAMitchell), Yanbo Liang (@yanboliang), Andy Adinets (@canonizer), Tong He (@hetong007), Yuan Tang (@terrytangyuan)
|
||||||
|
|
||||||
|
**First-time Contributors** (in no particular order): Jelle Zijlstra (@JelleZijlstra), Jiacheng Xu (@jiachengxu), @ajing, Kashif Rasul (@kashif), @theycallhimavi, Joey Gao (@pjgao), Prabakaran Kumaresshan (@nixphix), Huafeng Wang (@huafengw), @lyxthe, Sam Wilkinson (@scwilkinson), Tatsuhito Kato (@stabacov), Shayak Banerjee (@shayakbanerjee), Kodi Arfer (@Kodiologist), @KyleLi1985, Egor Smirnov (@SmirnovEgorRu), @tmitanitky, Pasha Stetsenko (@st-pasha), Kenichi Nagahara (@keni-chi), Abhai Kollara Dilip (@abhaikollara), Patrick Ford (@pford221), @hshujuan, Matthew Jones (@mt-jones), Thejaswi Rao (@teju85), Adam November (@anovember)
|
||||||
|
|
||||||
|
**First-time Reviewers** (in no particular order): Mingyang Hu (@mingyang), Theodore Vasiloudis (@thvasilo), Jakub Troszok (@troszok), Rong Ou (@rongou), @Denisevi4, Matthew Jones (@mt-jones), Jeff Kaplan (@jeffdk)
|
||||||
|
|
||||||
## v0.81 (2018.11.04)
|
## v0.81 (2018.11.04)
|
||||||
### New feature: feature interaction constraints
|
### New feature: feature interaction constraints
|
||||||
* Users are now able to control which features (independent variables) are allowed to interact by specifying feature interaction constraints (#3466).
|
* Users are now able to control which features (independent variables) are allowed to interact by specifying feature interaction constraints (#3466).
|
||||||
@@ -23,6 +318,10 @@ This file records the changes in xgboost library in reverse chronological order.
|
|||||||
* Mitigate tracker "thundering herd" issue on large cluster. Add exponential backoff retry when workers connect to tracker.
|
* Mitigate tracker "thundering herd" issue on large cluster. Add exponential backoff retry when workers connect to tracker.
|
||||||
* With this change, we were able to scale to 1.5k executors on a 12 billion row dataset after some tweaks here and there.
|
* With this change, we were able to scale to 1.5k executors on a 12 billion row dataset after some tweaks here and there.
|
||||||
|
|
||||||
|
### New feature: Additional objective functions for GPUs
|
||||||
|
* New objective functions ported to GPU: `hinge`, `multi:softmax`, `multi:softprob`, `count:poisson`, `reg:gamma`, `reg:tweedie`.
|
||||||
|
* With supported objectives, XGBoost will select the correct devices based on your system and `n_gpus` parameter.
|
||||||
|
|
||||||
### Major bug fix: learning to rank with XGBoost4J-Spark
|
### Major bug fix: learning to rank with XGBoost4J-Spark
|
||||||
* Previously, `repartitionForData` would shuffle data and lose ordering necessary for ranking task.
|
* Previously, `repartitionForData` would shuffle data and lose ordering necessary for ranking task.
|
||||||
* To fix this issue, data points within each RDD partition are explicitly grouped by their group (query session) IDs (#3654). Also handle empty RDD partitions carefully (#3750).
|
* To fix this issue, data points within each RDD partition are explicitly grouped by their group (query session) IDs (#3654). Also handle empty RDD partitions carefully (#3750).
|
||||||
@@ -33,6 +332,7 @@ This file records the changes in xgboost library in reverse chronological order.
|
|||||||
|
|
||||||
### API changes
|
### API changes
|
||||||
* Column sampling by level (`colsample_bylevel`) is now functional for `hist` algorithm (#3635, #3862)
|
* Column sampling by level (`colsample_bylevel`) is now functional for `hist` algorithm (#3635, #3862)
|
||||||
|
* GPU tag `gpu:` for regression objectives are now deprecated. XGBoost will select the correct devices automatically (#3643)
|
||||||
* Add `disable_default_eval_metric` parameter to disable default metric (#3606)
|
* Add `disable_default_eval_metric` parameter to disable default metric (#3606)
|
||||||
* Experimental AVX support for gradient computation is removed (#3752)
|
* Experimental AVX support for gradient computation is removed (#3752)
|
||||||
* XGBoost4J-Spark
|
* XGBoost4J-Spark
|
||||||
@@ -159,7 +459,7 @@ This file records the changes in xgboost library in reverse chronological order.
|
|||||||
### Acknowledgement
|
### Acknowledgement
|
||||||
**Contributors** (in no particular order): Hyunsu Cho (@hcho3), Jiaming Yuan (@trivialfis), Nan Zhu (@CodingCat), Rory Mitchell (@RAMitchell), Andy Adinets (@canonizer), Vadim Khotilovich (@khotilov), Sergei Lebedev (@superbobry)
|
**Contributors** (in no particular order): Hyunsu Cho (@hcho3), Jiaming Yuan (@trivialfis), Nan Zhu (@CodingCat), Rory Mitchell (@RAMitchell), Andy Adinets (@canonizer), Vadim Khotilovich (@khotilov), Sergei Lebedev (@superbobry)
|
||||||
|
|
||||||
**First-time Contributors** (in no particular order): Matthew Tovbin (@tovbinm), Jakob Richter (@jakob-r), Grace Lam (@grace-lam), Grant W Schneider (@grantschneider), Andrew Thia (@BlueTea88), Sergei Chipiga (@schipiga), Joseph Bradley (@jkbradley), Chen Qin (@chenqin), Jerry Lin (@linjer), Dmitriy Rybalko (@rdtft), Michael Mui (@mmui), Takahiro Kojima (@515hikaru), Bruce Zhao (@BruceZhaoR), Wei Tian (@weitian), Saumya Bhatnagar (@Sam1301), Juzer Shakir (@JuzerShakir), Zhao Hang (@cleghom), Jonathan Friedman (@jontonsoup), Bruno Tremblay (@meztez), @Shiki-H, @mrgutkun, @gorogm, @htgeis, @jakehoare, @zengxy, @KOLANICH
|
**First-time Contributors** (in no particular order): Matthew Tovbin (@tovbinm), Jakob Richter (@jakob-r), Grace Lam (@grace-lam), Grant W Schneider (@grantschneider), Andrew Thia (@BlueTea88), Sergei Chipiga (@schipiga), Joseph Bradley (@jkbradley), Chen Qin (@chenqin), Jerry Lin (@linjer), Dmitriy Rybalko (@rdtft), Michael Mui (@mmui), Takahiro Kojima (@515hikaru), Bruce Zhao (@BruceZhaoR), Wei Tian (@weitian), Saumya Bhatnagar (@Sam1301), Juzer Shakir (@JuzerShakir), Zhao Hang (@cleghom), Jonathan Friedman (@jontonsoup), Bruno Tremblay (@meztez), Boris Filippov (@frenzykryger), @Shiki-H, @mrgutkun, @gorogm, @htgeis, @jakehoare, @zengxy, @KOLANICH
|
||||||
|
|
||||||
**First-time Reviewers** (in no particular order): Nikita Titov (@StrikerRUS), Xiangrui Meng (@mengxr), Nirmal Borah (@Nirmal-Neel)
|
**First-time Reviewers** (in no particular order): Nikita Titov (@StrikerRUS), Xiangrui Meng (@mengxr), Nirmal Borah (@Nirmal-Neel)
|
||||||
|
|
||||||
@@ -174,7 +474,7 @@ This file records the changes in xgboost library in reverse chronological order.
|
|||||||
- Latest master: https://xgboost.readthedocs.io/en/latest
|
- Latest master: https://xgboost.readthedocs.io/en/latest
|
||||||
- 0.80 stable: https://xgboost.readthedocs.io/en/release_0.80
|
- 0.80 stable: https://xgboost.readthedocs.io/en/release_0.80
|
||||||
- 0.72 stable: https://xgboost.readthedocs.io/en/release_0.72
|
- 0.72 stable: https://xgboost.readthedocs.io/en/release_0.72
|
||||||
* Ranking task now uses instance weights (#3379)
|
* Support for per-group weights in ranking objective (#3379)
|
||||||
* Fix inaccurate decimal parsing (#3546)
|
* Fix inaccurate decimal parsing (#3546)
|
||||||
* New functionality
|
* New functionality
|
||||||
- Query ID column support in LIBSVM data files (#2749). This is convenient for performing ranking task in distributed setting.
|
- Query ID column support in LIBSVM data files (#2749). This is convenient for performing ranking task in distributed setting.
|
||||||
@@ -334,7 +634,7 @@ This version is only applicable for the Python package. The content is identical
|
|||||||
- Compatibility fix for Python 2.6
|
- Compatibility fix for Python 2.6
|
||||||
- Call `print_evaluation` callback at last iteration
|
- Call `print_evaluation` callback at last iteration
|
||||||
- Use appropriate integer types when calling native code, to prevent truncation and memory error
|
- Use appropriate integer types when calling native code, to prevent truncation and memory error
|
||||||
- Fix shared library loading on Mac OS X
|
- Fix shared library loading on Mac OS X
|
||||||
* R package:
|
* R package:
|
||||||
- New parameters:
|
- New parameters:
|
||||||
- `silent` in `xgb.DMatrix()`
|
- `silent` in `xgb.DMatrix()`
|
||||||
@@ -375,7 +675,7 @@ This version is only applicable for the Python package. The content is identical
|
|||||||
- Support instance weights
|
- Support instance weights
|
||||||
- Use `SparkParallelismTracker` to prevent jobs from hanging forever
|
- Use `SparkParallelismTracker` to prevent jobs from hanging forever
|
||||||
- Expose train-time evaluation metrics via `XGBoostModel.summary`
|
- Expose train-time evaluation metrics via `XGBoostModel.summary`
|
||||||
- Option to specify `host-ip` explicitly in the Rabit tracker
|
- Option to specify `host-ip` explicitly in the Rabit tracker
|
||||||
* Documentation
|
* Documentation
|
||||||
- Better math notation for gradient boosting
|
- Better math notation for gradient boosting
|
||||||
- Updated build instructions for Mac OS X
|
- Updated build instructions for Mac OS X
|
||||||
|
|||||||
34
R-package/CMakeLists.txt
Normal file
34
R-package/CMakeLists.txt
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
find_package(LibR REQUIRED)
|
||||||
|
message(STATUS "LIBR_CORE_LIBRARY " ${LIBR_CORE_LIBRARY})
|
||||||
|
|
||||||
|
file(GLOB_RECURSE R_SOURCES
|
||||||
|
${CMAKE_CURRENT_LIST_DIR}/src/*.cc
|
||||||
|
${CMAKE_CURRENT_LIST_DIR}/src/*.c)
|
||||||
|
# Use object library to expose symbols
|
||||||
|
add_library(xgboost-r OBJECT ${R_SOURCES})
|
||||||
|
|
||||||
|
set(R_DEFINITIONS
|
||||||
|
-DXGBOOST_STRICT_R_MODE=1
|
||||||
|
-DXGBOOST_CUSTOMIZE_GLOBAL_PRNG=1
|
||||||
|
-DDMLC_LOG_BEFORE_THROW=0
|
||||||
|
-DDMLC_DISABLE_STDIN=1
|
||||||
|
-DDMLC_LOG_CUSTOMIZE=1
|
||||||
|
-DRABIT_CUSTOMIZE_MSG_
|
||||||
|
-DRABIT_STRICT_CXX98_)
|
||||||
|
target_compile_definitions(xgboost-r
|
||||||
|
PRIVATE ${R_DEFINITIONS})
|
||||||
|
target_include_directories(xgboost-r
|
||||||
|
PRIVATE
|
||||||
|
${LIBR_INCLUDE_DIRS}
|
||||||
|
${PROJECT_SOURCE_DIR}/include
|
||||||
|
${PROJECT_SOURCE_DIR}/dmlc-core/include
|
||||||
|
${PROJECT_SOURCE_DIR}/rabit/include)
|
||||||
|
set_target_properties(
|
||||||
|
xgboost-r PROPERTIES
|
||||||
|
CXX_STANDARD 11
|
||||||
|
CXX_STANDARD_REQUIRED ON
|
||||||
|
POSITION_INDEPENDENT_CODE ON)
|
||||||
|
|
||||||
|
set(XGBOOST_DEFINITIONS "${XGBOOST_DEFINITIONS};${R_DEFINITIONS}" PARENT_SCOPE)
|
||||||
|
set(XGBOOST_OBJ_SOURCES $<TARGET_OBJECTS:xgboost-r> PARENT_SCOPE)
|
||||||
|
set(LINKED_LIBRARIES_PRIVATE ${LINKED_LIBRARIES_PRIVATE} ${LIBR_CORE_LIBRARY} PARENT_SCOPE)
|
||||||
@@ -1,8 +1,8 @@
|
|||||||
Package: xgboost
|
Package: xgboost
|
||||||
Type: Package
|
Type: Package
|
||||||
Title: Extreme Gradient Boosting
|
Title: Extreme Gradient Boosting
|
||||||
Version: 0.81.0.1
|
Version: 1.0.0.1
|
||||||
Date: 2018-08-13
|
Date: 2019-07-23
|
||||||
Authors@R: c(
|
Authors@R: c(
|
||||||
person("Tianqi", "Chen", role = c("aut"),
|
person("Tianqi", "Chen", role = c("aut"),
|
||||||
email = "tianqi.tchen@gmail.com"),
|
email = "tianqi.tchen@gmail.com"),
|
||||||
@@ -52,7 +52,9 @@ Suggests:
|
|||||||
vcd (>= 1.3),
|
vcd (>= 1.3),
|
||||||
testthat,
|
testthat,
|
||||||
lintr,
|
lintr,
|
||||||
igraph (>= 1.0.1)
|
igraph (>= 1.0.1),
|
||||||
|
jsonlite,
|
||||||
|
float
|
||||||
Depends:
|
Depends:
|
||||||
R (>= 3.3.0)
|
R (>= 3.3.0)
|
||||||
Imports:
|
Imports:
|
||||||
@@ -61,5 +63,5 @@ Imports:
|
|||||||
data.table (>= 1.9.6),
|
data.table (>= 1.9.6),
|
||||||
magrittr (>= 1.5),
|
magrittr (>= 1.5),
|
||||||
stringi (>= 0.5.2)
|
stringi (>= 0.5.2)
|
||||||
RoxygenNote: 6.1.0
|
RoxygenNote: 7.0.2
|
||||||
SystemRequirements: GNU make, C++11
|
SystemRequirements: GNU make, C++11
|
||||||
|
|||||||
@@ -1,26 +1,26 @@
|
|||||||
#' Callback closures for booster training.
|
#' Callback closures for booster training.
|
||||||
#'
|
#'
|
||||||
#' These are used to perform various service tasks either during boosting iterations or at the end.
|
#' These are used to perform various service tasks either during boosting iterations or at the end.
|
||||||
#' This approach helps to modularize many of such tasks without bloating the main training methods,
|
#' This approach helps to modularize many of such tasks without bloating the main training methods,
|
||||||
#' and it offers additional flexibility.
|
#' and it offers additional flexibility.
|
||||||
#'
|
#'
|
||||||
#' @details
|
#' @details
|
||||||
#' By default, a callback function is run after each boosting iteration.
|
#' By default, a callback function is run after each boosting iteration.
|
||||||
#' An R-attribute \code{is_pre_iteration} could be set for a callback to define a pre-iteration function.
|
#' An R-attribute \code{is_pre_iteration} could be set for a callback to define a pre-iteration function.
|
||||||
#'
|
#'
|
||||||
#' When a callback function has \code{finalize} parameter, its finalizer part will also be run after
|
#' When a callback function has \code{finalize} parameter, its finalizer part will also be run after
|
||||||
#' the boosting is completed.
|
#' the boosting is completed.
|
||||||
#'
|
#'
|
||||||
#' WARNING: side-effects!!! Be aware that these callback functions access and modify things in
|
#' WARNING: side-effects!!! Be aware that these callback functions access and modify things in
|
||||||
#' the environment from which they are called from, which is a fairly uncommon thing to do in R.
|
#' the environment from which they are called from, which is a fairly uncommon thing to do in R.
|
||||||
#'
|
#'
|
||||||
#' To write a custom callback closure, make sure you first understand the main concepts about R envoronments.
|
#' To write a custom callback closure, make sure you first understand the main concepts about R environments.
|
||||||
#' Check either R documentation on \code{\link[base]{environment}} or the
|
#' Check either R documentation on \code{\link[base]{environment}} or the
|
||||||
#' \href{http://adv-r.had.co.nz/Environments.html}{Environments chapter} from the "Advanced R"
|
#' \href{http://adv-r.had.co.nz/Environments.html}{Environments chapter} from the "Advanced R"
|
||||||
#' book by Hadley Wickham. Further, the best option is to read the code of some of the existing callbacks -
|
#' book by Hadley Wickham. Further, the best option is to read the code of some of the existing callbacks -
|
||||||
#' choose ones that do something similar to what you want to achieve. Also, you would need to get familiar
|
#' choose ones that do something similar to what you want to achieve. Also, you would need to get familiar
|
||||||
#' with the objects available inside of the \code{xgb.train} and \code{xgb.cv} internal environments.
|
#' with the objects available inside of the \code{xgb.train} and \code{xgb.cv} internal environments.
|
||||||
#'
|
#'
|
||||||
#' @seealso
|
#' @seealso
|
||||||
#' \code{\link{cb.print.evaluation}},
|
#' \code{\link{cb.print.evaluation}},
|
||||||
#' \code{\link{cb.evaluation.log}},
|
#' \code{\link{cb.evaluation.log}},
|
||||||
@@ -30,42 +30,42 @@
|
|||||||
#' \code{\link{cb.cv.predict}},
|
#' \code{\link{cb.cv.predict}},
|
||||||
#' \code{\link{xgb.train}},
|
#' \code{\link{xgb.train}},
|
||||||
#' \code{\link{xgb.cv}}
|
#' \code{\link{xgb.cv}}
|
||||||
#'
|
#'
|
||||||
#' @name callbacks
|
#' @name callbacks
|
||||||
NULL
|
NULL
|
||||||
|
|
||||||
#
|
#
|
||||||
# Callbacks -------------------------------------------------------------------
|
# Callbacks -------------------------------------------------------------------
|
||||||
#
|
#
|
||||||
|
|
||||||
#' Callback closure for printing the result of evaluation
|
#' Callback closure for printing the result of evaluation
|
||||||
#'
|
#'
|
||||||
#' @param period results would be printed every number of periods
|
#' @param period results would be printed every number of periods
|
||||||
#' @param showsd whether standard deviations should be printed (when available)
|
#' @param showsd whether standard deviations should be printed (when available)
|
||||||
#'
|
#'
|
||||||
#' @details
|
#' @details
|
||||||
#' The callback function prints the result of evaluation at every \code{period} iterations.
|
#' The callback function prints the result of evaluation at every \code{period} iterations.
|
||||||
#' The initial and the last iteration's evaluations are always printed.
|
#' The initial and the last iteration's evaluations are always printed.
|
||||||
#'
|
#'
|
||||||
#' Callback function expects the following values to be set in its calling frame:
|
#' Callback function expects the following values to be set in its calling frame:
|
||||||
#' \code{bst_evaluation} (also \code{bst_evaluation_err} when available),
|
#' \code{bst_evaluation} (also \code{bst_evaluation_err} when available),
|
||||||
#' \code{iteration},
|
#' \code{iteration},
|
||||||
#' \code{begin_iteration},
|
#' \code{begin_iteration},
|
||||||
#' \code{end_iteration}.
|
#' \code{end_iteration}.
|
||||||
#'
|
#'
|
||||||
#' @seealso
|
#' @seealso
|
||||||
#' \code{\link{callbacks}}
|
#' \code{\link{callbacks}}
|
||||||
#'
|
#'
|
||||||
#' @export
|
#' @export
|
||||||
cb.print.evaluation <- function(period = 1, showsd = TRUE) {
|
cb.print.evaluation <- function(period = 1, showsd = TRUE) {
|
||||||
|
|
||||||
callback <- function(env = parent.frame()) {
|
callback <- function(env = parent.frame()) {
|
||||||
if (length(env$bst_evaluation) == 0 ||
|
if (length(env$bst_evaluation) == 0 ||
|
||||||
period == 0 ||
|
period == 0 ||
|
||||||
NVL(env$rank, 0) != 0 )
|
NVL(env$rank, 0) != 0 )
|
||||||
return()
|
return()
|
||||||
|
|
||||||
i <- env$iteration
|
i <- env$iteration
|
||||||
if ((i-1) %% period == 0 ||
|
if ((i-1) %% period == 0 ||
|
||||||
i == env$begin_iteration ||
|
i == env$begin_iteration ||
|
||||||
i == env$end_iteration) {
|
i == env$end_iteration) {
|
||||||
@@ -81,48 +81,48 @@ cb.print.evaluation <- function(period = 1, showsd = TRUE) {
|
|||||||
|
|
||||||
|
|
||||||
#' Callback closure for logging the evaluation history
|
#' Callback closure for logging the evaluation history
|
||||||
#'
|
#'
|
||||||
#' @details
|
#' @details
|
||||||
#' This callback function appends the current iteration evaluation results \code{bst_evaluation}
|
#' This callback function appends the current iteration evaluation results \code{bst_evaluation}
|
||||||
#' available in the calling parent frame to the \code{evaluation_log} list in a calling frame.
|
#' available in the calling parent frame to the \code{evaluation_log} list in a calling frame.
|
||||||
#'
|
#'
|
||||||
#' The finalizer callback (called with \code{finalize = TRUE} in the end) converts
|
#' The finalizer callback (called with \code{finalize = TRUE} in the end) converts
|
||||||
#' the \code{evaluation_log} list into a final data.table.
|
#' the \code{evaluation_log} list into a final data.table.
|
||||||
#'
|
#'
|
||||||
#' The iteration evaluation result \code{bst_evaluation} must be a named numeric vector.
|
#' The iteration evaluation result \code{bst_evaluation} must be a named numeric vector.
|
||||||
#'
|
#'
|
||||||
#' Note: in the column names of the final data.table, the dash '-' character is replaced with
|
#' Note: in the column names of the final data.table, the dash '-' character is replaced with
|
||||||
#' the underscore '_' in order to make the column names more like regular R identifiers.
|
#' the underscore '_' in order to make the column names more like regular R identifiers.
|
||||||
#'
|
#'
|
||||||
#' Callback function expects the following values to be set in its calling frame:
|
#' Callback function expects the following values to be set in its calling frame:
|
||||||
#' \code{evaluation_log},
|
#' \code{evaluation_log},
|
||||||
#' \code{bst_evaluation},
|
#' \code{bst_evaluation},
|
||||||
#' \code{iteration}.
|
#' \code{iteration}.
|
||||||
#'
|
#'
|
||||||
#' @seealso
|
#' @seealso
|
||||||
#' \code{\link{callbacks}}
|
#' \code{\link{callbacks}}
|
||||||
#'
|
#'
|
||||||
#' @export
|
#' @export
|
||||||
cb.evaluation.log <- function() {
|
cb.evaluation.log <- function() {
|
||||||
|
|
||||||
mnames <- NULL
|
mnames <- NULL
|
||||||
|
|
||||||
init <- function(env) {
|
init <- function(env) {
|
||||||
if (!is.list(env$evaluation_log))
|
if (!is.list(env$evaluation_log))
|
||||||
stop("'evaluation_log' has to be a list")
|
stop("'evaluation_log' has to be a list")
|
||||||
mnames <<- names(env$bst_evaluation)
|
mnames <<- names(env$bst_evaluation)
|
||||||
if (is.null(mnames) || any(mnames == ""))
|
if (is.null(mnames) || any(mnames == ""))
|
||||||
stop("bst_evaluation must have non-empty names")
|
stop("bst_evaluation must have non-empty names")
|
||||||
|
|
||||||
mnames <<- gsub('-', '_', names(env$bst_evaluation))
|
mnames <<- gsub('-', '_', names(env$bst_evaluation))
|
||||||
if(!is.null(env$bst_evaluation_err))
|
if(!is.null(env$bst_evaluation_err))
|
||||||
mnames <<- c(paste0(mnames, '_mean'), paste0(mnames, '_std'))
|
mnames <<- c(paste0(mnames, '_mean'), paste0(mnames, '_std'))
|
||||||
}
|
}
|
||||||
|
|
||||||
finalizer <- function(env) {
|
finalizer <- function(env) {
|
||||||
env$evaluation_log <- as.data.table(t(simplify2array(env$evaluation_log)))
|
env$evaluation_log <- as.data.table(t(simplify2array(env$evaluation_log)))
|
||||||
setnames(env$evaluation_log, c('iter', mnames))
|
setnames(env$evaluation_log, c('iter', mnames))
|
||||||
|
|
||||||
if(!is.null(env$bst_evaluation_err)) {
|
if(!is.null(env$bst_evaluation_err)) {
|
||||||
# rearrange col order from _mean,_mean,...,_std,_std,...
|
# rearrange col order from _mean,_mean,...,_std,_std,...
|
||||||
# to be _mean,_std,_mean,_std,...
|
# to be _mean,_std,_mean,_std,...
|
||||||
@@ -135,18 +135,18 @@ cb.evaluation.log <- function() {
|
|||||||
env$evaluation_log <- env$evaluation_log[, c('iter', cnames), with = FALSE]
|
env$evaluation_log <- env$evaluation_log[, c('iter', cnames), with = FALSE]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
callback <- function(env = parent.frame(), finalize = FALSE) {
|
callback <- function(env = parent.frame(), finalize = FALSE) {
|
||||||
if (is.null(mnames))
|
if (is.null(mnames))
|
||||||
init(env)
|
init(env)
|
||||||
|
|
||||||
if (finalize)
|
if (finalize)
|
||||||
return(finalizer(env))
|
return(finalizer(env))
|
||||||
|
|
||||||
ev <- env$bst_evaluation
|
ev <- env$bst_evaluation
|
||||||
if(!is.null(env$bst_evaluation_err))
|
if(!is.null(env$bst_evaluation_err))
|
||||||
ev <- c(ev, env$bst_evaluation_err)
|
ev <- c(ev, env$bst_evaluation_err)
|
||||||
env$evaluation_log <- c(env$evaluation_log,
|
env$evaluation_log <- c(env$evaluation_log,
|
||||||
list(c(iter = env$iteration, ev)))
|
list(c(iter = env$iteration, ev)))
|
||||||
}
|
}
|
||||||
attr(callback, 'call') <- match.call()
|
attr(callback, 'call') <- match.call()
|
||||||
@@ -154,21 +154,21 @@ cb.evaluation.log <- function() {
|
|||||||
callback
|
callback
|
||||||
}
|
}
|
||||||
|
|
||||||
#' Callback closure for restetting the booster's parameters at each iteration.
|
#' Callback closure for resetting the booster's parameters at each iteration.
|
||||||
#'
|
#'
|
||||||
#' @param new_params a list where each element corresponds to a parameter that needs to be reset.
|
#' @param new_params a list where each element corresponds to a parameter that needs to be reset.
|
||||||
#' Each element's value must be either a vector of values of length \code{nrounds}
|
#' Each element's value must be either a vector of values of length \code{nrounds}
|
||||||
#' to be set at each iteration,
|
#' to be set at each iteration,
|
||||||
#' or a function of two parameters \code{learning_rates(iteration, nrounds)}
|
#' or a function of two parameters \code{learning_rates(iteration, nrounds)}
|
||||||
#' which returns a new parameter value by using the current iteration number
|
#' which returns a new parameter value by using the current iteration number
|
||||||
#' and the total number of boosting rounds.
|
#' and the total number of boosting rounds.
|
||||||
#'
|
#'
|
||||||
#' @details
|
#' @details
|
||||||
#' This is a "pre-iteration" callback function used to reset booster's parameters
|
#' This is a "pre-iteration" callback function used to reset booster's parameters
|
||||||
#' at the beginning of each iteration.
|
#' at the beginning of each iteration.
|
||||||
#'
|
#'
|
||||||
#' Note that when training is resumed from some previous model, and a function is used to
|
#' Note that when training is resumed from some previous model, and a function is used to
|
||||||
#' reset a parameter value, the \code{nrounds} argument in this function would be the
|
#' reset a parameter value, the \code{nrounds} argument in this function would be the
|
||||||
#' the number of boosting rounds in the current training.
|
#' the number of boosting rounds in the current training.
|
||||||
#'
|
#'
|
||||||
#' Callback function expects the following values to be set in its calling frame:
|
#' Callback function expects the following values to be set in its calling frame:
|
||||||
@@ -176,32 +176,32 @@ cb.evaluation.log <- function() {
|
|||||||
#' \code{iteration},
|
#' \code{iteration},
|
||||||
#' \code{begin_iteration},
|
#' \code{begin_iteration},
|
||||||
#' \code{end_iteration}.
|
#' \code{end_iteration}.
|
||||||
#'
|
#'
|
||||||
#' @seealso
|
#' @seealso
|
||||||
#' \code{\link{callbacks}}
|
#' \code{\link{callbacks}}
|
||||||
#'
|
#'
|
||||||
#' @export
|
#' @export
|
||||||
cb.reset.parameters <- function(new_params) {
|
cb.reset.parameters <- function(new_params) {
|
||||||
|
|
||||||
if (typeof(new_params) != "list")
|
if (typeof(new_params) != "list")
|
||||||
stop("'new_params' must be a list")
|
stop("'new_params' must be a list")
|
||||||
pnames <- gsub("\\.", "_", names(new_params))
|
pnames <- gsub("\\.", "_", names(new_params))
|
||||||
nrounds <- NULL
|
nrounds <- NULL
|
||||||
|
|
||||||
# run some checks in the begining
|
# run some checks in the begining
|
||||||
init <- function(env) {
|
init <- function(env) {
|
||||||
nrounds <<- env$end_iteration - env$begin_iteration + 1
|
nrounds <<- env$end_iteration - env$begin_iteration + 1
|
||||||
|
|
||||||
if (is.null(env$bst) && is.null(env$bst_folds))
|
if (is.null(env$bst) && is.null(env$bst_folds))
|
||||||
stop("Parent frame has neither 'bst' nor 'bst_folds'")
|
stop("Parent frame has neither 'bst' nor 'bst_folds'")
|
||||||
|
|
||||||
# Some parameters are not allowed to be changed,
|
# Some parameters are not allowed to be changed,
|
||||||
# since changing them would simply wreck some chaos
|
# since changing them would simply wreck some chaos
|
||||||
not_allowed <- pnames %in%
|
not_allowed <- pnames %in%
|
||||||
c('num_class', 'num_output_group', 'size_leaf_vector', 'updater_seq')
|
c('num_class', 'num_output_group', 'size_leaf_vector', 'updater_seq')
|
||||||
if (any(not_allowed))
|
if (any(not_allowed))
|
||||||
stop('Parameters ', paste(pnames[not_allowed]), " cannot be changed during boosting.")
|
stop('Parameters ', paste(pnames[not_allowed]), " cannot be changed during boosting.")
|
||||||
|
|
||||||
for (n in pnames) {
|
for (n in pnames) {
|
||||||
p <- new_params[[n]]
|
p <- new_params[[n]]
|
||||||
if (is.function(p)) {
|
if (is.function(p)) {
|
||||||
@@ -215,18 +215,18 @@ cb.reset.parameters <- function(new_params) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
callback <- function(env = parent.frame()) {
|
callback <- function(env = parent.frame()) {
|
||||||
if (is.null(nrounds))
|
if (is.null(nrounds))
|
||||||
init(env)
|
init(env)
|
||||||
|
|
||||||
i <- env$iteration
|
i <- env$iteration
|
||||||
pars <- lapply(new_params, function(p) {
|
pars <- lapply(new_params, function(p) {
|
||||||
if (is.function(p))
|
if (is.function(p))
|
||||||
return(p(i, nrounds))
|
return(p(i, nrounds))
|
||||||
p[i]
|
p[i]
|
||||||
})
|
})
|
||||||
|
|
||||||
if (!is.null(env$bst)) {
|
if (!is.null(env$bst)) {
|
||||||
xgb.parameters(env$bst$handle) <- pars
|
xgb.parameters(env$bst$handle) <- pars
|
||||||
} else {
|
} else {
|
||||||
@@ -242,23 +242,23 @@ cb.reset.parameters <- function(new_params) {
|
|||||||
|
|
||||||
|
|
||||||
#' Callback closure to activate the early stopping.
|
#' Callback closure to activate the early stopping.
|
||||||
#'
|
#'
|
||||||
#' @param stopping_rounds The number of rounds with no improvement in
|
#' @param stopping_rounds The number of rounds with no improvement in
|
||||||
#' the evaluation metric in order to stop the training.
|
#' the evaluation metric in order to stop the training.
|
||||||
#' @param maximize whether to maximize the evaluation metric
|
#' @param maximize whether to maximize the evaluation metric
|
||||||
#' @param metric_name the name of an evaluation column to use as a criteria for early
|
#' @param metric_name the name of an evaluation column to use as a criteria for early
|
||||||
#' stopping. If not set, the last column would be used.
|
#' stopping. If not set, the last column would be used.
|
||||||
#' Let's say the test data in \code{watchlist} was labelled as \code{dtest},
|
#' Let's say the test data in \code{watchlist} was labelled as \code{dtest},
|
||||||
#' and one wants to use the AUC in test data for early stopping regardless of where
|
#' and one wants to use the AUC in test data for early stopping regardless of where
|
||||||
#' it is in the \code{watchlist}, then one of the following would need to be set:
|
#' it is in the \code{watchlist}, then one of the following would need to be set:
|
||||||
#' \code{metric_name='dtest-auc'} or \code{metric_name='dtest_auc'}.
|
#' \code{metric_name='dtest-auc'} or \code{metric_name='dtest_auc'}.
|
||||||
#' All dash '-' characters in metric names are considered equivalent to '_'.
|
#' All dash '-' characters in metric names are considered equivalent to '_'.
|
||||||
#' @param verbose whether to print the early stopping information.
|
#' @param verbose whether to print the early stopping information.
|
||||||
#'
|
#'
|
||||||
#' @details
|
#' @details
|
||||||
#' This callback function determines the condition for early stopping
|
#' This callback function determines the condition for early stopping
|
||||||
#' by setting the \code{stop_condition = TRUE} flag in its calling frame.
|
#' by setting the \code{stop_condition = TRUE} flag in its calling frame.
|
||||||
#'
|
#'
|
||||||
#' The following additional fields are assigned to the model's R object:
|
#' The following additional fields are assigned to the model's R object:
|
||||||
#' \itemize{
|
#' \itemize{
|
||||||
#' \item \code{best_score} the evaluation score at the best iteration
|
#' \item \code{best_score} the evaluation score at the best iteration
|
||||||
@@ -266,13 +266,13 @@ cb.reset.parameters <- function(new_params) {
|
|||||||
#' \item \code{best_ntreelimit} to use with the \code{ntreelimit} parameter in \code{predict}.
|
#' \item \code{best_ntreelimit} to use with the \code{ntreelimit} parameter in \code{predict}.
|
||||||
#' It differs from \code{best_iteration} in multiclass or random forest settings.
|
#' It differs from \code{best_iteration} in multiclass or random forest settings.
|
||||||
#' }
|
#' }
|
||||||
#'
|
#'
|
||||||
#' The Same values are also stored as xgb-attributes:
|
#' The Same values are also stored as xgb-attributes:
|
||||||
#' \itemize{
|
#' \itemize{
|
||||||
#' \item \code{best_iteration} is stored as a 0-based iteration index (for interoperability of binary models)
|
#' \item \code{best_iteration} is stored as a 0-based iteration index (for interoperability of binary models)
|
||||||
#' \item \code{best_msg} message string is also stored.
|
#' \item \code{best_msg} message string is also stored.
|
||||||
#' }
|
#' }
|
||||||
#'
|
#'
|
||||||
#' At least one data element is required in the evaluation watchlist for early stopping to work.
|
#' At least one data element is required in the evaluation watchlist for early stopping to work.
|
||||||
#'
|
#'
|
||||||
#' Callback function expects the following values to be set in its calling frame:
|
#' Callback function expects the following values to be set in its calling frame:
|
||||||
@@ -284,13 +284,13 @@ cb.reset.parameters <- function(new_params) {
|
|||||||
#' \code{begin_iteration},
|
#' \code{begin_iteration},
|
||||||
#' \code{end_iteration},
|
#' \code{end_iteration},
|
||||||
#' \code{num_parallel_tree}.
|
#' \code{num_parallel_tree}.
|
||||||
#'
|
#'
|
||||||
#' @seealso
|
#' @seealso
|
||||||
#' \code{\link{callbacks}},
|
#' \code{\link{callbacks}},
|
||||||
#' \code{\link{xgb.attr}}
|
#' \code{\link{xgb.attr}}
|
||||||
#'
|
#'
|
||||||
#' @export
|
#' @export
|
||||||
cb.early.stop <- function(stopping_rounds, maximize = FALSE,
|
cb.early.stop <- function(stopping_rounds, maximize = FALSE,
|
||||||
metric_name = NULL, verbose = TRUE) {
|
metric_name = NULL, verbose = TRUE) {
|
||||||
# state variables
|
# state variables
|
||||||
best_iteration <- -1
|
best_iteration <- -1
|
||||||
@@ -298,11 +298,11 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
|
|||||||
best_score <- Inf
|
best_score <- Inf
|
||||||
best_msg <- NULL
|
best_msg <- NULL
|
||||||
metric_idx <- 1
|
metric_idx <- 1
|
||||||
|
|
||||||
init <- function(env) {
|
init <- function(env) {
|
||||||
if (length(env$bst_evaluation) == 0)
|
if (length(env$bst_evaluation) == 0)
|
||||||
stop("For early stopping, watchlist must have at least one element")
|
stop("For early stopping, watchlist must have at least one element")
|
||||||
|
|
||||||
eval_names <- gsub('-', '_', names(env$bst_evaluation))
|
eval_names <- gsub('-', '_', names(env$bst_evaluation))
|
||||||
if (!is.null(metric_name)) {
|
if (!is.null(metric_name)) {
|
||||||
metric_idx <<- which(gsub('-', '_', metric_name) == eval_names)
|
metric_idx <<- which(gsub('-', '_', metric_name) == eval_names)
|
||||||
@@ -314,25 +314,25 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
|
|||||||
length(env$bst_evaluation) > 1) {
|
length(env$bst_evaluation) > 1) {
|
||||||
metric_idx <<- length(eval_names)
|
metric_idx <<- length(eval_names)
|
||||||
if (verbose)
|
if (verbose)
|
||||||
cat('Multiple eval metrics are present. Will use ',
|
cat('Multiple eval metrics are present. Will use ',
|
||||||
eval_names[metric_idx], ' for early stopping.\n', sep = '')
|
eval_names[metric_idx], ' for early stopping.\n', sep = '')
|
||||||
}
|
}
|
||||||
|
|
||||||
metric_name <<- eval_names[metric_idx]
|
metric_name <<- eval_names[metric_idx]
|
||||||
|
|
||||||
# maximize is usually NULL when not set in xgb.train and built-in metrics
|
# maximize is usually NULL when not set in xgb.train and built-in metrics
|
||||||
if (is.null(maximize))
|
if (is.null(maximize))
|
||||||
maximize <<- grepl('(_auc|_map|_ndcg)', metric_name)
|
maximize <<- grepl('(_auc|_map|_ndcg)', metric_name)
|
||||||
|
|
||||||
if (verbose && NVL(env$rank, 0) == 0)
|
if (verbose && NVL(env$rank, 0) == 0)
|
||||||
cat("Will train until ", metric_name, " hasn't improved in ",
|
cat("Will train until ", metric_name, " hasn't improved in ",
|
||||||
stopping_rounds, " rounds.\n\n", sep = '')
|
stopping_rounds, " rounds.\n\n", sep = '')
|
||||||
|
|
||||||
best_iteration <<- 1
|
best_iteration <<- 1
|
||||||
if (maximize) best_score <<- -Inf
|
if (maximize) best_score <<- -Inf
|
||||||
|
|
||||||
env$stop_condition <- FALSE
|
env$stop_condition <- FALSE
|
||||||
|
|
||||||
if (!is.null(env$bst)) {
|
if (!is.null(env$bst)) {
|
||||||
if (!inherits(env$bst, 'xgb.Booster'))
|
if (!inherits(env$bst, 'xgb.Booster'))
|
||||||
stop("'bst' in the parent frame must be an 'xgb.Booster'")
|
stop("'bst' in the parent frame must be an 'xgb.Booster'")
|
||||||
@@ -348,7 +348,7 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
|
|||||||
stop("Parent frame has neither 'bst' nor ('bst_folds' and 'basket')")
|
stop("Parent frame has neither 'bst' nor ('bst_folds' and 'basket')")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
finalizer <- function(env) {
|
finalizer <- function(env) {
|
||||||
if (!is.null(env$bst)) {
|
if (!is.null(env$bst)) {
|
||||||
attr_best_score = as.numeric(xgb.attr(env$bst$handle, 'best_score'))
|
attr_best_score = as.numeric(xgb.attr(env$bst$handle, 'best_score'))
|
||||||
@@ -367,16 +367,16 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
|
|||||||
callback <- function(env = parent.frame(), finalize = FALSE) {
|
callback <- function(env = parent.frame(), finalize = FALSE) {
|
||||||
if (best_iteration < 0)
|
if (best_iteration < 0)
|
||||||
init(env)
|
init(env)
|
||||||
|
|
||||||
if (finalize)
|
if (finalize)
|
||||||
return(finalizer(env))
|
return(finalizer(env))
|
||||||
|
|
||||||
i <- env$iteration
|
i <- env$iteration
|
||||||
score = env$bst_evaluation[metric_idx]
|
score = env$bst_evaluation[metric_idx]
|
||||||
|
|
||||||
if (( maximize && score > best_score) ||
|
if (( maximize && score > best_score) ||
|
||||||
(!maximize && score < best_score)) {
|
(!maximize && score < best_score)) {
|
||||||
|
|
||||||
best_msg <<- format.eval.string(i, env$bst_evaluation, env$bst_evaluation_err)
|
best_msg <<- format.eval.string(i, env$bst_evaluation, env$bst_evaluation_err)
|
||||||
best_score <<- score
|
best_score <<- score
|
||||||
best_iteration <<- i
|
best_iteration <<- i
|
||||||
@@ -403,37 +403,37 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
|
|||||||
|
|
||||||
|
|
||||||
#' Callback closure for saving a model file.
|
#' Callback closure for saving a model file.
|
||||||
#'
|
#'
|
||||||
#' @param save_period save the model to disk after every
|
#' @param save_period save the model to disk after every
|
||||||
#' \code{save_period} iterations; 0 means save the model at the end.
|
#' \code{save_period} iterations; 0 means save the model at the end.
|
||||||
#' @param save_name the name or path for the saved model file.
|
#' @param save_name the name or path for the saved model file.
|
||||||
#' It can contain a \code{\link[base]{sprintf}} formatting specifier
|
#' It can contain a \code{\link[base]{sprintf}} formatting specifier
|
||||||
#' to include the integer iteration number in the file name.
|
#' to include the integer iteration number in the file name.
|
||||||
#' E.g., with \code{save_name} = 'xgboost_%04d.model',
|
#' E.g., with \code{save_name} = 'xgboost_%04d.model',
|
||||||
#' the file saved at iteration 50 would be named "xgboost_0050.model".
|
#' the file saved at iteration 50 would be named "xgboost_0050.model".
|
||||||
#'
|
#'
|
||||||
#' @details
|
#' @details
|
||||||
#' This callback function allows to save an xgb-model file, either periodically after each \code{save_period}'s or at the end.
|
#' This callback function allows to save an xgb-model file, either periodically after each \code{save_period}'s or at the end.
|
||||||
#'
|
#'
|
||||||
#' Callback function expects the following values to be set in its calling frame:
|
#' Callback function expects the following values to be set in its calling frame:
|
||||||
#' \code{bst},
|
#' \code{bst},
|
||||||
#' \code{iteration},
|
#' \code{iteration},
|
||||||
#' \code{begin_iteration},
|
#' \code{begin_iteration},
|
||||||
#' \code{end_iteration}.
|
#' \code{end_iteration}.
|
||||||
#'
|
#'
|
||||||
#' @seealso
|
#' @seealso
|
||||||
#' \code{\link{callbacks}}
|
#' \code{\link{callbacks}}
|
||||||
#'
|
#'
|
||||||
#' @export
|
#' @export
|
||||||
cb.save.model <- function(save_period = 0, save_name = "xgboost.model") {
|
cb.save.model <- function(save_period = 0, save_name = "xgboost.model") {
|
||||||
|
|
||||||
if (save_period < 0)
|
if (save_period < 0)
|
||||||
stop("'save_period' cannot be negative")
|
stop("'save_period' cannot be negative")
|
||||||
|
|
||||||
callback <- function(env = parent.frame()) {
|
callback <- function(env = parent.frame()) {
|
||||||
if (is.null(env$bst))
|
if (is.null(env$bst))
|
||||||
stop("'save_model' callback requires the 'bst' booster object in its calling frame")
|
stop("'save_model' callback requires the 'bst' booster object in its calling frame")
|
||||||
|
|
||||||
if ((save_period > 0 && (env$iteration - env$begin_iteration) %% save_period == 0) ||
|
if ((save_period > 0 && (env$iteration - env$begin_iteration) %% save_period == 0) ||
|
||||||
(save_period == 0 && env$iteration == env$end_iteration))
|
(save_period == 0 && env$iteration == env$end_iteration))
|
||||||
xgb.save(env$bst, sprintf(save_name, env$iteration))
|
xgb.save(env$bst, sprintf(save_name, env$iteration))
|
||||||
@@ -445,16 +445,16 @@ cb.save.model <- function(save_period = 0, save_name = "xgboost.model") {
|
|||||||
|
|
||||||
|
|
||||||
#' Callback closure for returning cross-validation based predictions.
|
#' Callback closure for returning cross-validation based predictions.
|
||||||
#'
|
#'
|
||||||
#' @param save_models a flag for whether to save the folds' models.
|
#' @param save_models a flag for whether to save the folds' models.
|
||||||
#'
|
#'
|
||||||
#' @details
|
#' @details
|
||||||
#' This callback function saves predictions for all of the test folds,
|
#' This callback function saves predictions for all of the test folds,
|
||||||
#' and also allows to save the folds' models.
|
#' and also allows to save the folds' models.
|
||||||
#'
|
#'
|
||||||
#' It is a "finalizer" callback and it uses early stopping information whenever it is available,
|
#' It is a "finalizer" callback and it uses early stopping information whenever it is available,
|
||||||
#' thus it must be run after the early stopping callback if the early stopping is used.
|
#' thus it must be run after the early stopping callback if the early stopping is used.
|
||||||
#'
|
#'
|
||||||
#' Callback function expects the following values to be set in its calling frame:
|
#' Callback function expects the following values to be set in its calling frame:
|
||||||
#' \code{bst_folds},
|
#' \code{bst_folds},
|
||||||
#' \code{basket},
|
#' \code{basket},
|
||||||
@@ -463,36 +463,36 @@ cb.save.model <- function(save_period = 0, save_name = "xgboost.model") {
|
|||||||
#' \code{params},
|
#' \code{params},
|
||||||
#' \code{num_parallel_tree},
|
#' \code{num_parallel_tree},
|
||||||
#' \code{num_class}.
|
#' \code{num_class}.
|
||||||
#'
|
#'
|
||||||
#' @return
|
#' @return
|
||||||
#' Predictions are returned inside of the \code{pred} element, which is either a vector or a matrix,
|
#' Predictions are returned inside of the \code{pred} element, which is either a vector or a matrix,
|
||||||
#' depending on the number of prediction outputs per data row. The order of predictions corresponds
|
#' depending on the number of prediction outputs per data row. The order of predictions corresponds
|
||||||
#' to the order of rows in the original dataset. Note that when a custom \code{folds} list is
|
#' to the order of rows in the original dataset. Note that when a custom \code{folds} list is
|
||||||
#' provided in \code{xgb.cv}, the predictions would only be returned properly when this list is a
|
#' provided in \code{xgb.cv}, the predictions would only be returned properly when this list is a
|
||||||
#' non-overlapping list of k sets of indices, as in a standard k-fold CV. The predictions would not be
|
#' non-overlapping list of k sets of indices, as in a standard k-fold CV. The predictions would not be
|
||||||
#' meaningful when user-profided folds have overlapping indices as in, e.g., random sampling splits.
|
#' meaningful when user-provided folds have overlapping indices as in, e.g., random sampling splits.
|
||||||
#' When some of the indices in the training dataset are not included into user-provided \code{folds},
|
#' When some of the indices in the training dataset are not included into user-provided \code{folds},
|
||||||
#' their prediction value would be \code{NA}.
|
#' their prediction value would be \code{NA}.
|
||||||
#'
|
#'
|
||||||
#' @seealso
|
#' @seealso
|
||||||
#' \code{\link{callbacks}}
|
#' \code{\link{callbacks}}
|
||||||
#'
|
#'
|
||||||
#' @export
|
#' @export
|
||||||
cb.cv.predict <- function(save_models = FALSE) {
|
cb.cv.predict <- function(save_models = FALSE) {
|
||||||
|
|
||||||
finalizer <- function(env) {
|
finalizer <- function(env) {
|
||||||
if (is.null(env$basket) || is.null(env$bst_folds))
|
if (is.null(env$basket) || is.null(env$bst_folds))
|
||||||
stop("'cb.cv.predict' callback requires 'basket' and 'bst_folds' lists in its calling frame")
|
stop("'cb.cv.predict' callback requires 'basket' and 'bst_folds' lists in its calling frame")
|
||||||
|
|
||||||
N <- nrow(env$data)
|
N <- nrow(env$data)
|
||||||
pred <-
|
pred <-
|
||||||
if (env$num_class > 1) {
|
if (env$num_class > 1) {
|
||||||
matrix(NA_real_, N, env$num_class)
|
matrix(NA_real_, N, env$num_class)
|
||||||
} else {
|
} else {
|
||||||
rep(NA_real_, N)
|
rep(NA_real_, N)
|
||||||
}
|
}
|
||||||
|
|
||||||
ntreelimit <- NVL(env$basket$best_ntreelimit,
|
ntreelimit <- NVL(env$basket$best_ntreelimit,
|
||||||
env$end_iteration * env$num_parallel_tree)
|
env$end_iteration * env$num_parallel_tree)
|
||||||
if (NVL(env$params[['booster']], '') == 'gblinear') {
|
if (NVL(env$params[['booster']], '') == 'gblinear') {
|
||||||
ntreelimit <- 0 # must be 0 for gblinear
|
ntreelimit <- 0 # must be 0 for gblinear
|
||||||
@@ -569,7 +569,7 @@ cb.cv.predict <- function(save_models = FALSE) {
|
|||||||
#' # Extract the coefficients' path and plot them vs boosting iteration number:
|
#' # Extract the coefficients' path and plot them vs boosting iteration number:
|
||||||
#' coef_path <- xgb.gblinear.history(bst)
|
#' coef_path <- xgb.gblinear.history(bst)
|
||||||
#' matplot(coef_path, type = 'l')
|
#' matplot(coef_path, type = 'l')
|
||||||
#'
|
#'
|
||||||
#' # With the deterministic coordinate descent updater, it is safer to use higher learning rates.
|
#' # With the deterministic coordinate descent updater, it is safer to use higher learning rates.
|
||||||
#' # Will try the classical componentwise boosting which selects a single best feature per round:
|
#' # Will try the classical componentwise boosting which selects a single best feature per round:
|
||||||
#' bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 200, eta = 0.8,
|
#' bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 200, eta = 0.8,
|
||||||
@@ -586,7 +586,7 @@ cb.cv.predict <- function(save_models = FALSE) {
|
|||||||
#' # coefficients in the CV fold #3
|
#' # coefficients in the CV fold #3
|
||||||
#' xgb.gblinear.history(bst)[[3]] %>% matplot(type = 'l')
|
#' xgb.gblinear.history(bst)[[3]] %>% matplot(type = 'l')
|
||||||
#'
|
#'
|
||||||
#'
|
#'
|
||||||
#' #### Multiclass classification:
|
#' #### Multiclass classification:
|
||||||
#' #
|
#' #
|
||||||
#' dtrain <- xgb.DMatrix(scale(x), label = as.numeric(iris$Species) - 1)
|
#' dtrain <- xgb.DMatrix(scale(x), label = as.numeric(iris$Species) - 1)
|
||||||
@@ -681,9 +681,9 @@ cb.gblinear.history <- function(sparse=FALSE) {
|
|||||||
#' using the \code{cb.gblinear.history()} callback.
|
#' using the \code{cb.gblinear.history()} callback.
|
||||||
#' @param class_index zero-based class index to extract the coefficients for only that
|
#' @param class_index zero-based class index to extract the coefficients for only that
|
||||||
#' specific class in a multinomial multiclass model. When it is NULL, all the
|
#' specific class in a multinomial multiclass model. When it is NULL, all the
|
||||||
#' coeffients are returned. Has no effect in non-multiclass models.
|
#' coefficients are returned. Has no effect in non-multiclass models.
|
||||||
#'
|
#'
|
||||||
#' @return
|
#' @return
|
||||||
#' For an \code{xgb.train} result, a matrix (either dense or sparse) with the columns
|
#' For an \code{xgb.train} result, a matrix (either dense or sparse) with the columns
|
||||||
#' corresponding to iteration's coefficients (in the order as \code{xgb.dump()} would
|
#' corresponding to iteration's coefficients (in the order as \code{xgb.dump()} would
|
||||||
#' return) and the rows corresponding to boosting iterations.
|
#' return) and the rows corresponding to boosting iterations.
|
||||||
@@ -731,7 +731,7 @@ xgb.gblinear.history <- function(model, class_index = NULL) {
|
|||||||
coef_path <- environment(model$callbacks$cb.gblinear.history)[["coefs"]]
|
coef_path <- environment(model$callbacks$cb.gblinear.history)[["coefs"]]
|
||||||
if (!is.null(class_index) && num_class > 1) {
|
if (!is.null(class_index) && num_class > 1) {
|
||||||
coef_path <- if (is.list(coef_path)) {
|
coef_path <- if (is.list(coef_path)) {
|
||||||
lapply(coef_path,
|
lapply(coef_path,
|
||||||
function(x) x[, seq(1 + class_index, by=num_class, length.out=num_feat)])
|
function(x) x[, seq(1 + class_index, by=num_class, length.out=num_feat)])
|
||||||
} else {
|
} else {
|
||||||
coef_path <- coef_path[, seq(1 + class_index, by=num_class, length.out=num_feat)]
|
coef_path <- coef_path[, seq(1 + class_index, by=num_class, length.out=num_feat)]
|
||||||
@@ -743,7 +743,7 @@ xgb.gblinear.history <- function(model, class_index = NULL) {
|
|||||||
|
|
||||||
#
|
#
|
||||||
# Internal utility functions for callbacks ------------------------------------
|
# Internal utility functions for callbacks ------------------------------------
|
||||||
#
|
#
|
||||||
|
|
||||||
# Format the evaluation metric string
|
# Format the evaluation metric string
|
||||||
format.eval.string <- function(iter, eval_res, eval_err = NULL) {
|
format.eval.string <- function(iter, eval_res, eval_err = NULL) {
|
||||||
@@ -773,7 +773,7 @@ callback.calls <- function(cb_list) {
|
|||||||
unlist(lapply(cb_list, function(x) attr(x, 'call')))
|
unlist(lapply(cb_list, function(x) attr(x, 'call')))
|
||||||
}
|
}
|
||||||
|
|
||||||
# Add a callback cb to the list and make sure that
|
# Add a callback cb to the list and make sure that
|
||||||
# cb.early.stop and cb.cv.predict are at the end of the list
|
# cb.early.stop and cb.cv.predict are at the end of the list
|
||||||
# with cb.cv.predict being the last (when present)
|
# with cb.cv.predict being the last (when present)
|
||||||
add.cb <- function(cb_list, cb) {
|
add.cb <- function(cb_list, cb) {
|
||||||
@@ -782,11 +782,11 @@ add.cb <- function(cb_list, cb) {
|
|||||||
if ('cb.early.stop' %in% names(cb_list)) {
|
if ('cb.early.stop' %in% names(cb_list)) {
|
||||||
cb_list <- c(cb_list, cb_list['cb.early.stop'])
|
cb_list <- c(cb_list, cb_list['cb.early.stop'])
|
||||||
# this removes only the first one
|
# this removes only the first one
|
||||||
cb_list['cb.early.stop'] <- NULL
|
cb_list['cb.early.stop'] <- NULL
|
||||||
}
|
}
|
||||||
if ('cb.cv.predict' %in% names(cb_list)) {
|
if ('cb.cv.predict' %in% names(cb_list)) {
|
||||||
cb_list <- c(cb_list, cb_list['cb.cv.predict'])
|
cb_list <- c(cb_list, cb_list['cb.cv.predict'])
|
||||||
cb_list['cb.cv.predict'] <- NULL
|
cb_list['cb.cv.predict'] <- NULL
|
||||||
}
|
}
|
||||||
cb_list
|
cb_list
|
||||||
}
|
}
|
||||||
@@ -796,7 +796,7 @@ categorize.callbacks <- function(cb_list) {
|
|||||||
list(
|
list(
|
||||||
pre_iter = Filter(function(x) {
|
pre_iter = Filter(function(x) {
|
||||||
pre <- attr(x, 'is_pre_iteration')
|
pre <- attr(x, 'is_pre_iteration')
|
||||||
!is.null(pre) && pre
|
!is.null(pre) && pre
|
||||||
}, cb_list),
|
}, cb_list),
|
||||||
post_iter = Filter(function(x) {
|
post_iter = Filter(function(x) {
|
||||||
pre <- attr(x, 'is_pre_iteration')
|
pre <- attr(x, 'is_pre_iteration')
|
||||||
|
|||||||
@@ -28,12 +28,12 @@ NVL <- function(x, val) {
|
|||||||
# Merges booster params with whatever is provided in ...
|
# Merges booster params with whatever is provided in ...
|
||||||
# plus runs some checks
|
# plus runs some checks
|
||||||
check.booster.params <- function(params, ...) {
|
check.booster.params <- function(params, ...) {
|
||||||
if (typeof(params) != "list")
|
if (typeof(params) != "list")
|
||||||
stop("params must be a list")
|
stop("params must be a list")
|
||||||
|
|
||||||
# in R interface, allow for '.' instead of '_' in parameter names
|
# in R interface, allow for '.' instead of '_' in parameter names
|
||||||
names(params) <- gsub("\\.", "_", names(params))
|
names(params) <- gsub("\\.", "_", names(params))
|
||||||
|
|
||||||
# merge parameters from the params and the dots-expansion
|
# merge parameters from the params and the dots-expansion
|
||||||
dot_params <- list(...)
|
dot_params <- list(...)
|
||||||
names(dot_params) <- gsub("\\.", "_", names(dot_params))
|
names(dot_params) <- gsub("\\.", "_", names(dot_params))
|
||||||
@@ -41,15 +41,15 @@ check.booster.params <- function(params, ...) {
|
|||||||
names(dot_params))) > 0)
|
names(dot_params))) > 0)
|
||||||
stop("Same parameters in 'params' and in the call are not allowed. Please check your 'params' list.")
|
stop("Same parameters in 'params' and in the call are not allowed. Please check your 'params' list.")
|
||||||
params <- c(params, dot_params)
|
params <- c(params, dot_params)
|
||||||
|
|
||||||
# providing a parameter multiple times makes sense only for 'eval_metric'
|
# providing a parameter multiple times makes sense only for 'eval_metric'
|
||||||
name_freqs <- table(names(params))
|
name_freqs <- table(names(params))
|
||||||
multi_names <- setdiff(names(name_freqs[name_freqs > 1]), 'eval_metric')
|
multi_names <- setdiff(names(name_freqs[name_freqs > 1]), 'eval_metric')
|
||||||
if (length(multi_names) > 0) {
|
if (length(multi_names) > 0) {
|
||||||
warning("The following parameters were provided multiple times:\n\t",
|
warning("The following parameters were provided multiple times:\n\t",
|
||||||
paste(multi_names, collapse = ', '), "\n Only the last value for each of them will be used.\n")
|
paste(multi_names, collapse = ', '), "\n Only the last value for each of them will be used.\n")
|
||||||
# While xgboost internals would choose the last value for a multiple-times parameter,
|
# While xgboost internals would choose the last value for a multiple-times parameter,
|
||||||
# enforce it here in R as well (b/c multi-parameters might be used further in R code,
|
# enforce it here in R as well (b/c multi-parameters might be used further in R code,
|
||||||
# and R takes the 1st value when multiple elements with the same name are present in a list).
|
# and R takes the 1st value when multiple elements with the same name are present in a list).
|
||||||
for (n in multi_names) {
|
for (n in multi_names) {
|
||||||
del_idx <- which(n == names(params))
|
del_idx <- which(n == names(params))
|
||||||
@@ -57,25 +57,25 @@ check.booster.params <- function(params, ...) {
|
|||||||
params[[del_idx]] <- NULL
|
params[[del_idx]] <- NULL
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
# for multiclass, expect num_class to be set
|
# for multiclass, expect num_class to be set
|
||||||
if (typeof(params[['objective']]) == "character" &&
|
if (typeof(params[['objective']]) == "character" &&
|
||||||
substr(NVL(params[['objective']], 'x'), 1, 6) == 'multi:' &&
|
substr(NVL(params[['objective']], 'x'), 1, 6) == 'multi:' &&
|
||||||
as.numeric(NVL(params[['num_class']], 0)) < 2) {
|
as.numeric(NVL(params[['num_class']], 0)) < 2) {
|
||||||
stop("'num_class' > 1 parameter must be set for multiclass classification")
|
stop("'num_class' > 1 parameter must be set for multiclass classification")
|
||||||
}
|
}
|
||||||
|
|
||||||
# monotone_constraints parser
|
# monotone_constraints parser
|
||||||
|
|
||||||
if (!is.null(params[['monotone_constraints']]) &&
|
if (!is.null(params[['monotone_constraints']]) &&
|
||||||
typeof(params[['monotone_constraints']]) != "character") {
|
typeof(params[['monotone_constraints']]) != "character") {
|
||||||
vec2str = paste(params[['monotone_constraints']], collapse = ',')
|
vec2str = paste(params[['monotone_constraints']], collapse = ',')
|
||||||
vec2str = paste0('(', vec2str, ')')
|
vec2str = paste0('(', vec2str, ')')
|
||||||
params[['monotone_constraints']] = vec2str
|
params[['monotone_constraints']] = vec2str
|
||||||
}
|
}
|
||||||
|
|
||||||
# interaction constraints parser (convert from list of column indices to string)
|
# interaction constraints parser (convert from list of column indices to string)
|
||||||
if (!is.null(params[['interaction_constraints']]) &&
|
if (!is.null(params[['interaction_constraints']]) &&
|
||||||
typeof(params[['interaction_constraints']]) != "character"){
|
typeof(params[['interaction_constraints']]) != "character"){
|
||||||
# check input class
|
# check input class
|
||||||
if (class(params[['interaction_constraints']]) != 'list') stop('interaction_constraints should be class list')
|
if (class(params[['interaction_constraints']]) != 'list') stop('interaction_constraints should be class list')
|
||||||
@@ -96,10 +96,10 @@ check.booster.params <- function(params, ...) {
|
|||||||
check.custom.obj <- function(env = parent.frame()) {
|
check.custom.obj <- function(env = parent.frame()) {
|
||||||
if (!is.null(env$params[['objective']]) && !is.null(env$obj))
|
if (!is.null(env$params[['objective']]) && !is.null(env$obj))
|
||||||
stop("Setting objectives in 'params' and 'obj' at the same time is not allowed")
|
stop("Setting objectives in 'params' and 'obj' at the same time is not allowed")
|
||||||
|
|
||||||
if (!is.null(env$obj) && typeof(env$obj) != 'closure')
|
if (!is.null(env$obj) && typeof(env$obj) != 'closure')
|
||||||
stop("'obj' must be a function")
|
stop("'obj' must be a function")
|
||||||
|
|
||||||
# handle the case when custom objective function was provided through params
|
# handle the case when custom objective function was provided through params
|
||||||
if (!is.null(env$params[['objective']]) &&
|
if (!is.null(env$params[['objective']]) &&
|
||||||
typeof(env$params$objective) == 'closure') {
|
typeof(env$params$objective) == 'closure') {
|
||||||
@@ -113,21 +113,21 @@ check.custom.obj <- function(env = parent.frame()) {
|
|||||||
check.custom.eval <- function(env = parent.frame()) {
|
check.custom.eval <- function(env = parent.frame()) {
|
||||||
if (!is.null(env$params[['eval_metric']]) && !is.null(env$feval))
|
if (!is.null(env$params[['eval_metric']]) && !is.null(env$feval))
|
||||||
stop("Setting evaluation metrics in 'params' and 'feval' at the same time is not allowed")
|
stop("Setting evaluation metrics in 'params' and 'feval' at the same time is not allowed")
|
||||||
|
|
||||||
if (!is.null(env$feval) && typeof(env$feval) != 'closure')
|
if (!is.null(env$feval) && typeof(env$feval) != 'closure')
|
||||||
stop("'feval' must be a function")
|
stop("'feval' must be a function")
|
||||||
|
|
||||||
# handle a situation when custom eval function was provided through params
|
# handle a situation when custom eval function was provided through params
|
||||||
if (!is.null(env$params[['eval_metric']]) &&
|
if (!is.null(env$params[['eval_metric']]) &&
|
||||||
typeof(env$params$eval_metric) == 'closure') {
|
typeof(env$params$eval_metric) == 'closure') {
|
||||||
env$feval <- env$params$eval_metric
|
env$feval <- env$params$eval_metric
|
||||||
env$params$eval_metric <- NULL
|
env$params$eval_metric <- NULL
|
||||||
}
|
}
|
||||||
|
|
||||||
# require maximize to be set when custom feval and early stopping are used together
|
# require maximize to be set when custom feval and early stopping are used together
|
||||||
if (!is.null(env$feval) &&
|
if (!is.null(env$feval) &&
|
||||||
is.null(env$maximize) && (
|
is.null(env$maximize) && (
|
||||||
!is.null(env$early_stopping_rounds) ||
|
!is.null(env$early_stopping_rounds) ||
|
||||||
has.callbacks(env$callbacks, 'cb.early.stop')))
|
has.callbacks(env$callbacks, 'cb.early.stop')))
|
||||||
stop("Please set 'maximize' to indicate whether the evaluation metric needs to be maximized or not")
|
stop("Please set 'maximize' to indicate whether the evaluation metric needs to be maximized or not")
|
||||||
}
|
}
|
||||||
@@ -145,7 +145,7 @@ xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {
|
|||||||
if (is.null(obj)) {
|
if (is.null(obj)) {
|
||||||
.Call(XGBoosterUpdateOneIter_R, booster_handle, as.integer(iter), dtrain)
|
.Call(XGBoosterUpdateOneIter_R, booster_handle, as.integer(iter), dtrain)
|
||||||
} else {
|
} else {
|
||||||
pred <- predict(booster_handle, dtrain)
|
pred <- predict(booster_handle, dtrain, training = TRUE)
|
||||||
gpair <- obj(pred, dtrain)
|
gpair <- obj(pred, dtrain)
|
||||||
.Call(XGBoosterBoostOneIter_R, booster_handle, dtrain, gpair$grad, gpair$hess)
|
.Call(XGBoosterBoostOneIter_R, booster_handle, dtrain, gpair$grad, gpair$hess)
|
||||||
}
|
}
|
||||||
@@ -154,15 +154,15 @@ xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {
|
|||||||
|
|
||||||
|
|
||||||
# Evaluate one iteration.
|
# Evaluate one iteration.
|
||||||
# Returns a named vector of evaluation metrics
|
# Returns a named vector of evaluation metrics
|
||||||
# with the names in a 'datasetname-metricname' format.
|
# with the names in a 'datasetname-metricname' format.
|
||||||
xgb.iter.eval <- function(booster_handle, watchlist, iter, feval = NULL) {
|
xgb.iter.eval <- function(booster_handle, watchlist, iter, feval = NULL) {
|
||||||
if (!identical(class(booster_handle), "xgb.Booster.handle"))
|
if (!identical(class(booster_handle), "xgb.Booster.handle"))
|
||||||
stop("class of booster_handle must be xgb.Booster.handle")
|
stop("class of booster_handle must be xgb.Booster.handle")
|
||||||
|
|
||||||
if (length(watchlist) == 0)
|
if (length(watchlist) == 0)
|
||||||
return(NULL)
|
return(NULL)
|
||||||
|
|
||||||
evnames <- names(watchlist)
|
evnames <- names(watchlist)
|
||||||
if (is.null(feval)) {
|
if (is.null(feval)) {
|
||||||
msg <- .Call(XGBoosterEvalOneIter_R, booster_handle, as.integer(iter), watchlist, as.list(evnames))
|
msg <- .Call(XGBoosterEvalOneIter_R, booster_handle, as.integer(iter), watchlist, as.list(evnames))
|
||||||
@@ -189,7 +189,7 @@ xgb.iter.eval <- function(booster_handle, watchlist, iter, feval = NULL) {
|
|||||||
|
|
||||||
# Generates random (stratified if needed) CV folds
|
# Generates random (stratified if needed) CV folds
|
||||||
generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
|
generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
|
||||||
|
|
||||||
# cannot do it for rank
|
# cannot do it for rank
|
||||||
if (exists('objective', where = params) &&
|
if (exists('objective', where = params) &&
|
||||||
is.character(params$objective) &&
|
is.character(params$objective) &&
|
||||||
@@ -209,13 +209,14 @@ generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
|
|||||||
if (exists('objective', where = params) &&
|
if (exists('objective', where = params) &&
|
||||||
is.character(params$objective)) {
|
is.character(params$objective)) {
|
||||||
# If 'objective' provided in params, assume that y is a classification label
|
# If 'objective' provided in params, assume that y is a classification label
|
||||||
# unless objective is reg:linear
|
# unless objective is reg:squarederror
|
||||||
if (params$objective != 'reg:linear')
|
if (params$objective != 'reg:squarederror')
|
||||||
y <- factor(y)
|
y <- factor(y)
|
||||||
} else {
|
} else {
|
||||||
# If no 'objective' given in params, it means that user either wants to use
|
# If no 'objective' given in params, it means that user either wants to
|
||||||
# the default 'reg:linear' objective or has provided a custom obj function.
|
# use the default 'reg:squarederror' objective or has provided a custom
|
||||||
# Here, assume classification setting when y has 5 or less unique values:
|
# obj function. Here, assume classification setting when y has 5 or less
|
||||||
|
# unique values:
|
||||||
if (length(unique(y)) <= 5)
|
if (length(unique(y)) <= 5)
|
||||||
y <- factor(y)
|
y <- factor(y)
|
||||||
}
|
}
|
||||||
@@ -293,22 +294,22 @@ xgb.createFolds <- function(y, k = 10)
|
|||||||
#
|
#
|
||||||
|
|
||||||
#' Deprecation notices.
|
#' Deprecation notices.
|
||||||
#'
|
#'
|
||||||
#' At this time, some of the parameter names were changed in order to make the code style more uniform.
|
#' At this time, some of the parameter names were changed in order to make the code style more uniform.
|
||||||
#' The deprecated parameters would be removed in the next release.
|
#' The deprecated parameters would be removed in the next release.
|
||||||
#'
|
#'
|
||||||
#' To see all the current deprecated and new parameters, check the \code{xgboost:::depr_par_lut} table.
|
#' To see all the current deprecated and new parameters, check the \code{xgboost:::depr_par_lut} table.
|
||||||
#'
|
#'
|
||||||
#' A deprecation warning is shown when any of the deprecated parameters is used in a call.
|
#' A deprecation warning is shown when any of the deprecated parameters is used in a call.
|
||||||
#' An additional warning is shown when there was a partial match to a deprecated parameter
|
#' An additional warning is shown when there was a partial match to a deprecated parameter
|
||||||
#' (as R is able to partially match parameter names).
|
#' (as R is able to partially match parameter names).
|
||||||
#'
|
#'
|
||||||
#' @name xgboost-deprecated
|
#' @name xgboost-deprecated
|
||||||
NULL
|
NULL
|
||||||
|
|
||||||
# Lookup table for the deprecated parameters bookkeeping
|
# Lookup table for the deprecated parameters bookkeeping
|
||||||
depr_par_lut <- matrix(c(
|
depr_par_lut <- matrix(c(
|
||||||
'print.every.n', 'print_every_n',
|
'print.every.n', 'print_every_n',
|
||||||
'early.stop.round', 'early_stopping_rounds',
|
'early.stop.round', 'early_stopping_rounds',
|
||||||
'training.data', 'data',
|
'training.data', 'data',
|
||||||
'with.stats', 'with_stats',
|
'with.stats', 'with_stats',
|
||||||
|
|||||||
@@ -51,11 +51,13 @@ is.null.handle <- function(handle) {
|
|||||||
# Return a verified to be valid handle out of either xgb.Booster.handle or xgb.Booster
|
# Return a verified to be valid handle out of either xgb.Booster.handle or xgb.Booster
|
||||||
# internal utility function
|
# internal utility function
|
||||||
xgb.get.handle <- function(object) {
|
xgb.get.handle <- function(object) {
|
||||||
handle <- switch(class(object)[1],
|
if (inherits(object, "xgb.Booster")) {
|
||||||
xgb.Booster = object$handle,
|
handle <- object$handle
|
||||||
xgb.Booster.handle = object,
|
} else if (inherits(object, "xgb.Booster.handle")) {
|
||||||
|
handle <- object
|
||||||
|
} else {
|
||||||
stop("argument must be of either xgb.Booster or xgb.Booster.handle class")
|
stop("argument must be of either xgb.Booster or xgb.Booster.handle class")
|
||||||
)
|
}
|
||||||
if (is.null.handle(handle)) {
|
if (is.null.handle(handle)) {
|
||||||
stop("invalid xgb.Booster.handle")
|
stop("invalid xgb.Booster.handle")
|
||||||
}
|
}
|
||||||
@@ -81,7 +83,7 @@ xgb.get.handle <- function(object) {
|
|||||||
#' its handle (pointer) to an internal xgboost model would be invalid. The majority of xgboost methods
|
#' its handle (pointer) to an internal xgboost model would be invalid. The majority of xgboost methods
|
||||||
#' should still work for such a model object since those methods would be using
|
#' should still work for such a model object since those methods would be using
|
||||||
#' \code{xgb.Booster.complete} internally. However, one might find it to be more efficient to call the
|
#' \code{xgb.Booster.complete} internally. However, one might find it to be more efficient to call the
|
||||||
#' \code{xgb.Booster.complete} function explicitely once after loading a model as an R-object.
|
#' \code{xgb.Booster.complete} function explicitly once after loading a model as an R-object.
|
||||||
#' That would prevent further repeated implicit reconstruction of an internal booster model.
|
#' That would prevent further repeated implicit reconstruction of an internal booster model.
|
||||||
#'
|
#'
|
||||||
#' @return
|
#' @return
|
||||||
@@ -95,6 +97,7 @@ xgb.get.handle <- function(object) {
|
|||||||
#' saveRDS(bst, "xgb.model.rds")
|
#' saveRDS(bst, "xgb.model.rds")
|
||||||
#'
|
#'
|
||||||
#' bst1 <- readRDS("xgb.model.rds")
|
#' bst1 <- readRDS("xgb.model.rds")
|
||||||
|
#' if (file.exists("xgb.model.rds")) file.remove("xgb.model.rds")
|
||||||
#' # the handle is invalid:
|
#' # the handle is invalid:
|
||||||
#' print(bst1$handle)
|
#' print(bst1$handle)
|
||||||
#'
|
#'
|
||||||
@@ -162,7 +165,7 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
|
|||||||
#'
|
#'
|
||||||
#' With \code{predinteraction = TRUE}, SHAP values of contributions of interaction of each pair of features
|
#' With \code{predinteraction = TRUE}, SHAP values of contributions of interaction of each pair of features
|
||||||
#' are computed. Note that this operation might be rather expensive in terms of compute and memory.
|
#' are computed. Note that this operation might be rather expensive in terms of compute and memory.
|
||||||
#' Since it quadratically depends on the number of features, it is recommended to perfom selection
|
#' Since it quadratically depends on the number of features, it is recommended to perform selection
|
||||||
#' of the most important features first. See below about the format of the returned results.
|
#' of the most important features first. See below about the format of the returned results.
|
||||||
#'
|
#'
|
||||||
#' @return
|
#' @return
|
||||||
@@ -190,7 +193,7 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
|
|||||||
#'
|
#'
|
||||||
#' @seealso
|
#' @seealso
|
||||||
#' \code{\link{xgb.train}}.
|
#' \code{\link{xgb.train}}.
|
||||||
#'
|
#'
|
||||||
#' @references
|
#' @references
|
||||||
#'
|
#'
|
||||||
#' Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions", NIPS Proceedings 2017, \url{https://arxiv.org/abs/1705.07874}
|
#' Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions", NIPS Proceedings 2017, \url{https://arxiv.org/abs/1705.07874}
|
||||||
@@ -285,7 +288,7 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
|
|||||||
#' @export
|
#' @export
|
||||||
predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FALSE, ntreelimit = NULL,
|
predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FALSE, ntreelimit = NULL,
|
||||||
predleaf = FALSE, predcontrib = FALSE, approxcontrib = FALSE, predinteraction = FALSE,
|
predleaf = FALSE, predcontrib = FALSE, approxcontrib = FALSE, predinteraction = FALSE,
|
||||||
reshape = FALSE, ...) {
|
reshape = FALSE, training = FALSE, ...) {
|
||||||
|
|
||||||
object <- xgb.Booster.complete(object, saveraw = FALSE)
|
object <- xgb.Booster.complete(object, saveraw = FALSE)
|
||||||
if (!inherits(newdata, "xgb.DMatrix"))
|
if (!inherits(newdata, "xgb.DMatrix"))
|
||||||
@@ -304,7 +307,8 @@ predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FA
|
|||||||
option <- 0L + 1L * as.logical(outputmargin) + 2L * as.logical(predleaf) + 4L * as.logical(predcontrib) +
|
option <- 0L + 1L * as.logical(outputmargin) + 2L * as.logical(predleaf) + 4L * as.logical(predcontrib) +
|
||||||
8L * as.logical(approxcontrib) + 16L * as.logical(predinteraction)
|
8L * as.logical(approxcontrib) + 16L * as.logical(predinteraction)
|
||||||
|
|
||||||
ret <- .Call(XGBoosterPredict_R, object$handle, newdata, option[1], as.integer(ntreelimit))
|
ret <- .Call(XGBoosterPredict_R, object$handle, newdata, option[1],
|
||||||
|
as.integer(ntreelimit), as.integer(training))
|
||||||
|
|
||||||
n_ret <- length(ret)
|
n_ret <- length(ret)
|
||||||
n_row <- nrow(newdata)
|
n_row <- nrow(newdata)
|
||||||
@@ -418,6 +422,7 @@ predict.xgb.Booster.handle <- function(object, ...) {
|
|||||||
#'
|
#'
|
||||||
#' xgb.save(bst, 'xgb.model')
|
#' xgb.save(bst, 'xgb.model')
|
||||||
#' bst1 <- xgb.load('xgb.model')
|
#' bst1 <- xgb.load('xgb.model')
|
||||||
|
#' if (file.exists('xgb.model')) file.remove('xgb.model')
|
||||||
#' print(xgb.attr(bst1, "my_attribute"))
|
#' print(xgb.attr(bst1, "my_attribute"))
|
||||||
#' print(xgb.attributes(bst1))
|
#' print(xgb.attributes(bst1))
|
||||||
#'
|
#'
|
||||||
|
|||||||
@@ -1,24 +1,25 @@
|
|||||||
#' Construct xgb.DMatrix object
|
#' Construct xgb.DMatrix object
|
||||||
#'
|
#'
|
||||||
#' Construct xgb.DMatrix object from either a dense matrix, a sparse matrix, or a local file.
|
#' Construct xgb.DMatrix object from either a dense matrix, a sparse matrix, or a local file.
|
||||||
#' Supported input file formats are either a libsvm text file or a binary file that was created previously by
|
#' Supported input file formats are either a libsvm text file or a binary file that was created previously by
|
||||||
#' \code{\link{xgb.DMatrix.save}}).
|
#' \code{\link{xgb.DMatrix.save}}).
|
||||||
#'
|
#'
|
||||||
#' @param data a \code{matrix} object (either numeric or integer), a \code{dgCMatrix} object, or a character
|
#' @param data a \code{matrix} object (either numeric or integer), a \code{dgCMatrix} object, or a character
|
||||||
#' string representing a filename.
|
#' string representing a filename.
|
||||||
#' @param info a named list of additional information to store in the \code{xgb.DMatrix} object.
|
#' @param info a named list of additional information to store in the \code{xgb.DMatrix} object.
|
||||||
#' See \code{\link{setinfo}} for the specific allowed kinds of
|
#' See \code{\link{setinfo}} for the specific allowed kinds of
|
||||||
#' @param missing a float value to represents missing values in data (used only when input is a dense matrix).
|
#' @param missing a float value to represents missing values in data (used only when input is a dense matrix).
|
||||||
#' It is useful when a 0 or some other extreme value represents missing values in data.
|
#' It is useful when a 0 or some other extreme value represents missing values in data.
|
||||||
#' @param silent whether to suppress printing an informational message after loading from a file.
|
#' @param silent whether to suppress printing an informational message after loading from a file.
|
||||||
#' @param ... the \code{info} data could be passed directly as parameters, without creating an \code{info} list.
|
#' @param ... the \code{info} data could be passed directly as parameters, without creating an \code{info} list.
|
||||||
#'
|
#'
|
||||||
#' @examples
|
#' @examples
|
||||||
#' data(agaricus.train, package='xgboost')
|
#' data(agaricus.train, package='xgboost')
|
||||||
#' train <- agaricus.train
|
#' train <- agaricus.train
|
||||||
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
|
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
|
||||||
#' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
|
#' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
|
||||||
#' dtrain <- xgb.DMatrix('xgb.DMatrix.data')
|
#' dtrain <- xgb.DMatrix('xgb.DMatrix.data')
|
||||||
|
#' if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
|
||||||
#' @export
|
#' @export
|
||||||
xgb.DMatrix <- function(data, info = list(), missing = NA, silent = FALSE, ...) {
|
xgb.DMatrix <- function(data, info = list(), missing = NA, silent = FALSE, ...) {
|
||||||
cnames <- NULL
|
cnames <- NULL
|
||||||
@@ -78,23 +79,23 @@ xgb.get.DMatrix <- function(data, label = NULL, missing = NA, weight = NULL) {
|
|||||||
|
|
||||||
|
|
||||||
#' Dimensions of xgb.DMatrix
|
#' Dimensions of xgb.DMatrix
|
||||||
#'
|
#'
|
||||||
#' Returns a vector of numbers of rows and of columns in an \code{xgb.DMatrix}.
|
#' Returns a vector of numbers of rows and of columns in an \code{xgb.DMatrix}.
|
||||||
#' @param x Object of class \code{xgb.DMatrix}
|
#' @param x Object of class \code{xgb.DMatrix}
|
||||||
#'
|
#'
|
||||||
#' @details
|
#' @details
|
||||||
#' Note: since \code{nrow} and \code{ncol} internally use \code{dim}, they can also
|
#' Note: since \code{nrow} and \code{ncol} internally use \code{dim}, they can also
|
||||||
#' be directly used with an \code{xgb.DMatrix} object.
|
#' be directly used with an \code{xgb.DMatrix} object.
|
||||||
#'
|
#'
|
||||||
#' @examples
|
#' @examples
|
||||||
#' data(agaricus.train, package='xgboost')
|
#' data(agaricus.train, package='xgboost')
|
||||||
#' train <- agaricus.train
|
#' train <- agaricus.train
|
||||||
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
|
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
|
||||||
#'
|
#'
|
||||||
#' stopifnot(nrow(dtrain) == nrow(train$data))
|
#' stopifnot(nrow(dtrain) == nrow(train$data))
|
||||||
#' stopifnot(ncol(dtrain) == ncol(train$data))
|
#' stopifnot(ncol(dtrain) == ncol(train$data))
|
||||||
#' stopifnot(all(dim(dtrain) == dim(train$data)))
|
#' stopifnot(all(dim(dtrain) == dim(train$data)))
|
||||||
#'
|
#'
|
||||||
#' @export
|
#' @export
|
||||||
dim.xgb.DMatrix <- function(x) {
|
dim.xgb.DMatrix <- function(x) {
|
||||||
c(.Call(XGDMatrixNumRow_R, x), .Call(XGDMatrixNumCol_R, x))
|
c(.Call(XGDMatrixNumRow_R, x), .Call(XGDMatrixNumCol_R, x))
|
||||||
@@ -102,14 +103,14 @@ dim.xgb.DMatrix <- function(x) {
|
|||||||
|
|
||||||
|
|
||||||
#' Handling of column names of \code{xgb.DMatrix}
|
#' Handling of column names of \code{xgb.DMatrix}
|
||||||
#'
|
#'
|
||||||
#' Only column names are supported for \code{xgb.DMatrix}, thus setting of
|
#' Only column names are supported for \code{xgb.DMatrix}, thus setting of
|
||||||
#' row names would have no effect and returnten row names would be NULL.
|
#' row names would have no effect and returned row names would be NULL.
|
||||||
#'
|
#'
|
||||||
#' @param x object of class \code{xgb.DMatrix}
|
#' @param x object of class \code{xgb.DMatrix}
|
||||||
#' @param value a list of two elements: the first one is ignored
|
#' @param value a list of two elements: the first one is ignored
|
||||||
#' and the second one is column names
|
#' and the second one is column names
|
||||||
#'
|
#'
|
||||||
#' @details
|
#' @details
|
||||||
#' Generic \code{dimnames} methods are used by \code{colnames}.
|
#' Generic \code{dimnames} methods are used by \code{colnames}.
|
||||||
#' Since row names are irrelevant, it is recommended to use \code{colnames} directly.
|
#' Since row names are irrelevant, it is recommended to use \code{colnames} directly.
|
||||||
@@ -122,7 +123,7 @@ dim.xgb.DMatrix <- function(x) {
|
|||||||
#' colnames(dtrain)
|
#' colnames(dtrain)
|
||||||
#' colnames(dtrain) <- make.names(1:ncol(train$data))
|
#' colnames(dtrain) <- make.names(1:ncol(train$data))
|
||||||
#' print(dtrain, verbose=TRUE)
|
#' print(dtrain, verbose=TRUE)
|
||||||
#'
|
#'
|
||||||
#' @rdname dimnames.xgb.DMatrix
|
#' @rdname dimnames.xgb.DMatrix
|
||||||
#' @export
|
#' @export
|
||||||
dimnames.xgb.DMatrix <- function(x) {
|
dimnames.xgb.DMatrix <- function(x) {
|
||||||
@@ -140,8 +141,8 @@ dimnames.xgb.DMatrix <- function(x) {
|
|||||||
attr(x, '.Dimnames') <- NULL
|
attr(x, '.Dimnames') <- NULL
|
||||||
return(x)
|
return(x)
|
||||||
}
|
}
|
||||||
if (ncol(x) != length(value[[2]]))
|
if (ncol(x) != length(value[[2]]))
|
||||||
stop("can't assign ", length(value[[2]]), " colnames to a ",
|
stop("can't assign ", length(value[[2]]), " colnames to a ",
|
||||||
ncol(x), " column xgb.DMatrix")
|
ncol(x), " column xgb.DMatrix")
|
||||||
attr(x, '.Dimnames') <- value
|
attr(x, '.Dimnames') <- value
|
||||||
x
|
x
|
||||||
@@ -149,33 +150,33 @@ dimnames.xgb.DMatrix <- function(x) {
|
|||||||
|
|
||||||
|
|
||||||
#' Get information of an xgb.DMatrix object
|
#' Get information of an xgb.DMatrix object
|
||||||
#'
|
#'
|
||||||
#' Get information of an xgb.DMatrix object
|
#' Get information of an xgb.DMatrix object
|
||||||
#' @param object Object of class \code{xgb.DMatrix}
|
#' @param object Object of class \code{xgb.DMatrix}
|
||||||
#' @param name the name of the information field to get (see details)
|
#' @param name the name of the information field to get (see details)
|
||||||
#' @param ... other parameters
|
#' @param ... other parameters
|
||||||
#'
|
#'
|
||||||
#' @details
|
#' @details
|
||||||
#' The \code{name} field can be one of the following:
|
#' The \code{name} field can be one of the following:
|
||||||
#'
|
#'
|
||||||
#' \itemize{
|
#' \itemize{
|
||||||
#' \item \code{label}: label Xgboost learn from ;
|
#' \item \code{label}: label Xgboost learn from ;
|
||||||
#' \item \code{weight}: to do a weight rescale ;
|
#' \item \code{weight}: to do a weight rescale ;
|
||||||
#' \item \code{base_margin}: base margin is the base prediction Xgboost will boost from ;
|
#' \item \code{base_margin}: base margin is the base prediction Xgboost will boost from ;
|
||||||
#' \item \code{nrow}: number of rows of the \code{xgb.DMatrix}.
|
#' \item \code{nrow}: number of rows of the \code{xgb.DMatrix}.
|
||||||
#'
|
#'
|
||||||
#' }
|
#' }
|
||||||
#'
|
#'
|
||||||
#' \code{group} can be setup by \code{setinfo} but can't be retrieved by \code{getinfo}.
|
#' \code{group} can be setup by \code{setinfo} but can't be retrieved by \code{getinfo}.
|
||||||
#'
|
#'
|
||||||
#' @examples
|
#' @examples
|
||||||
#' data(agaricus.train, package='xgboost')
|
#' data(agaricus.train, package='xgboost')
|
||||||
#' train <- agaricus.train
|
#' train <- agaricus.train
|
||||||
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
|
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
|
||||||
#'
|
#'
|
||||||
#' labels <- getinfo(dtrain, 'label')
|
#' labels <- getinfo(dtrain, 'label')
|
||||||
#' setinfo(dtrain, 'label', 1-labels)
|
#' setinfo(dtrain, 'label', 1-labels)
|
||||||
#'
|
#'
|
||||||
#' labels2 <- getinfo(dtrain, 'label')
|
#' labels2 <- getinfo(dtrain, 'label')
|
||||||
#' stopifnot(all(labels2 == 1-labels))
|
#' stopifnot(all(labels2 == 1-labels))
|
||||||
#' @rdname getinfo
|
#' @rdname getinfo
|
||||||
@@ -202,9 +203,9 @@ getinfo.xgb.DMatrix <- function(object, name, ...) {
|
|||||||
|
|
||||||
|
|
||||||
#' Set information of an xgb.DMatrix object
|
#' Set information of an xgb.DMatrix object
|
||||||
#'
|
#'
|
||||||
#' Set information of an xgb.DMatrix object
|
#' Set information of an xgb.DMatrix object
|
||||||
#'
|
#'
|
||||||
#' @param object Object of class "xgb.DMatrix"
|
#' @param object Object of class "xgb.DMatrix"
|
||||||
#' @param name the name of the field to get
|
#' @param name the name of the field to get
|
||||||
#' @param info the specific field of information to set
|
#' @param info the specific field of information to set
|
||||||
@@ -212,19 +213,19 @@ getinfo.xgb.DMatrix <- function(object, name, ...) {
|
|||||||
#'
|
#'
|
||||||
#' @details
|
#' @details
|
||||||
#' The \code{name} field can be one of the following:
|
#' The \code{name} field can be one of the following:
|
||||||
#'
|
#'
|
||||||
#' \itemize{
|
#' \itemize{
|
||||||
#' \item \code{label}: label Xgboost learn from ;
|
#' \item \code{label}: label Xgboost learn from ;
|
||||||
#' \item \code{weight}: to do a weight rescale ;
|
#' \item \code{weight}: to do a weight rescale ;
|
||||||
#' \item \code{base_margin}: base margin is the base prediction Xgboost will boost from ;
|
#' \item \code{base_margin}: base margin is the base prediction Xgboost will boost from ;
|
||||||
#' \item \code{group}: number of rows in each group (to use with \code{rank:pairwise} objective).
|
#' \item \code{group}: number of rows in each group (to use with \code{rank:pairwise} objective).
|
||||||
#' }
|
#' }
|
||||||
#'
|
#'
|
||||||
#' @examples
|
#' @examples
|
||||||
#' data(agaricus.train, package='xgboost')
|
#' data(agaricus.train, package='xgboost')
|
||||||
#' train <- agaricus.train
|
#' train <- agaricus.train
|
||||||
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
|
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
|
||||||
#'
|
#'
|
||||||
#' labels <- getinfo(dtrain, 'label')
|
#' labels <- getinfo(dtrain, 'label')
|
||||||
#' setinfo(dtrain, 'label', 1-labels)
|
#' setinfo(dtrain, 'label', 1-labels)
|
||||||
#' labels2 <- getinfo(dtrain, 'label')
|
#' labels2 <- getinfo(dtrain, 'label')
|
||||||
@@ -266,27 +267,27 @@ setinfo.xgb.DMatrix <- function(object, name, info, ...) {
|
|||||||
|
|
||||||
|
|
||||||
#' Get a new DMatrix containing the specified rows of
|
#' Get a new DMatrix containing the specified rows of
|
||||||
#' orginal xgb.DMatrix object
|
#' original xgb.DMatrix object
|
||||||
#'
|
#'
|
||||||
#' Get a new DMatrix containing the specified rows of
|
#' Get a new DMatrix containing the specified rows of
|
||||||
#' orginal xgb.DMatrix object
|
#' original xgb.DMatrix object
|
||||||
#'
|
#'
|
||||||
#' @param object Object of class "xgb.DMatrix"
|
#' @param object Object of class "xgb.DMatrix"
|
||||||
#' @param idxset a integer vector of indices of rows needed
|
#' @param idxset a integer vector of indices of rows needed
|
||||||
#' @param colset currently not used (columns subsetting is not available)
|
#' @param colset currently not used (columns subsetting is not available)
|
||||||
#' @param ... other parameters (currently not used)
|
#' @param ... other parameters (currently not used)
|
||||||
#'
|
#'
|
||||||
#' @examples
|
#' @examples
|
||||||
#' data(agaricus.train, package='xgboost')
|
#' data(agaricus.train, package='xgboost')
|
||||||
#' train <- agaricus.train
|
#' train <- agaricus.train
|
||||||
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
|
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
|
||||||
#'
|
#'
|
||||||
#' dsub <- slice(dtrain, 1:42)
|
#' dsub <- slice(dtrain, 1:42)
|
||||||
#' labels1 <- getinfo(dsub, 'label')
|
#' labels1 <- getinfo(dsub, 'label')
|
||||||
#' dsub <- dtrain[1:42, ]
|
#' dsub <- dtrain[1:42, ]
|
||||||
#' labels2 <- getinfo(dsub, 'label')
|
#' labels2 <- getinfo(dsub, 'label')
|
||||||
#' all.equal(labels1, labels2)
|
#' all.equal(labels1, labels2)
|
||||||
#'
|
#'
|
||||||
#' @rdname slice.xgb.DMatrix
|
#' @rdname slice.xgb.DMatrix
|
||||||
#' @export
|
#' @export
|
||||||
slice <- function(object, ...) UseMethod("slice")
|
slice <- function(object, ...) UseMethod("slice")
|
||||||
@@ -301,12 +302,17 @@ slice.xgb.DMatrix <- function(object, idxset, ...) {
|
|||||||
|
|
||||||
attr_list <- attributes(object)
|
attr_list <- attributes(object)
|
||||||
nr <- nrow(object)
|
nr <- nrow(object)
|
||||||
len <- sapply(attr_list, length)
|
len <- sapply(attr_list, NROW)
|
||||||
ind <- which(len == nr)
|
ind <- which(len == nr)
|
||||||
if (length(ind) > 0) {
|
if (length(ind) > 0) {
|
||||||
nms <- names(attr_list)[ind]
|
nms <- names(attr_list)[ind]
|
||||||
for (i in seq_along(ind)) {
|
for (i in seq_along(ind)) {
|
||||||
attr(ret, nms[i]) <- attr(object, nms[i])[idxset]
|
obj_attr <- attr(object, nms[i])
|
||||||
|
if (NCOL(obj_attr) > 1) {
|
||||||
|
attr(ret, nms[i]) <- obj_attr[idxset,]
|
||||||
|
} else {
|
||||||
|
attr(ret, nms[i]) <- obj_attr[idxset]
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return(structure(ret, class = "xgb.DMatrix"))
|
return(structure(ret, class = "xgb.DMatrix"))
|
||||||
@@ -320,22 +326,22 @@ slice.xgb.DMatrix <- function(object, idxset, ...) {
|
|||||||
|
|
||||||
|
|
||||||
#' Print xgb.DMatrix
|
#' Print xgb.DMatrix
|
||||||
#'
|
#'
|
||||||
#' Print information about xgb.DMatrix.
|
#' Print information about xgb.DMatrix.
|
||||||
#' Currently it displays dimensions and presence of info-fields and colnames.
|
#' Currently it displays dimensions and presence of info-fields and colnames.
|
||||||
#'
|
#'
|
||||||
#' @param x an xgb.DMatrix object
|
#' @param x an xgb.DMatrix object
|
||||||
#' @param verbose whether to print colnames (when present)
|
#' @param verbose whether to print colnames (when present)
|
||||||
#' @param ... not currently used
|
#' @param ... not currently used
|
||||||
#'
|
#'
|
||||||
#' @examples
|
#' @examples
|
||||||
#' data(agaricus.train, package='xgboost')
|
#' data(agaricus.train, package='xgboost')
|
||||||
#' train <- agaricus.train
|
#' train <- agaricus.train
|
||||||
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
|
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
|
||||||
#'
|
#'
|
||||||
#' dtrain
|
#' dtrain
|
||||||
#' print(dtrain, verbose=TRUE)
|
#' print(dtrain, verbose=TRUE)
|
||||||
#'
|
#'
|
||||||
#' @method print xgb.DMatrix
|
#' @method print xgb.DMatrix
|
||||||
#' @export
|
#' @export
|
||||||
print.xgb.DMatrix <- function(x, verbose = FALSE, ...) {
|
print.xgb.DMatrix <- function(x, verbose = FALSE, ...) {
|
||||||
|
|||||||
@@ -11,6 +11,7 @@
|
|||||||
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
|
#' dtrain <- xgb.DMatrix(train$data, label=train$label)
|
||||||
#' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
|
#' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
|
||||||
#' dtrain <- xgb.DMatrix('xgb.DMatrix.data')
|
#' dtrain <- xgb.DMatrix('xgb.DMatrix.data')
|
||||||
|
#' if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
|
||||||
#' @export
|
#' @export
|
||||||
xgb.DMatrix.save <- function(dmatrix, fname) {
|
xgb.DMatrix.save <- function(dmatrix, fname) {
|
||||||
if (typeof(fname) != "character")
|
if (typeof(fname) != "character")
|
||||||
|
|||||||
@@ -1,12 +1,12 @@
|
|||||||
#' Cross Validation
|
#' Cross Validation
|
||||||
#'
|
#'
|
||||||
#' The cross validation function of xgboost
|
#' The cross validation function of xgboost
|
||||||
#'
|
#'
|
||||||
#' @param params the list of parameters. Commonly used ones are:
|
#' @param params the list of parameters. Commonly used ones are:
|
||||||
#' \itemize{
|
#' \itemize{
|
||||||
#' \item \code{objective} objective function, common ones are
|
#' \item \code{objective} objective function, common ones are
|
||||||
#' \itemize{
|
#' \itemize{
|
||||||
#' \item \code{reg:linear} linear regression
|
#' \item \code{reg:squarederror} Regression with squared loss
|
||||||
#' \item \code{binary:logistic} logistic regression for classification
|
#' \item \code{binary:logistic} logistic regression for classification
|
||||||
#' }
|
#' }
|
||||||
#' \item \code{eta} step size of each boosting step
|
#' \item \code{eta} step size of each boosting step
|
||||||
@@ -18,12 +18,12 @@
|
|||||||
#' See also demo/ for walkthrough example in R.
|
#' See also demo/ for walkthrough example in R.
|
||||||
#' @param data takes an \code{xgb.DMatrix}, \code{matrix}, or \code{dgCMatrix} as the input.
|
#' @param data takes an \code{xgb.DMatrix}, \code{matrix}, or \code{dgCMatrix} as the input.
|
||||||
#' @param nrounds the max number of iterations
|
#' @param nrounds the max number of iterations
|
||||||
#' @param nfold the original dataset is randomly partitioned into \code{nfold} equal size subsamples.
|
#' @param nfold the original dataset is randomly partitioned into \code{nfold} equal size subsamples.
|
||||||
#' @param label vector of response values. Should be provided only when data is an R-matrix.
|
#' @param label vector of response values. Should be provided only when data is an R-matrix.
|
||||||
#' @param missing is only used when input is a dense matrix. By default is set to NA, which means
|
#' @param missing is only used when input is a dense matrix. By default is set to NA, which means
|
||||||
#' that NA values should be considered as 'missing' by the algorithm.
|
#' that NA values should be considered as 'missing' by the algorithm.
|
||||||
#' Sometimes, 0 or other extreme value might be used to represent missing values.
|
#' Sometimes, 0 or other extreme value might be used to represent missing values.
|
||||||
#' @param prediction A logical value indicating whether to return the test fold predictions
|
#' @param prediction A logical value indicating whether to return the test fold predictions
|
||||||
#' from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callback.
|
#' from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callback.
|
||||||
#' @param showsd \code{boolean}, whether to show standard deviation of cross validation
|
#' @param showsd \code{boolean}, whether to show standard deviation of cross validation
|
||||||
#' @param metrics, list of evaluation metrics to be used in cross validation,
|
#' @param metrics, list of evaluation metrics to be used in cross validation,
|
||||||
@@ -37,22 +37,24 @@
|
|||||||
#' \item \code{aucpr} Area under PR curve
|
#' \item \code{aucpr} Area under PR curve
|
||||||
#' \item \code{merror} Exact matching error, used to evaluate multi-class classification
|
#' \item \code{merror} Exact matching error, used to evaluate multi-class classification
|
||||||
#' }
|
#' }
|
||||||
#' @param obj customized objective function. Returns gradient and second order
|
#' @param obj customized objective function. Returns gradient and second order
|
||||||
#' gradient with given prediction and dtrain.
|
#' gradient with given prediction and dtrain.
|
||||||
#' @param feval custimized evaluation function. Returns
|
#' @param feval customized evaluation function. Returns
|
||||||
#' \code{list(metric='metric-name', value='metric-value')} with given
|
#' \code{list(metric='metric-name', value='metric-value')} with given
|
||||||
#' prediction and dtrain.
|
#' prediction and dtrain.
|
||||||
#' @param stratified a \code{boolean} indicating whether sampling of folds should be stratified
|
#' @param stratified a \code{boolean} indicating whether sampling of folds should be stratified
|
||||||
#' by the values of outcome labels.
|
#' by the values of outcome labels.
|
||||||
#' @param folds \code{list} provides a possibility to use a list of pre-defined CV folds
|
#' @param folds \code{list} provides a possibility to use a list of pre-defined CV folds
|
||||||
#' (each element must be a vector of test fold's indices). When folds are supplied,
|
#' (each element must be a vector of test fold's indices). When folds are supplied,
|
||||||
#' the \code{nfold} and \code{stratified} parameters are ignored.
|
#' the \code{nfold} and \code{stratified} parameters are ignored.
|
||||||
|
#' @param train_folds \code{list} list specifying which indicies to use for training. If \code{NULL}
|
||||||
|
#' (the default) all indices not specified in \code{folds} will be used for training.
|
||||||
#' @param verbose \code{boolean}, print the statistics during the process
|
#' @param verbose \code{boolean}, print the statistics during the process
|
||||||
#' @param print_every_n Print each n-th iteration evaluation messages when \code{verbose>0}.
|
#' @param print_every_n Print each n-th iteration evaluation messages when \code{verbose>0}.
|
||||||
#' Default is 1 which means all messages are printed. This parameter is passed to the
|
#' Default is 1 which means all messages are printed. This parameter is passed to the
|
||||||
#' \code{\link{cb.print.evaluation}} callback.
|
#' \code{\link{cb.print.evaluation}} callback.
|
||||||
#' @param early_stopping_rounds If \code{NULL}, the early stopping function is not triggered.
|
#' @param early_stopping_rounds If \code{NULL}, the early stopping function is not triggered.
|
||||||
#' If set to an integer \code{k}, training with a validation set will stop if the performance
|
#' If set to an integer \code{k}, training with a validation set will stop if the performance
|
||||||
#' doesn't improve for \code{k} rounds.
|
#' doesn't improve for \code{k} rounds.
|
||||||
#' Setting this parameter engages the \code{\link{cb.early.stop}} callback.
|
#' Setting this parameter engages the \code{\link{cb.early.stop}} callback.
|
||||||
#' @param maximize If \code{feval} and \code{early_stopping_rounds} are set,
|
#' @param maximize If \code{feval} and \code{early_stopping_rounds} are set,
|
||||||
@@ -60,46 +62,46 @@
|
|||||||
#' When it is \code{TRUE}, it means the larger the evaluation score the better.
|
#' When it is \code{TRUE}, it means the larger the evaluation score the better.
|
||||||
#' This parameter is passed to the \code{\link{cb.early.stop}} callback.
|
#' This parameter is passed to the \code{\link{cb.early.stop}} callback.
|
||||||
#' @param callbacks a list of callback functions to perform various task during boosting.
|
#' @param callbacks a list of callback functions to perform various task during boosting.
|
||||||
#' See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
|
#' See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
|
||||||
#' parameters' values. User can provide either existing or their own callback methods in order
|
#' parameters' values. User can provide either existing or their own callback methods in order
|
||||||
#' to customize the training process.
|
#' to customize the training process.
|
||||||
#' @param ... other parameters to pass to \code{params}.
|
#' @param ... other parameters to pass to \code{params}.
|
||||||
#'
|
#'
|
||||||
#' @details
|
#' @details
|
||||||
#' The original sample is randomly partitioned into \code{nfold} equal size subsamples.
|
#' The original sample is randomly partitioned into \code{nfold} equal size subsamples.
|
||||||
#'
|
#'
|
||||||
#' Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model, and the remaining \code{nfold - 1} subsamples are used as training data.
|
#' Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model, and the remaining \code{nfold - 1} subsamples are used as training data.
|
||||||
#'
|
#'
|
||||||
#' The cross-validation process is then repeated \code{nrounds} times, with each of the \code{nfold} subsamples used exactly once as the validation data.
|
#' The cross-validation process is then repeated \code{nrounds} times, with each of the \code{nfold} subsamples used exactly once as the validation data.
|
||||||
#'
|
#'
|
||||||
#' All observations are used for both training and validation.
|
#' All observations are used for both training and validation.
|
||||||
#'
|
#'
|
||||||
#' Adapted from \url{http://en.wikipedia.org/wiki/Cross-validation_\%28statistics\%29#k-fold_cross-validation}
|
#' Adapted from \url{http://en.wikipedia.org/wiki/Cross-validation_\%28statistics\%29#k-fold_cross-validation}
|
||||||
#'
|
#'
|
||||||
#' @return
|
#' @return
|
||||||
#' An object of class \code{xgb.cv.synchronous} with the following elements:
|
#' An object of class \code{xgb.cv.synchronous} with the following elements:
|
||||||
#' \itemize{
|
#' \itemize{
|
||||||
#' \item \code{call} a function call.
|
#' \item \code{call} a function call.
|
||||||
#' \item \code{params} parameters that were passed to the xgboost library. Note that it does not
|
#' \item \code{params} parameters that were passed to the xgboost library. Note that it does not
|
||||||
#' capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
|
#' capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
|
||||||
#' \item \code{callbacks} callback functions that were either automatically assigned or
|
#' \item \code{callbacks} callback functions that were either automatically assigned or
|
||||||
#' explicitly passed.
|
#' explicitly passed.
|
||||||
#' \item \code{evaluation_log} evaluation history storead as a \code{data.table} with the
|
#' \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
|
||||||
#' first column corresponding to iteration number and the rest corresponding to the
|
#' first column corresponding to iteration number and the rest corresponding to the
|
||||||
#' CV-based evaluation means and standard deviations for the training and test CV-sets.
|
#' CV-based evaluation means and standard deviations for the training and test CV-sets.
|
||||||
#' It is created by the \code{\link{cb.evaluation.log}} callback.
|
#' It is created by the \code{\link{cb.evaluation.log}} callback.
|
||||||
#' \item \code{niter} number of boosting iterations.
|
#' \item \code{niter} number of boosting iterations.
|
||||||
#' \item \code{nfeatures} number of features in training data.
|
#' \item \code{nfeatures} number of features in training data.
|
||||||
#' \item \code{folds} the list of CV folds' indices - either those passed through the \code{folds}
|
#' \item \code{folds} the list of CV folds' indices - either those passed through the \code{folds}
|
||||||
#' parameter or randomly generated.
|
#' parameter or randomly generated.
|
||||||
#' \item \code{best_iteration} iteration number with the best evaluation metric value
|
#' \item \code{best_iteration} iteration number with the best evaluation metric value
|
||||||
#' (only available with early stopping).
|
#' (only available with early stopping).
|
||||||
#' \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
|
#' \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
|
||||||
#' which could further be used in \code{predict} method
|
#' which could further be used in \code{predict} method
|
||||||
#' (only available with early stopping).
|
#' (only available with early stopping).
|
||||||
#' \item \code{pred} CV prediction values available when \code{prediction} is set.
|
#' \item \code{pred} CV prediction values available when \code{prediction} is set.
|
||||||
#' It is either vector or matrix (see \code{\link{cb.cv.predict}}).
|
#' It is either vector or matrix (see \code{\link{cb.cv.predict}}).
|
||||||
#' \item \code{models} a liost of the CV folds' models. It is only available with the explicit
|
#' \item \code{models} a liost of the CV folds' models. It is only available with the explicit
|
||||||
#' setting of the \code{cb.cv.predict(save_models = TRUE)} callback.
|
#' setting of the \code{cb.cv.predict(save_models = TRUE)} callback.
|
||||||
#' }
|
#' }
|
||||||
#'
|
#'
|
||||||
@@ -110,32 +112,39 @@
|
|||||||
#' max_depth = 3, eta = 1, objective = "binary:logistic")
|
#' max_depth = 3, eta = 1, objective = "binary:logistic")
|
||||||
#' print(cv)
|
#' print(cv)
|
||||||
#' print(cv, verbose=TRUE)
|
#' print(cv, verbose=TRUE)
|
||||||
#'
|
#'
|
||||||
#' @export
|
#' @export
|
||||||
xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing = NA,
|
xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing = NA,
|
||||||
prediction = FALSE, showsd = TRUE, metrics=list(),
|
prediction = FALSE, showsd = TRUE, metrics=list(),
|
||||||
obj = NULL, feval = NULL, stratified = TRUE, folds = NULL,
|
obj = NULL, feval = NULL, stratified = TRUE, folds = NULL, train_folds = NULL,
|
||||||
verbose = TRUE, print_every_n=1L,
|
verbose = TRUE, print_every_n=1L,
|
||||||
early_stopping_rounds = NULL, maximize = NULL, callbacks = list(), ...) {
|
early_stopping_rounds = NULL, maximize = NULL, callbacks = list(), ...) {
|
||||||
|
|
||||||
check.deprecation(...)
|
check.deprecation(...)
|
||||||
|
|
||||||
params <- check.booster.params(params, ...)
|
params <- check.booster.params(params, ...)
|
||||||
# TODO: should we deprecate the redundant 'metrics' parameter?
|
# TODO: should we deprecate the redundant 'metrics' parameter?
|
||||||
for (m in metrics)
|
for (m in metrics)
|
||||||
params <- c(params, list("eval_metric" = m))
|
params <- c(params, list("eval_metric" = m))
|
||||||
|
|
||||||
check.custom.obj()
|
check.custom.obj()
|
||||||
check.custom.eval()
|
check.custom.eval()
|
||||||
|
|
||||||
#if (is.null(params[['eval_metric']]) && is.null(feval))
|
#if (is.null(params[['eval_metric']]) && is.null(feval))
|
||||||
# stop("Either 'eval_metric' or 'feval' must be provided for CV")
|
# stop("Either 'eval_metric' or 'feval' must be provided for CV")
|
||||||
|
|
||||||
# Check the labels
|
# Check the labels
|
||||||
if ( (inherits(data, 'xgb.DMatrix') && is.null(getinfo(data, 'label'))) ||
|
if ( (inherits(data, 'xgb.DMatrix') && is.null(getinfo(data, 'label'))) ||
|
||||||
(!inherits(data, 'xgb.DMatrix') && is.null(label)))
|
(!inherits(data, 'xgb.DMatrix') && is.null(label))) {
|
||||||
stop("Labels must be provided for CV either through xgb.DMatrix, or through 'label=' when 'data' is matrix")
|
stop("Labels must be provided for CV either through xgb.DMatrix, or through 'label=' when 'data' is matrix")
|
||||||
|
} else if (inherits(data, 'xgb.DMatrix')) {
|
||||||
|
if (!is.null(label))
|
||||||
|
warning("xgb.cv: label will be ignored, since data is of type xgb.DMatrix")
|
||||||
|
cv_label = getinfo(data, 'label')
|
||||||
|
} else {
|
||||||
|
cv_label = label
|
||||||
|
}
|
||||||
|
|
||||||
# CV folds
|
# CV folds
|
||||||
if(!is.null(folds)) {
|
if(!is.null(folds)) {
|
||||||
if(!is.list(folds) || length(folds) < 2)
|
if(!is.list(folds) || length(folds) < 2)
|
||||||
@@ -144,9 +153,9 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
|
|||||||
} else {
|
} else {
|
||||||
if (nfold <= 1)
|
if (nfold <= 1)
|
||||||
stop("'nfold' must be > 1")
|
stop("'nfold' must be > 1")
|
||||||
folds <- generate.cv.folds(nfold, nrow(data), stratified, label, params)
|
folds <- generate.cv.folds(nfold, nrow(data), stratified, cv_label, params)
|
||||||
}
|
}
|
||||||
|
|
||||||
# Potential TODO: sequential CV
|
# Potential TODO: sequential CV
|
||||||
#if (strategy == 'sequential')
|
#if (strategy == 'sequential')
|
||||||
# stop('Sequential CV strategy is not yet implemented')
|
# stop('Sequential CV strategy is not yet implemented')
|
||||||
@@ -166,7 +175,7 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
|
|||||||
stop_condition <- FALSE
|
stop_condition <- FALSE
|
||||||
if (!is.null(early_stopping_rounds) &&
|
if (!is.null(early_stopping_rounds) &&
|
||||||
!has.callbacks(callbacks, 'cb.early.stop')) {
|
!has.callbacks(callbacks, 'cb.early.stop')) {
|
||||||
callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds,
|
callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds,
|
||||||
maximize = maximize, verbose = verbose))
|
maximize = maximize, verbose = verbose))
|
||||||
}
|
}
|
||||||
# CV-predictions callback
|
# CV-predictions callback
|
||||||
@@ -177,12 +186,17 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
|
|||||||
# Sort the callbacks into categories
|
# Sort the callbacks into categories
|
||||||
cb <- categorize.callbacks(callbacks)
|
cb <- categorize.callbacks(callbacks)
|
||||||
|
|
||||||
|
|
||||||
# create the booster-folds
|
# create the booster-folds
|
||||||
|
# train_folds
|
||||||
dall <- xgb.get.DMatrix(data, label, missing)
|
dall <- xgb.get.DMatrix(data, label, missing)
|
||||||
bst_folds <- lapply(seq_along(folds), function(k) {
|
bst_folds <- lapply(seq_along(folds), function(k) {
|
||||||
dtest <- slice(dall, folds[[k]])
|
dtest <- slice(dall, folds[[k]])
|
||||||
dtrain <- slice(dall, unlist(folds[-k]))
|
# code originally contributed by @RolandASc on stackoverflow
|
||||||
|
if(is.null(train_folds))
|
||||||
|
dtrain <- slice(dall, unlist(folds[-k]))
|
||||||
|
else
|
||||||
|
dtrain <- slice(dall, train_folds[[k]])
|
||||||
handle <- xgb.Booster.handle(params, list(dtrain, dtest))
|
handle <- xgb.Booster.handle(params, list(dtrain, dtest))
|
||||||
list(dtrain = dtrain, bst = handle, watchlist = list(train = dtrain, test=dtest), index = folds[[k]])
|
list(dtrain = dtrain, bst = handle, watchlist = list(train = dtrain, test=dtest), index = folds[[k]])
|
||||||
})
|
})
|
||||||
@@ -197,12 +211,12 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
|
|||||||
# those are fixed for CV (no training continuation)
|
# those are fixed for CV (no training continuation)
|
||||||
begin_iteration <- 1
|
begin_iteration <- 1
|
||||||
end_iteration <- nrounds
|
end_iteration <- nrounds
|
||||||
|
|
||||||
# synchronous CV boosting: run CV folds' models within each iteration
|
# synchronous CV boosting: run CV folds' models within each iteration
|
||||||
for (iteration in begin_iteration:end_iteration) {
|
for (iteration in begin_iteration:end_iteration) {
|
||||||
|
|
||||||
for (f in cb$pre_iter) f()
|
for (f in cb$pre_iter) f()
|
||||||
|
|
||||||
msg <- lapply(bst_folds, function(fd) {
|
msg <- lapply(bst_folds, function(fd) {
|
||||||
xgb.iter.update(fd$bst, fd$dtrain, iteration - 1, obj)
|
xgb.iter.update(fd$bst, fd$dtrain, iteration - 1, obj)
|
||||||
xgb.iter.eval(fd$bst, fd$watchlist, iteration - 1, feval)
|
xgb.iter.eval(fd$bst, fd$watchlist, iteration - 1, feval)
|
||||||
@@ -210,9 +224,9 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
|
|||||||
msg <- simplify2array(msg)
|
msg <- simplify2array(msg)
|
||||||
bst_evaluation <- rowMeans(msg)
|
bst_evaluation <- rowMeans(msg)
|
||||||
bst_evaluation_err <- sqrt(rowMeans(msg^2) - bst_evaluation^2)
|
bst_evaluation_err <- sqrt(rowMeans(msg^2) - bst_evaluation^2)
|
||||||
|
|
||||||
for (f in cb$post_iter) f()
|
for (f in cb$post_iter) f()
|
||||||
|
|
||||||
if (stop_condition) break
|
if (stop_condition) break
|
||||||
}
|
}
|
||||||
for (f in cb$finalize) f(finalize = TRUE)
|
for (f in cb$finalize) f(finalize = TRUE)
|
||||||
@@ -236,17 +250,17 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
|
|||||||
|
|
||||||
|
|
||||||
#' Print xgb.cv result
|
#' Print xgb.cv result
|
||||||
#'
|
#'
|
||||||
#' Prints formatted results of \code{xgb.cv}.
|
#' Prints formatted results of \code{xgb.cv}.
|
||||||
#'
|
#'
|
||||||
#' @param x an \code{xgb.cv.synchronous} object
|
#' @param x an \code{xgb.cv.synchronous} object
|
||||||
#' @param verbose whether to print detailed data
|
#' @param verbose whether to print detailed data
|
||||||
#' @param ... passed to \code{data.table.print}
|
#' @param ... passed to \code{data.table.print}
|
||||||
#'
|
#'
|
||||||
#' @details
|
#' @details
|
||||||
#' When not verbose, it would only print the evaluation results,
|
#' When not verbose, it would only print the evaluation results,
|
||||||
#' including the best iteration (when available).
|
#' including the best iteration (when available).
|
||||||
#'
|
#'
|
||||||
#' @examples
|
#' @examples
|
||||||
#' data(agaricus.train, package='xgboost')
|
#' data(agaricus.train, package='xgboost')
|
||||||
#' train <- agaricus.train
|
#' train <- agaricus.train
|
||||||
@@ -254,13 +268,13 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
|
|||||||
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
|
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
|
||||||
#' print(cv)
|
#' print(cv)
|
||||||
#' print(cv, verbose=TRUE)
|
#' print(cv, verbose=TRUE)
|
||||||
#'
|
#'
|
||||||
#' @rdname print.xgb.cv
|
#' @rdname print.xgb.cv
|
||||||
#' @method print xgb.cv.synchronous
|
#' @method print xgb.cv.synchronous
|
||||||
#' @export
|
#' @export
|
||||||
print.xgb.cv.synchronous <- function(x, verbose = FALSE, ...) {
|
print.xgb.cv.synchronous <- function(x, verbose = FALSE, ...) {
|
||||||
cat('##### xgb.cv ', length(x$folds), '-folds\n', sep = '')
|
cat('##### xgb.cv ', length(x$folds), '-folds\n', sep = '')
|
||||||
|
|
||||||
if (verbose) {
|
if (verbose) {
|
||||||
if (!is.null(x$call)) {
|
if (!is.null(x$call)) {
|
||||||
cat('call:\n ')
|
cat('call:\n ')
|
||||||
@@ -268,8 +282,8 @@ print.xgb.cv.synchronous <- function(x, verbose = FALSE, ...) {
|
|||||||
}
|
}
|
||||||
if (!is.null(x$params)) {
|
if (!is.null(x$params)) {
|
||||||
cat('params (as set within xgb.cv):\n')
|
cat('params (as set within xgb.cv):\n')
|
||||||
cat( ' ',
|
cat( ' ',
|
||||||
paste(names(x$params),
|
paste(names(x$params),
|
||||||
paste0('"', unlist(x$params), '"'),
|
paste0('"', unlist(x$params), '"'),
|
||||||
sep = ' = ', collapse = ', '), '\n', sep = '')
|
sep = ' = ', collapse = ', '), '\n', sep = '')
|
||||||
}
|
}
|
||||||
@@ -280,9 +294,9 @@ print.xgb.cv.synchronous <- function(x, verbose = FALSE, ...) {
|
|||||||
print(x)
|
print(x)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
for (n in c('niter', 'best_iteration', 'best_ntreelimit')) {
|
for (n in c('niter', 'best_iteration', 'best_ntreelimit')) {
|
||||||
if (is.null(x[[n]]))
|
if (is.null(x[[n]]))
|
||||||
next
|
next
|
||||||
cat(n, ': ', x[[n]], '\n', sep = '')
|
cat(n, ': ', x[[n]], '\n', sep = '')
|
||||||
}
|
}
|
||||||
@@ -293,10 +307,10 @@ print.xgb.cv.synchronous <- function(x, verbose = FALSE, ...) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (verbose)
|
if (verbose)
|
||||||
cat('evaluation_log:\n')
|
cat('evaluation_log:\n')
|
||||||
print(x$evaluation_log, row.names = FALSE, ...)
|
print(x$evaluation_log, row.names = FALSE, ...)
|
||||||
|
|
||||||
if (!is.null(x$best_iteration)) {
|
if (!is.null(x$best_iteration)) {
|
||||||
cat('Best iteration:\n')
|
cat('Best iteration:\n')
|
||||||
print(x$evaluation_log[x$best_iteration], row.names = FALSE, ...)
|
print(x$evaluation_log[x$best_iteration], row.names = FALSE, ...)
|
||||||
|
|||||||
@@ -28,6 +28,7 @@
|
|||||||
#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
|
#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
|
||||||
#' xgb.save(bst, 'xgb.model')
|
#' xgb.save(bst, 'xgb.model')
|
||||||
#' bst <- xgb.load('xgb.model')
|
#' bst <- xgb.load('xgb.model')
|
||||||
|
#' if (file.exists('xgb.model')) file.remove('xgb.model')
|
||||||
#' pred <- predict(bst, test$data)
|
#' pred <- predict(bst, test$data)
|
||||||
#' @export
|
#' @export
|
||||||
xgb.load <- function(modelfile) {
|
xgb.load <- function(modelfile) {
|
||||||
|
|||||||
@@ -27,7 +27,7 @@
|
|||||||
#' a tree's median absolute leaf weight changes through the iterations.
|
#' a tree's median absolute leaf weight changes through the iterations.
|
||||||
#'
|
#'
|
||||||
#' This function was inspired by the blog post
|
#' This function was inspired by the blog post
|
||||||
#' \url{http://aysent.github.io/2015/11/08/random-forest-leaf-visualization.html}.
|
#' \url{https://github.com/aysent/random-forest-leaf-visualization}.
|
||||||
#'
|
#'
|
||||||
#' @return
|
#' @return
|
||||||
#'
|
#'
|
||||||
|
|||||||
@@ -5,16 +5,16 @@
|
|||||||
#'
|
#'
|
||||||
#' @param importance_matrix a \code{data.table} returned by \code{\link{xgb.importance}}.
|
#' @param importance_matrix a \code{data.table} returned by \code{\link{xgb.importance}}.
|
||||||
#' @param top_n maximal number of top features to include into the plot.
|
#' @param top_n maximal number of top features to include into the plot.
|
||||||
#' @param measure the name of importance measure to plot.
|
#' @param measure the name of importance measure to plot.
|
||||||
#' When \code{NULL}, 'Gain' would be used for trees and 'Weight' would be used for gblinear.
|
#' When \code{NULL}, 'Gain' would be used for trees and 'Weight' would be used for gblinear.
|
||||||
#' @param rel_to_first whether importance values should be represented as relative to the highest ranked feature.
|
#' @param rel_to_first whether importance values should be represented as relative to the highest ranked feature.
|
||||||
#' See Details.
|
#' See Details.
|
||||||
#' @param left_margin (base R barplot) allows to adjust the left margin size to fit feature names.
|
#' @param left_margin (base R barplot) allows to adjust the left margin size to fit feature names.
|
||||||
#' When it is NULL, the existing \code{par('mar')} is used.
|
#' When it is NULL, the existing \code{par('mar')} is used.
|
||||||
#' @param cex (base R barplot) passed as \code{cex.names} parameter to \code{barplot}.
|
#' @param cex (base R barplot) passed as \code{cex.names} parameter to \code{barplot}.
|
||||||
#' @param plot (base R barplot) whether a barplot should be produced.
|
#' @param plot (base R barplot) whether a barplot should be produced.
|
||||||
#' If FALSE, only a data.table is returned.
|
#' If FALSE, only a data.table is returned.
|
||||||
#' @param n_clusters (ggplot only) a \code{numeric} vector containing the min and the max range
|
#' @param n_clusters (ggplot only) a \code{numeric} vector containing the min and the max range
|
||||||
#' of the possible number of clusters of bars.
|
#' of the possible number of clusters of bars.
|
||||||
#' @param ... other parameters passed to \code{barplot} (except horiz, border, cex.names, names.arg, and las).
|
#' @param ... other parameters passed to \code{barplot} (except horiz, border, cex.names, names.arg, and las).
|
||||||
#'
|
#'
|
||||||
@@ -22,27 +22,27 @@
|
|||||||
#' The graph represents each feature as a horizontal bar of length proportional to the importance of a feature.
|
#' The graph represents each feature as a horizontal bar of length proportional to the importance of a feature.
|
||||||
#' Features are shown ranked in a decreasing importance order.
|
#' Features are shown ranked in a decreasing importance order.
|
||||||
#' It works for importances from both \code{gblinear} and \code{gbtree} models.
|
#' It works for importances from both \code{gblinear} and \code{gbtree} models.
|
||||||
#'
|
#'
|
||||||
#' When \code{rel_to_first = FALSE}, the values would be plotted as they were in \code{importance_matrix}.
|
#' When \code{rel_to_first = FALSE}, the values would be plotted as they were in \code{importance_matrix}.
|
||||||
#' For gbtree model, that would mean being normalized to the total of 1
|
#' For gbtree model, that would mean being normalized to the total of 1
|
||||||
#' ("what is feature's importance contribution relative to the whole model?").
|
#' ("what is feature's importance contribution relative to the whole model?").
|
||||||
#' For linear models, \code{rel_to_first = FALSE} would show actual values of the coefficients.
|
#' For linear models, \code{rel_to_first = FALSE} would show actual values of the coefficients.
|
||||||
#' Setting \code{rel_to_first = TRUE} allows to see the picture from the perspective of
|
#' Setting \code{rel_to_first = TRUE} allows to see the picture from the perspective of
|
||||||
#' "what is feature's importance contribution relative to the most important feature?"
|
#' "what is feature's importance contribution relative to the most important feature?"
|
||||||
#'
|
#'
|
||||||
#' The ggplot-backend method also performs 1-D custering of the importance values,
|
#' The ggplot-backend method also performs 1-D clustering of the importance values,
|
||||||
#' with bar colors coresponding to different clusters that have somewhat similar importance values.
|
#' with bar colors corresponding to different clusters that have somewhat similar importance values.
|
||||||
#'
|
#'
|
||||||
#' @return
|
#' @return
|
||||||
#' The \code{xgb.plot.importance} function creates a \code{barplot} (when \code{plot=TRUE})
|
#' The \code{xgb.plot.importance} function creates a \code{barplot} (when \code{plot=TRUE})
|
||||||
#' and silently returns a processed data.table with \code{n_top} features sorted by importance.
|
#' and silently returns a processed data.table with \code{n_top} features sorted by importance.
|
||||||
#'
|
#'
|
||||||
#' The \code{xgb.ggplot.importance} function returns a ggplot graph which could be customized afterwards.
|
#' The \code{xgb.ggplot.importance} function returns a ggplot graph which could be customized afterwards.
|
||||||
#' E.g., to change the title of the graph, add \code{+ ggtitle("A GRAPH NAME")} to the result.
|
#' E.g., to change the title of the graph, add \code{+ ggtitle("A GRAPH NAME")} to the result.
|
||||||
#'
|
#'
|
||||||
#' @seealso
|
#' @seealso
|
||||||
#' \code{\link[graphics]{barplot}}.
|
#' \code{\link[graphics]{barplot}}.
|
||||||
#'
|
#'
|
||||||
#' @examples
|
#' @examples
|
||||||
#' data(agaricus.train)
|
#' data(agaricus.train)
|
||||||
#'
|
#'
|
||||||
@@ -50,15 +50,15 @@
|
|||||||
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
|
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
|
||||||
#'
|
#'
|
||||||
#' importance_matrix <- xgb.importance(colnames(agaricus.train$data), model = bst)
|
#' importance_matrix <- xgb.importance(colnames(agaricus.train$data), model = bst)
|
||||||
#'
|
#'
|
||||||
#' xgb.plot.importance(importance_matrix, rel_to_first = TRUE, xlab = "Relative importance")
|
#' xgb.plot.importance(importance_matrix, rel_to_first = TRUE, xlab = "Relative importance")
|
||||||
#'
|
#'
|
||||||
#' (gg <- xgb.ggplot.importance(importance_matrix, measure = "Frequency", rel_to_first = TRUE))
|
#' (gg <- xgb.ggplot.importance(importance_matrix, measure = "Frequency", rel_to_first = TRUE))
|
||||||
#' gg + ggplot2::ylab("Frequency")
|
#' gg + ggplot2::ylab("Frequency")
|
||||||
#'
|
#'
|
||||||
#' @rdname xgb.plot.importance
|
#' @rdname xgb.plot.importance
|
||||||
#' @export
|
#' @export
|
||||||
xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure = NULL,
|
xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure = NULL,
|
||||||
rel_to_first = FALSE, left_margin = 10, cex = NULL, plot = TRUE, ...) {
|
rel_to_first = FALSE, left_margin = 10, cex = NULL, plot = TRUE, ...) {
|
||||||
check.deprecation(...)
|
check.deprecation(...)
|
||||||
if (!is.data.table(importance_matrix)) {
|
if (!is.data.table(importance_matrix)) {
|
||||||
@@ -80,13 +80,13 @@ xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure
|
|||||||
if (!"Feature" %in% imp_names)
|
if (!"Feature" %in% imp_names)
|
||||||
stop("Importance matrix column names are not as expected!")
|
stop("Importance matrix column names are not as expected!")
|
||||||
}
|
}
|
||||||
|
|
||||||
# also aggregate, just in case when the values were not yet summed up by feature
|
# also aggregate, just in case when the values were not yet summed up by feature
|
||||||
importance_matrix <- importance_matrix[, Importance := sum(get(measure)), by = Feature]
|
importance_matrix <- importance_matrix[, Importance := sum(get(measure)), by = Feature]
|
||||||
|
|
||||||
# make sure it's ordered
|
# make sure it's ordered
|
||||||
importance_matrix <- importance_matrix[order(-abs(Importance))]
|
importance_matrix <- importance_matrix[order(-abs(Importance))]
|
||||||
|
|
||||||
if (!is.null(top_n)) {
|
if (!is.null(top_n)) {
|
||||||
top_n <- min(top_n, nrow(importance_matrix))
|
top_n <- min(top_n, nrow(importance_matrix))
|
||||||
importance_matrix <- head(importance_matrix, top_n)
|
importance_matrix <- head(importance_matrix, top_n)
|
||||||
@@ -97,14 +97,14 @@ xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure
|
|||||||
if (is.null(cex)) {
|
if (is.null(cex)) {
|
||||||
cex <- 2.5/log2(1 + nrow(importance_matrix))
|
cex <- 2.5/log2(1 + nrow(importance_matrix))
|
||||||
}
|
}
|
||||||
|
|
||||||
if (plot) {
|
if (plot) {
|
||||||
op <- par(no.readonly = TRUE)
|
op <- par(no.readonly = TRUE)
|
||||||
mar <- op$mar
|
mar <- op$mar
|
||||||
if (!is.null(left_margin))
|
if (!is.null(left_margin))
|
||||||
mar[2] <- left_margin
|
mar[2] <- left_margin
|
||||||
par(mar = mar)
|
par(mar = mar)
|
||||||
|
|
||||||
# reverse the order of rows to have the highest ranked at the top
|
# reverse the order of rows to have the highest ranked at the top
|
||||||
importance_matrix[nrow(importance_matrix):1,
|
importance_matrix[nrow(importance_matrix):1,
|
||||||
barplot(Importance, horiz = TRUE, border = NA, cex.names = cex,
|
barplot(Importance, horiz = TRUE, border = NA, cex.names = cex,
|
||||||
@@ -115,7 +115,7 @@ xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure
|
|||||||
barplot(Importance, horiz = TRUE, border = NA, add = TRUE)]
|
barplot(Importance, horiz = TRUE, border = NA, add = TRUE)]
|
||||||
par(op)
|
par(op)
|
||||||
}
|
}
|
||||||
|
|
||||||
invisible(importance_matrix)
|
invisible(importance_matrix)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
#' SHAP contribution dependency plots
|
#' SHAP contribution dependency plots
|
||||||
#'
|
#'
|
||||||
#' Visualizing the SHAP feature contribution to prediction dependencies on feature value.
|
#' Visualizing the SHAP feature contribution to prediction dependencies on feature value.
|
||||||
#'
|
#'
|
||||||
#' @param data data as a \code{matrix} or \code{dgCMatrix}.
|
#' @param data data as a \code{matrix} or \code{dgCMatrix}.
|
||||||
#' @param shap_contrib a matrix of SHAP contributions that was computed earlier for the above
|
#' @param shap_contrib a matrix of SHAP contributions that was computed earlier for the above
|
||||||
#' \code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.
|
#' \code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.
|
||||||
#' @param features a vector of either column indices or of feature names to plot. When it is NULL,
|
#' @param features a vector of either column indices or of feature names to plot. When it is NULL,
|
||||||
#' feature importance is calculated, and \code{top_n} high ranked features are taken.
|
#' feature importance is calculated, and \code{top_n} high ranked features are taken.
|
||||||
@@ -31,32 +31,32 @@
|
|||||||
#' @param plot_loess whether to plot loess-smoothed curves. The smoothing is only done for features with
|
#' @param plot_loess whether to plot loess-smoothed curves. The smoothing is only done for features with
|
||||||
#' more than 5 distinct values.
|
#' more than 5 distinct values.
|
||||||
#' @param col_loess a color to use for the loess curves.
|
#' @param col_loess a color to use for the loess curves.
|
||||||
#' @param span_loess the \code{span} paramerer in \code{\link[stats]{loess}}'s call.
|
#' @param span_loess the \code{span} parameter in \code{\link[stats]{loess}}'s call.
|
||||||
#' @param which whether to do univariate or bivariate plotting. NOTE: only 1D is implemented so far.
|
#' @param which whether to do univariate or bivariate plotting. NOTE: only 1D is implemented so far.
|
||||||
#' @param plot whether a plot should be drawn. If FALSE, only a lits of matrices is returned.
|
#' @param plot whether a plot should be drawn. If FALSE, only a lits of matrices is returned.
|
||||||
#' @param ... other parameters passed to \code{plot}.
|
#' @param ... other parameters passed to \code{plot}.
|
||||||
#'
|
#'
|
||||||
#' @details
|
#' @details
|
||||||
#'
|
#'
|
||||||
#' These scatterplots represent how SHAP feature contributions depend of feature values.
|
#' These scatterplots represent how SHAP feature contributions depend of feature values.
|
||||||
#' The similarity to partial dependency plots is that they also give an idea for how feature values
|
#' The similarity to partial dependency plots is that they also give an idea for how feature values
|
||||||
#' affect predictions. However, in partial dependency plots, we usually see marginal dependencies
|
#' affect predictions. However, in partial dependency plots, we usually see marginal dependencies
|
||||||
#' of model prediction on feature value, while SHAP contribution dependency plots display the estimated
|
#' of model prediction on feature value, while SHAP contribution dependency plots display the estimated
|
||||||
#' contributions of a feature to model prediction for each individual case.
|
#' contributions of a feature to model prediction for each individual case.
|
||||||
#'
|
#'
|
||||||
#' When \code{plot_loess = TRUE} is set, feature values are rounded to 3 significant digits and
|
#' When \code{plot_loess = TRUE} is set, feature values are rounded to 3 significant digits and
|
||||||
#' weighted LOESS is computed and plotted, where weights are the numbers of data points
|
#' weighted LOESS is computed and plotted, where weights are the numbers of data points
|
||||||
#' at each rounded value.
|
#' at each rounded value.
|
||||||
#'
|
#'
|
||||||
#' Note: SHAP contributions are shown on the scale of model margin. E.g., for a logistic binomial objective,
|
#' Note: SHAP contributions are shown on the scale of model margin. E.g., for a logistic binomial objective,
|
||||||
#' the margin is prediction before a sigmoidal transform into probability-like values.
|
#' the margin is prediction before a sigmoidal transform into probability-like values.
|
||||||
#' Also, since SHAP stands for "SHapley Additive exPlanation" (model prediction = sum of SHAP
|
#' Also, since SHAP stands for "SHapley Additive exPlanation" (model prediction = sum of SHAP
|
||||||
#' contributions for all features + bias), depending on the objective used, transforming SHAP
|
#' contributions for all features + bias), depending on the objective used, transforming SHAP
|
||||||
#' contributions for a feature from the marginal to the prediction space is not necessarily
|
#' contributions for a feature from the marginal to the prediction space is not necessarily
|
||||||
#' a meaningful thing to do.
|
#' a meaningful thing to do.
|
||||||
#'
|
#'
|
||||||
#' @return
|
#' @return
|
||||||
#'
|
#'
|
||||||
#' In addition to producing plots (when \code{plot=TRUE}), it silently returns a list of two matrices:
|
#' In addition to producing plots (when \code{plot=TRUE}), it silently returns a list of two matrices:
|
||||||
#' \itemize{
|
#' \itemize{
|
||||||
#' \item \code{data} the values of selected features;
|
#' \item \code{data} the values of selected features;
|
||||||
@@ -70,11 +70,11 @@
|
|||||||
#' Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles", \url{https://arxiv.org/abs/1706.06060}
|
#' Scott M. Lundberg, Su-In Lee, "Consistent feature attribution for tree ensembles", \url{https://arxiv.org/abs/1706.06060}
|
||||||
#'
|
#'
|
||||||
#' @examples
|
#' @examples
|
||||||
#'
|
#'
|
||||||
#' data(agaricus.train, package='xgboost')
|
#' data(agaricus.train, package='xgboost')
|
||||||
#' data(agaricus.test, package='xgboost')
|
#' data(agaricus.test, package='xgboost')
|
||||||
#'
|
#'
|
||||||
#' bst <- xgboost(agaricus.train$data, agaricus.train$label, nrounds = 50,
|
#' bst <- xgboost(agaricus.train$data, agaricus.train$label, nrounds = 50,
|
||||||
#' eta = 0.1, max_depth = 3, subsample = .5,
|
#' eta = 0.1, max_depth = 3, subsample = .5,
|
||||||
#' method = "hist", objective = "binary:logistic", nthread = 2, verbose = 0)
|
#' method = "hist", objective = "binary:logistic", nthread = 2, verbose = 0)
|
||||||
#'
|
#'
|
||||||
@@ -99,7 +99,7 @@
|
|||||||
#' n_col = 2, col = col, pch = 16, pch_NA = 17)
|
#' n_col = 2, col = col, pch = 16, pch_NA = 17)
|
||||||
#' xgb.plot.shap(x, model = mbst, trees = trees0 + 2, target_class = 2, top_n = 4,
|
#' xgb.plot.shap(x, model = mbst, trees = trees0 + 2, target_class = 2, top_n = 4,
|
||||||
#' n_col = 2, col = col, pch = 16, pch_NA = 17)
|
#' n_col = 2, col = col, pch = 16, pch_NA = 17)
|
||||||
#'
|
#'
|
||||||
#' @rdname xgb.plot.shap
|
#' @rdname xgb.plot.shap
|
||||||
#' @export
|
#' @export
|
||||||
xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1, model = NULL,
|
xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1, model = NULL,
|
||||||
@@ -109,7 +109,7 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
|
|||||||
plot_NA = TRUE, col_NA = rgb(0.7, 0, 1, 0.6), pch_NA = '.', pos_NA = 1.07,
|
plot_NA = TRUE, col_NA = rgb(0.7, 0, 1, 0.6), pch_NA = '.', pos_NA = 1.07,
|
||||||
plot_loess = TRUE, col_loess = 2, span_loess = 0.5,
|
plot_loess = TRUE, col_loess = 2, span_loess = 0.5,
|
||||||
which = c("1d", "2d"), plot = TRUE, ...) {
|
which = c("1d", "2d"), plot = TRUE, ...) {
|
||||||
|
|
||||||
if (!is.matrix(data) && !inherits(data, "dgCMatrix"))
|
if (!is.matrix(data) && !inherits(data, "dgCMatrix"))
|
||||||
stop("data: must be either matrix or dgCMatrix")
|
stop("data: must be either matrix or dgCMatrix")
|
||||||
|
|
||||||
@@ -122,7 +122,7 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
|
|||||||
if (!is.null(shap_contrib) &&
|
if (!is.null(shap_contrib) &&
|
||||||
(!is.matrix(shap_contrib) || nrow(shap_contrib) != nrow(data) || ncol(shap_contrib) != ncol(data) + 1))
|
(!is.matrix(shap_contrib) || nrow(shap_contrib) != nrow(data) || ncol(shap_contrib) != ncol(data) + 1))
|
||||||
stop("shap_contrib is not compatible with the provided data")
|
stop("shap_contrib is not compatible with the provided data")
|
||||||
|
|
||||||
nsample <- if (is.null(subsample)) min(100000, nrow(data)) else as.integer(subsample * nrow(data))
|
nsample <- if (is.null(subsample)) min(100000, nrow(data)) else as.integer(subsample * nrow(data))
|
||||||
idx <- sample(1:nrow(data), nsample)
|
idx <- sample(1:nrow(data), nsample)
|
||||||
data <- data[idx,]
|
data <- data[idx,]
|
||||||
@@ -144,13 +144,13 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
|
|||||||
stop("top_n: must be an integer within [1, 100]")
|
stop("top_n: must be an integer within [1, 100]")
|
||||||
features <- imp$Feature[1:min(top_n, NROW(imp))]
|
features <- imp$Feature[1:min(top_n, NROW(imp))]
|
||||||
}
|
}
|
||||||
|
|
||||||
if (is.character(features)) {
|
if (is.character(features)) {
|
||||||
if (is.null(colnames(data)))
|
if (is.null(colnames(data)))
|
||||||
stop("Either provide `data` with column names or provide `features` as column indices")
|
stop("Either provide `data` with column names or provide `features` as column indices")
|
||||||
features <- match(features, colnames(data))
|
features <- match(features, colnames(data))
|
||||||
}
|
}
|
||||||
|
|
||||||
if (n_col > length(features)) n_col <- length(features)
|
if (n_col > length(features)) n_col <- length(features)
|
||||||
|
|
||||||
if (is.list(shap_contrib)) { # multiclass: either choose a class or merge
|
if (is.list(shap_contrib)) { # multiclass: either choose a class or merge
|
||||||
@@ -165,7 +165,7 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
|
|||||||
if (is.null(cols)) cols <- paste0('X', 1:ncol(data))
|
if (is.null(cols)) cols <- paste0('X', 1:ncol(data))
|
||||||
colnames(data) <- cols
|
colnames(data) <- cols
|
||||||
colnames(shap_contrib) <- cols
|
colnames(shap_contrib) <- cols
|
||||||
|
|
||||||
if (plot && which == "1d") {
|
if (plot && which == "1d") {
|
||||||
op <- par(mfrow = c(ceiling(length(features) / n_col), n_col),
|
op <- par(mfrow = c(ceiling(length(features) / n_col), n_col),
|
||||||
oma = c(0,0,0,0) + 0.2,
|
oma = c(0,0,0,0) + 0.2,
|
||||||
|
|||||||
@@ -27,6 +27,7 @@
|
|||||||
#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
|
#' eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
|
||||||
#' xgb.save(bst, 'xgb.model')
|
#' xgb.save(bst, 'xgb.model')
|
||||||
#' bst <- xgb.load('xgb.model')
|
#' bst <- xgb.load('xgb.model')
|
||||||
|
#' if (file.exists('xgb.model')) file.remove('xgb.model')
|
||||||
#' pred <- predict(bst, test$data)
|
#' pred <- predict(bst, test$data)
|
||||||
#' @export
|
#' @export
|
||||||
xgb.save <- function(model, fname) {
|
xgb.save <- function(model, fname) {
|
||||||
|
|||||||
@@ -1,48 +1,48 @@
|
|||||||
#' eXtreme Gradient Boosting Training
|
#' eXtreme Gradient Boosting Training
|
||||||
#'
|
#'
|
||||||
#' \code{xgb.train} is an advanced interface for training an xgboost model.
|
#' \code{xgb.train} is an advanced interface for training an xgboost model.
|
||||||
#' The \code{xgboost} function is a simpler wrapper for \code{xgb.train}.
|
#' The \code{xgboost} function is a simpler wrapper for \code{xgb.train}.
|
||||||
#'
|
#'
|
||||||
#' @param params the list of parameters.
|
#' @param params the list of parameters.
|
||||||
#' The complete list of parameters is available at \url{http://xgboost.readthedocs.io/en/latest/parameter.html}.
|
#' The complete list of parameters is available at \url{http://xgboost.readthedocs.io/en/latest/parameter.html}.
|
||||||
#' Below is a shorter summary:
|
#' Below is a shorter summary:
|
||||||
#'
|
#'
|
||||||
#' 1. General Parameters
|
#' 1. General Parameters
|
||||||
#'
|
#'
|
||||||
#' \itemize{
|
#' \itemize{
|
||||||
#' \item \code{booster} which booster to use, can be \code{gbtree} or \code{gblinear}. Default: \code{gbtree}.
|
#' \item \code{booster} which booster to use, can be \code{gbtree} or \code{gblinear}. Default: \code{gbtree}.
|
||||||
#' }
|
#' }
|
||||||
#'
|
#'
|
||||||
#' 2. Booster Parameters
|
#' 2. Booster Parameters
|
||||||
#'
|
#'
|
||||||
#' 2.1. Parameter for Tree Booster
|
#' 2.1. Parameter for Tree Booster
|
||||||
#'
|
#'
|
||||||
#' \itemize{
|
#' \itemize{
|
||||||
#' \item \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model more robust to overfitting but slower to compute. Default: 0.3
|
#' \item \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model more robust to overfitting but slower to compute. Default: 0.3
|
||||||
#' \item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. the larger, the more conservative the algorithm will be.
|
#' \item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. the larger, the more conservative the algorithm will be.
|
||||||
#' \item \code{max_depth} maximum depth of a tree. Default: 6
|
#' \item \code{max_depth} maximum depth of a tree. Default: 6
|
||||||
#' \item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
|
#' \item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
|
||||||
#' \item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
|
#' \item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
|
||||||
#' \item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
|
#' \item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
|
||||||
#' \item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through Xgboost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
|
#' \item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through Xgboost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
|
||||||
#' \item \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length equals to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
|
#' \item \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length equals to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
|
||||||
#' \item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave argument unspecified for no interaction constraints.
|
#' \item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave argument unspecified for no interaction constraints.
|
||||||
#' }
|
#' }
|
||||||
#'
|
#'
|
||||||
#' 2.2. Parameter for Linear Booster
|
#' 2.2. Parameter for Linear Booster
|
||||||
#'
|
#'
|
||||||
#' \itemize{
|
#' \itemize{
|
||||||
#' \item \code{lambda} L2 regularization term on weights. Default: 0
|
#' \item \code{lambda} L2 regularization term on weights. Default: 0
|
||||||
#' \item \code{lambda_bias} L2 regularization term on bias. Default: 0
|
#' \item \code{lambda_bias} L2 regularization term on bias. Default: 0
|
||||||
#' \item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
|
#' \item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
|
||||||
#' }
|
#' }
|
||||||
#'
|
#'
|
||||||
#' 3. Task Parameters
|
#' 3. Task Parameters
|
||||||
#'
|
#'
|
||||||
#' \itemize{
|
#' \itemize{
|
||||||
#' \item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
|
#' \item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
|
||||||
#' \itemize{
|
#' \itemize{
|
||||||
#' \item \code{reg:linear} linear regression (Default).
|
#' \item \code{reg:squarederror} Regression with squared loss (Default).
|
||||||
#' \item \code{reg:logistic} logistic regression.
|
#' \item \code{reg:logistic} logistic regression.
|
||||||
#' \item \code{binary:logistic} logistic regression for binary classification. Output probability.
|
#' \item \code{binary:logistic} logistic regression for binary classification. Output probability.
|
||||||
#' \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
|
#' \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
|
||||||
@@ -54,32 +54,32 @@
|
|||||||
#' \item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
|
#' \item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
|
||||||
#' \item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: metric will be assigned according to objective(rmse for regression, and error for classification, mean average precision for ranking). List is provided in detail section.
|
#' \item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: metric will be assigned according to objective(rmse for regression, and error for classification, mean average precision for ranking). List is provided in detail section.
|
||||||
#' }
|
#' }
|
||||||
#'
|
#'
|
||||||
#' @param data training dataset. \code{xgb.train} accepts only an \code{xgb.DMatrix} as the input.
|
#' @param data training dataset. \code{xgb.train} accepts only an \code{xgb.DMatrix} as the input.
|
||||||
#' \code{xgboost}, in addition, also accepts \code{matrix}, \code{dgCMatrix}, or name of a local data file.
|
#' \code{xgboost}, in addition, also accepts \code{matrix}, \code{dgCMatrix}, or name of a local data file.
|
||||||
#' @param nrounds max number of boosting iterations.
|
#' @param nrounds max number of boosting iterations.
|
||||||
#' @param watchlist named list of xgb.DMatrix datasets to use for evaluating model performance.
|
#' @param watchlist named list of xgb.DMatrix datasets to use for evaluating model performance.
|
||||||
#' Metrics specified in either \code{eval_metric} or \code{feval} will be computed for each
|
#' Metrics specified in either \code{eval_metric} or \code{feval} will be computed for each
|
||||||
#' of these datasets during each boosting iteration, and stored in the end as a field named
|
#' of these datasets during each boosting iteration, and stored in the end as a field named
|
||||||
#' \code{evaluation_log} in the resulting object. When either \code{verbose>=1} or
|
#' \code{evaluation_log} in the resulting object. When either \code{verbose>=1} or
|
||||||
#' \code{\link{cb.print.evaluation}} callback is engaged, the performance results are continuously
|
#' \code{\link{cb.print.evaluation}} callback is engaged, the performance results are continuously
|
||||||
#' printed out during the training.
|
#' printed out during the training.
|
||||||
#' E.g., specifying \code{watchlist=list(validation1=mat1, validation2=mat2)} allows to track
|
#' E.g., specifying \code{watchlist=list(validation1=mat1, validation2=mat2)} allows to track
|
||||||
#' the performance of each round's model on mat1 and mat2.
|
#' the performance of each round's model on mat1 and mat2.
|
||||||
#' @param obj customized objective function. Returns gradient and second order
|
#' @param obj customized objective function. Returns gradient and second order
|
||||||
#' gradient with given prediction and dtrain.
|
#' gradient with given prediction and dtrain.
|
||||||
#' @param feval custimized evaluation function. Returns
|
#' @param feval customized evaluation function. Returns
|
||||||
#' \code{list(metric='metric-name', value='metric-value')} with given
|
#' \code{list(metric='metric-name', value='metric-value')} with given
|
||||||
#' prediction and dtrain.
|
#' prediction and dtrain.
|
||||||
#' @param verbose If 0, xgboost will stay silent. If 1, it will print information about performance.
|
#' @param verbose If 0, xgboost will stay silent. If 1, it will print information about performance.
|
||||||
#' If 2, some additional information will be printed out.
|
#' If 2, some additional information will be printed out.
|
||||||
#' Note that setting \code{verbose > 0} automatically engages the
|
#' Note that setting \code{verbose > 0} automatically engages the
|
||||||
#' \code{cb.print.evaluation(period=1)} callback function.
|
#' \code{cb.print.evaluation(period=1)} callback function.
|
||||||
#' @param print_every_n Print each n-th iteration evaluation messages when \code{verbose>0}.
|
#' @param print_every_n Print each n-th iteration evaluation messages when \code{verbose>0}.
|
||||||
#' Default is 1 which means all messages are printed. This parameter is passed to the
|
#' Default is 1 which means all messages are printed. This parameter is passed to the
|
||||||
#' \code{\link{cb.print.evaluation}} callback.
|
#' \code{\link{cb.print.evaluation}} callback.
|
||||||
#' @param early_stopping_rounds If \code{NULL}, the early stopping function is not triggered.
|
#' @param early_stopping_rounds If \code{NULL}, the early stopping function is not triggered.
|
||||||
#' If set to an integer \code{k}, training with a validation set will stop if the performance
|
#' If set to an integer \code{k}, training with a validation set will stop if the performance
|
||||||
#' doesn't improve for \code{k} rounds.
|
#' doesn't improve for \code{k} rounds.
|
||||||
#' Setting this parameter engages the \code{\link{cb.early.stop}} callback.
|
#' Setting this parameter engages the \code{\link{cb.early.stop}} callback.
|
||||||
#' @param maximize If \code{feval} and \code{early_stopping_rounds} are set,
|
#' @param maximize If \code{feval} and \code{early_stopping_rounds} are set,
|
||||||
@@ -90,35 +90,35 @@
|
|||||||
#' 0 means save at the end. The saving is handled by the \code{\link{cb.save.model}} callback.
|
#' 0 means save at the end. The saving is handled by the \code{\link{cb.save.model}} callback.
|
||||||
#' @param save_name the name or path for periodically saved model file.
|
#' @param save_name the name or path for periodically saved model file.
|
||||||
#' @param xgb_model a previously built model to continue the training from.
|
#' @param xgb_model a previously built model to continue the training from.
|
||||||
#' Could be either an object of class \code{xgb.Booster}, or its raw data, or the name of a
|
#' Could be either an object of class \code{xgb.Booster}, or its raw data, or the name of a
|
||||||
#' file with a previously saved model.
|
#' file with a previously saved model.
|
||||||
#' @param callbacks a list of callback functions to perform various task during boosting.
|
#' @param callbacks a list of callback functions to perform various task during boosting.
|
||||||
#' See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
|
#' See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
|
||||||
#' parameters' values. User can provide either existing or their own callback methods in order
|
#' parameters' values. User can provide either existing or their own callback methods in order
|
||||||
#' to customize the training process.
|
#' to customize the training process.
|
||||||
#' @param ... other parameters to pass to \code{params}.
|
#' @param ... other parameters to pass to \code{params}.
|
||||||
#' @param label vector of response values. Should not be provided when data is
|
#' @param label vector of response values. Should not be provided when data is
|
||||||
#' a local data file name or an \code{xgb.DMatrix}.
|
#' a local data file name or an \code{xgb.DMatrix}.
|
||||||
#' @param missing by default is set to NA, which means that NA values should be considered as 'missing'
|
#' @param missing by default is set to NA, which means that NA values should be considered as 'missing'
|
||||||
#' by the algorithm. Sometimes, 0 or other extreme value might be used to represent missing values.
|
#' by the algorithm. Sometimes, 0 or other extreme value might be used to represent missing values.
|
||||||
#' This parameter is only used when input is a dense matrix.
|
#' This parameter is only used when input is a dense matrix.
|
||||||
#' @param weight a vector indicating the weight for each row of the input.
|
#' @param weight a vector indicating the weight for each row of the input.
|
||||||
#'
|
#'
|
||||||
#' @details
|
#' @details
|
||||||
#' These are the training functions for \code{xgboost}.
|
#' These are the training functions for \code{xgboost}.
|
||||||
#'
|
#'
|
||||||
#' The \code{xgb.train} interface supports advanced features such as \code{watchlist},
|
#' The \code{xgb.train} interface supports advanced features such as \code{watchlist},
|
||||||
#' customized objective and evaluation metric functions, therefore it is more flexible
|
#' customized objective and evaluation metric functions, therefore it is more flexible
|
||||||
#' than the \code{xgboost} interface.
|
#' than the \code{xgboost} interface.
|
||||||
#'
|
#'
|
||||||
#' Parallelization is automatically enabled if \code{OpenMP} is present.
|
#' Parallelization is automatically enabled if \code{OpenMP} is present.
|
||||||
#' Number of threads can also be manually specified via \code{nthread} parameter.
|
#' Number of threads can also be manually specified via \code{nthread} parameter.
|
||||||
#'
|
#'
|
||||||
#' The evaluation metric is chosen automatically by Xgboost (according to the objective)
|
#' The evaluation metric is chosen automatically by Xgboost (according to the objective)
|
||||||
#' when the \code{eval_metric} parameter is not provided.
|
#' when the \code{eval_metric} parameter is not provided.
|
||||||
#' User may set one or several \code{eval_metric} parameters.
|
#' User may set one or several \code{eval_metric} parameters.
|
||||||
#' Note that when using a customized metric, only this single metric can be used.
|
#' Note that when using a customized metric, only this single metric can be used.
|
||||||
#' The folloiwing is the list of built-in metrics for which Xgboost provides optimized implementation:
|
#' The following is the list of built-in metrics for which Xgboost provides optimized implementation:
|
||||||
#' \itemize{
|
#' \itemize{
|
||||||
#' \item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
|
#' \item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
|
||||||
#' \item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
|
#' \item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
|
||||||
@@ -131,7 +131,7 @@
|
|||||||
#' \item \code{aucpr} Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation.
|
#' \item \code{aucpr} Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation.
|
||||||
#' \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{http://en.wikipedia.org/wiki/NDCG}
|
#' \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{http://en.wikipedia.org/wiki/NDCG}
|
||||||
#' }
|
#' }
|
||||||
#'
|
#'
|
||||||
#' The following callbacks are automatically created when certain parameters are set:
|
#' The following callbacks are automatically created when certain parameters are set:
|
||||||
#' \itemize{
|
#' \itemize{
|
||||||
#' \item \code{cb.print.evaluation} is turned on when \code{verbose > 0};
|
#' \item \code{cb.print.evaluation} is turned on when \code{verbose > 0};
|
||||||
@@ -140,38 +140,38 @@
|
|||||||
#' \item \code{cb.early.stop}: when \code{early_stopping_rounds} is set.
|
#' \item \code{cb.early.stop}: when \code{early_stopping_rounds} is set.
|
||||||
#' \item \code{cb.save.model}: when \code{save_period > 0} is set.
|
#' \item \code{cb.save.model}: when \code{save_period > 0} is set.
|
||||||
#' }
|
#' }
|
||||||
#'
|
#'
|
||||||
#' @return
|
#' @return
|
||||||
#' An object of class \code{xgb.Booster} with the following elements:
|
#' An object of class \code{xgb.Booster} with the following elements:
|
||||||
#' \itemize{
|
#' \itemize{
|
||||||
#' \item \code{handle} a handle (pointer) to the xgboost model in memory.
|
#' \item \code{handle} a handle (pointer) to the xgboost model in memory.
|
||||||
#' \item \code{raw} a cached memory dump of the xgboost model saved as R's \code{raw} type.
|
#' \item \code{raw} a cached memory dump of the xgboost model saved as R's \code{raw} type.
|
||||||
#' \item \code{niter} number of boosting iterations.
|
#' \item \code{niter} number of boosting iterations.
|
||||||
#' \item \code{evaluation_log} evaluation history storead as a \code{data.table} with the
|
#' \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
|
||||||
#' first column corresponding to iteration number and the rest corresponding to evaluation
|
#' first column corresponding to iteration number and the rest corresponding to evaluation
|
||||||
#' metrics' values. It is created by the \code{\link{cb.evaluation.log}} callback.
|
#' metrics' values. It is created by the \code{\link{cb.evaluation.log}} callback.
|
||||||
#' \item \code{call} a function call.
|
#' \item \code{call} a function call.
|
||||||
#' \item \code{params} parameters that were passed to the xgboost library. Note that it does not
|
#' \item \code{params} parameters that were passed to the xgboost library. Note that it does not
|
||||||
#' capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
|
#' capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
|
||||||
#' \item \code{callbacks} callback functions that were either automatically assigned or
|
#' \item \code{callbacks} callback functions that were either automatically assigned or
|
||||||
#' explicitely passed.
|
#' explicitly passed.
|
||||||
#' \item \code{best_iteration} iteration number with the best evaluation metric value
|
#' \item \code{best_iteration} iteration number with the best evaluation metric value
|
||||||
#' (only available with early stopping).
|
#' (only available with early stopping).
|
||||||
#' \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
|
#' \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
|
||||||
#' which could further be used in \code{predict} method
|
#' which could further be used in \code{predict} method
|
||||||
#' (only available with early stopping).
|
#' (only available with early stopping).
|
||||||
#' \item \code{best_score} the best evaluation metric value during early stopping.
|
#' \item \code{best_score} the best evaluation metric value during early stopping.
|
||||||
#' (only available with early stopping).
|
#' (only available with early stopping).
|
||||||
#' \item \code{feature_names} names of the training dataset features
|
#' \item \code{feature_names} names of the training dataset features
|
||||||
#' (only when comun names were defined in training data).
|
#' (only when column names were defined in training data).
|
||||||
#' \item \code{nfeatures} number of features in training data.
|
#' \item \code{nfeatures} number of features in training data.
|
||||||
#' }
|
#' }
|
||||||
#'
|
#'
|
||||||
#' @seealso
|
#' @seealso
|
||||||
#' \code{\link{callbacks}},
|
#' \code{\link{callbacks}},
|
||||||
#' \code{\link{predict.xgb.Booster}},
|
#' \code{\link{predict.xgb.Booster}},
|
||||||
#' \code{\link{xgb.cv}}
|
#' \code{\link{xgb.cv}}
|
||||||
#'
|
#'
|
||||||
#' @references
|
#' @references
|
||||||
#'
|
#'
|
||||||
#' Tianqi Chen and Carlos Guestrin, "XGBoost: A Scalable Tree Boosting System",
|
#' Tianqi Chen and Carlos Guestrin, "XGBoost: A Scalable Tree Boosting System",
|
||||||
@@ -180,17 +180,17 @@
|
|||||||
#' @examples
|
#' @examples
|
||||||
#' data(agaricus.train, package='xgboost')
|
#' data(agaricus.train, package='xgboost')
|
||||||
#' data(agaricus.test, package='xgboost')
|
#' data(agaricus.test, package='xgboost')
|
||||||
#'
|
#'
|
||||||
#' dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
|
#' dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
|
||||||
#' dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
|
#' dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
|
||||||
#' watchlist <- list(train = dtrain, eval = dtest)
|
#' watchlist <- list(train = dtrain, eval = dtest)
|
||||||
#'
|
#'
|
||||||
#' ## A simple xgb.train example:
|
#' ## A simple xgb.train example:
|
||||||
#' param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2,
|
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
|
||||||
#' objective = "binary:logistic", eval_metric = "auc")
|
#' objective = "binary:logistic", eval_metric = "auc")
|
||||||
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)
|
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)
|
||||||
#'
|
#'
|
||||||
#'
|
#'
|
||||||
#' ## An xgb.train example where custom objective and evaluation metric are used:
|
#' ## An xgb.train example where custom objective and evaluation metric are used:
|
||||||
#' logregobj <- function(preds, dtrain) {
|
#' logregobj <- function(preds, dtrain) {
|
||||||
#' labels <- getinfo(dtrain, "label")
|
#' labels <- getinfo(dtrain, "label")
|
||||||
@@ -204,58 +204,58 @@
|
|||||||
#' err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
|
#' err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
|
||||||
#' return(list(metric = "error", value = err))
|
#' return(list(metric = "error", value = err))
|
||||||
#' }
|
#' }
|
||||||
#'
|
#'
|
||||||
#' # These functions could be used by passing them either:
|
#' # These functions could be used by passing them either:
|
||||||
#' # as 'objective' and 'eval_metric' parameters in the params list:
|
#' # as 'objective' and 'eval_metric' parameters in the params list:
|
||||||
#' param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2,
|
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
|
||||||
#' objective = logregobj, eval_metric = evalerror)
|
#' objective = logregobj, eval_metric = evalerror)
|
||||||
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)
|
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)
|
||||||
#'
|
#'
|
||||||
#' # or through the ... arguments:
|
#' # or through the ... arguments:
|
||||||
#' param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2)
|
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2)
|
||||||
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
|
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
|
||||||
#' objective = logregobj, eval_metric = evalerror)
|
#' objective = logregobj, eval_metric = evalerror)
|
||||||
#'
|
#'
|
||||||
#' # or as dedicated 'obj' and 'feval' parameters of xgb.train:
|
#' # or as dedicated 'obj' and 'feval' parameters of xgb.train:
|
||||||
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
|
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
|
||||||
#' obj = logregobj, feval = evalerror)
|
#' obj = logregobj, feval = evalerror)
|
||||||
#'
|
#'
|
||||||
#'
|
#'
|
||||||
#' ## An xgb.train example of using variable learning rates at each iteration:
|
#' ## An xgb.train example of using variable learning rates at each iteration:
|
||||||
#' param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2,
|
#' param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
|
||||||
#' objective = "binary:logistic", eval_metric = "auc")
|
#' objective = "binary:logistic", eval_metric = "auc")
|
||||||
#' my_etas <- list(eta = c(0.5, 0.1))
|
#' my_etas <- list(eta = c(0.5, 0.1))
|
||||||
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
|
#' bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
|
||||||
#' callbacks = list(cb.reset.parameters(my_etas)))
|
#' callbacks = list(cb.reset.parameters(my_etas)))
|
||||||
#'
|
#'
|
||||||
#' ## Early stopping:
|
#' ## Early stopping:
|
||||||
#' bst <- xgb.train(param, dtrain, nrounds = 25, watchlist,
|
#' bst <- xgb.train(param, dtrain, nrounds = 25, watchlist,
|
||||||
#' early_stopping_rounds = 3)
|
#' early_stopping_rounds = 3)
|
||||||
#'
|
#'
|
||||||
#' ## An 'xgboost' interface example:
|
#' ## An 'xgboost' interface example:
|
||||||
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
|
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
|
||||||
#' max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
|
#' max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
|
||||||
#' objective = "binary:logistic")
|
#' objective = "binary:logistic")
|
||||||
#' pred <- predict(bst, agaricus.test$data)
|
#' pred <- predict(bst, agaricus.test$data)
|
||||||
#'
|
#'
|
||||||
#' @rdname xgb.train
|
#' @rdname xgb.train
|
||||||
#' @export
|
#' @export
|
||||||
xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
|
xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
|
||||||
obj = NULL, feval = NULL, verbose = 1, print_every_n = 1L,
|
obj = NULL, feval = NULL, verbose = 1, print_every_n = 1L,
|
||||||
early_stopping_rounds = NULL, maximize = NULL,
|
early_stopping_rounds = NULL, maximize = NULL,
|
||||||
save_period = NULL, save_name = "xgboost.model",
|
save_period = NULL, save_name = "xgboost.model",
|
||||||
xgb_model = NULL, callbacks = list(), ...) {
|
xgb_model = NULL, callbacks = list(), ...) {
|
||||||
|
|
||||||
check.deprecation(...)
|
check.deprecation(...)
|
||||||
|
|
||||||
params <- check.booster.params(params, ...)
|
params <- check.booster.params(params, ...)
|
||||||
|
|
||||||
check.custom.obj()
|
check.custom.obj()
|
||||||
check.custom.eval()
|
check.custom.eval()
|
||||||
|
|
||||||
# data & watchlist checks
|
# data & watchlist checks
|
||||||
dtrain <- data
|
dtrain <- data
|
||||||
if (!inherits(dtrain, "xgb.DMatrix"))
|
if (!inherits(dtrain, "xgb.DMatrix"))
|
||||||
stop("second argument dtrain must be xgb.DMatrix")
|
stop("second argument dtrain must be xgb.DMatrix")
|
||||||
if (length(watchlist) > 0) {
|
if (length(watchlist) > 0) {
|
||||||
if (typeof(watchlist) != "list" ||
|
if (typeof(watchlist) != "list" ||
|
||||||
@@ -288,11 +288,14 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
|
|||||||
stop_condition <- FALSE
|
stop_condition <- FALSE
|
||||||
if (!is.null(early_stopping_rounds) &&
|
if (!is.null(early_stopping_rounds) &&
|
||||||
!has.callbacks(callbacks, 'cb.early.stop')) {
|
!has.callbacks(callbacks, 'cb.early.stop')) {
|
||||||
callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds,
|
callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds,
|
||||||
maximize = maximize, verbose = verbose))
|
maximize = maximize, verbose = verbose))
|
||||||
}
|
}
|
||||||
# Sort the callbacks into categories
|
# Sort the callbacks into categories
|
||||||
cb <- categorize.callbacks(callbacks)
|
cb <- categorize.callbacks(callbacks)
|
||||||
|
if (!is.null(params[['seed']])) {
|
||||||
|
warning("xgb.train: `seed` is ignored in R package. Use `set.seed()` instead.")
|
||||||
|
}
|
||||||
|
|
||||||
# The tree updating process would need slightly different handling
|
# The tree updating process would need slightly different handling
|
||||||
is_update <- NVL(params[['process_type']], '.') == 'update'
|
is_update <- NVL(params[['process_type']], '.') == 'update'
|
||||||
@@ -318,22 +321,22 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
|
|||||||
|
|
||||||
# TODO: distributed code
|
# TODO: distributed code
|
||||||
rank <- 0
|
rank <- 0
|
||||||
|
|
||||||
niter_skip <- ifelse(is_update, 0, niter_init)
|
niter_skip <- ifelse(is_update, 0, niter_init)
|
||||||
begin_iteration <- niter_skip + 1
|
begin_iteration <- niter_skip + 1
|
||||||
end_iteration <- niter_skip + nrounds
|
end_iteration <- niter_skip + nrounds
|
||||||
|
|
||||||
# the main loop for boosting iterations
|
# the main loop for boosting iterations
|
||||||
for (iteration in begin_iteration:end_iteration) {
|
for (iteration in begin_iteration:end_iteration) {
|
||||||
|
|
||||||
for (f in cb$pre_iter) f()
|
for (f in cb$pre_iter) f()
|
||||||
|
|
||||||
xgb.iter.update(bst$handle, dtrain, iteration - 1, obj)
|
xgb.iter.update(bst$handle, dtrain, iteration - 1, obj)
|
||||||
|
|
||||||
bst_evaluation <- numeric(0)
|
bst_evaluation <- numeric(0)
|
||||||
if (length(watchlist) > 0)
|
if (length(watchlist) > 0)
|
||||||
bst_evaluation <- xgb.iter.eval(bst$handle, watchlist, iteration - 1, feval)
|
bst_evaluation <- xgb.iter.eval(bst$handle, watchlist, iteration - 1, feval)
|
||||||
|
|
||||||
xgb.attr(bst$handle, 'niter') <- iteration - 1
|
xgb.attr(bst$handle, 'niter') <- iteration - 1
|
||||||
|
|
||||||
for (f in cb$post_iter) f()
|
for (f in cb$post_iter) f()
|
||||||
@@ -341,9 +344,9 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
|
|||||||
if (stop_condition) break
|
if (stop_condition) break
|
||||||
}
|
}
|
||||||
for (f in cb$finalize) f(finalize = TRUE)
|
for (f in cb$finalize) f(finalize = TRUE)
|
||||||
|
|
||||||
bst <- xgb.Booster.complete(bst, saveraw = TRUE)
|
bst <- xgb.Booster.complete(bst, saveraw = TRUE)
|
||||||
|
|
||||||
# store the total number of boosting iterations
|
# store the total number of boosting iterations
|
||||||
bst$niter = end_iteration
|
bst$niter = end_iteration
|
||||||
|
|
||||||
|
|||||||
@@ -5,8 +5,8 @@
|
|||||||
#' @export
|
#' @export
|
||||||
xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL,
|
xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL,
|
||||||
params = list(), nrounds,
|
params = list(), nrounds,
|
||||||
verbose = 1, print_every_n = 1L,
|
verbose = 1, print_every_n = 1L,
|
||||||
early_stopping_rounds = NULL, maximize = NULL,
|
early_stopping_rounds = NULL, maximize = NULL,
|
||||||
save_period = NULL, save_name = "xgboost.model",
|
save_period = NULL, save_name = "xgboost.model",
|
||||||
xgb_model = NULL, callbacks = list(), ...) {
|
xgb_model = NULL, callbacks = list(), ...) {
|
||||||
|
|
||||||
@@ -18,16 +18,16 @@ xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL,
|
|||||||
early_stopping_rounds = early_stopping_rounds, maximize = maximize,
|
early_stopping_rounds = early_stopping_rounds, maximize = maximize,
|
||||||
save_period = save_period, save_name = save_name,
|
save_period = save_period, save_name = save_name,
|
||||||
xgb_model = xgb_model, callbacks = callbacks, ...)
|
xgb_model = xgb_model, callbacks = callbacks, ...)
|
||||||
return(bst)
|
return (bst)
|
||||||
}
|
}
|
||||||
|
|
||||||
#' Training part from Mushroom Data Set
|
#' Training part from Mushroom Data Set
|
||||||
#'
|
#'
|
||||||
#' This data set is originally from the Mushroom data set,
|
#' This data set is originally from the Mushroom data set,
|
||||||
#' UCI Machine Learning Repository.
|
#' UCI Machine Learning Repository.
|
||||||
#'
|
#'
|
||||||
#' This data set includes the following fields:
|
#' This data set includes the following fields:
|
||||||
#'
|
#'
|
||||||
#' \itemize{
|
#' \itemize{
|
||||||
#' \item \code{label} the label for each record
|
#' \item \code{label} the label for each record
|
||||||
#' \item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns.
|
#' \item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns.
|
||||||
@@ -35,16 +35,16 @@ xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL,
|
|||||||
#'
|
#'
|
||||||
#' @references
|
#' @references
|
||||||
#' https://archive.ics.uci.edu/ml/datasets/Mushroom
|
#' https://archive.ics.uci.edu/ml/datasets/Mushroom
|
||||||
#'
|
#'
|
||||||
#' Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
|
#' Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
|
||||||
#' [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
|
#' [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
|
||||||
#' School of Information and Computer Science.
|
#' School of Information and Computer Science.
|
||||||
#'
|
#'
|
||||||
#' @docType data
|
#' @docType data
|
||||||
#' @keywords datasets
|
#' @keywords datasets
|
||||||
#' @name agaricus.train
|
#' @name agaricus.train
|
||||||
#' @usage data(agaricus.train)
|
#' @usage data(agaricus.train)
|
||||||
#' @format A list containing a label vector, and a dgCMatrix object with 6513
|
#' @format A list containing a label vector, and a dgCMatrix object with 6513
|
||||||
#' rows and 127 variables
|
#' rows and 127 variables
|
||||||
NULL
|
NULL
|
||||||
|
|
||||||
@@ -52,9 +52,9 @@ NULL
|
|||||||
#'
|
#'
|
||||||
#' This data set is originally from the Mushroom data set,
|
#' This data set is originally from the Mushroom data set,
|
||||||
#' UCI Machine Learning Repository.
|
#' UCI Machine Learning Repository.
|
||||||
#'
|
#'
|
||||||
#' This data set includes the following fields:
|
#' This data set includes the following fields:
|
||||||
#'
|
#'
|
||||||
#' \itemize{
|
#' \itemize{
|
||||||
#' \item \code{label} the label for each record
|
#' \item \code{label} the label for each record
|
||||||
#' \item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns.
|
#' \item \code{data} a sparse Matrix of \code{dgCMatrix} class, with 126 columns.
|
||||||
@@ -62,16 +62,16 @@ NULL
|
|||||||
#'
|
#'
|
||||||
#' @references
|
#' @references
|
||||||
#' https://archive.ics.uci.edu/ml/datasets/Mushroom
|
#' https://archive.ics.uci.edu/ml/datasets/Mushroom
|
||||||
#'
|
#'
|
||||||
#' Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
|
#' Bache, K. & Lichman, M. (2013). UCI Machine Learning Repository
|
||||||
#' [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
|
#' [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,
|
||||||
#' School of Information and Computer Science.
|
#' School of Information and Computer Science.
|
||||||
#'
|
#'
|
||||||
#' @docType data
|
#' @docType data
|
||||||
#' @keywords datasets
|
#' @keywords datasets
|
||||||
#' @name agaricus.test
|
#' @name agaricus.test
|
||||||
#' @usage data(agaricus.test)
|
#' @usage data(agaricus.test)
|
||||||
#' @format A list containing a label vector, and a dgCMatrix object with 1611
|
#' @format A list containing a label vector, and a dgCMatrix object with 1611
|
||||||
#' rows and 126 variables
|
#' rows and 126 variables
|
||||||
NULL
|
NULL
|
||||||
|
|
||||||
@@ -107,7 +107,7 @@ NULL
|
|||||||
#' @importFrom graphics par
|
#' @importFrom graphics par
|
||||||
#' @importFrom graphics title
|
#' @importFrom graphics title
|
||||||
#' @importFrom grDevices rgb
|
#' @importFrom grDevices rgb
|
||||||
#'
|
#'
|
||||||
#' @import methods
|
#' @import methods
|
||||||
#' @useDynLib xgboost, .registration = TRUE
|
#' @useDynLib xgboost, .registration = TRUE
|
||||||
NULL
|
NULL
|
||||||
|
|||||||
@@ -30,4 +30,4 @@ Examples
|
|||||||
Development
|
Development
|
||||||
-----------
|
-----------
|
||||||
|
|
||||||
* See the [R Package section](https://xgboost.readthedocs.io/en/latest/how_to/contribute.html#r-package) of the contributors guide.
|
* See the [R Package section](https://xgboost.readthedocs.io/en/latest/contribute.html#r-package) of the contributors guide.
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
|
|
||||||
rm -f src/Makevars
|
rm -f src/Makevars
|
||||||
|
rm -f CMakeLists.txt
|
||||||
|
|||||||
1045
R-package/configure
vendored
1045
R-package/configure
vendored
File diff suppressed because it is too large
Load Diff
@@ -4,28 +4,52 @@ AC_PREREQ(2.62)
|
|||||||
|
|
||||||
AC_INIT([xgboost],[0.6-3],[],[xgboost],[])
|
AC_INIT([xgboost],[0.6-3],[],[xgboost],[])
|
||||||
|
|
||||||
|
# Use this line to set CC variable to a C compiler
|
||||||
|
AC_PROG_CC
|
||||||
|
|
||||||
|
### Check whether backtrace() is part of libc or the external lib libexecinfo
|
||||||
|
AC_MSG_CHECKING([Backtrace lib])
|
||||||
|
AC_MSG_RESULT([])
|
||||||
|
AC_CHECK_LIB([execinfo], [backtrace], [BACKTRACE_LIB=-lexecinfo], [BACKTRACE_LIB=''])
|
||||||
|
|
||||||
|
### Endian detection
|
||||||
|
AC_MSG_CHECKING([endian])
|
||||||
|
AC_MSG_RESULT([])
|
||||||
|
AC_RUN_IFELSE([AC_LANG_PROGRAM([[#include <stdint.h>]], [[const uint16_t endianness = 256; return !!(*(const uint8_t *)&endianness);]])],
|
||||||
|
[ENDIAN_FLAG="-DDMLC_CMAKE_LITTLE_ENDIAN=1"],
|
||||||
|
[ENDIAN_FLAG="-DDMLC_CMAKE_LITTLE_ENDIAN=0"])
|
||||||
|
|
||||||
OPENMP_CXXFLAGS=""
|
OPENMP_CXXFLAGS=""
|
||||||
|
|
||||||
if test `uname -s` = "Linux"
|
if test `uname -s` = "Linux"
|
||||||
then
|
then
|
||||||
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CFLAGS)"
|
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CXXFLAGS)"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if test `uname -s` = "Darwin"
|
if test `uname -s` = "Darwin"
|
||||||
then
|
then
|
||||||
OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CFLAGS)"
|
OPENMP_CXXFLAGS='-Xclang -fopenmp'
|
||||||
|
OPENMP_LIB='/usr/local/lib/libomp.dylib'
|
||||||
ac_pkg_openmp=no
|
ac_pkg_openmp=no
|
||||||
AC_MSG_CHECKING([whether OpenMP will work in a package])
|
AC_MSG_CHECKING([whether OpenMP will work in a package])
|
||||||
AC_LANG_CONFTEST(
|
AC_LANG_CONFTEST([AC_LANG_PROGRAM([[#include <omp.h>]], [[ return (omp_get_max_threads() <= 1); ]])])
|
||||||
[AC_LANG_PROGRAM([[#include <omp.h>]], [[ return omp_get_num_threads (); ]])])
|
${CC} -o conftest conftest.c /usr/local/lib/libomp.dylib -Xclang -fopenmp 2>/dev/null && ./conftest && ac_pkg_openmp=yes
|
||||||
PKG_CFLAGS="${OPENMP_CFLAGS}" PKG_LIBS="${OPENMP_CFLAGS}" "$RBIN" CMD SHLIB conftest.c 1>&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD && "$RBIN" --vanilla -q -e "dyn.load(paste('conftest',.Platform\$dynlib.ext,sep=''))" 1>&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD && ac_pkg_openmp=yes
|
|
||||||
AC_MSG_RESULT([${ac_pkg_openmp}])
|
AC_MSG_RESULT([${ac_pkg_openmp}])
|
||||||
if test "${ac_pkg_openmp}" = no; then
|
if test "${ac_pkg_openmp}" = no; then
|
||||||
OPENMP_CXXFLAGS=''
|
OPENMP_CXXFLAGS=''
|
||||||
|
OPENMP_LIB=''
|
||||||
|
echo '*****************************************************************************************'
|
||||||
|
echo 'WARNING: OpenMP is unavailable on this Mac OSX system. Training speed may be suboptimal.'
|
||||||
|
echo ' To use all CPU cores for training jobs, you should install OpenMP by running\n'
|
||||||
|
echo ' brew install libomp'
|
||||||
|
echo '*****************************************************************************************'
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
AC_SUBST(OPENMP_CXXFLAGS)
|
AC_SUBST(OPENMP_CXXFLAGS)
|
||||||
|
AC_SUBST(OPENMP_LIB)
|
||||||
|
AC_SUBST(ENDIAN_FLAG)
|
||||||
|
AC_SUBST(BACKTRACE_LIB)
|
||||||
AC_CONFIG_FILES([src/Makevars])
|
AC_CONFIG_FILES([src/Makevars])
|
||||||
AC_OUTPUT
|
AC_OUTPUT
|
||||||
|
|
||||||
|
|||||||
@@ -33,7 +33,7 @@ evalerror <- function(preds, dtrain) {
|
|||||||
return(list(metric = "error", value = err))
|
return(list(metric = "error", value = err))
|
||||||
}
|
}
|
||||||
|
|
||||||
param <- list(max_depth=2, eta=1, nthread = 2, silent=1,
|
param <- list(max_depth=2, eta=1, nthread = 2, verbosity=0,
|
||||||
objective=logregobj, eval_metric=evalerror)
|
objective=logregobj, eval_metric=evalerror)
|
||||||
print ('start training with user customized objective')
|
print ('start training with user customized objective')
|
||||||
# training with customized objective, we can also do step by step training
|
# training with customized objective, we can also do step by step training
|
||||||
@@ -57,7 +57,7 @@ logregobjattr <- function(preds, dtrain) {
|
|||||||
hess <- preds * (1 - preds)
|
hess <- preds * (1 - preds)
|
||||||
return(list(grad = grad, hess = hess))
|
return(list(grad = grad, hess = hess))
|
||||||
}
|
}
|
||||||
param <- list(max_depth=2, eta=1, nthread = 2, silent=1,
|
param <- list(max_depth=2, eta=1, nthread = 2, verbosity=0,
|
||||||
objective=logregobjattr, eval_metric=evalerror)
|
objective=logregobjattr, eval_metric=evalerror)
|
||||||
print ('start training with user customized objective, with additional attributes in DMatrix')
|
print ('start training with user customized objective, with additional attributes in DMatrix')
|
||||||
# training with customized objective, we can also do step by step training
|
# training with customized objective, we can also do step by step training
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
|
|||||||
# note: for customized objective function, we leave objective as default
|
# note: for customized objective function, we leave objective as default
|
||||||
# note: what we are getting is margin value in prediction
|
# note: what we are getting is margin value in prediction
|
||||||
# you must know what you are doing
|
# you must know what you are doing
|
||||||
param <- list(max_depth=2, eta=1, nthread = 2, silent=1)
|
param <- list(max_depth=2, eta=1, nthread=2, verbosity=0)
|
||||||
watchlist <- list(eval = dtest)
|
watchlist <- list(eval = dtest)
|
||||||
num_round <- 20
|
num_round <- 20
|
||||||
# user define objective function, given prediction, return gradient and second order gradient
|
# user define objective function, given prediction, return gradient and second order gradient
|
||||||
@@ -32,9 +32,9 @@ evalerror <- function(preds, dtrain) {
|
|||||||
}
|
}
|
||||||
print ('start training with early Stopping setting')
|
print ('start training with early Stopping setting')
|
||||||
|
|
||||||
bst <- xgb.train(param, dtrain, num_round, watchlist,
|
bst <- xgb.train(param, dtrain, num_round, watchlist,
|
||||||
objective = logregobj, eval_metric = evalerror, maximize = FALSE,
|
objective = logregobj, eval_metric = evalerror, maximize = FALSE,
|
||||||
early_stopping_round = 3)
|
early_stopping_round = 3)
|
||||||
bst <- xgb.cv(param, dtrain, num_round, nfold = 5,
|
bst <- xgb.cv(param, dtrain, num_round, nfold = 5,
|
||||||
objective = logregobj, eval_metric = evalerror,
|
objective = logregobj, eval_metric = evalerror,
|
||||||
maximize = FALSE, early_stopping_rounds = 3)
|
maximize = FALSE, early_stopping_rounds = 3)
|
||||||
|
|||||||
@@ -30,7 +30,7 @@ wl <- list(train = dtrain, test = dtest)
|
|||||||
# - similar to the 'hist'
|
# - similar to the 'hist'
|
||||||
# - the fastest option for moderately large datasets
|
# - the fastest option for moderately large datasets
|
||||||
# - current limitations: max_depth < 16, does not implement guided loss
|
# - current limitations: max_depth < 16, does not implement guided loss
|
||||||
# You can use tree_method = 'gpu_exact' for another GPU accelerated algorithm,
|
# You can use tree_method = 'gpu_hist' for another GPU accelerated algorithm,
|
||||||
# which is slower, more memory-hungry, but does not use binning.
|
# which is slower, more memory-hungry, but does not use binning.
|
||||||
param <- list(objective = 'reg:logistic', eval_metric = 'auc', subsample = 0.5, nthread = 4,
|
param <- list(objective = 'reg:logistic', eval_metric = 'auc', subsample = 0.5, nthread = 4,
|
||||||
max_bin = 64, tree_method = 'gpu_hist')
|
max_bin = 64, tree_method = 'gpu_hist')
|
||||||
|
|||||||
@@ -38,6 +38,7 @@ create.new.tree.features <- function(model, original.features){
|
|||||||
# Convert previous features to one hot encoding
|
# Convert previous features to one hot encoding
|
||||||
new.features.train <- create.new.tree.features(bst, agaricus.train$data)
|
new.features.train <- create.new.tree.features(bst, agaricus.train$data)
|
||||||
new.features.test <- create.new.tree.features(bst, agaricus.test$data)
|
new.features.test <- create.new.tree.features(bst, agaricus.test$data)
|
||||||
|
colnames(new.features.test) <- colnames(new.features.train)
|
||||||
|
|
||||||
# learning with new features
|
# learning with new features
|
||||||
new.dtrain <- xgb.DMatrix(data = new.features.train, label = agaricus.train$label)
|
new.dtrain <- xgb.DMatrix(data = new.features.train, label = agaricus.train$label)
|
||||||
|
|||||||
@@ -5,24 +5,24 @@
|
|||||||
\title{Callback closures for booster training.}
|
\title{Callback closures for booster training.}
|
||||||
\description{
|
\description{
|
||||||
These are used to perform various service tasks either during boosting iterations or at the end.
|
These are used to perform various service tasks either during boosting iterations or at the end.
|
||||||
This approach helps to modularize many of such tasks without bloating the main training methods,
|
This approach helps to modularize many of such tasks without bloating the main training methods,
|
||||||
and it offers .
|
and it offers .
|
||||||
}
|
}
|
||||||
\details{
|
\details{
|
||||||
By default, a callback function is run after each boosting iteration.
|
By default, a callback function is run after each boosting iteration.
|
||||||
An R-attribute \code{is_pre_iteration} could be set for a callback to define a pre-iteration function.
|
An R-attribute \code{is_pre_iteration} could be set for a callback to define a pre-iteration function.
|
||||||
|
|
||||||
When a callback function has \code{finalize} parameter, its finalizer part will also be run after
|
When a callback function has \code{finalize} parameter, its finalizer part will also be run after
|
||||||
the boosting is completed.
|
the boosting is completed.
|
||||||
|
|
||||||
WARNING: side-effects!!! Be aware that these callback functions access and modify things in
|
WARNING: side-effects!!! Be aware that these callback functions access and modify things in
|
||||||
the environment from which they are called from, which is a fairly uncommon thing to do in R.
|
the environment from which they are called from, which is a fairly uncommon thing to do in R.
|
||||||
|
|
||||||
To write a custom callback closure, make sure you first understand the main concepts about R envoronments.
|
To write a custom callback closure, make sure you first understand the main concepts about R environments.
|
||||||
Check either R documentation on \code{\link[base]{environment}} or the
|
Check either R documentation on \code{\link[base]{environment}} or the
|
||||||
\href{http://adv-r.had.co.nz/Environments.html}{Environments chapter} from the "Advanced R"
|
\href{http://adv-r.had.co.nz/Environments.html}{Environments chapter} from the "Advanced R"
|
||||||
book by Hadley Wickham. Further, the best option is to read the code of some of the existing callbacks -
|
book by Hadley Wickham. Further, the best option is to read the code of some of the existing callbacks -
|
||||||
choose ones that do something similar to what you want to achieve. Also, you would need to get familiar
|
choose ones that do something similar to what you want to achieve. Also, you would need to get familiar
|
||||||
with the objects available inside of the \code{xgb.train} and \code{xgb.cv} internal environments.
|
with the objects available inside of the \code{xgb.train} and \code{xgb.cv} internal environments.
|
||||||
}
|
}
|
||||||
\seealso{
|
\seealso{
|
||||||
|
|||||||
@@ -11,11 +11,11 @@ cb.cv.predict(save_models = FALSE)
|
|||||||
}
|
}
|
||||||
\value{
|
\value{
|
||||||
Predictions are returned inside of the \code{pred} element, which is either a vector or a matrix,
|
Predictions are returned inside of the \code{pred} element, which is either a vector or a matrix,
|
||||||
depending on the number of prediction outputs per data row. The order of predictions corresponds
|
depending on the number of prediction outputs per data row. The order of predictions corresponds
|
||||||
to the order of rows in the original dataset. Note that when a custom \code{folds} list is
|
to the order of rows in the original dataset. Note that when a custom \code{folds} list is
|
||||||
provided in \code{xgb.cv}, the predictions would only be returned properly when this list is a
|
provided in \code{xgb.cv}, the predictions would only be returned properly when this list is a
|
||||||
non-overlapping list of k sets of indices, as in a standard k-fold CV. The predictions would not be
|
non-overlapping list of k sets of indices, as in a standard k-fold CV. The predictions would not be
|
||||||
meaningful when user-profided folds have overlapping indices as in, e.g., random sampling splits.
|
meaningful when user-provided folds have overlapping indices as in, e.g., random sampling splits.
|
||||||
When some of the indices in the training dataset are not included into user-provided \code{folds},
|
When some of the indices in the training dataset are not included into user-provided \code{folds},
|
||||||
their prediction value would be \code{NA}.
|
their prediction value would be \code{NA}.
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,19 +4,23 @@
|
|||||||
\alias{cb.early.stop}
|
\alias{cb.early.stop}
|
||||||
\title{Callback closure to activate the early stopping.}
|
\title{Callback closure to activate the early stopping.}
|
||||||
\usage{
|
\usage{
|
||||||
cb.early.stop(stopping_rounds, maximize = FALSE, metric_name = NULL,
|
cb.early.stop(
|
||||||
verbose = TRUE)
|
stopping_rounds,
|
||||||
|
maximize = FALSE,
|
||||||
|
metric_name = NULL,
|
||||||
|
verbose = TRUE
|
||||||
|
)
|
||||||
}
|
}
|
||||||
\arguments{
|
\arguments{
|
||||||
\item{stopping_rounds}{The number of rounds with no improvement in
|
\item{stopping_rounds}{The number of rounds with no improvement in
|
||||||
the evaluation metric in order to stop the training.}
|
the evaluation metric in order to stop the training.}
|
||||||
|
|
||||||
\item{maximize}{whether to maximize the evaluation metric}
|
\item{maximize}{whether to maximize the evaluation metric}
|
||||||
|
|
||||||
\item{metric_name}{the name of an evaluation column to use as a criteria for early
|
\item{metric_name}{the name of an evaluation column to use as a criteria for early
|
||||||
stopping. If not set, the last column would be used.
|
stopping. If not set, the last column would be used.
|
||||||
Let's say the test data in \code{watchlist} was labelled as \code{dtest},
|
Let's say the test data in \code{watchlist} was labelled as \code{dtest},
|
||||||
and one wants to use the AUC in test data for early stopping regardless of where
|
and one wants to use the AUC in test data for early stopping regardless of where
|
||||||
it is in the \code{watchlist}, then one of the following would need to be set:
|
it is in the \code{watchlist}, then one of the following would need to be set:
|
||||||
\code{metric_name='dtest-auc'} or \code{metric_name='dtest_auc'}.
|
\code{metric_name='dtest-auc'} or \code{metric_name='dtest_auc'}.
|
||||||
All dash '-' characters in metric names are considered equivalent to '_'.}
|
All dash '-' characters in metric names are considered equivalent to '_'.}
|
||||||
@@ -27,7 +31,7 @@ All dash '-' characters in metric names are considered equivalent to '_'.}
|
|||||||
Callback closure to activate the early stopping.
|
Callback closure to activate the early stopping.
|
||||||
}
|
}
|
||||||
\details{
|
\details{
|
||||||
This callback function determines the condition for early stopping
|
This callback function determines the condition for early stopping
|
||||||
by setting the \code{stop_condition = TRUE} flag in its calling frame.
|
by setting the \code{stop_condition = TRUE} flag in its calling frame.
|
||||||
|
|
||||||
The following additional fields are assigned to the model's R object:
|
The following additional fields are assigned to the model's R object:
|
||||||
|
|||||||
@@ -13,12 +13,12 @@ Callback closure for logging the evaluation history
|
|||||||
This callback function appends the current iteration evaluation results \code{bst_evaluation}
|
This callback function appends the current iteration evaluation results \code{bst_evaluation}
|
||||||
available in the calling parent frame to the \code{evaluation_log} list in a calling frame.
|
available in the calling parent frame to the \code{evaluation_log} list in a calling frame.
|
||||||
|
|
||||||
The finalizer callback (called with \code{finalize = TURE} in the end) converts
|
The finalizer callback (called with \code{finalize = TURE} in the end) converts
|
||||||
the \code{evaluation_log} list into a final data.table.
|
the \code{evaluation_log} list into a final data.table.
|
||||||
|
|
||||||
The iteration evaluation result \code{bst_evaluation} must be a named numeric vector.
|
The iteration evaluation result \code{bst_evaluation} must be a named numeric vector.
|
||||||
|
|
||||||
Note: in the column names of the final data.table, the dash '-' character is replaced with
|
Note: in the column names of the final data.table, the dash '-' character is replaced with
|
||||||
the underscore '_' in order to make the column names more like regular R identifiers.
|
the underscore '_' in order to make the column names more like regular R identifiers.
|
||||||
|
|
||||||
Callback function expects the following values to be set in its calling frame:
|
Callback function expects the following values to be set in its calling frame:
|
||||||
|
|||||||
@@ -2,27 +2,27 @@
|
|||||||
% Please edit documentation in R/callbacks.R
|
% Please edit documentation in R/callbacks.R
|
||||||
\name{cb.reset.parameters}
|
\name{cb.reset.parameters}
|
||||||
\alias{cb.reset.parameters}
|
\alias{cb.reset.parameters}
|
||||||
\title{Callback closure for restetting the booster's parameters at each iteration.}
|
\title{Callback closure for resetting the booster's parameters at each iteration.}
|
||||||
\usage{
|
\usage{
|
||||||
cb.reset.parameters(new_params)
|
cb.reset.parameters(new_params)
|
||||||
}
|
}
|
||||||
\arguments{
|
\arguments{
|
||||||
\item{new_params}{a list where each element corresponds to a parameter that needs to be reset.
|
\item{new_params}{a list where each element corresponds to a parameter that needs to be reset.
|
||||||
Each element's value must be either a vector of values of length \code{nrounds}
|
Each element's value must be either a vector of values of length \code{nrounds}
|
||||||
to be set at each iteration,
|
to be set at each iteration,
|
||||||
or a function of two parameters \code{learning_rates(iteration, nrounds)}
|
or a function of two parameters \code{learning_rates(iteration, nrounds)}
|
||||||
which returns a new parameter value by using the current iteration number
|
which returns a new parameter value by using the current iteration number
|
||||||
and the total number of boosting rounds.}
|
and the total number of boosting rounds.}
|
||||||
}
|
}
|
||||||
\description{
|
\description{
|
||||||
Callback closure for restetting the booster's parameters at each iteration.
|
Callback closure for resetting the booster's parameters at each iteration.
|
||||||
}
|
}
|
||||||
\details{
|
\details{
|
||||||
This is a "pre-iteration" callback function used to reset booster's parameters
|
This is a "pre-iteration" callback function used to reset booster's parameters
|
||||||
at the beginning of each iteration.
|
at the beginning of each iteration.
|
||||||
|
|
||||||
Note that when training is resumed from some previous model, and a function is used to
|
Note that when training is resumed from some previous model, and a function is used to
|
||||||
reset a parameter value, the \code{nrounds} argument in this function would be the
|
reset a parameter value, the \code{nrounds} argument in this function would be the
|
||||||
the number of boosting rounds in the current training.
|
the number of boosting rounds in the current training.
|
||||||
|
|
||||||
Callback function expects the following values to be set in its calling frame:
|
Callback function expects the following values to be set in its calling frame:
|
||||||
|
|||||||
@@ -7,13 +7,13 @@
|
|||||||
cb.save.model(save_period = 0, save_name = "xgboost.model")
|
cb.save.model(save_period = 0, save_name = "xgboost.model")
|
||||||
}
|
}
|
||||||
\arguments{
|
\arguments{
|
||||||
\item{save_period}{save the model to disk after every
|
\item{save_period}{save the model to disk after every
|
||||||
\code{save_period} iterations; 0 means save the model at the end.}
|
\code{save_period} iterations; 0 means save the model at the end.}
|
||||||
|
|
||||||
\item{save_name}{the name or path for the saved model file.
|
\item{save_name}{the name or path for the saved model file.
|
||||||
It can contain a \code{\link[base]{sprintf}} formatting specifier
|
It can contain a \code{\link[base]{sprintf}} formatting specifier
|
||||||
to include the integer iteration number in the file name.
|
to include the integer iteration number in the file name.
|
||||||
E.g., with \code{save_name} = 'xgboost_%04d.model',
|
E.g., with \code{save_name} = 'xgboost_%04d.model',
|
||||||
the file saved at iteration 50 would be named "xgboost_0050.model".}
|
the file saved at iteration 50 would be named "xgboost_0050.model".}
|
||||||
}
|
}
|
||||||
\description{
|
\description{
|
||||||
|
|||||||
@@ -13,7 +13,7 @@
|
|||||||
Returns a vector of numbers of rows and of columns in an \code{xgb.DMatrix}.
|
Returns a vector of numbers of rows and of columns in an \code{xgb.DMatrix}.
|
||||||
}
|
}
|
||||||
\details{
|
\details{
|
||||||
Note: since \code{nrow} and \code{ncol} internally use \code{dim}, they can also
|
Note: since \code{nrow} and \code{ncol} internally use \code{dim}, they can also
|
||||||
be directly used with an \code{xgb.DMatrix} object.
|
be directly used with an \code{xgb.DMatrix} object.
|
||||||
}
|
}
|
||||||
\examples{
|
\examples{
|
||||||
|
|||||||
@@ -16,8 +16,8 @@
|
|||||||
and the second one is column names}
|
and the second one is column names}
|
||||||
}
|
}
|
||||||
\description{
|
\description{
|
||||||
Only column names are supported for \code{xgb.DMatrix}, thus setting of
|
Only column names are supported for \code{xgb.DMatrix}, thus setting of
|
||||||
row names would have no effect and returnten row names would be NULL.
|
row names would have no effect and returned row names would be NULL.
|
||||||
}
|
}
|
||||||
\details{
|
\details{
|
||||||
Generic \code{dimnames} methods are used by \code{colnames}.
|
Generic \code{dimnames} methods are used by \code{colnames}.
|
||||||
|
|||||||
@@ -27,7 +27,7 @@ The \code{name} field can be one of the following:
|
|||||||
\item \code{weight}: to do a weight rescale ;
|
\item \code{weight}: to do a weight rescale ;
|
||||||
\item \code{base_margin}: base margin is the base prediction Xgboost will boost from ;
|
\item \code{base_margin}: base margin is the base prediction Xgboost will boost from ;
|
||||||
\item \code{nrow}: number of rows of the \code{xgb.DMatrix}.
|
\item \code{nrow}: number of rows of the \code{xgb.DMatrix}.
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
\code{group} can be setup by \code{setinfo} but can't be retrieved by \code{getinfo}.
|
\code{group} can be setup by \code{setinfo} but can't be retrieved by \code{getinfo}.
|
||||||
|
|||||||
@@ -5,10 +5,20 @@
|
|||||||
\alias{predict.xgb.Booster.handle}
|
\alias{predict.xgb.Booster.handle}
|
||||||
\title{Predict method for eXtreme Gradient Boosting model}
|
\title{Predict method for eXtreme Gradient Boosting model}
|
||||||
\usage{
|
\usage{
|
||||||
\method{predict}{xgb.Booster}(object, newdata, missing = NA,
|
\method{predict}{xgb.Booster}(
|
||||||
outputmargin = FALSE, ntreelimit = NULL, predleaf = FALSE,
|
object,
|
||||||
predcontrib = FALSE, approxcontrib = FALSE,
|
newdata,
|
||||||
predinteraction = FALSE, reshape = FALSE, ...)
|
missing = NA,
|
||||||
|
outputmargin = FALSE,
|
||||||
|
ntreelimit = NULL,
|
||||||
|
predleaf = FALSE,
|
||||||
|
predcontrib = FALSE,
|
||||||
|
approxcontrib = FALSE,
|
||||||
|
predinteraction = FALSE,
|
||||||
|
reshape = FALSE,
|
||||||
|
training = FALSE,
|
||||||
|
...
|
||||||
|
)
|
||||||
|
|
||||||
\method{predict}{xgb.Booster.handle}(object, ...)
|
\method{predict}{xgb.Booster.handle}(object, ...)
|
||||||
}
|
}
|
||||||
@@ -91,7 +101,7 @@ in \url{http://blog.datadive.net/interpreting-random-forests/}.
|
|||||||
|
|
||||||
With \code{predinteraction = TRUE}, SHAP values of contributions of interaction of each pair of features
|
With \code{predinteraction = TRUE}, SHAP values of contributions of interaction of each pair of features
|
||||||
are computed. Note that this operation might be rather expensive in terms of compute and memory.
|
are computed. Note that this operation might be rather expensive in terms of compute and memory.
|
||||||
Since it quadratically depends on the number of features, it is recommended to perfom selection
|
Since it quadratically depends on the number of features, it is recommended to perform selection
|
||||||
of the most important features first. See below about the format of the returned results.
|
of the most important features first. See below about the format of the returned results.
|
||||||
}
|
}
|
||||||
\examples{
|
\examples{
|
||||||
|
|||||||
@@ -14,7 +14,7 @@
|
|||||||
\item{...}{not currently used}
|
\item{...}{not currently used}
|
||||||
}
|
}
|
||||||
\description{
|
\description{
|
||||||
Print information about xgb.DMatrix.
|
Print information about xgb.DMatrix.
|
||||||
Currently it displays dimensions and presence of info-fields and colnames.
|
Currently it displays dimensions and presence of info-fields and colnames.
|
||||||
}
|
}
|
||||||
\examples{
|
\examples{
|
||||||
|
|||||||
@@ -17,7 +17,7 @@
|
|||||||
Prints formatted results of \code{xgb.cv}.
|
Prints formatted results of \code{xgb.cv}.
|
||||||
}
|
}
|
||||||
\details{
|
\details{
|
||||||
When not verbose, it would only print the evaluation results,
|
When not verbose, it would only print the evaluation results,
|
||||||
including the best iteration (when available).
|
including the best iteration (when available).
|
||||||
}
|
}
|
||||||
\examples{
|
\examples{
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
|
|||||||
\alias{slice.xgb.DMatrix}
|
\alias{slice.xgb.DMatrix}
|
||||||
\alias{[.xgb.DMatrix}
|
\alias{[.xgb.DMatrix}
|
||||||
\title{Get a new DMatrix containing the specified rows of
|
\title{Get a new DMatrix containing the specified rows of
|
||||||
orginal xgb.DMatrix object}
|
original xgb.DMatrix object}
|
||||||
\usage{
|
\usage{
|
||||||
slice(object, ...)
|
slice(object, ...)
|
||||||
|
|
||||||
@@ -24,7 +24,7 @@ slice(object, ...)
|
|||||||
}
|
}
|
||||||
\description{
|
\description{
|
||||||
Get a new DMatrix containing the specified rows of
|
Get a new DMatrix containing the specified rows of
|
||||||
orginal xgb.DMatrix object
|
original xgb.DMatrix object
|
||||||
}
|
}
|
||||||
\examples{
|
\examples{
|
||||||
data(agaricus.train, package='xgboost')
|
data(agaricus.train, package='xgboost')
|
||||||
|
|||||||
@@ -28,7 +28,7 @@ E.g., when an \code{xgb.Booster} model is saved as an R object and then is loade
|
|||||||
its handle (pointer) to an internal xgboost model would be invalid. The majority of xgboost methods
|
its handle (pointer) to an internal xgboost model would be invalid. The majority of xgboost methods
|
||||||
should still work for such a model object since those methods would be using
|
should still work for such a model object since those methods would be using
|
||||||
\code{xgb.Booster.complete} internally. However, one might find it to be more efficient to call the
|
\code{xgb.Booster.complete} internally. However, one might find it to be more efficient to call the
|
||||||
\code{xgb.Booster.complete} function explicitely once after loading a model as an R-object.
|
\code{xgb.Booster.complete} function explicitly once after loading a model as an R-object.
|
||||||
That would prevent further repeated implicit reconstruction of an internal booster model.
|
That would prevent further repeated implicit reconstruction of an internal booster model.
|
||||||
}
|
}
|
||||||
\examples{
|
\examples{
|
||||||
@@ -39,6 +39,7 @@ bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_dep
|
|||||||
saveRDS(bst, "xgb.model.rds")
|
saveRDS(bst, "xgb.model.rds")
|
||||||
|
|
||||||
bst1 <- readRDS("xgb.model.rds")
|
bst1 <- readRDS("xgb.model.rds")
|
||||||
|
if (file.exists("xgb.model.rds")) file.remove("xgb.model.rds")
|
||||||
# the handle is invalid:
|
# the handle is invalid:
|
||||||
print(bst1$handle)
|
print(bst1$handle)
|
||||||
|
|
||||||
|
|||||||
@@ -7,7 +7,7 @@
|
|||||||
xgb.DMatrix(data, info = list(), missing = NA, silent = FALSE, ...)
|
xgb.DMatrix(data, info = list(), missing = NA, silent = FALSE, ...)
|
||||||
}
|
}
|
||||||
\arguments{
|
\arguments{
|
||||||
\item{data}{a \code{matrix} object (either numeric or integer), a \code{dgCMatrix} object, or a character
|
\item{data}{a \code{matrix} object (either numeric or integer), a \code{dgCMatrix} object, or a character
|
||||||
string representing a filename.}
|
string representing a filename.}
|
||||||
|
|
||||||
\item{info}{a named list of additional information to store in the \code{xgb.DMatrix} object.
|
\item{info}{a named list of additional information to store in the \code{xgb.DMatrix} object.
|
||||||
@@ -31,4 +31,5 @@ train <- agaricus.train
|
|||||||
dtrain <- xgb.DMatrix(train$data, label=train$label)
|
dtrain <- xgb.DMatrix(train$data, label=train$label)
|
||||||
xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
|
xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
|
||||||
dtrain <- xgb.DMatrix('xgb.DMatrix.data')
|
dtrain <- xgb.DMatrix('xgb.DMatrix.data')
|
||||||
|
if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -20,4 +20,5 @@ train <- agaricus.train
|
|||||||
dtrain <- xgb.DMatrix(train$data, label=train$label)
|
dtrain <- xgb.DMatrix(train$data, label=train$label)
|
||||||
xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
|
xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
|
||||||
dtrain <- xgb.DMatrix('xgb.DMatrix.data')
|
dtrain <- xgb.DMatrix('xgb.DMatrix.data')
|
||||||
|
if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -73,6 +73,7 @@ xgb.attributes(bst) <- list(a = 123, b = "abc")
|
|||||||
|
|
||||||
xgb.save(bst, 'xgb.model')
|
xgb.save(bst, 'xgb.model')
|
||||||
bst1 <- xgb.load('xgb.model')
|
bst1 <- xgb.load('xgb.model')
|
||||||
|
if (file.exists('xgb.model')) file.remove('xgb.model')
|
||||||
print(xgb.attr(bst1, "my_attribute"))
|
print(xgb.attr(bst1, "my_attribute"))
|
||||||
print(xgb.attributes(bst1))
|
print(xgb.attributes(bst1))
|
||||||
|
|
||||||
|
|||||||
@@ -87,6 +87,6 @@ accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) /
|
|||||||
|
|
||||||
# Here the accuracy was already good and is now perfect.
|
# Here the accuracy was already good and is now perfect.
|
||||||
cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now",
|
cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now",
|
||||||
accuracy.after, "!\\n"))
|
accuracy.after, "!\n"))
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,19 +4,35 @@
|
|||||||
\alias{xgb.cv}
|
\alias{xgb.cv}
|
||||||
\title{Cross Validation}
|
\title{Cross Validation}
|
||||||
\usage{
|
\usage{
|
||||||
xgb.cv(params = list(), data, nrounds, nfold, label = NULL,
|
xgb.cv(
|
||||||
missing = NA, prediction = FALSE, showsd = TRUE,
|
params = list(),
|
||||||
metrics = list(), obj = NULL, feval = NULL, stratified = TRUE,
|
data,
|
||||||
folds = NULL, verbose = TRUE, print_every_n = 1L,
|
nrounds,
|
||||||
early_stopping_rounds = NULL, maximize = NULL, callbacks = list(),
|
nfold,
|
||||||
...)
|
label = NULL,
|
||||||
|
missing = NA,
|
||||||
|
prediction = FALSE,
|
||||||
|
showsd = TRUE,
|
||||||
|
metrics = list(),
|
||||||
|
obj = NULL,
|
||||||
|
feval = NULL,
|
||||||
|
stratified = TRUE,
|
||||||
|
folds = NULL,
|
||||||
|
train_folds = NULL,
|
||||||
|
verbose = TRUE,
|
||||||
|
print_every_n = 1L,
|
||||||
|
early_stopping_rounds = NULL,
|
||||||
|
maximize = NULL,
|
||||||
|
callbacks = list(),
|
||||||
|
...
|
||||||
|
)
|
||||||
}
|
}
|
||||||
\arguments{
|
\arguments{
|
||||||
\item{params}{the list of parameters. Commonly used ones are:
|
\item{params}{the list of parameters. Commonly used ones are:
|
||||||
\itemize{
|
\itemize{
|
||||||
\item \code{objective} objective function, common ones are
|
\item \code{objective} objective function, common ones are
|
||||||
\itemize{
|
\itemize{
|
||||||
\item \code{reg:linear} linear regression
|
\item \code{reg:squarederror} Regression with squared loss
|
||||||
\item \code{binary:logistic} logistic regression for classification
|
\item \code{binary:logistic} logistic regression for classification
|
||||||
}
|
}
|
||||||
\item \code{eta} step size of each boosting step
|
\item \code{eta} step size of each boosting step
|
||||||
@@ -35,11 +51,11 @@ xgb.cv(params = list(), data, nrounds, nfold, label = NULL,
|
|||||||
|
|
||||||
\item{label}{vector of response values. Should be provided only when data is an R-matrix.}
|
\item{label}{vector of response values. Should be provided only when data is an R-matrix.}
|
||||||
|
|
||||||
\item{missing}{is only used when input is a dense matrix. By default is set to NA, which means
|
\item{missing}{is only used when input is a dense matrix. By default is set to NA, which means
|
||||||
that NA values should be considered as 'missing' by the algorithm.
|
that NA values should be considered as 'missing' by the algorithm.
|
||||||
Sometimes, 0 or other extreme value might be used to represent missing values.}
|
Sometimes, 0 or other extreme value might be used to represent missing values.}
|
||||||
|
|
||||||
\item{prediction}{A logical value indicating whether to return the test fold predictions
|
\item{prediction}{A logical value indicating whether to return the test fold predictions
|
||||||
from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callback.}
|
from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callback.}
|
||||||
|
|
||||||
\item{showsd}{\code{boolean}, whether to show standard deviation of cross validation}
|
\item{showsd}{\code{boolean}, whether to show standard deviation of cross validation}
|
||||||
@@ -56,28 +72,31 @@ from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callb
|
|||||||
\item \code{merror} Exact matching error, used to evaluate multi-class classification
|
\item \code{merror} Exact matching error, used to evaluate multi-class classification
|
||||||
}}
|
}}
|
||||||
|
|
||||||
\item{obj}{customized objective function. Returns gradient and second order
|
\item{obj}{customized objective function. Returns gradient and second order
|
||||||
gradient with given prediction and dtrain.}
|
gradient with given prediction and dtrain.}
|
||||||
|
|
||||||
\item{feval}{custimized evaluation function. Returns
|
\item{feval}{customized evaluation function. Returns
|
||||||
\code{list(metric='metric-name', value='metric-value')} with given
|
\code{list(metric='metric-name', value='metric-value')} with given
|
||||||
prediction and dtrain.}
|
prediction and dtrain.}
|
||||||
|
|
||||||
\item{stratified}{a \code{boolean} indicating whether sampling of folds should be stratified
|
\item{stratified}{a \code{boolean} indicating whether sampling of folds should be stratified
|
||||||
by the values of outcome labels.}
|
by the values of outcome labels.}
|
||||||
|
|
||||||
\item{folds}{\code{list} provides a possibility to use a list of pre-defined CV folds
|
\item{folds}{\code{list} provides a possibility to use a list of pre-defined CV folds
|
||||||
(each element must be a vector of test fold's indices). When folds are supplied,
|
(each element must be a vector of test fold's indices). When folds are supplied,
|
||||||
the \code{nfold} and \code{stratified} parameters are ignored.}
|
the \code{nfold} and \code{stratified} parameters are ignored.}
|
||||||
|
|
||||||
|
\item{train_folds}{\code{list} list specifying which indicies to use for training. If \code{NULL}
|
||||||
|
(the default) all indices not specified in \code{folds} will be used for training.}
|
||||||
|
|
||||||
\item{verbose}{\code{boolean}, print the statistics during the process}
|
\item{verbose}{\code{boolean}, print the statistics during the process}
|
||||||
|
|
||||||
\item{print_every_n}{Print each n-th iteration evaluation messages when \code{verbose>0}.
|
\item{print_every_n}{Print each n-th iteration evaluation messages when \code{verbose>0}.
|
||||||
Default is 1 which means all messages are printed. This parameter is passed to the
|
Default is 1 which means all messages are printed. This parameter is passed to the
|
||||||
\code{\link{cb.print.evaluation}} callback.}
|
\code{\link{cb.print.evaluation}} callback.}
|
||||||
|
|
||||||
\item{early_stopping_rounds}{If \code{NULL}, the early stopping function is not triggered.
|
\item{early_stopping_rounds}{If \code{NULL}, the early stopping function is not triggered.
|
||||||
If set to an integer \code{k}, training with a validation set will stop if the performance
|
If set to an integer \code{k}, training with a validation set will stop if the performance
|
||||||
doesn't improve for \code{k} rounds.
|
doesn't improve for \code{k} rounds.
|
||||||
Setting this parameter engages the \code{\link{cb.early.stop}} callback.}
|
Setting this parameter engages the \code{\link{cb.early.stop}} callback.}
|
||||||
|
|
||||||
@@ -87,8 +106,8 @@ When it is \code{TRUE}, it means the larger the evaluation score the better.
|
|||||||
This parameter is passed to the \code{\link{cb.early.stop}} callback.}
|
This parameter is passed to the \code{\link{cb.early.stop}} callback.}
|
||||||
|
|
||||||
\item{callbacks}{a list of callback functions to perform various task during boosting.
|
\item{callbacks}{a list of callback functions to perform various task during boosting.
|
||||||
See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
|
See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
|
||||||
parameters' values. User can provide either existing or their own callback methods in order
|
parameters' values. User can provide either existing or their own callback methods in order
|
||||||
to customize the training process.}
|
to customize the training process.}
|
||||||
|
|
||||||
\item{...}{other parameters to pass to \code{params}.}
|
\item{...}{other parameters to pass to \code{params}.}
|
||||||
@@ -97,26 +116,26 @@ to customize the training process.}
|
|||||||
An object of class \code{xgb.cv.synchronous} with the following elements:
|
An object of class \code{xgb.cv.synchronous} with the following elements:
|
||||||
\itemize{
|
\itemize{
|
||||||
\item \code{call} a function call.
|
\item \code{call} a function call.
|
||||||
\item \code{params} parameters that were passed to the xgboost library. Note that it does not
|
\item \code{params} parameters that were passed to the xgboost library. Note that it does not
|
||||||
capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
|
capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
|
||||||
\item \code{callbacks} callback functions that were either automatically assigned or
|
\item \code{callbacks} callback functions that were either automatically assigned or
|
||||||
explicitly passed.
|
explicitly passed.
|
||||||
\item \code{evaluation_log} evaluation history storead as a \code{data.table} with the
|
\item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
|
||||||
first column corresponding to iteration number and the rest corresponding to the
|
first column corresponding to iteration number and the rest corresponding to the
|
||||||
CV-based evaluation means and standard deviations for the training and test CV-sets.
|
CV-based evaluation means and standard deviations for the training and test CV-sets.
|
||||||
It is created by the \code{\link{cb.evaluation.log}} callback.
|
It is created by the \code{\link{cb.evaluation.log}} callback.
|
||||||
\item \code{niter} number of boosting iterations.
|
\item \code{niter} number of boosting iterations.
|
||||||
\item \code{nfeatures} number of features in training data.
|
\item \code{nfeatures} number of features in training data.
|
||||||
\item \code{folds} the list of CV folds' indices - either those passed through the \code{folds}
|
\item \code{folds} the list of CV folds' indices - either those passed through the \code{folds}
|
||||||
parameter or randomly generated.
|
parameter or randomly generated.
|
||||||
\item \code{best_iteration} iteration number with the best evaluation metric value
|
\item \code{best_iteration} iteration number with the best evaluation metric value
|
||||||
(only available with early stopping).
|
(only available with early stopping).
|
||||||
\item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
|
\item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
|
||||||
which could further be used in \code{predict} method
|
which could further be used in \code{predict} method
|
||||||
(only available with early stopping).
|
(only available with early stopping).
|
||||||
\item \code{pred} CV prediction values available when \code{prediction} is set.
|
\item \code{pred} CV prediction values available when \code{prediction} is set.
|
||||||
It is either vector or matrix (see \code{\link{cb.cv.predict}}).
|
It is either vector or matrix (see \code{\link{cb.cv.predict}}).
|
||||||
\item \code{models} a liost of the CV folds' models. It is only available with the explicit
|
\item \code{models} a liost of the CV folds' models. It is only available with the explicit
|
||||||
setting of the \code{cb.cv.predict(save_models = TRUE)} callback.
|
setting of the \code{cb.cv.predict(save_models = TRUE)} callback.
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -124,9 +143,9 @@ An object of class \code{xgb.cv.synchronous} with the following elements:
|
|||||||
The cross validation function of xgboost
|
The cross validation function of xgboost
|
||||||
}
|
}
|
||||||
\details{
|
\details{
|
||||||
The original sample is randomly partitioned into \code{nfold} equal size subsamples.
|
The original sample is randomly partitioned into \code{nfold} equal size subsamples.
|
||||||
|
|
||||||
Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model, and the remaining \code{nfold - 1} subsamples are used as training data.
|
Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model, and the remaining \code{nfold - 1} subsamples are used as training data.
|
||||||
|
|
||||||
The cross-validation process is then repeated \code{nrounds} times, with each of the \code{nfold} subsamples used exactly once as the validation data.
|
The cross-validation process is then repeated \code{nrounds} times, with each of the \code{nfold} subsamples used exactly once as the validation data.
|
||||||
|
|
||||||
|
|||||||
@@ -4,8 +4,14 @@
|
|||||||
\alias{xgb.dump}
|
\alias{xgb.dump}
|
||||||
\title{Dump an xgboost model in text format.}
|
\title{Dump an xgboost model in text format.}
|
||||||
\usage{
|
\usage{
|
||||||
xgb.dump(model, fname = NULL, fmap = "", with_stats = FALSE,
|
xgb.dump(
|
||||||
dump_format = c("text", "json"), ...)
|
model,
|
||||||
|
fname = NULL,
|
||||||
|
fmap = "",
|
||||||
|
with_stats = FALSE,
|
||||||
|
dump_format = c("text", "json"),
|
||||||
|
...
|
||||||
|
)
|
||||||
}
|
}
|
||||||
\arguments{
|
\arguments{
|
||||||
\item{model}{the model object.}
|
\item{model}{the model object.}
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ using the \code{cb.gblinear.history()} callback.}
|
|||||||
|
|
||||||
\item{class_index}{zero-based class index to extract the coefficients for only that
|
\item{class_index}{zero-based class index to extract the coefficients for only that
|
||||||
specific class in a multinomial multiclass model. When it is NULL, all the
|
specific class in a multinomial multiclass model. When it is NULL, all the
|
||||||
coeffients are returned. Has no effect in non-multiclass models.}
|
coefficients are returned. Has no effect in non-multiclass models.}
|
||||||
}
|
}
|
||||||
\value{
|
\value{
|
||||||
For an \code{xgb.train} result, a matrix (either dense or sparse) with the columns
|
For an \code{xgb.train} result, a matrix (either dense or sparse) with the columns
|
||||||
|
|||||||
@@ -4,8 +4,14 @@
|
|||||||
\alias{xgb.importance}
|
\alias{xgb.importance}
|
||||||
\title{Importance of features in a model.}
|
\title{Importance of features in a model.}
|
||||||
\usage{
|
\usage{
|
||||||
xgb.importance(feature_names = NULL, model = NULL, trees = NULL,
|
xgb.importance(
|
||||||
data = NULL, label = NULL, target = NULL)
|
feature_names = NULL,
|
||||||
|
model = NULL,
|
||||||
|
trees = NULL,
|
||||||
|
data = NULL,
|
||||||
|
label = NULL,
|
||||||
|
target = NULL
|
||||||
|
)
|
||||||
}
|
}
|
||||||
\arguments{
|
\arguments{
|
||||||
\item{feature_names}{character vector of feature names. If the model already
|
\item{feature_names}{character vector of feature names. If the model already
|
||||||
|
|||||||
@@ -33,6 +33,7 @@ bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
|
|||||||
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
|
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
|
||||||
xgb.save(bst, 'xgb.model')
|
xgb.save(bst, 'xgb.model')
|
||||||
bst <- xgb.load('xgb.model')
|
bst <- xgb.load('xgb.model')
|
||||||
|
if (file.exists('xgb.model')) file.remove('xgb.model')
|
||||||
pred <- predict(bst, test$data)
|
pred <- predict(bst, test$data)
|
||||||
}
|
}
|
||||||
\seealso{
|
\seealso{
|
||||||
|
|||||||
@@ -4,8 +4,14 @@
|
|||||||
\alias{xgb.model.dt.tree}
|
\alias{xgb.model.dt.tree}
|
||||||
\title{Parse a boosted tree model text dump}
|
\title{Parse a boosted tree model text dump}
|
||||||
\usage{
|
\usage{
|
||||||
xgb.model.dt.tree(feature_names = NULL, model = NULL, text = NULL,
|
xgb.model.dt.tree(
|
||||||
trees = NULL, use_int_id = FALSE, ...)
|
feature_names = NULL,
|
||||||
|
model = NULL,
|
||||||
|
text = NULL,
|
||||||
|
trees = NULL,
|
||||||
|
use_int_id = FALSE,
|
||||||
|
...
|
||||||
|
)
|
||||||
}
|
}
|
||||||
\arguments{
|
\arguments{
|
||||||
\item{feature_names}{character vector of feature names. If the model already
|
\item{feature_names}{character vector of feature names. If the model already
|
||||||
|
|||||||
@@ -5,11 +5,17 @@
|
|||||||
\alias{xgb.plot.deepness}
|
\alias{xgb.plot.deepness}
|
||||||
\title{Plot model trees deepness}
|
\title{Plot model trees deepness}
|
||||||
\usage{
|
\usage{
|
||||||
xgb.ggplot.deepness(model = NULL, which = c("2x1", "max.depth",
|
xgb.ggplot.deepness(
|
||||||
"med.depth", "med.weight"))
|
model = NULL,
|
||||||
|
which = c("2x1", "max.depth", "med.depth", "med.weight")
|
||||||
|
)
|
||||||
|
|
||||||
xgb.plot.deepness(model = NULL, which = c("2x1", "max.depth",
|
xgb.plot.deepness(
|
||||||
"med.depth", "med.weight"), plot = TRUE, ...)
|
model = NULL,
|
||||||
|
which = c("2x1", "max.depth", "med.depth", "med.weight"),
|
||||||
|
plot = TRUE,
|
||||||
|
...
|
||||||
|
)
|
||||||
}
|
}
|
||||||
\arguments{
|
\arguments{
|
||||||
\item{model}{either an \code{xgb.Booster} model generated by the \code{xgb.train} function
|
\item{model}{either an \code{xgb.Booster} model generated by the \code{xgb.train} function
|
||||||
@@ -50,7 +56,7 @@ per tree with respect to tree number are created. And \code{which="med.weight"}
|
|||||||
a tree's median absolute leaf weight changes through the iterations.
|
a tree's median absolute leaf weight changes through the iterations.
|
||||||
|
|
||||||
This function was inspired by the blog post
|
This function was inspired by the blog post
|
||||||
\url{http://aysent.github.io/2015/11/08/random-forest-leaf-visualization.html}.
|
\url{https://github.com/aysent/random-forest-leaf-visualization}.
|
||||||
}
|
}
|
||||||
\examples{
|
\examples{
|
||||||
|
|
||||||
|
|||||||
@@ -5,25 +5,38 @@
|
|||||||
\alias{xgb.plot.importance}
|
\alias{xgb.plot.importance}
|
||||||
\title{Plot feature importance as a bar graph}
|
\title{Plot feature importance as a bar graph}
|
||||||
\usage{
|
\usage{
|
||||||
xgb.ggplot.importance(importance_matrix = NULL, top_n = NULL,
|
xgb.ggplot.importance(
|
||||||
measure = NULL, rel_to_first = FALSE, n_clusters = c(1:10), ...)
|
importance_matrix = NULL,
|
||||||
|
top_n = NULL,
|
||||||
|
measure = NULL,
|
||||||
|
rel_to_first = FALSE,
|
||||||
|
n_clusters = c(1:10),
|
||||||
|
...
|
||||||
|
)
|
||||||
|
|
||||||
xgb.plot.importance(importance_matrix = NULL, top_n = NULL,
|
xgb.plot.importance(
|
||||||
measure = NULL, rel_to_first = FALSE, left_margin = 10,
|
importance_matrix = NULL,
|
||||||
cex = NULL, plot = TRUE, ...)
|
top_n = NULL,
|
||||||
|
measure = NULL,
|
||||||
|
rel_to_first = FALSE,
|
||||||
|
left_margin = 10,
|
||||||
|
cex = NULL,
|
||||||
|
plot = TRUE,
|
||||||
|
...
|
||||||
|
)
|
||||||
}
|
}
|
||||||
\arguments{
|
\arguments{
|
||||||
\item{importance_matrix}{a \code{data.table} returned by \code{\link{xgb.importance}}.}
|
\item{importance_matrix}{a \code{data.table} returned by \code{\link{xgb.importance}}.}
|
||||||
|
|
||||||
\item{top_n}{maximal number of top features to include into the plot.}
|
\item{top_n}{maximal number of top features to include into the plot.}
|
||||||
|
|
||||||
\item{measure}{the name of importance measure to plot.
|
\item{measure}{the name of importance measure to plot.
|
||||||
When \code{NULL}, 'Gain' would be used for trees and 'Weight' would be used for gblinear.}
|
When \code{NULL}, 'Gain' would be used for trees and 'Weight' would be used for gblinear.}
|
||||||
|
|
||||||
\item{rel_to_first}{whether importance values should be represented as relative to the highest ranked feature.
|
\item{rel_to_first}{whether importance values should be represented as relative to the highest ranked feature.
|
||||||
See Details.}
|
See Details.}
|
||||||
|
|
||||||
\item{n_clusters}{(ggplot only) a \code{numeric} vector containing the min and the max range
|
\item{n_clusters}{(ggplot only) a \code{numeric} vector containing the min and the max range
|
||||||
of the possible number of clusters of bars.}
|
of the possible number of clusters of bars.}
|
||||||
|
|
||||||
\item{...}{other parameters passed to \code{barplot} (except horiz, border, cex.names, names.arg, and las).}
|
\item{...}{other parameters passed to \code{barplot} (except horiz, border, cex.names, names.arg, and las).}
|
||||||
@@ -33,7 +46,7 @@ When it is NULL, the existing \code{par('mar')} is used.}
|
|||||||
|
|
||||||
\item{cex}{(base R barplot) passed as \code{cex.names} parameter to \code{barplot}.}
|
\item{cex}{(base R barplot) passed as \code{cex.names} parameter to \code{barplot}.}
|
||||||
|
|
||||||
\item{plot}{(base R barplot) whether a barplot should be produced.
|
\item{plot}{(base R barplot) whether a barplot should be produced.
|
||||||
If FALSE, only a data.table is returned.}
|
If FALSE, only a data.table is returned.}
|
||||||
}
|
}
|
||||||
\value{
|
\value{
|
||||||
@@ -53,14 +66,14 @@ Features are shown ranked in a decreasing importance order.
|
|||||||
It works for importances from both \code{gblinear} and \code{gbtree} models.
|
It works for importances from both \code{gblinear} and \code{gbtree} models.
|
||||||
|
|
||||||
When \code{rel_to_first = FALSE}, the values would be plotted as they were in \code{importance_matrix}.
|
When \code{rel_to_first = FALSE}, the values would be plotted as they were in \code{importance_matrix}.
|
||||||
For gbtree model, that would mean being normalized to the total of 1
|
For gbtree model, that would mean being normalized to the total of 1
|
||||||
("what is feature's importance contribution relative to the whole model?").
|
("what is feature's importance contribution relative to the whole model?").
|
||||||
For linear models, \code{rel_to_first = FALSE} would show actual values of the coefficients.
|
For linear models, \code{rel_to_first = FALSE} would show actual values of the coefficients.
|
||||||
Setting \code{rel_to_first = TRUE} allows to see the picture from the perspective of
|
Setting \code{rel_to_first = TRUE} allows to see the picture from the perspective of
|
||||||
"what is feature's importance contribution relative to the most important feature?"
|
"what is feature's importance contribution relative to the most important feature?"
|
||||||
|
|
||||||
The ggplot-backend method also performs 1-D custering of the importance values,
|
The ggplot-backend method also performs 1-D clustering of the importance values,
|
||||||
with bar colors coresponding to different clusters that have somewhat similar importance values.
|
with bar colors corresponding to different clusters that have somewhat similar importance values.
|
||||||
}
|
}
|
||||||
\examples{
|
\examples{
|
||||||
data(agaricus.train)
|
data(agaricus.train)
|
||||||
|
|||||||
@@ -4,8 +4,15 @@
|
|||||||
\alias{xgb.plot.multi.trees}
|
\alias{xgb.plot.multi.trees}
|
||||||
\title{Project all trees on one tree and plot it}
|
\title{Project all trees on one tree and plot it}
|
||||||
\usage{
|
\usage{
|
||||||
xgb.plot.multi.trees(model, feature_names = NULL, features_keep = 5,
|
xgb.plot.multi.trees(
|
||||||
plot_width = NULL, plot_height = NULL, render = TRUE, ...)
|
model,
|
||||||
|
feature_names = NULL,
|
||||||
|
features_keep = 5,
|
||||||
|
plot_width = NULL,
|
||||||
|
plot_height = NULL,
|
||||||
|
render = TRUE,
|
||||||
|
...
|
||||||
|
)
|
||||||
}
|
}
|
||||||
\arguments{
|
\arguments{
|
||||||
\item{model}{produced by the \code{xgb.train} function.}
|
\item{model}{produced by the \code{xgb.train} function.}
|
||||||
|
|||||||
@@ -4,18 +4,38 @@
|
|||||||
\alias{xgb.plot.shap}
|
\alias{xgb.plot.shap}
|
||||||
\title{SHAP contribution dependency plots}
|
\title{SHAP contribution dependency plots}
|
||||||
\usage{
|
\usage{
|
||||||
xgb.plot.shap(data, shap_contrib = NULL, features = NULL, top_n = 1,
|
xgb.plot.shap(
|
||||||
model = NULL, trees = NULL, target_class = NULL,
|
data,
|
||||||
approxcontrib = FALSE, subsample = NULL, n_col = 1, col = rgb(0,
|
shap_contrib = NULL,
|
||||||
0, 1, 0.2), pch = ".", discrete_n_uniq = 5, discrete_jitter = 0.01,
|
features = NULL,
|
||||||
ylab = "SHAP", plot_NA = TRUE, col_NA = rgb(0.7, 0, 1, 0.6),
|
top_n = 1,
|
||||||
pch_NA = ".", pos_NA = 1.07, plot_loess = TRUE, col_loess = 2,
|
model = NULL,
|
||||||
span_loess = 0.5, which = c("1d", "2d"), plot = TRUE, ...)
|
trees = NULL,
|
||||||
|
target_class = NULL,
|
||||||
|
approxcontrib = FALSE,
|
||||||
|
subsample = NULL,
|
||||||
|
n_col = 1,
|
||||||
|
col = rgb(0, 0, 1, 0.2),
|
||||||
|
pch = ".",
|
||||||
|
discrete_n_uniq = 5,
|
||||||
|
discrete_jitter = 0.01,
|
||||||
|
ylab = "SHAP",
|
||||||
|
plot_NA = TRUE,
|
||||||
|
col_NA = rgb(0.7, 0, 1, 0.6),
|
||||||
|
pch_NA = ".",
|
||||||
|
pos_NA = 1.07,
|
||||||
|
plot_loess = TRUE,
|
||||||
|
col_loess = 2,
|
||||||
|
span_loess = 0.5,
|
||||||
|
which = c("1d", "2d"),
|
||||||
|
plot = TRUE,
|
||||||
|
...
|
||||||
|
)
|
||||||
}
|
}
|
||||||
\arguments{
|
\arguments{
|
||||||
\item{data}{data as a \code{matrix} or \code{dgCMatrix}.}
|
\item{data}{data as a \code{matrix} or \code{dgCMatrix}.}
|
||||||
|
|
||||||
\item{shap_contrib}{a matrix of SHAP contributions that was computed earlier for the above
|
\item{shap_contrib}{a matrix of SHAP contributions that was computed earlier for the above
|
||||||
\code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.}
|
\code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.}
|
||||||
|
|
||||||
\item{features}{a vector of either column indices or of feature names to plot. When it is NULL,
|
\item{features}{a vector of either column indices or of feature names to plot. When it is NULL,
|
||||||
@@ -63,7 +83,7 @@ more than 5 distinct values.}
|
|||||||
|
|
||||||
\item{col_loess}{a color to use for the loess curves.}
|
\item{col_loess}{a color to use for the loess curves.}
|
||||||
|
|
||||||
\item{span_loess}{the \code{span} paramerer in \code{\link[stats]{loess}}'s call.}
|
\item{span_loess}{the \code{span} parameter in \code{\link[stats]{loess}}'s call.}
|
||||||
|
|
||||||
\item{which}{whether to do univariate or bivariate plotting. NOTE: only 1D is implemented so far.}
|
\item{which}{whether to do univariate or bivariate plotting. NOTE: only 1D is implemented so far.}
|
||||||
|
|
||||||
@@ -104,7 +124,7 @@ a meaningful thing to do.
|
|||||||
data(agaricus.train, package='xgboost')
|
data(agaricus.train, package='xgboost')
|
||||||
data(agaricus.test, package='xgboost')
|
data(agaricus.test, package='xgboost')
|
||||||
|
|
||||||
bst <- xgboost(agaricus.train$data, agaricus.train$label, nrounds = 50,
|
bst <- xgboost(agaricus.train$data, agaricus.train$label, nrounds = 50,
|
||||||
eta = 0.1, max_depth = 3, subsample = .5,
|
eta = 0.1, max_depth = 3, subsample = .5,
|
||||||
method = "hist", objective = "binary:logistic", nthread = 2, verbose = 0)
|
method = "hist", objective = "binary:logistic", nthread = 2, verbose = 0)
|
||||||
|
|
||||||
|
|||||||
@@ -4,9 +4,16 @@
|
|||||||
\alias{xgb.plot.tree}
|
\alias{xgb.plot.tree}
|
||||||
\title{Plot a boosted tree model}
|
\title{Plot a boosted tree model}
|
||||||
\usage{
|
\usage{
|
||||||
xgb.plot.tree(feature_names = NULL, model = NULL, trees = NULL,
|
xgb.plot.tree(
|
||||||
plot_width = NULL, plot_height = NULL, render = TRUE,
|
feature_names = NULL,
|
||||||
show_node_id = FALSE, ...)
|
model = NULL,
|
||||||
|
trees = NULL,
|
||||||
|
plot_width = NULL,
|
||||||
|
plot_height = NULL,
|
||||||
|
render = TRUE,
|
||||||
|
show_node_id = FALSE,
|
||||||
|
...
|
||||||
|
)
|
||||||
}
|
}
|
||||||
\arguments{
|
\arguments{
|
||||||
\item{feature_names}{names of each feature as a \code{character} vector.}
|
\item{feature_names}{names of each feature as a \code{character} vector.}
|
||||||
|
|||||||
@@ -33,6 +33,7 @@ bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
|
|||||||
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
|
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
|
||||||
xgb.save(bst, 'xgb.model')
|
xgb.save(bst, 'xgb.model')
|
||||||
bst <- xgb.load('xgb.model')
|
bst <- xgb.load('xgb.model')
|
||||||
|
if (file.exists('xgb.model')) file.remove('xgb.model')
|
||||||
pred <- predict(bst, test$data)
|
pred <- predict(bst, test$data)
|
||||||
}
|
}
|
||||||
\seealso{
|
\seealso{
|
||||||
|
|||||||
@@ -5,20 +5,44 @@
|
|||||||
\alias{xgboost}
|
\alias{xgboost}
|
||||||
\title{eXtreme Gradient Boosting Training}
|
\title{eXtreme Gradient Boosting Training}
|
||||||
\usage{
|
\usage{
|
||||||
xgb.train(params = list(), data, nrounds, watchlist = list(),
|
xgb.train(
|
||||||
obj = NULL, feval = NULL, verbose = 1, print_every_n = 1L,
|
params = list(),
|
||||||
early_stopping_rounds = NULL, maximize = NULL, save_period = NULL,
|
data,
|
||||||
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(),
|
nrounds,
|
||||||
...)
|
watchlist = list(),
|
||||||
|
obj = NULL,
|
||||||
|
feval = NULL,
|
||||||
|
verbose = 1,
|
||||||
|
print_every_n = 1L,
|
||||||
|
early_stopping_rounds = NULL,
|
||||||
|
maximize = NULL,
|
||||||
|
save_period = NULL,
|
||||||
|
save_name = "xgboost.model",
|
||||||
|
xgb_model = NULL,
|
||||||
|
callbacks = list(),
|
||||||
|
...
|
||||||
|
)
|
||||||
|
|
||||||
xgboost(data = NULL, label = NULL, missing = NA, weight = NULL,
|
xgboost(
|
||||||
params = list(), nrounds, verbose = 1, print_every_n = 1L,
|
data = NULL,
|
||||||
early_stopping_rounds = NULL, maximize = NULL, save_period = NULL,
|
label = NULL,
|
||||||
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(),
|
missing = NA,
|
||||||
...)
|
weight = NULL,
|
||||||
|
params = list(),
|
||||||
|
nrounds,
|
||||||
|
verbose = 1,
|
||||||
|
print_every_n = 1L,
|
||||||
|
early_stopping_rounds = NULL,
|
||||||
|
maximize = NULL,
|
||||||
|
save_period = NULL,
|
||||||
|
save_name = "xgboost.model",
|
||||||
|
xgb_model = NULL,
|
||||||
|
callbacks = list(),
|
||||||
|
...
|
||||||
|
)
|
||||||
}
|
}
|
||||||
\arguments{
|
\arguments{
|
||||||
\item{params}{the list of parameters.
|
\item{params}{the list of parameters.
|
||||||
The complete list of parameters is available at \url{http://xgboost.readthedocs.io/en/latest/parameter.html}.
|
The complete list of parameters is available at \url{http://xgboost.readthedocs.io/en/latest/parameter.html}.
|
||||||
Below is a shorter summary:
|
Below is a shorter summary:
|
||||||
|
|
||||||
@@ -27,36 +51,37 @@ xgboost(data = NULL, label = NULL, missing = NA, weight = NULL,
|
|||||||
\itemize{
|
\itemize{
|
||||||
\item \code{booster} which booster to use, can be \code{gbtree} or \code{gblinear}. Default: \code{gbtree}.
|
\item \code{booster} which booster to use, can be \code{gbtree} or \code{gblinear}. Default: \code{gbtree}.
|
||||||
}
|
}
|
||||||
|
|
||||||
2. Booster Parameters
|
2. Booster Parameters
|
||||||
|
|
||||||
2.1. Parameter for Tree Booster
|
2.1. Parameter for Tree Booster
|
||||||
|
|
||||||
\itemize{
|
\itemize{
|
||||||
\item \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model more robust to overfitting but slower to compute. Default: 0.3
|
\item \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model more robust to overfitting but slower to compute. Default: 0.3
|
||||||
\item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. the larger, the more conservative the algorithm will be.
|
\item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. the larger, the more conservative the algorithm will be.
|
||||||
\item \code{max_depth} maximum depth of a tree. Default: 6
|
\item \code{max_depth} maximum depth of a tree. Default: 6
|
||||||
\item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
|
\item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
|
||||||
\item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collects half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
|
\item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collects half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
|
||||||
\item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
|
\item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
|
||||||
\item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through Xgboost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
|
\item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through Xgboost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
|
||||||
\item \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length equal to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
|
\item \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length equal to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
|
||||||
|
\item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave argument unspecified for no interaction constraints.
|
||||||
}
|
}
|
||||||
|
|
||||||
2.2. Parameter for Linear Booster
|
2.2. Parameter for Linear Booster
|
||||||
|
|
||||||
\itemize{
|
\itemize{
|
||||||
\item \code{lambda} L2 regularization term on weights. Default: 0
|
\item \code{lambda} L2 regularization term on weights. Default: 0
|
||||||
\item \code{lambda_bias} L2 regularization term on bias. Default: 0
|
\item \code{lambda_bias} L2 regularization term on bias. Default: 0
|
||||||
\item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
|
\item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
|
||||||
}
|
}
|
||||||
|
|
||||||
3. Task Parameters
|
3. Task Parameters
|
||||||
|
|
||||||
\itemize{
|
\itemize{
|
||||||
\item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
|
\item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
|
||||||
\itemize{
|
\itemize{
|
||||||
\item \code{reg:linear} linear regression (Default).
|
\item \code{reg:squarederror} Regression with squared loss (Default).
|
||||||
\item \code{reg:logistic} logistic regression.
|
\item \code{reg:logistic} logistic regression.
|
||||||
\item \code{binary:logistic} logistic regression for binary classification. Output probability.
|
\item \code{binary:logistic} logistic regression for binary classification. Output probability.
|
||||||
\item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
|
\item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
|
||||||
@@ -76,31 +101,31 @@ xgboost(data = NULL, label = NULL, missing = NA, weight = NULL,
|
|||||||
|
|
||||||
\item{watchlist}{named list of xgb.DMatrix datasets to use for evaluating model performance.
|
\item{watchlist}{named list of xgb.DMatrix datasets to use for evaluating model performance.
|
||||||
Metrics specified in either \code{eval_metric} or \code{feval} will be computed for each
|
Metrics specified in either \code{eval_metric} or \code{feval} will be computed for each
|
||||||
of these datasets during each boosting iteration, and stored in the end as a field named
|
of these datasets during each boosting iteration, and stored in the end as a field named
|
||||||
\code{evaluation_log} in the resulting object. When either \code{verbose>=1} or
|
\code{evaluation_log} in the resulting object. When either \code{verbose>=1} or
|
||||||
\code{\link{cb.print.evaluation}} callback is engaged, the performance results are continuously
|
\code{\link{cb.print.evaluation}} callback is engaged, the performance results are continuously
|
||||||
printed out during the training.
|
printed out during the training.
|
||||||
E.g., specifying \code{watchlist=list(validation1=mat1, validation2=mat2)} allows to track
|
E.g., specifying \code{watchlist=list(validation1=mat1, validation2=mat2)} allows to track
|
||||||
the performance of each round's model on mat1 and mat2.}
|
the performance of each round's model on mat1 and mat2.}
|
||||||
|
|
||||||
\item{obj}{customized objective function. Returns gradient and second order
|
\item{obj}{customized objective function. Returns gradient and second order
|
||||||
gradient with given prediction and dtrain.}
|
gradient with given prediction and dtrain.}
|
||||||
|
|
||||||
\item{feval}{custimized evaluation function. Returns
|
\item{feval}{customized evaluation function. Returns
|
||||||
\code{list(metric='metric-name', value='metric-value')} with given
|
\code{list(metric='metric-name', value='metric-value')} with given
|
||||||
prediction and dtrain.}
|
prediction and dtrain.}
|
||||||
|
|
||||||
\item{verbose}{If 0, xgboost will stay silent. If 1, it will print information about performance.
|
\item{verbose}{If 0, xgboost will stay silent. If 1, it will print information about performance.
|
||||||
If 2, some additional information will be printed out.
|
If 2, some additional information will be printed out.
|
||||||
Note that setting \code{verbose > 0} automatically engages the
|
Note that setting \code{verbose > 0} automatically engages the
|
||||||
\code{cb.print.evaluation(period=1)} callback function.}
|
\code{cb.print.evaluation(period=1)} callback function.}
|
||||||
|
|
||||||
\item{print_every_n}{Print each n-th iteration evaluation messages when \code{verbose>0}.
|
\item{print_every_n}{Print each n-th iteration evaluation messages when \code{verbose>0}.
|
||||||
Default is 1 which means all messages are printed. This parameter is passed to the
|
Default is 1 which means all messages are printed. This parameter is passed to the
|
||||||
\code{\link{cb.print.evaluation}} callback.}
|
\code{\link{cb.print.evaluation}} callback.}
|
||||||
|
|
||||||
\item{early_stopping_rounds}{If \code{NULL}, the early stopping function is not triggered.
|
\item{early_stopping_rounds}{If \code{NULL}, the early stopping function is not triggered.
|
||||||
If set to an integer \code{k}, training with a validation set will stop if the performance
|
If set to an integer \code{k}, training with a validation set will stop if the performance
|
||||||
doesn't improve for \code{k} rounds.
|
doesn't improve for \code{k} rounds.
|
||||||
Setting this parameter engages the \code{\link{cb.early.stop}} callback.}
|
Setting this parameter engages the \code{\link{cb.early.stop}} callback.}
|
||||||
|
|
||||||
@@ -115,17 +140,17 @@ This parameter is passed to the \code{\link{cb.early.stop}} callback.}
|
|||||||
\item{save_name}{the name or path for periodically saved model file.}
|
\item{save_name}{the name or path for periodically saved model file.}
|
||||||
|
|
||||||
\item{xgb_model}{a previously built model to continue the training from.
|
\item{xgb_model}{a previously built model to continue the training from.
|
||||||
Could be either an object of class \code{xgb.Booster}, or its raw data, or the name of a
|
Could be either an object of class \code{xgb.Booster}, or its raw data, or the name of a
|
||||||
file with a previously saved model.}
|
file with a previously saved model.}
|
||||||
|
|
||||||
\item{callbacks}{a list of callback functions to perform various task during boosting.
|
\item{callbacks}{a list of callback functions to perform various task during boosting.
|
||||||
See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
|
See \code{\link{callbacks}}. Some of the callbacks are automatically created depending on the
|
||||||
parameters' values. User can provide either existing or their own callback methods in order
|
parameters' values. User can provide either existing or their own callback methods in order
|
||||||
to customize the training process.}
|
to customize the training process.}
|
||||||
|
|
||||||
\item{...}{other parameters to pass to \code{params}.}
|
\item{...}{other parameters to pass to \code{params}.}
|
||||||
|
|
||||||
\item{label}{vector of response values. Should not be provided when data is
|
\item{label}{vector of response values. Should not be provided when data is
|
||||||
a local data file name or an \code{xgb.DMatrix}.}
|
a local data file name or an \code{xgb.DMatrix}.}
|
||||||
|
|
||||||
\item{missing}{by default is set to NA, which means that NA values should be considered as 'missing'
|
\item{missing}{by default is set to NA, which means that NA values should be considered as 'missing'
|
||||||
@@ -140,23 +165,23 @@ An object of class \code{xgb.Booster} with the following elements:
|
|||||||
\item \code{handle} a handle (pointer) to the xgboost model in memory.
|
\item \code{handle} a handle (pointer) to the xgboost model in memory.
|
||||||
\item \code{raw} a cached memory dump of the xgboost model saved as R's \code{raw} type.
|
\item \code{raw} a cached memory dump of the xgboost model saved as R's \code{raw} type.
|
||||||
\item \code{niter} number of boosting iterations.
|
\item \code{niter} number of boosting iterations.
|
||||||
\item \code{evaluation_log} evaluation history storead as a \code{data.table} with the
|
\item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
|
||||||
first column corresponding to iteration number and the rest corresponding to evaluation
|
first column corresponding to iteration number and the rest corresponding to evaluation
|
||||||
metrics' values. It is created by the \code{\link{cb.evaluation.log}} callback.
|
metrics' values. It is created by the \code{\link{cb.evaluation.log}} callback.
|
||||||
\item \code{call} a function call.
|
\item \code{call} a function call.
|
||||||
\item \code{params} parameters that were passed to the xgboost library. Note that it does not
|
\item \code{params} parameters that were passed to the xgboost library. Note that it does not
|
||||||
capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
|
capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
|
||||||
\item \code{callbacks} callback functions that were either automatically assigned or
|
\item \code{callbacks} callback functions that were either automatically assigned or
|
||||||
explicitely passed.
|
explicitly passed.
|
||||||
\item \code{best_iteration} iteration number with the best evaluation metric value
|
\item \code{best_iteration} iteration number with the best evaluation metric value
|
||||||
(only available with early stopping).
|
(only available with early stopping).
|
||||||
\item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
|
\item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
|
||||||
which could further be used in \code{predict} method
|
which could further be used in \code{predict} method
|
||||||
(only available with early stopping).
|
(only available with early stopping).
|
||||||
\item \code{best_score} the best evaluation metric value during early stopping.
|
\item \code{best_score} the best evaluation metric value during early stopping.
|
||||||
(only available with early stopping).
|
(only available with early stopping).
|
||||||
\item \code{feature_names} names of the training dataset features
|
\item \code{feature_names} names of the training dataset features
|
||||||
(only when comun names were defined in training data).
|
(only when column names were defined in training data).
|
||||||
\item \code{nfeatures} number of features in training data.
|
\item \code{nfeatures} number of features in training data.
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -165,20 +190,20 @@ An object of class \code{xgb.Booster} with the following elements:
|
|||||||
The \code{xgboost} function is a simpler wrapper for \code{xgb.train}.
|
The \code{xgboost} function is a simpler wrapper for \code{xgb.train}.
|
||||||
}
|
}
|
||||||
\details{
|
\details{
|
||||||
These are the training functions for \code{xgboost}.
|
These are the training functions for \code{xgboost}.
|
||||||
|
|
||||||
The \code{xgb.train} interface supports advanced features such as \code{watchlist},
|
The \code{xgb.train} interface supports advanced features such as \code{watchlist},
|
||||||
customized objective and evaluation metric functions, therefore it is more flexible
|
customized objective and evaluation metric functions, therefore it is more flexible
|
||||||
than the \code{xgboost} interface.
|
than the \code{xgboost} interface.
|
||||||
|
|
||||||
Parallelization is automatically enabled if \code{OpenMP} is present.
|
Parallelization is automatically enabled if \code{OpenMP} is present.
|
||||||
Number of threads can also be manually specified via \code{nthread} parameter.
|
Number of threads can also be manually specified via \code{nthread} parameter.
|
||||||
|
|
||||||
The evaluation metric is chosen automatically by Xgboost (according to the objective)
|
The evaluation metric is chosen automatically by Xgboost (according to the objective)
|
||||||
when the \code{eval_metric} parameter is not provided.
|
when the \code{eval_metric} parameter is not provided.
|
||||||
User may set one or several \code{eval_metric} parameters.
|
User may set one or several \code{eval_metric} parameters.
|
||||||
Note that when using a customized metric, only this single metric can be used.
|
Note that when using a customized metric, only this single metric can be used.
|
||||||
The folloiwing is the list of built-in metrics for which Xgboost provides optimized implementation:
|
The following is the list of built-in metrics for which Xgboost provides optimized implementation:
|
||||||
\itemize{
|
\itemize{
|
||||||
\item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
|
\item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
|
||||||
\item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
|
\item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
|
||||||
@@ -210,7 +235,7 @@ dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
|
|||||||
watchlist <- list(train = dtrain, eval = dtest)
|
watchlist <- list(train = dtrain, eval = dtest)
|
||||||
|
|
||||||
## A simple xgb.train example:
|
## A simple xgb.train example:
|
||||||
param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2,
|
param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
|
||||||
objective = "binary:logistic", eval_metric = "auc")
|
objective = "binary:logistic", eval_metric = "auc")
|
||||||
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)
|
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)
|
||||||
|
|
||||||
@@ -231,12 +256,12 @@ evalerror <- function(preds, dtrain) {
|
|||||||
|
|
||||||
# These functions could be used by passing them either:
|
# These functions could be used by passing them either:
|
||||||
# as 'objective' and 'eval_metric' parameters in the params list:
|
# as 'objective' and 'eval_metric' parameters in the params list:
|
||||||
param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2,
|
param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
|
||||||
objective = logregobj, eval_metric = evalerror)
|
objective = logregobj, eval_metric = evalerror)
|
||||||
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)
|
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist)
|
||||||
|
|
||||||
# or through the ... arguments:
|
# or through the ... arguments:
|
||||||
param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2)
|
param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2)
|
||||||
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
|
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
|
||||||
objective = logregobj, eval_metric = evalerror)
|
objective = logregobj, eval_metric = evalerror)
|
||||||
|
|
||||||
@@ -246,7 +271,7 @@ bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
|
|||||||
|
|
||||||
|
|
||||||
## An xgb.train example of using variable learning rates at each iteration:
|
## An xgb.train example of using variable learning rates at each iteration:
|
||||||
param <- list(max_depth = 2, eta = 1, silent = 1, nthread = 2,
|
param <- list(max_depth = 2, eta = 1, verbose = 0, nthread = 2,
|
||||||
objective = "binary:logistic", eval_metric = "auc")
|
objective = "binary:logistic", eval_metric = "auc")
|
||||||
my_etas <- list(eta = c(0.5, 0.1))
|
my_etas <- list(eta = c(0.5, 0.1))
|
||||||
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
|
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist,
|
||||||
@@ -257,8 +282,8 @@ bst <- xgb.train(param, dtrain, nrounds = 25, watchlist,
|
|||||||
early_stopping_rounds = 3)
|
early_stopping_rounds = 3)
|
||||||
|
|
||||||
## An 'xgboost' interface example:
|
## An 'xgboost' interface example:
|
||||||
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
|
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
|
||||||
max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
|
max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
|
||||||
objective = "binary:logistic")
|
objective = "binary:logistic")
|
||||||
pred <- predict(bst, agaricus.test$data)
|
pred <- predict(bst, agaricus.test$data)
|
||||||
|
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ The deprecated parameters would be removed in the next release.
|
|||||||
\details{
|
\details{
|
||||||
To see all the current deprecated and new parameters, check the \code{xgboost:::depr_par_lut} table.
|
To see all the current deprecated and new parameters, check the \code{xgboost:::depr_par_lut} table.
|
||||||
|
|
||||||
A deprecation warning is shown when any of the deprecated parameters is used in a call.
|
A deprecation warning is shown when any of the deprecated parameters is used in a call.
|
||||||
An additional warning is shown when there was a partial match to a deprecated parameter
|
An additional warning is shown when there was a partial match to a deprecated parameter
|
||||||
(as R is able to partially match parameter names).
|
(as R is able to partially match parameter names).
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -17,8 +17,8 @@ endif
|
|||||||
$(foreach v, $(XGB_RFLAGS), $(warning $(v)))
|
$(foreach v, $(XGB_RFLAGS), $(warning $(v)))
|
||||||
|
|
||||||
PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
|
PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
|
||||||
PKG_CXXFLAGS= @OPENMP_CXXFLAGS@ $(SHLIB_PTHREAD_FLAGS)
|
PKG_CXXFLAGS= @OPENMP_CXXFLAGS@ @ENDIAN_FLAG@ -pthread
|
||||||
PKG_LIBS = @OPENMP_CXXFLAGS@ $(SHLIB_PTHREAD_FLAGS)
|
PKG_LIBS = @OPENMP_CXXFLAGS@ @OPENMP_LIB@ @ENDIAN_FLAG@ @BACKTRACE_LIB@ -pthread
|
||||||
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o\
|
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o\
|
||||||
$(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\
|
$(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\
|
||||||
$(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o
|
$(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o
|
||||||
|
|||||||
@@ -29,8 +29,8 @@ endif
|
|||||||
$(foreach v, $(XGB_RFLAGS), $(warning $(v)))
|
$(foreach v, $(XGB_RFLAGS), $(warning $(v)))
|
||||||
|
|
||||||
PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
|
PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
|
||||||
PKG_CXXFLAGS= $(SHLIB_OPENMP_CFLAGS) $(SHLIB_PTHREAD_FLAGS)
|
PKG_CXXFLAGS= $(SHLIB_OPENMP_CXXFLAGS) $(SHLIB_PTHREAD_FLAGS)
|
||||||
PKG_LIBS = $(SHLIB_OPENMP_CFLAGS) $(SHLIB_PTHREAD_FLAGS)
|
PKG_LIBS = $(SHLIB_OPENMP_CXXFLAGS) $(SHLIB_PTHREAD_FLAGS)
|
||||||
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o\
|
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o\
|
||||||
$(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\
|
$(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\
|
||||||
$(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o
|
$(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/* Copyright (c) 2015 by Contributors
|
/* Copyright (c) 2015 by Contributors
|
||||||
*
|
*
|
||||||
* This file was initially generated using the following R command:
|
* This file was initially generated using the following R command:
|
||||||
* tools::package_native_routine_registration_skeleton('.', con = 'src/init.c', character_only = F)
|
* tools::package_native_routine_registration_skeleton('.', con = 'src/init.c', character_only = F)
|
||||||
* and edited to conform to xgboost C linter requirements. For details, see
|
* and edited to conform to xgboost C linter requirements. For details, see
|
||||||
@@ -10,7 +10,7 @@
|
|||||||
#include <stdlib.h>
|
#include <stdlib.h>
|
||||||
#include <R_ext/Rdynload.h>
|
#include <R_ext/Rdynload.h>
|
||||||
|
|
||||||
/* FIXME:
|
/* FIXME:
|
||||||
Check these declarations against the C/Fortran source code.
|
Check these declarations against the C/Fortran source code.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
@@ -24,7 +24,7 @@ extern SEXP XGBoosterGetAttr_R(SEXP, SEXP);
|
|||||||
extern SEXP XGBoosterLoadModelFromRaw_R(SEXP, SEXP);
|
extern SEXP XGBoosterLoadModelFromRaw_R(SEXP, SEXP);
|
||||||
extern SEXP XGBoosterLoadModel_R(SEXP, SEXP);
|
extern SEXP XGBoosterLoadModel_R(SEXP, SEXP);
|
||||||
extern SEXP XGBoosterModelToRaw_R(SEXP);
|
extern SEXP XGBoosterModelToRaw_R(SEXP);
|
||||||
extern SEXP XGBoosterPredict_R(SEXP, SEXP, SEXP, SEXP);
|
extern SEXP XGBoosterPredict_R(SEXP, SEXP, SEXP, SEXP, SEXP);
|
||||||
extern SEXP XGBoosterSaveModel_R(SEXP, SEXP);
|
extern SEXP XGBoosterSaveModel_R(SEXP, SEXP);
|
||||||
extern SEXP XGBoosterSetAttr_R(SEXP, SEXP, SEXP);
|
extern SEXP XGBoosterSetAttr_R(SEXP, SEXP, SEXP);
|
||||||
extern SEXP XGBoosterSetParam_R(SEXP, SEXP, SEXP);
|
extern SEXP XGBoosterSetParam_R(SEXP, SEXP, SEXP);
|
||||||
@@ -50,7 +50,7 @@ static const R_CallMethodDef CallEntries[] = {
|
|||||||
{"XGBoosterLoadModelFromRaw_R", (DL_FUNC) &XGBoosterLoadModelFromRaw_R, 2},
|
{"XGBoosterLoadModelFromRaw_R", (DL_FUNC) &XGBoosterLoadModelFromRaw_R, 2},
|
||||||
{"XGBoosterLoadModel_R", (DL_FUNC) &XGBoosterLoadModel_R, 2},
|
{"XGBoosterLoadModel_R", (DL_FUNC) &XGBoosterLoadModel_R, 2},
|
||||||
{"XGBoosterModelToRaw_R", (DL_FUNC) &XGBoosterModelToRaw_R, 1},
|
{"XGBoosterModelToRaw_R", (DL_FUNC) &XGBoosterModelToRaw_R, 1},
|
||||||
{"XGBoosterPredict_R", (DL_FUNC) &XGBoosterPredict_R, 4},
|
{"XGBoosterPredict_R", (DL_FUNC) &XGBoosterPredict_R, 5},
|
||||||
{"XGBoosterSaveModel_R", (DL_FUNC) &XGBoosterSaveModel_R, 2},
|
{"XGBoosterSaveModel_R", (DL_FUNC) &XGBoosterSaveModel_R, 2},
|
||||||
{"XGBoosterSetAttr_R", (DL_FUNC) &XGBoosterSetAttr_R, 3},
|
{"XGBoosterSetAttr_R", (DL_FUNC) &XGBoosterSetAttr_R, 3},
|
||||||
{"XGBoosterSetParam_R", (DL_FUNC) &XGBoosterSetParam_R, 3},
|
{"XGBoosterSetParam_R", (DL_FUNC) &XGBoosterSetParam_R, 3},
|
||||||
@@ -70,7 +70,7 @@ static const R_CallMethodDef CallEntries[] = {
|
|||||||
|
|
||||||
#if defined(_WIN32)
|
#if defined(_WIN32)
|
||||||
__declspec(dllexport)
|
__declspec(dllexport)
|
||||||
#endif
|
#endif // defined(_WIN32)
|
||||||
void R_init_xgboost(DllInfo *dll) {
|
void R_init_xgboost(DllInfo *dll) {
|
||||||
R_registerRoutines(dll, NULL, CallEntries, NULL, NULL);
|
R_registerRoutines(dll, NULL, CallEntries, NULL, NULL);
|
||||||
R_useDynamicSymbols(dll, FALSE);
|
R_useDynamicSymbols(dll, FALSE);
|
||||||
|
|||||||
@@ -136,9 +136,10 @@ SEXP XGDMatrixSliceDMatrix_R(SEXP handle, SEXP idxset) {
|
|||||||
idxvec[i] = INTEGER(idxset)[i] - 1;
|
idxvec[i] = INTEGER(idxset)[i] - 1;
|
||||||
}
|
}
|
||||||
DMatrixHandle res;
|
DMatrixHandle res;
|
||||||
CHECK_CALL(XGDMatrixSliceDMatrix(R_ExternalPtrAddr(handle),
|
CHECK_CALL(XGDMatrixSliceDMatrixEx(R_ExternalPtrAddr(handle),
|
||||||
BeginPtr(idxvec), len,
|
BeginPtr(idxvec), len,
|
||||||
&res));
|
&res,
|
||||||
|
0));
|
||||||
ret = PROTECT(R_MakeExternalPtr(res, R_NilValue, R_NilValue));
|
ret = PROTECT(R_MakeExternalPtr(res, R_NilValue, R_NilValue));
|
||||||
R_RegisterCFinalizerEx(ret, _DMatrixFinalizer, TRUE);
|
R_RegisterCFinalizerEx(ret, _DMatrixFinalizer, TRUE);
|
||||||
R_API_END();
|
R_API_END();
|
||||||
@@ -165,7 +166,9 @@ SEXP XGDMatrixSetInfo_R(SEXP handle, SEXP field, SEXP array) {
|
|||||||
for (int i = 0; i < len; ++i) {
|
for (int i = 0; i < len; ++i) {
|
||||||
vec[i] = static_cast<unsigned>(INTEGER(array)[i]);
|
vec[i] = static_cast<unsigned>(INTEGER(array)[i]);
|
||||||
}
|
}
|
||||||
CHECK_CALL(XGDMatrixSetGroup(R_ExternalPtrAddr(handle), BeginPtr(vec), len));
|
CHECK_CALL(XGDMatrixSetUIntInfo(R_ExternalPtrAddr(handle),
|
||||||
|
CHAR(asChar(field)),
|
||||||
|
BeginPtr(vec), len));
|
||||||
} else {
|
} else {
|
||||||
std::vector<float> vec(len);
|
std::vector<float> vec(len);
|
||||||
#pragma omp parallel for schedule(static)
|
#pragma omp parallel for schedule(static)
|
||||||
@@ -173,8 +176,8 @@ SEXP XGDMatrixSetInfo_R(SEXP handle, SEXP field, SEXP array) {
|
|||||||
vec[i] = REAL(array)[i];
|
vec[i] = REAL(array)[i];
|
||||||
}
|
}
|
||||||
CHECK_CALL(XGDMatrixSetFloatInfo(R_ExternalPtrAddr(handle),
|
CHECK_CALL(XGDMatrixSetFloatInfo(R_ExternalPtrAddr(handle),
|
||||||
CHAR(asChar(field)),
|
CHAR(asChar(field)),
|
||||||
BeginPtr(vec), len));
|
BeginPtr(vec), len));
|
||||||
}
|
}
|
||||||
R_API_END();
|
R_API_END();
|
||||||
return R_NilValue;
|
return R_NilValue;
|
||||||
@@ -292,24 +295,26 @@ SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evnames) {
|
|||||||
vec_sptr.push_back(vec_names[i].c_str());
|
vec_sptr.push_back(vec_names[i].c_str());
|
||||||
}
|
}
|
||||||
CHECK_CALL(XGBoosterEvalOneIter(R_ExternalPtrAddr(handle),
|
CHECK_CALL(XGBoosterEvalOneIter(R_ExternalPtrAddr(handle),
|
||||||
asInteger(iter),
|
asInteger(iter),
|
||||||
BeginPtr(vec_dmats),
|
BeginPtr(vec_dmats),
|
||||||
BeginPtr(vec_sptr),
|
BeginPtr(vec_sptr),
|
||||||
len, &ret));
|
len, &ret));
|
||||||
R_API_END();
|
R_API_END();
|
||||||
return mkString(ret);
|
return mkString(ret);
|
||||||
}
|
}
|
||||||
|
|
||||||
SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask, SEXP ntree_limit) {
|
SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask,
|
||||||
|
SEXP ntree_limit, SEXP training) {
|
||||||
SEXP ret;
|
SEXP ret;
|
||||||
R_API_BEGIN();
|
R_API_BEGIN();
|
||||||
bst_ulong olen;
|
bst_ulong olen;
|
||||||
const float *res;
|
const float *res;
|
||||||
CHECK_CALL(XGBoosterPredict(R_ExternalPtrAddr(handle),
|
CHECK_CALL(XGBoosterPredict(R_ExternalPtrAddr(handle),
|
||||||
R_ExternalPtrAddr(dmat),
|
R_ExternalPtrAddr(dmat),
|
||||||
asInteger(option_mask),
|
asInteger(option_mask),
|
||||||
asInteger(ntree_limit),
|
asInteger(ntree_limit),
|
||||||
&olen, &res));
|
asInteger(training),
|
||||||
|
&olen, &res));
|
||||||
ret = PROTECT(allocVector(REALSXP, olen));
|
ret = PROTECT(allocVector(REALSXP, olen));
|
||||||
for (size_t i = 0; i < olen; ++i) {
|
for (size_t i = 0; i < olen; ++i) {
|
||||||
REAL(ret)[i] = res[i];
|
REAL(ret)[i] = res[i];
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
/*!
|
/*!
|
||||||
* Copyright 2014 (c) by Contributors
|
* Copyright 2014 (c) by Contributors
|
||||||
* \file xgboost_wrapper_R.h
|
* \file xgboost_R.h
|
||||||
* \author Tianqi Chen
|
* \author Tianqi Chen
|
||||||
* \brief R wrapper of xgboost
|
* \brief R wrapper of xgboost
|
||||||
*/
|
*/
|
||||||
@@ -148,8 +148,10 @@ XGB_DLL SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evn
|
|||||||
* \param dmat data matrix
|
* \param dmat data matrix
|
||||||
* \param option_mask output_margin:1 predict_leaf:2
|
* \param option_mask output_margin:1 predict_leaf:2
|
||||||
* \param ntree_limit limit number of trees used in prediction
|
* \param ntree_limit limit number of trees used in prediction
|
||||||
|
* \param training Whether the prediction value is used for training.
|
||||||
*/
|
*/
|
||||||
XGB_DLL SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask, SEXP ntree_limit);
|
XGB_DLL SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask,
|
||||||
|
SEXP ntree_limit, SEXP training);
|
||||||
/*!
|
/*!
|
||||||
* \brief load model from existing file
|
* \brief load model from existing file
|
||||||
* \param handle handle
|
* \param handle handle
|
||||||
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
// to change behavior of libxgboost
|
// to change behavior of libxgboost
|
||||||
|
|
||||||
#include <xgboost/logging.h>
|
#include <xgboost/logging.h>
|
||||||
#include "src/common/random.h"
|
#include "../../src/common/random.h"
|
||||||
#include "./xgboost_R.h"
|
#include "./xgboost_R.h"
|
||||||
|
|
||||||
// redirect the messages to R's console.
|
// redirect the messages to R's console.
|
||||||
@@ -32,7 +32,10 @@ extern "C" {
|
|||||||
|
|
||||||
namespace xgboost {
|
namespace xgboost {
|
||||||
ConsoleLogger::~ConsoleLogger() {
|
ConsoleLogger::~ConsoleLogger() {
|
||||||
dmlc::CustomLogMessage::Log(log_stream_.str());
|
if (cur_verbosity_ == LogVerbosity::kIgnore ||
|
||||||
|
cur_verbosity_ <= global_verbosity_) {
|
||||||
|
dmlc::CustomLogMessage::Log(log_stream_.str());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
TrackerLogger::~TrackerLogger() {
|
TrackerLogger::~TrackerLogger() {
|
||||||
dmlc::CustomLogMessage::Log(log_stream_.str());
|
dmlc::CustomLogMessage::Log(log_stream_.str());
|
||||||
@@ -46,10 +49,11 @@ namespace common {
|
|||||||
bool CheckNAN(double v) {
|
bool CheckNAN(double v) {
|
||||||
return ISNAN(v);
|
return ISNAN(v);
|
||||||
}
|
}
|
||||||
|
#if !defined(XGBOOST_USE_CUDA)
|
||||||
double LogGamma(double v) {
|
double LogGamma(double v) {
|
||||||
return lgammafn(v);
|
return lgammafn(v);
|
||||||
}
|
}
|
||||||
|
#endif // !defined(XGBOOST_USE_CUDA)
|
||||||
// customize random engine.
|
// customize random engine.
|
||||||
void CustomGlobalRandomEngine::seed(CustomGlobalRandomEngine::result_type val) {
|
void CustomGlobalRandomEngine::seed(CustomGlobalRandomEngine::result_type val) {
|
||||||
// ignore the seed
|
// ignore the seed
|
||||||
|
|||||||
@@ -27,7 +27,7 @@ test_that("train and predict binary classification", {
|
|||||||
|
|
||||||
pred <- predict(bst, test$data)
|
pred <- predict(bst, test$data)
|
||||||
expect_length(pred, 1611)
|
expect_length(pred, 1611)
|
||||||
|
|
||||||
pred1 <- predict(bst, train$data, ntreelimit = 1)
|
pred1 <- predict(bst, train$data, ntreelimit = 1)
|
||||||
expect_length(pred1, 6513)
|
expect_length(pred1, 6513)
|
||||||
err_pred1 <- sum((pred1 > 0.5) != train$label)/length(train$label)
|
err_pred1 <- sum((pred1 > 0.5) != train$label)/length(train$label)
|
||||||
@@ -35,6 +35,54 @@ test_that("train and predict binary classification", {
|
|||||||
expect_lt(abs(err_pred1 - err_log), 10e-6)
|
expect_lt(abs(err_pred1 - err_log), 10e-6)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
test_that("dart prediction works", {
|
||||||
|
nrounds = 32
|
||||||
|
set.seed(1994)
|
||||||
|
|
||||||
|
d <- cbind(
|
||||||
|
x1 = rnorm(100),
|
||||||
|
x2 = rnorm(100),
|
||||||
|
x3 = rnorm(100))
|
||||||
|
y <- d[,"x1"] + d[,"x2"]^2 +
|
||||||
|
ifelse(d[,"x3"] > .5, d[,"x3"]^2, 2^d[,"x3"]) +
|
||||||
|
rnorm(100)
|
||||||
|
|
||||||
|
set.seed(1994)
|
||||||
|
booster_by_xgboost <- xgboost(data = d, label = y, max_depth = 2, booster = "dart",
|
||||||
|
rate_drop = 0.5, one_drop = TRUE,
|
||||||
|
eta = 1, nthread = 2, nrounds = nrounds, objective = "reg:squarederror")
|
||||||
|
pred_by_xgboost_0 <- predict(booster_by_xgboost, newdata = d, ntreelimit = 0)
|
||||||
|
pred_by_xgboost_1 <- predict(booster_by_xgboost, newdata = d, ntreelimit = nrounds)
|
||||||
|
expect_true(all(matrix(pred_by_xgboost_0, byrow=TRUE) == matrix(pred_by_xgboost_1, byrow=TRUE)))
|
||||||
|
|
||||||
|
pred_by_xgboost_2 <- predict(booster_by_xgboost, newdata = d, training = TRUE)
|
||||||
|
expect_false(all(matrix(pred_by_xgboost_0, byrow=TRUE) == matrix(pred_by_xgboost_2, byrow=TRUE)))
|
||||||
|
|
||||||
|
set.seed(1994)
|
||||||
|
dtrain <- xgb.DMatrix(data=d, info = list(label=y))
|
||||||
|
booster_by_train <- xgb.train( params = list(
|
||||||
|
booster = "dart",
|
||||||
|
max_depth = 2,
|
||||||
|
eta = 1,
|
||||||
|
rate_drop = 0.5,
|
||||||
|
one_drop = TRUE,
|
||||||
|
nthread = 1,
|
||||||
|
tree_method= "exact",
|
||||||
|
verbosity = 3,
|
||||||
|
objective = "reg:squarederror"
|
||||||
|
),
|
||||||
|
data = dtrain,
|
||||||
|
nrounds = nrounds
|
||||||
|
)
|
||||||
|
pred_by_train_0 <- predict(booster_by_train, newdata = dtrain, ntreelimit = 0)
|
||||||
|
pred_by_train_1 <- predict(booster_by_train, newdata = dtrain, ntreelimit = nrounds)
|
||||||
|
pred_by_train_2 <- predict(booster_by_train, newdata = dtrain, training = TRUE)
|
||||||
|
|
||||||
|
expect_true(all(matrix(pred_by_train_0, byrow=TRUE) == matrix(pred_by_xgboost_0, byrow=TRUE)))
|
||||||
|
expect_true(all(matrix(pred_by_train_1, byrow=TRUE) == matrix(pred_by_xgboost_1, byrow=TRUE)))
|
||||||
|
expect_true(all(matrix(pred_by_train_2, byrow=TRUE) == matrix(pred_by_xgboost_2, byrow=TRUE)))
|
||||||
|
})
|
||||||
|
|
||||||
test_that("train and predict softprob", {
|
test_that("train and predict softprob", {
|
||||||
lb <- as.numeric(iris$Species) - 1
|
lb <- as.numeric(iris$Species) - 1
|
||||||
set.seed(11)
|
set.seed(11)
|
||||||
@@ -74,7 +122,7 @@ test_that("train and predict softmax", {
|
|||||||
expect_false(is.null(bst$evaluation_log))
|
expect_false(is.null(bst$evaluation_log))
|
||||||
expect_lt(bst$evaluation_log[, min(train_merror)], 0.025)
|
expect_lt(bst$evaluation_log[, min(train_merror)], 0.025)
|
||||||
expect_equal(bst$niter * 3, xgb.ntree(bst))
|
expect_equal(bst$niter * 3, xgb.ntree(bst))
|
||||||
|
|
||||||
pred <- predict(bst, as.matrix(iris[, -5]))
|
pred <- predict(bst, as.matrix(iris[, -5]))
|
||||||
expect_length(pred, nrow(iris))
|
expect_length(pred, nrow(iris))
|
||||||
err <- sum(pred != lb)/length(lb)
|
err <- sum(pred != lb)/length(lb)
|
||||||
@@ -90,12 +138,12 @@ test_that("train and predict RF", {
|
|||||||
num_parallel_tree = 20, subsample = 0.6, colsample_bytree = 0.1)
|
num_parallel_tree = 20, subsample = 0.6, colsample_bytree = 0.1)
|
||||||
expect_equal(bst$niter, 1)
|
expect_equal(bst$niter, 1)
|
||||||
expect_equal(xgb.ntree(bst), 20)
|
expect_equal(xgb.ntree(bst), 20)
|
||||||
|
|
||||||
pred <- predict(bst, train$data)
|
pred <- predict(bst, train$data)
|
||||||
pred_err <- sum((pred > 0.5) != lb)/length(lb)
|
pred_err <- sum((pred > 0.5) != lb)/length(lb)
|
||||||
expect_lt(abs(bst$evaluation_log[1, train_error] - pred_err), 10e-6)
|
expect_lt(abs(bst$evaluation_log[1, train_error] - pred_err), 10e-6)
|
||||||
#expect_lt(pred_err, 0.03)
|
#expect_lt(pred_err, 0.03)
|
||||||
|
|
||||||
pred <- predict(bst, train$data, ntreelimit = 20)
|
pred <- predict(bst, train$data, ntreelimit = 20)
|
||||||
pred_err_20 <- sum((pred > 0.5) != lb)/length(lb)
|
pred_err_20 <- sum((pred > 0.5) != lb)/length(lb)
|
||||||
expect_equal(pred_err_20, pred_err)
|
expect_equal(pred_err_20, pred_err)
|
||||||
@@ -182,7 +230,7 @@ test_that("xgb.cv works", {
|
|||||||
expect_is(cv, 'xgb.cv.synchronous')
|
expect_is(cv, 'xgb.cv.synchronous')
|
||||||
expect_false(is.null(cv$evaluation_log))
|
expect_false(is.null(cv$evaluation_log))
|
||||||
expect_lt(cv$evaluation_log[, min(test_error_mean)], 0.03)
|
expect_lt(cv$evaluation_log[, min(test_error_mean)], 0.03)
|
||||||
expect_lt(cv$evaluation_log[, min(test_error_std)], 0.004)
|
expect_lt(cv$evaluation_log[, min(test_error_std)], 0.008)
|
||||||
expect_equal(cv$niter, 2)
|
expect_equal(cv$niter, 2)
|
||||||
expect_false(is.null(cv$folds) && is.list(cv$folds))
|
expect_false(is.null(cv$folds) && is.list(cv$folds))
|
||||||
expect_length(cv$folds, 5)
|
expect_length(cv$folds, 5)
|
||||||
@@ -191,13 +239,27 @@ test_that("xgb.cv works", {
|
|||||||
expect_false(is.null(cv$call))
|
expect_false(is.null(cv$call))
|
||||||
})
|
})
|
||||||
|
|
||||||
|
test_that("xgb.cv works with stratified folds", {
|
||||||
|
dtrain <- xgb.DMatrix(train$data, label = train$label)
|
||||||
|
set.seed(314159)
|
||||||
|
cv <- xgb.cv(data = dtrain, max_depth = 2, nfold = 5,
|
||||||
|
eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic",
|
||||||
|
verbose=TRUE, stratified = FALSE)
|
||||||
|
set.seed(314159)
|
||||||
|
cv2 <- xgb.cv(data = dtrain, max_depth = 2, nfold = 5,
|
||||||
|
eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic",
|
||||||
|
verbose=TRUE, stratified = TRUE)
|
||||||
|
# Stratified folds should result in a different evaluation logs
|
||||||
|
expect_true(all(cv$evaluation_log[, test_error_mean] != cv2$evaluation_log[, test_error_mean]))
|
||||||
|
})
|
||||||
|
|
||||||
test_that("train and predict with non-strict classes", {
|
test_that("train and predict with non-strict classes", {
|
||||||
# standard dense matrix input
|
# standard dense matrix input
|
||||||
train_dense <- as.matrix(train$data)
|
train_dense <- as.matrix(train$data)
|
||||||
bst <- xgboost(data = train_dense, label = train$label, max_depth = 2,
|
bst <- xgboost(data = train_dense, label = train$label, max_depth = 2,
|
||||||
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 0)
|
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 0)
|
||||||
pr0 <- predict(bst, train_dense)
|
pr0 <- predict(bst, train_dense)
|
||||||
|
|
||||||
# dense matrix-like input of non-matrix class
|
# dense matrix-like input of non-matrix class
|
||||||
class(train_dense) <- 'shmatrix'
|
class(train_dense) <- 'shmatrix'
|
||||||
expect_true(is.matrix(train_dense))
|
expect_true(is.matrix(train_dense))
|
||||||
@@ -207,7 +269,7 @@ test_that("train and predict with non-strict classes", {
|
|||||||
, regexp = NA)
|
, regexp = NA)
|
||||||
expect_error(pr <- predict(bst, train_dense), regexp = NA)
|
expect_error(pr <- predict(bst, train_dense), regexp = NA)
|
||||||
expect_equal(pr0, pr)
|
expect_equal(pr0, pr)
|
||||||
|
|
||||||
# dense matrix-like input of non-matrix class with some inheritance
|
# dense matrix-like input of non-matrix class with some inheritance
|
||||||
class(train_dense) <- c('pphmatrix','shmatrix')
|
class(train_dense) <- c('pphmatrix','shmatrix')
|
||||||
expect_true(is.matrix(train_dense))
|
expect_true(is.matrix(train_dense))
|
||||||
@@ -217,7 +279,7 @@ test_that("train and predict with non-strict classes", {
|
|||||||
, regexp = NA)
|
, regexp = NA)
|
||||||
expect_error(pr <- predict(bst, train_dense), regexp = NA)
|
expect_error(pr <- predict(bst, train_dense), regexp = NA)
|
||||||
expect_equal(pr0, pr)
|
expect_equal(pr0, pr)
|
||||||
|
|
||||||
# when someone inhertis from xgb.Booster, it should still be possible to use it as xgb.Booster
|
# when someone inhertis from xgb.Booster, it should still be possible to use it as xgb.Booster
|
||||||
class(bst) <- c('super.Booster', 'xgb.Booster')
|
class(bst) <- c('super.Booster', 'xgb.Booster')
|
||||||
expect_error(pr <- predict(bst, train_dense), regexp = NA)
|
expect_error(pr <- predict(bst, train_dense), regexp = NA)
|
||||||
|
|||||||
@@ -236,7 +236,7 @@ test_that("early stopping using a specific metric works", {
|
|||||||
expect_equal(length(pred), 1611)
|
expect_equal(length(pred), 1611)
|
||||||
logloss_pred <- sum(-ltest * log(pred) - (1 - ltest) * log(1 - pred)) / length(ltest)
|
logloss_pred <- sum(-ltest * log(pred) - (1 - ltest) * log(1 - pred)) / length(ltest)
|
||||||
logloss_log <- bst$evaluation_log[bst$best_iteration, test_logloss]
|
logloss_log <- bst$evaluation_log[bst$best_iteration, test_logloss]
|
||||||
expect_equal(logloss_log, logloss_pred, tolerance = 5e-6)
|
expect_equal(logloss_log, logloss_pred, tolerance = 1e-5)
|
||||||
})
|
})
|
||||||
|
|
||||||
test_that("early stopping xgb.cv works", {
|
test_that("early stopping xgb.cv works", {
|
||||||
@@ -282,10 +282,11 @@ test_that("prediction in xgb.cv works for gblinear too", {
|
|||||||
})
|
})
|
||||||
|
|
||||||
test_that("prediction in early-stopping xgb.cv works", {
|
test_that("prediction in early-stopping xgb.cv works", {
|
||||||
set.seed(1)
|
set.seed(11)
|
||||||
expect_output(
|
expect_output(
|
||||||
cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.1, nrounds = 20,
|
cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.1, nrounds = 20,
|
||||||
early_stopping_rounds = 5, maximize = FALSE, prediction = TRUE)
|
early_stopping_rounds = 5, maximize = FALSE, stratified = FALSE,
|
||||||
|
prediction = TRUE)
|
||||||
, "Stopping. Best iteration")
|
, "Stopping. Best iteration")
|
||||||
|
|
||||||
expect_false(is.null(cv$best_iteration))
|
expect_false(is.null(cv$best_iteration))
|
||||||
|
|||||||
@@ -31,7 +31,7 @@ num_round <- 2
|
|||||||
test_that("custom objective works", {
|
test_that("custom objective works", {
|
||||||
bst <- xgb.train(param, dtrain, num_round, watchlist)
|
bst <- xgb.train(param, dtrain, num_round, watchlist)
|
||||||
expect_equal(class(bst), "xgb.Booster")
|
expect_equal(class(bst), "xgb.Booster")
|
||||||
expect_equal(length(bst$raw), 1094)
|
expect_equal(length(bst$raw), 1100)
|
||||||
expect_false(is.null(bst$evaluation_log))
|
expect_false(is.null(bst$evaluation_log))
|
||||||
expect_false(is.null(bst$evaluation_log$eval_error))
|
expect_false(is.null(bst$evaluation_log$eval_error))
|
||||||
expect_lt(bst$evaluation_log[num_round, eval_error], 0.03)
|
expect_lt(bst$evaluation_log[num_round, eval_error], 0.03)
|
||||||
@@ -45,7 +45,7 @@ test_that("custom objective in CV works", {
|
|||||||
})
|
})
|
||||||
|
|
||||||
test_that("custom objective using DMatrix attr works", {
|
test_that("custom objective using DMatrix attr works", {
|
||||||
|
|
||||||
attr(dtrain, 'label') <- getinfo(dtrain, 'label')
|
attr(dtrain, 'label') <- getinfo(dtrain, 'label')
|
||||||
|
|
||||||
logregobjattr <- function(preds, dtrain) {
|
logregobjattr <- function(preds, dtrain) {
|
||||||
@@ -58,5 +58,5 @@ test_that("custom objective using DMatrix attr works", {
|
|||||||
param$objective = logregobjattr
|
param$objective = logregobjattr
|
||||||
bst <- xgb.train(param, dtrain, num_round, watchlist)
|
bst <- xgb.train(param, dtrain, num_round, watchlist)
|
||||||
expect_equal(class(bst), "xgb.Booster")
|
expect_equal(class(bst), "xgb.Booster")
|
||||||
expect_equal(length(bst$raw), 1094)
|
expect_equal(length(bst$raw), 1100)
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -10,12 +10,12 @@ test_label <- agaricus.test$label[1:100]
|
|||||||
test_that("xgb.DMatrix: basic construction", {
|
test_that("xgb.DMatrix: basic construction", {
|
||||||
# from sparse matrix
|
# from sparse matrix
|
||||||
dtest1 <- xgb.DMatrix(test_data, label=test_label)
|
dtest1 <- xgb.DMatrix(test_data, label=test_label)
|
||||||
|
|
||||||
# from dense matrix
|
# from dense matrix
|
||||||
dtest2 <- xgb.DMatrix(as.matrix(test_data), label=test_label)
|
dtest2 <- xgb.DMatrix(as.matrix(test_data), label=test_label)
|
||||||
expect_equal(getinfo(dtest1, 'label'), getinfo(dtest2, 'label'))
|
expect_equal(getinfo(dtest1, 'label'), getinfo(dtest2, 'label'))
|
||||||
expect_equal(dim(dtest1), dim(dtest2))
|
expect_equal(dim(dtest1), dim(dtest2))
|
||||||
|
|
||||||
#from dense integer matrix
|
#from dense integer matrix
|
||||||
int_data <- as.matrix(test_data)
|
int_data <- as.matrix(test_data)
|
||||||
storage.mode(int_data) <- "integer"
|
storage.mode(int_data) <- "integer"
|
||||||
@@ -33,7 +33,7 @@ test_that("xgb.DMatrix: saving, loading", {
|
|||||||
expect_output(dtest3 <- xgb.DMatrix(tmp_file, silent = TRUE), NA)
|
expect_output(dtest3 <- xgb.DMatrix(tmp_file, silent = TRUE), NA)
|
||||||
unlink(tmp_file)
|
unlink(tmp_file)
|
||||||
expect_equal(getinfo(dtest1, 'label'), getinfo(dtest3, 'label'))
|
expect_equal(getinfo(dtest1, 'label'), getinfo(dtest3, 'label'))
|
||||||
|
|
||||||
# from a libsvm text file
|
# from a libsvm text file
|
||||||
tmp <- c("0 1:1 2:1","1 3:1","0 1:1")
|
tmp <- c("0 1:1 2:1","1 3:1","0 1:1")
|
||||||
tmp_file <- 'tmp.libsvm'
|
tmp_file <- 'tmp.libsvm'
|
||||||
@@ -49,7 +49,7 @@ test_that("xgb.DMatrix: getinfo & setinfo", {
|
|||||||
expect_true(setinfo(dtest, 'label', test_label))
|
expect_true(setinfo(dtest, 'label', test_label))
|
||||||
labels <- getinfo(dtest, 'label')
|
labels <- getinfo(dtest, 'label')
|
||||||
expect_equal(test_label, getinfo(dtest, 'label'))
|
expect_equal(test_label, getinfo(dtest, 'label'))
|
||||||
|
|
||||||
expect_true(length(getinfo(dtest, 'weight')) == 0)
|
expect_true(length(getinfo(dtest, 'weight')) == 0)
|
||||||
expect_true(length(getinfo(dtest, 'base_margin')) == 0)
|
expect_true(length(getinfo(dtest, 'base_margin')) == 0)
|
||||||
|
|
||||||
@@ -57,10 +57,10 @@ test_that("xgb.DMatrix: getinfo & setinfo", {
|
|||||||
expect_true(setinfo(dtest, 'base_margin', test_label))
|
expect_true(setinfo(dtest, 'base_margin', test_label))
|
||||||
expect_true(setinfo(dtest, 'group', c(50,50)))
|
expect_true(setinfo(dtest, 'group', c(50,50)))
|
||||||
expect_error(setinfo(dtest, 'group', test_label))
|
expect_error(setinfo(dtest, 'group', test_label))
|
||||||
|
|
||||||
# providing character values will give a warning
|
# providing character values will give a warning
|
||||||
expect_warning( setinfo(dtest, 'weight', rep('a', nrow(test_data))) )
|
expect_warning( setinfo(dtest, 'weight', rep('a', nrow(test_data))) )
|
||||||
|
|
||||||
# any other label should error
|
# any other label should error
|
||||||
expect_error(setinfo(dtest, 'asdf', test_label))
|
expect_error(setinfo(dtest, 'asdf', test_label))
|
||||||
})
|
})
|
||||||
@@ -71,7 +71,7 @@ test_that("xgb.DMatrix: slice, dim", {
|
|||||||
dsub1 <- slice(dtest, 1:42)
|
dsub1 <- slice(dtest, 1:42)
|
||||||
expect_equal(nrow(dsub1), 42)
|
expect_equal(nrow(dsub1), 42)
|
||||||
expect_equal(ncol(dsub1), ncol(test_data))
|
expect_equal(ncol(dsub1), ncol(test_data))
|
||||||
|
|
||||||
dsub2 <- dtest[1:42,]
|
dsub2 <- dtest[1:42,]
|
||||||
expect_equal(dim(dtest), dim(test_data))
|
expect_equal(dim(dtest), dim(test_data))
|
||||||
expect_equal(getinfo(dsub1, 'label'), getinfo(dsub2, 'label'))
|
expect_equal(getinfo(dsub1, 'label'), getinfo(dsub2, 'label'))
|
||||||
|
|||||||
@@ -142,6 +142,44 @@ test_that("predict feature contributions works", {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
test_that("SHAPs sum to predictions, with or without DART", {
|
||||||
|
d <- cbind(
|
||||||
|
x1 = rnorm(100),
|
||||||
|
x2 = rnorm(100),
|
||||||
|
x3 = rnorm(100))
|
||||||
|
y <- d[,"x1"] + d[,"x2"]^2 +
|
||||||
|
ifelse(d[,"x3"] > .5, d[,"x3"]^2, 2^d[,"x3"]) +
|
||||||
|
rnorm(100)
|
||||||
|
nrounds <- 30
|
||||||
|
|
||||||
|
for (booster in list("gbtree", "dart")) {
|
||||||
|
fit <- xgboost(
|
||||||
|
params = c(
|
||||||
|
list(
|
||||||
|
booster = booster,
|
||||||
|
objective = "reg:squarederror",
|
||||||
|
eval_metric = "rmse"),
|
||||||
|
if (booster == "dart")
|
||||||
|
list(rate_drop = .01, one_drop = T)),
|
||||||
|
data = d,
|
||||||
|
label = y,
|
||||||
|
nrounds = nrounds)
|
||||||
|
|
||||||
|
pr <- function(...)
|
||||||
|
predict(fit, newdata = d, ...)
|
||||||
|
pred <- pr()
|
||||||
|
shap <- pr(predcontrib = T)
|
||||||
|
shapi <- pr(predinteraction = T)
|
||||||
|
tol = 1e-5
|
||||||
|
|
||||||
|
expect_equal(rowSums(shap), pred, tol = tol)
|
||||||
|
expect_equal(apply(shapi, 1, sum), pred, tol = tol)
|
||||||
|
for (i in 1 : nrow(d))
|
||||||
|
for (f in list(rowSums, colSums))
|
||||||
|
expect_equal(f(shapi[i,,]), shap[i,], tol = tol)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
test_that("xgb-attribute functionality", {
|
test_that("xgb-attribute functionality", {
|
||||||
val <- "my attribute value"
|
val <- "my attribute value"
|
||||||
list.val <- list(my_attr=val, a=123, b='ok')
|
list.val <- list(my_attr=val, a=123, b='ok')
|
||||||
@@ -163,6 +201,7 @@ test_that("xgb-attribute functionality", {
|
|||||||
# serializing:
|
# serializing:
|
||||||
xgb.save(bst.Tree, 'xgb.model')
|
xgb.save(bst.Tree, 'xgb.model')
|
||||||
bst <- xgb.load('xgb.model')
|
bst <- xgb.load('xgb.model')
|
||||||
|
if (file.exists('xgb.model')) file.remove('xgb.model')
|
||||||
expect_equal(xgb.attr(bst, "my_attr"), val)
|
expect_equal(xgb.attr(bst, "my_attr"), val)
|
||||||
expect_equal(xgb.attributes(bst), list.ch)
|
expect_equal(xgb.attributes(bst), list.ch)
|
||||||
# deletion:
|
# deletion:
|
||||||
@@ -199,10 +238,12 @@ if (grepl('Windows', Sys.info()[['sysname']]) ||
|
|||||||
test_that("xgb.Booster serializing as R object works", {
|
test_that("xgb.Booster serializing as R object works", {
|
||||||
saveRDS(bst.Tree, 'xgb.model.rds')
|
saveRDS(bst.Tree, 'xgb.model.rds')
|
||||||
bst <- readRDS('xgb.model.rds')
|
bst <- readRDS('xgb.model.rds')
|
||||||
|
if (file.exists('xgb.model.rds')) file.remove('xgb.model.rds')
|
||||||
dtrain <- xgb.DMatrix(sparse_matrix, label = label)
|
dtrain <- xgb.DMatrix(sparse_matrix, label = label)
|
||||||
expect_equal(predict(bst.Tree, dtrain), predict(bst, dtrain), tolerance = float_tolerance)
|
expect_equal(predict(bst.Tree, dtrain), predict(bst, dtrain), tolerance = float_tolerance)
|
||||||
expect_equal(xgb.dump(bst.Tree), xgb.dump(bst))
|
expect_equal(xgb.dump(bst.Tree), xgb.dump(bst))
|
||||||
xgb.save(bst, 'xgb.model')
|
xgb.save(bst, 'xgb.model')
|
||||||
|
if (file.exists('xgb.model')) file.remove('xgb.model')
|
||||||
nil_ptr <- new("externalptr")
|
nil_ptr <- new("externalptr")
|
||||||
class(nil_ptr) <- "xgb.Booster.handle"
|
class(nil_ptr) <- "xgb.Booster.handle"
|
||||||
expect_true(identical(bst$handle, nil_ptr))
|
expect_true(identical(bst$handle, nil_ptr))
|
||||||
|
|||||||
@@ -81,6 +81,39 @@ test_that("predict feature interactions works", {
|
|||||||
expect_lt(max(abs(intr - gt_intr)), 0.1)
|
expect_lt(max(abs(intr - gt_intr)), 0.1)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
test_that("SHAP contribution values are not NAN", {
|
||||||
|
d <- data.frame(
|
||||||
|
x1 = c(-2.3, 1.4, 5.9, 2, 2.5, 0.3, -3.6, -0.2, 0.5, -2.8, -4.6, 3.3, -1.2,
|
||||||
|
-1.1, -2.3, 0.4, -1.5, -0.2, -1, 3.7),
|
||||||
|
x2 = c(291.179171, 269.198331, 289.942097, 283.191669, 269.673332,
|
||||||
|
294.158346, 287.255835, 291.530838, 285.899586, 269.290833,
|
||||||
|
268.649586, 291.530841, 280.074593, 269.484168, 293.94042,
|
||||||
|
294.327506, 296.20709, 295.441669, 283.16792, 270.227085),
|
||||||
|
y = c(9, 15, 5.7, 9.2, 22.4, 5, 9, 3.2, 7.2, 13.1, 7.8, 16.9, 6.5, 22.1,
|
||||||
|
5.3, 10.4, 11.1, 13.9, 11, 20.5),
|
||||||
|
fold = c(2, 2, 2, 1, 2, 2, 1, 2, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2))
|
||||||
|
|
||||||
|
ivs <- c("x1", "x2")
|
||||||
|
|
||||||
|
fit <- xgboost(
|
||||||
|
verbose = 0,
|
||||||
|
params = list(
|
||||||
|
objective = "reg:squarederror",
|
||||||
|
eval_metric = "rmse"),
|
||||||
|
data = as.matrix(subset(d, fold == 2)[, ivs]),
|
||||||
|
label = subset(d, fold == 2)$y,
|
||||||
|
nthread = 1,
|
||||||
|
nrounds = 3)
|
||||||
|
|
||||||
|
shaps <- as.data.frame(predict(fit,
|
||||||
|
newdata = as.matrix(subset(d, fold == 1)[, ivs]),
|
||||||
|
predcontrib = T))
|
||||||
|
result <- cbind(shaps, sum = rowSums(shaps), pred = predict(fit,
|
||||||
|
newdata = as.matrix(subset(d, fold == 1)[, ivs])))
|
||||||
|
|
||||||
|
expect_true(identical(TRUE, all.equal(result$sum, result$pred, tol = 1e-6)))
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
test_that("multiclass feature interactions work", {
|
test_that("multiclass feature interactions work", {
|
||||||
dm <- xgb.DMatrix(as.matrix(iris[,-5]), label=as.numeric(iris$Species)-1)
|
dm <- xgb.DMatrix(as.matrix(iris[,-5]), label=as.numeric(iris$Species)-1)
|
||||||
|
|||||||
@@ -138,7 +138,7 @@ levels(df[,Treatment])
|
|||||||
|
|
||||||
Next step, we will transform the categorical data to dummy variables.
|
Next step, we will transform the categorical data to dummy variables.
|
||||||
Several encoding methods exist, e.g., [one-hot encoding](http://en.wikipedia.org/wiki/One-hot) is a common approach.
|
Several encoding methods exist, e.g., [one-hot encoding](http://en.wikipedia.org/wiki/One-hot) is a common approach.
|
||||||
We will use the [dummy contrast coding](http://www.ats.ucla.edu/stat/r/library/contrast_coding.htm#dummy) which is popular because it producess "full rank" encoding (also see [this blog post by Max Kuhn](http://appliedpredictivemodeling.com/blog/2013/10/23/the-basics-of-encoding-categorical-data-for-predictive-models)).
|
We will use the [dummy contrast coding](http://www.ats.ucla.edu/stat/r/library/contrast_coding.htm#dummy) which is popular because it produces "full rank" encoding (also see [this blog post by Max Kuhn](http://appliedpredictivemodeling.com/blog/2013/10/23/the-basics-of-encoding-categorical-data-for-predictive-models)).
|
||||||
|
|
||||||
The purpose is to transform each value of each *categorical* feature into a *binary* feature `{0, 1}`.
|
The purpose is to transform each value of each *categorical* feature into a *binary* feature `{0, 1}`.
|
||||||
|
|
||||||
@@ -268,7 +268,7 @@ c2 <- chisq.test(df$Age, output_vector)
|
|||||||
print(c2)
|
print(c2)
|
||||||
```
|
```
|
||||||
|
|
||||||
Pearson correlation between Age and illness disapearing is **`r round(c2$statistic, 2 )`**.
|
Pearson correlation between Age and illness disappearing is **`r round(c2$statistic, 2 )`**.
|
||||||
|
|
||||||
```{r, warning=FALSE, message=FALSE}
|
```{r, warning=FALSE, message=FALSE}
|
||||||
c2 <- chisq.test(df$AgeDiscret, output_vector)
|
c2 <- chisq.test(df$AgeDiscret, output_vector)
|
||||||
|
|||||||
@@ -313,7 +313,7 @@ Until now, all the learnings we have performed were based on boosting trees. **X
|
|||||||
bst <- xgb.train(data=dtrain, booster = "gblinear", max_depth=2, nthread = 2, nrounds=2, watchlist=watchlist, eval_metric = "error", eval_metric = "logloss", objective = "binary:logistic")
|
bst <- xgb.train(data=dtrain, booster = "gblinear", max_depth=2, nthread = 2, nrounds=2, watchlist=watchlist, eval_metric = "error", eval_metric = "logloss", objective = "binary:logistic")
|
||||||
```
|
```
|
||||||
|
|
||||||
In this specific case, *linear boosting* gets sligtly better performance metrics than decision trees based algorithm.
|
In this specific case, *linear boosting* gets slightly better performance metrics than decision trees based algorithm.
|
||||||
|
|
||||||
In simple cases, it will happen because there is nothing better than a linear algorithm to catch a linear link. However, decision trees are much better to catch a non linear link between predictors and outcome. Because there is no silver bullet, we advise you to check both algorithms with your own datasets to have an idea of what to use.
|
In simple cases, it will happen because there is nothing better than a linear algorithm to catch a linear link. However, decision trees are much better to catch a non linear link between predictors and outcome. Because there is no silver bullet, we advise you to check both algorithms with your own datasets to have an idea of what to use.
|
||||||
|
|
||||||
|
|||||||
189
R-package/vignettes/xgboostfromJSON.Rmd
Normal file
189
R-package/vignettes/xgboostfromJSON.Rmd
Normal file
@@ -0,0 +1,189 @@
|
|||||||
|
---
|
||||||
|
title: "XGBoost from JSON"
|
||||||
|
output:
|
||||||
|
rmarkdown::html_vignette:
|
||||||
|
number_sections: yes
|
||||||
|
toc: yes
|
||||||
|
author: Roland Stevenson
|
||||||
|
vignette: >
|
||||||
|
%\VignetteIndexEntry{XGBoost from JSON}
|
||||||
|
%\VignetteEngine{knitr::rmarkdown}
|
||||||
|
\usepackage[utf8]{inputenc}
|
||||||
|
---
|
||||||
|
|
||||||
|
XGBoost from JSON
|
||||||
|
=================
|
||||||
|
|
||||||
|
## Introduction
|
||||||
|
|
||||||
|
The purpose of this Vignette is to show you how to correctly load and work with an **Xgboost** model that has been dumped to JSON. **Xgboost** internally converts all data to [32-bit floats](https://en.wikipedia.org/wiki/Single-precision_floating-point_format), and the values dumped to JSON are decimal representations of these values. When working with a model that has been parsed from a JSON file, care must be taken to correctly treat:
|
||||||
|
|
||||||
|
- the input data, which should be converted to 32-bit floats
|
||||||
|
- any 32-bit floats that were stored in JSON as decimal representations
|
||||||
|
- any calculations must be done with 32-bit mathematical operators
|
||||||
|
|
||||||
|
## Setup
|
||||||
|
|
||||||
|
For the purpose of this tutorial we will load the xgboost, jsonlite, and float packages. We'll also set `digits=22` in our options in case we want to inspect many digits of our results.
|
||||||
|
|
||||||
|
```{r}
|
||||||
|
require(xgboost)
|
||||||
|
require(jsonlite)
|
||||||
|
require(float)
|
||||||
|
options(digits=22)
|
||||||
|
```
|
||||||
|
|
||||||
|
We will create a toy binary logistic model based on the example first provided [here](https://github.com/dmlc/xgboost/issues/3960), so that we can easily understand the structure of the dumped JSON model object. This will allow us to understand where discrepancies can occur and how they should be handled.
|
||||||
|
|
||||||
|
```{r}
|
||||||
|
dates <- c(20180130, 20180130, 20180130,
|
||||||
|
20180130, 20180130, 20180130,
|
||||||
|
20180131, 20180131, 20180131,
|
||||||
|
20180131, 20180131, 20180131,
|
||||||
|
20180131, 20180131, 20180131,
|
||||||
|
20180134, 20180134, 20180134)
|
||||||
|
|
||||||
|
labels <- c(1, 1, 1,
|
||||||
|
1, 1, 1,
|
||||||
|
0, 0, 0,
|
||||||
|
0, 0, 0,
|
||||||
|
0, 0, 0,
|
||||||
|
0, 0, 0)
|
||||||
|
|
||||||
|
data <- data.frame(dates = dates, labels=labels)
|
||||||
|
|
||||||
|
bst <- xgboost(
|
||||||
|
data = as.matrix(data$dates),
|
||||||
|
label = labels,
|
||||||
|
nthread = 2,
|
||||||
|
nrounds = 1,
|
||||||
|
objective = "binary:logistic",
|
||||||
|
missing = NA,
|
||||||
|
max_depth = 1
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Comparing results
|
||||||
|
We will now dump the model to JSON and attempt to illustrate a variety of issues that can arise, and how to properly deal with them.
|
||||||
|
|
||||||
|
First let's dump the model to JSON:
|
||||||
|
|
||||||
|
```{r}
|
||||||
|
bst_json <- xgb.dump(bst, with_stats = FALSE, dump_format='json')
|
||||||
|
bst_from_json <- fromJSON(bst_json, simplifyDataFrame = FALSE)
|
||||||
|
node <- bst_from_json[[1]]
|
||||||
|
cat(bst_json)
|
||||||
|
```
|
||||||
|
|
||||||
|
The tree JSON shown by the above code-chunk tells us that if the data is less than 20180132, the tree will output the value in the first leaf. Otherwise it will output the value in the second leaf. Let's try to reproduce this manually with the data we have and confirm that it matches the model predictions we've already calculated.
|
||||||
|
|
||||||
|
```{r}
|
||||||
|
bst_preds_logodds <- predict(bst,as.matrix(data$dates), outputmargin = TRUE)
|
||||||
|
|
||||||
|
# calculate the logodds values using the JSON representation
|
||||||
|
bst_from_json_logodds <- ifelse(data$dates<node$split_condition,
|
||||||
|
node$children[[1]]$leaf,
|
||||||
|
node$children[[2]]$leaf)
|
||||||
|
|
||||||
|
bst_preds_logodds
|
||||||
|
bst_from_json_logodds
|
||||||
|
|
||||||
|
# test that values are equal
|
||||||
|
bst_preds_logodds == bst_from_json_logodds
|
||||||
|
|
||||||
|
```
|
||||||
|
None are equal. What happened?
|
||||||
|
|
||||||
|
At this stage two things happened:
|
||||||
|
|
||||||
|
- input data was not converted to 32-bit floats
|
||||||
|
- the JSON variables were not converted to 32-bit floats
|
||||||
|
|
||||||
|
### Lesson 1: All data is 32-bit floats
|
||||||
|
|
||||||
|
> When working with imported JSON, all data must be converted to 32-bit floats
|
||||||
|
|
||||||
|
To explain this, let's repeat the comparison and round to two decimals:
|
||||||
|
|
||||||
|
```{r}
|
||||||
|
round(bst_preds_logodds,2) == round(bst_from_json_logodds,2)
|
||||||
|
```
|
||||||
|
|
||||||
|
If we round to two decimals, we see that only the elements related to data values of `20180131` don't agree. If we convert the data to floats, they agree:
|
||||||
|
|
||||||
|
```{r}
|
||||||
|
# now convert the dates to floats first
|
||||||
|
bst_from_json_logodds <- ifelse(fl(data$dates)<node$split_condition,
|
||||||
|
node$children[[1]]$leaf,
|
||||||
|
node$children[[2]]$leaf)
|
||||||
|
|
||||||
|
# test that values are equal
|
||||||
|
round(bst_preds_logodds,2) == round(bst_from_json_logodds,2)
|
||||||
|
```
|
||||||
|
|
||||||
|
What's the lesson? If we are going to work with an imported JSON model, any data must be converted to floats first. In this case, since '20180131' cannot be represented as a 32-bit float, it is rounded up to 20180132, as shown here:
|
||||||
|
|
||||||
|
```{r}
|
||||||
|
fl(20180131)
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### Lesson 2: JSON parameters are 32-bit floats
|
||||||
|
|
||||||
|
> All JSON parameters stored as floats must be converted to floats.
|
||||||
|
|
||||||
|
Let's now say we do care about numbers past the first two decimals.
|
||||||
|
|
||||||
|
```{r}
|
||||||
|
# test that values are equal
|
||||||
|
bst_preds_logodds == bst_from_json_logodds
|
||||||
|
```
|
||||||
|
|
||||||
|
None are exactly equal. What happened? Although we've converted the data to 32-bit floats, we also need to convert the JSON parameters to 32-bit floats. Let's do this:
|
||||||
|
|
||||||
|
```{r}
|
||||||
|
# now convert the dates to floats first
|
||||||
|
bst_from_json_logodds <- ifelse(fl(data$dates)<fl(node$split_condition),
|
||||||
|
as.numeric(fl(node$children[[1]]$leaf)),
|
||||||
|
as.numeric(fl(node$children[[2]]$leaf)))
|
||||||
|
|
||||||
|
# test that values are equal
|
||||||
|
bst_preds_logodds == bst_from_json_logodds
|
||||||
|
```
|
||||||
|
All equal. What's the lesson? If we are going to work with an imported JSON model, any JSON parameters that were stored as floats must also be converted to floats first.
|
||||||
|
|
||||||
|
### Lesson 3: Use 32-bit math
|
||||||
|
|
||||||
|
> Always use 32-bit numbers and operators
|
||||||
|
|
||||||
|
We were able to get the log-odds to agree, so now let's manually calculate the sigmoid of the log-odds. This should agree with the xgboost predictions.
|
||||||
|
|
||||||
|
|
||||||
|
```{r}
|
||||||
|
bst_preds <- predict(bst,as.matrix(data$dates))
|
||||||
|
|
||||||
|
# calculate the predictions casting doubles to floats
|
||||||
|
bst_from_json_preds <- ifelse(fl(data$dates)<fl(node$split_condition),
|
||||||
|
as.numeric(1/(1+exp(-1*fl(node$children[[1]]$leaf)))),
|
||||||
|
as.numeric(1/(1+exp(-1*fl(node$children[[2]]$leaf))))
|
||||||
|
)
|
||||||
|
|
||||||
|
# test that values are equal
|
||||||
|
bst_preds == bst_from_json_preds
|
||||||
|
```
|
||||||
|
|
||||||
|
None are exactly equal again. What is going on here? Well, since we are using the value `1` in the calcuations, we have introduced a double into the calculation. Because of this, all float values are promoted to 64-bit doubles and the 64-bit version of the exponentiation operator `exp` is also used. On the other hand, xgboost uses the 32-bit version of the exponentation operator in its [sigmoid function](https://github.com/dmlc/xgboost/blob/54980b8959680a0da06a3fc0ec776e47c8cbb0a1/src/common/math.h#L25-L27).
|
||||||
|
|
||||||
|
How do we fix this? We have to ensure we use the correct datatypes everywhere and the correct operators. If we use only floats, the float library that we have loaded will ensure the 32-bit float exponention operator is applied.
|
||||||
|
```{r}
|
||||||
|
# calculate the predictions casting doubles to floats
|
||||||
|
bst_from_json_preds <- ifelse(fl(data$dates)<fl(node$split_condition),
|
||||||
|
as.numeric(fl(1)/(fl(1)+exp(fl(-1)*fl(node$children[[1]]$leaf)))),
|
||||||
|
as.numeric(fl(1)/(fl(1)+exp(fl(-1)*fl(node$children[[2]]$leaf))))
|
||||||
|
)
|
||||||
|
|
||||||
|
# test that values are equal
|
||||||
|
bst_preds == bst_from_json_preds
|
||||||
|
```
|
||||||
|
|
||||||
|
All equal. What's the lesson? We have to ensure that all calculations are done with 32-bit floating point operators if we want to reproduce the results that we see with xgboost.
|
||||||
40
README.md
40
README.md
@@ -1,11 +1,13 @@
|
|||||||
<img src=https://raw.githubusercontent.com/dmlc/dmlc.github.io/master/img/logo-m/xgboost.png width=135/> eXtreme Gradient Boosting
|
<img src=https://raw.githubusercontent.com/dmlc/dmlc.github.io/master/img/logo-m/xgboost.png width=135/> eXtreme Gradient Boosting
|
||||||
===========
|
===========
|
||||||
[](https://travis-ci.org/dmlc/xgboost)
|
[](https://xgboost-ci.net/blue/organizations/jenkins/xgboost/activity)
|
||||||
|
[](https://travis-ci.org/dmlc/xgboost)
|
||||||
[](https://ci.appveyor.com/project/tqchen/xgboost)
|
[](https://ci.appveyor.com/project/tqchen/xgboost)
|
||||||
[](https://xgboost.readthedocs.org)
|
[](https://xgboost.readthedocs.org)
|
||||||
[](./LICENSE)
|
[](./LICENSE)
|
||||||
[](http://cran.r-project.org/web/packages/xgboost)
|
[](http://cran.r-project.org/web/packages/xgboost)
|
||||||
[](https://pypi.python.org/pypi/xgboost/)
|
[](https://pypi.python.org/pypi/xgboost/)
|
||||||
|
[](https://optuna.org)
|
||||||
|
|
||||||
[Community](https://xgboost.ai/community) |
|
[Community](https://xgboost.ai/community) |
|
||||||
[Documentation](https://xgboost.readthedocs.org) |
|
[Documentation](https://xgboost.readthedocs.org) |
|
||||||
@@ -16,11 +18,11 @@
|
|||||||
XGBoost is an optimized distributed gradient boosting library designed to be highly ***efficient***, ***flexible*** and ***portable***.
|
XGBoost is an optimized distributed gradient boosting library designed to be highly ***efficient***, ***flexible*** and ***portable***.
|
||||||
It implements machine learning algorithms under the [Gradient Boosting](https://en.wikipedia.org/wiki/Gradient_boosting) framework.
|
It implements machine learning algorithms under the [Gradient Boosting](https://en.wikipedia.org/wiki/Gradient_boosting) framework.
|
||||||
XGBoost provides a parallel tree boosting (also known as GBDT, GBM) that solve many data science problems in a fast and accurate way.
|
XGBoost provides a parallel tree boosting (also known as GBDT, GBM) that solve many data science problems in a fast and accurate way.
|
||||||
The same code runs on major distributed environment (Hadoop, SGE, MPI) and can solve problems beyond billions of examples.
|
The same code runs on major distributed environment (Kubernetes, Hadoop, SGE, MPI, Dask) and can solve problems beyond billions of examples.
|
||||||
|
|
||||||
License
|
License
|
||||||
-------
|
-------
|
||||||
© Contributors, 2016. Licensed under an [Apache-2](https://github.com/dmlc/xgboost/blob/master/LICENSE) license.
|
© Contributors, 2019. Licensed under an [Apache-2](https://github.com/dmlc/xgboost/blob/master/LICENSE) license.
|
||||||
|
|
||||||
Contribute to XGBoost
|
Contribute to XGBoost
|
||||||
---------------------
|
---------------------
|
||||||
@@ -31,3 +33,35 @@ Reference
|
|||||||
---------
|
---------
|
||||||
- Tianqi Chen and Carlos Guestrin. [XGBoost: A Scalable Tree Boosting System](http://arxiv.org/abs/1603.02754). In 22nd SIGKDD Conference on Knowledge Discovery and Data Mining, 2016
|
- Tianqi Chen and Carlos Guestrin. [XGBoost: A Scalable Tree Boosting System](http://arxiv.org/abs/1603.02754). In 22nd SIGKDD Conference on Knowledge Discovery and Data Mining, 2016
|
||||||
- XGBoost originates from research project at University of Washington.
|
- XGBoost originates from research project at University of Washington.
|
||||||
|
|
||||||
|
Sponsors
|
||||||
|
--------
|
||||||
|
Become a sponsor and get a logo here. See details at [Sponsoring the XGBoost Project](https://xgboost.ai/sponsors). The funds are used to defray the cost of continuous integration and testing infrastructure (https://xgboost-ci.net).
|
||||||
|
|
||||||
|
## Open Source Collective sponsors
|
||||||
|
[](#backers) [](#sponsors)
|
||||||
|
|
||||||
|
### Sponsors
|
||||||
|
[[Become a sponsor](https://opencollective.com/xgboost#sponsor)]
|
||||||
|
|
||||||
|
<!--<a href="https://opencollective.com/xgboost/sponsor/0/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/0/avatar.svg"></a>-->
|
||||||
|
<a href="https://www.nvidia.com/en-us/" target="_blank"><img src="https://raw.githubusercontent.com/xgboost-ai/xgboost-ai.github.io/master/images/sponsors/nvidia.jpg" alt="NVIDIA" width="72" height="72"></a>
|
||||||
|
<a href="https://opencollective.com/xgboost/sponsor/1/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/1/avatar.svg"></a>
|
||||||
|
<a href="https://opencollective.com/xgboost/sponsor/2/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/2/avatar.svg"></a>
|
||||||
|
<a href="https://opencollective.com/xgboost/sponsor/3/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/3/avatar.svg"></a>
|
||||||
|
<a href="https://opencollective.com/xgboost/sponsor/4/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/4/avatar.svg"></a>
|
||||||
|
<a href="https://opencollective.com/xgboost/sponsor/5/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/5/avatar.svg"></a>
|
||||||
|
<a href="https://opencollective.com/xgboost/sponsor/6/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/6/avatar.svg"></a>
|
||||||
|
<a href="https://opencollective.com/xgboost/sponsor/7/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/7/avatar.svg"></a>
|
||||||
|
<a href="https://opencollective.com/xgboost/sponsor/8/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/8/avatar.svg"></a>
|
||||||
|
<a href="https://opencollective.com/xgboost/sponsor/9/website" target="_blank"><img src="https://opencollective.com/xgboost/sponsor/9/avatar.svg"></a>
|
||||||
|
|
||||||
|
### Backers
|
||||||
|
[[Become a backer](https://opencollective.com/xgboost#backer)]
|
||||||
|
|
||||||
|
<a href="https://opencollective.com/xgboost#backers" target="_blank"><img src="https://opencollective.com/xgboost/backers.svg?width=890"></a>
|
||||||
|
|
||||||
|
## Other sponsors
|
||||||
|
The sponsors in this list are donating cloud hours in lieu of cash donation.
|
||||||
|
|
||||||
|
<a href="https://aws.amazon.com/" target="_blank"><img src="https://raw.githubusercontent.com/xgboost-ai/xgboost-ai.github.io/master/images/sponsors/aws.png" alt="Amazon Web Services" width="72" height="72"></a>
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*!
|
/*!
|
||||||
* Copyright 2015 by Contributors.
|
* Copyright 2015-2019 by Contributors.
|
||||||
* \brief XGBoost Amalgamation.
|
* \brief XGBoost Amalgamation.
|
||||||
* This offers an alternative way to compile the entire library from this single file.
|
* This offers an alternative way to compile the entire library from this single file.
|
||||||
*
|
*
|
||||||
@@ -25,35 +25,39 @@
|
|||||||
// gbms
|
// gbms
|
||||||
#include "../src/gbm/gbm.cc"
|
#include "../src/gbm/gbm.cc"
|
||||||
#include "../src/gbm/gbtree.cc"
|
#include "../src/gbm/gbtree.cc"
|
||||||
|
#include "../src/gbm/gbtree_model.cc"
|
||||||
#include "../src/gbm/gblinear.cc"
|
#include "../src/gbm/gblinear.cc"
|
||||||
|
#include "../src/gbm/gblinear_model.cc"
|
||||||
|
|
||||||
// data
|
// data
|
||||||
#include "../src/data/data.cc"
|
#include "../src/data/data.cc"
|
||||||
#include "../src/data/simple_csr_source.cc"
|
#include "../src/data/simple_csr_source.cc"
|
||||||
#include "../src/data/simple_dmatrix.cc"
|
#include "../src/data/simple_dmatrix.cc"
|
||||||
#include "../src/data/sparse_page_raw_format.cc"
|
#include "../src/data/sparse_page_raw_format.cc"
|
||||||
|
#include "../src/data/ellpack_page.cc"
|
||||||
|
#include "../src/data/ellpack_page_source.cc"
|
||||||
|
|
||||||
// prediction
|
// prediction
|
||||||
#include "../src/predictor/predictor.cc"
|
#include "../src/predictor/predictor.cc"
|
||||||
#include "../src/predictor/cpu_predictor.cc"
|
#include "../src/predictor/cpu_predictor.cc"
|
||||||
|
|
||||||
#if DMLC_ENABLE_STD_THREAD
|
#if DMLC_ENABLE_STD_THREAD
|
||||||
#include "../src/data/sparse_page_source.cc"
|
|
||||||
#include "../src/data/sparse_page_dmatrix.cc"
|
#include "../src/data/sparse_page_dmatrix.cc"
|
||||||
#include "../src/data/sparse_page_writer.cc"
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
// tress
|
// tress
|
||||||
|
#include "../src/tree/param.cc"
|
||||||
#include "../src/tree/split_evaluator.cc"
|
#include "../src/tree/split_evaluator.cc"
|
||||||
#include "../src/tree/tree_model.cc"
|
#include "../src/tree/tree_model.cc"
|
||||||
#include "../src/tree/tree_updater.cc"
|
#include "../src/tree/tree_updater.cc"
|
||||||
#include "../src/tree/updater_colmaker.cc"
|
#include "../src/tree/updater_colmaker.cc"
|
||||||
#include "../src/tree/updater_fast_hist.cc"
|
#include "../src/tree/updater_quantile_hist.cc"
|
||||||
#include "../src/tree/updater_prune.cc"
|
#include "../src/tree/updater_prune.cc"
|
||||||
#include "../src/tree/updater_refresh.cc"
|
#include "../src/tree/updater_refresh.cc"
|
||||||
#include "../src/tree/updater_sync.cc"
|
#include "../src/tree/updater_sync.cc"
|
||||||
#include "../src/tree/updater_histmaker.cc"
|
#include "../src/tree/updater_histmaker.cc"
|
||||||
#include "../src/tree/updater_skmaker.cc"
|
#include "../src/tree/updater_skmaker.cc"
|
||||||
|
#include "../src/tree/constraints.cc"
|
||||||
|
|
||||||
// linear
|
// linear
|
||||||
#include "../src/linear/linear_updater.cc"
|
#include "../src/linear/linear_updater.cc"
|
||||||
@@ -64,8 +68,12 @@
|
|||||||
#include "../src/learner.cc"
|
#include "../src/learner.cc"
|
||||||
#include "../src/logging.cc"
|
#include "../src/logging.cc"
|
||||||
#include "../src/common/common.cc"
|
#include "../src/common/common.cc"
|
||||||
|
#include "../src/common/timer.cc"
|
||||||
#include "../src/common/host_device_vector.cc"
|
#include "../src/common/host_device_vector.cc"
|
||||||
#include "../src/common/hist_util.cc"
|
#include "../src/common/hist_util.cc"
|
||||||
|
#include "../src/common/json.cc"
|
||||||
|
#include "../src/common/io.cc"
|
||||||
|
#include "../src/common/version.cc"
|
||||||
|
|
||||||
// c_api
|
// c_api
|
||||||
#include "../src/c_api/c_api.cc"
|
#include "../src/c_api/c_api.cc"
|
||||||
|
|||||||
27
appveyor.yml
27
appveyor.yml
@@ -2,10 +2,6 @@ environment:
|
|||||||
R_ARCH: x64
|
R_ARCH: x64
|
||||||
USE_RTOOLS: true
|
USE_RTOOLS: true
|
||||||
matrix:
|
matrix:
|
||||||
- target: msvc
|
|
||||||
ver: 2013
|
|
||||||
generator: "Visual Studio 12 2013 Win64"
|
|
||||||
configuration: Release
|
|
||||||
- target: msvc
|
- target: msvc
|
||||||
ver: 2015
|
ver: 2015
|
||||||
generator: "Visual Studio 14 2015 Win64"
|
generator: "Visual Studio 14 2015 Win64"
|
||||||
@@ -36,26 +32,32 @@ install:
|
|||||||
- set PATH=C:\msys64\mingw64\bin;C:\msys64\usr\bin;%PATH%
|
- set PATH=C:\msys64\mingw64\bin;C:\msys64\usr\bin;%PATH%
|
||||||
- gcc -v
|
- gcc -v
|
||||||
- ls -l C:\
|
- ls -l C:\
|
||||||
# Miniconda2
|
# Miniconda3
|
||||||
- set PATH=;C:\Miniconda-x64;C:\Miniconda-x64\Scripts;%PATH%
|
- call C:\Miniconda3-x64\Scripts\activate.bat
|
||||||
|
- conda info
|
||||||
- where python
|
- where python
|
||||||
- python --version
|
- python --version
|
||||||
# do python build for mingw and one of the msvc jobs
|
# do python build for mingw and one of the msvc jobs
|
||||||
- set DO_PYTHON=off
|
- set DO_PYTHON=off
|
||||||
- if /i "%target%" == "mingw" set DO_PYTHON=on
|
- if /i "%target%" == "mingw" set DO_PYTHON=on
|
||||||
- if /i "%target%_%ver%_%configuration%" == "msvc_2015_Release" set DO_PYTHON=on
|
- if /i "%target%_%ver%_%configuration%" == "msvc_2015_Release" set DO_PYTHON=on
|
||||||
- if /i "%DO_PYTHON%" == "on" conda install -y numpy scipy pandas matplotlib nose scikit-learn graphviz python-graphviz
|
- if /i "%DO_PYTHON%" == "on" (
|
||||||
|
conda config --set always_yes true &&
|
||||||
|
conda update -q conda &&
|
||||||
|
conda install -y numpy scipy pandas matplotlib pytest scikit-learn graphviz python-graphviz
|
||||||
|
)
|
||||||
|
- set PATH=C:\Miniconda3-x64\Library\bin\graphviz;%PATH%
|
||||||
# R: based on https://github.com/krlmlr/r-appveyor
|
# R: based on https://github.com/krlmlr/r-appveyor
|
||||||
- ps: |
|
- ps: |
|
||||||
if($env:target -eq 'rmingw' -or $env:target -eq 'rmsvc') {
|
if($env:target -eq 'rmingw' -or $env:target -eq 'rmsvc') {
|
||||||
#$ErrorActionPreference = "Stop"
|
#$ErrorActionPreference = "Stop"
|
||||||
Invoke-WebRequest http://raw.github.com/krlmlr/r-appveyor/master/scripts/appveyor-tool.ps1 -OutFile "$Env:TEMP\appveyor-tool.ps1"
|
Invoke-WebRequest https://raw.githubusercontent.com/krlmlr/r-appveyor/master/scripts/appveyor-tool.ps1 -OutFile "$Env:TEMP\appveyor-tool.ps1"
|
||||||
Import-Module "$Env:TEMP\appveyor-tool.ps1"
|
Import-Module "$Env:TEMP\appveyor-tool.ps1"
|
||||||
Bootstrap
|
Bootstrap
|
||||||
$DEPS = "c('data.table','magrittr','stringi','ggplot2','DiagrammeR','Ckmeans.1d.dp','vcd','testthat','lintr','knitr','rmarkdown')"
|
|
||||||
cmd.exe /c "R.exe -q -e ""install.packages($DEPS, repos='$CRAN', type='both')"" 2>&1"
|
|
||||||
$BINARY_DEPS = "c('XML','igraph')"
|
$BINARY_DEPS = "c('XML','igraph')"
|
||||||
cmd.exe /c "R.exe -q -e ""install.packages($BINARY_DEPS, repos='$CRAN', type='win.binary')"" 2>&1"
|
cmd.exe /c "R.exe -q -e ""install.packages($BINARY_DEPS, repos='$CRAN', type='win.binary')"" 2>&1"
|
||||||
|
$DEPS = "c('data.table','magrittr','stringi','ggplot2','DiagrammeR','Ckmeans.1d.dp','vcd','testthat','lintr','knitr','rmarkdown')"
|
||||||
|
cmd.exe /c "R.exe -q -e ""install.packages($DEPS, repos='$CRAN', type='both')"" 2>&1"
|
||||||
}
|
}
|
||||||
|
|
||||||
build_script:
|
build_script:
|
||||||
@@ -92,14 +94,15 @@ build_script:
|
|||||||
cmake .. -G"%generator%" -DCMAKE_CONFIGURATION_TYPES="Release" -DR_LIB=ON &&
|
cmake .. -G"%generator%" -DCMAKE_CONFIGURATION_TYPES="Release" -DR_LIB=ON &&
|
||||||
cmake --build . --target install --config Release
|
cmake --build . --target install --config Release
|
||||||
)
|
)
|
||||||
- if /i "%target%" == "jvm" cd jvm-packages && mvn test -pl :xgboost4j
|
- if /i "%target%" == "jvm" cd jvm-packages && mvn test -pl :xgboost4j_2.12
|
||||||
|
|
||||||
test_script:
|
test_script:
|
||||||
- cd %APPVEYOR_BUILD_FOLDER%
|
- cd %APPVEYOR_BUILD_FOLDER%
|
||||||
- if /i "%DO_PYTHON%" == "on" python -m nose tests/python
|
- if /i "%DO_PYTHON%" == "on" python -m pytest tests/python
|
||||||
# mingw R package: run the R check (which includes unit tests), and also keep the built binary package
|
# mingw R package: run the R check (which includes unit tests), and also keep the built binary package
|
||||||
- if /i "%target%" == "rmingw" (
|
- if /i "%target%" == "rmingw" (
|
||||||
set _R_CHECK_CRAN_INCOMING_=FALSE&&
|
set _R_CHECK_CRAN_INCOMING_=FALSE&&
|
||||||
|
set _R_CHECK_FORCE_SUGGESTS_=FALSE&&
|
||||||
R.exe CMD check xgboost*.tar.gz --no-manual --no-build-vignettes --as-cran --install-args=--build
|
R.exe CMD check xgboost*.tar.gz --no-manual --no-build-vignettes --as-cran --install-args=--build
|
||||||
)
|
)
|
||||||
# MSVC R package: run only the unit tests
|
# MSVC R package: run only the unit tests
|
||||||
|
|||||||
51
build.sh
51
build.sh
@@ -1,51 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
# This is a simple script to make xgboost in MAC and Linux
|
|
||||||
# Basically, it first try to make with OpenMP, if fails, disable OpenMP and make it again.
|
|
||||||
# This will automatically make xgboost for MAC users who don't have OpenMP support.
|
|
||||||
# In most cases, type make will give what you want.
|
|
||||||
|
|
||||||
# See additional instruction in doc/build.md
|
|
||||||
set -e
|
|
||||||
|
|
||||||
if make; then
|
|
||||||
echo "Successfully build multi-thread xgboost"
|
|
||||||
else
|
|
||||||
|
|
||||||
not_ready=0
|
|
||||||
|
|
||||||
if [[ ! -e ./rabit/Makefile ]]; then
|
|
||||||
echo ""
|
|
||||||
echo "Please init the rabit submodule:"
|
|
||||||
echo "git submodule update --init --recursive -- rabit"
|
|
||||||
not_ready=1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ ! -e ./dmlc-core/Makefile ]]; then
|
|
||||||
echo ""
|
|
||||||
echo "Please init the dmlc-core submodule:"
|
|
||||||
echo "git submodule update --init --recursive -- dmlc-core"
|
|
||||||
not_ready=1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ "${not_ready}" == "1" ]]; then
|
|
||||||
echo ""
|
|
||||||
echo "Please fix the errors above and retry the build, or reclone the repository with:"
|
|
||||||
echo "git clone --recursive https://github.com/dmlc/xgboost.git"
|
|
||||||
echo ""
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
echo "-----------------------------"
|
|
||||||
echo "Building multi-thread xgboost failed"
|
|
||||||
echo "Start to build single-thread xgboost"
|
|
||||||
make clean_all
|
|
||||||
make config=make/minimum.mk
|
|
||||||
if [ $? -eq 0 ] ;then
|
|
||||||
echo "Successfully build single-thread xgboost"
|
|
||||||
echo "If you want multi-threaded version"
|
|
||||||
echo "See additional instructions in doc/build.md"
|
|
||||||
else
|
|
||||||
echo "Failed to build single-thread xgboost"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
16
cmake/Doc.cmake
Normal file
16
cmake/Doc.cmake
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
function (run_doxygen)
|
||||||
|
find_package(Doxygen REQUIRED)
|
||||||
|
|
||||||
|
if (NOT DOXYGEN_DOT_FOUND)
|
||||||
|
message(FATAL_ERROR "Command `dot` not found. Please install graphviz.")
|
||||||
|
endif (NOT DOXYGEN_DOT_FOUND)
|
||||||
|
|
||||||
|
configure_file(
|
||||||
|
${xgboost_SOURCE_DIR}/doc/Doxyfile.in
|
||||||
|
${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY)
|
||||||
|
add_custom_target( doc_doxygen ALL
|
||||||
|
COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
|
||||||
|
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
|
||||||
|
COMMENT "Generate C APIs documentation."
|
||||||
|
VERBATIM)
|
||||||
|
endfunction (run_doxygen)
|
||||||
22
cmake/FindPrefetchIntrinsics.cmake
Normal file
22
cmake/FindPrefetchIntrinsics.cmake
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
function (find_prefetch_intrinsics)
|
||||||
|
include(CheckCXXSourceCompiles)
|
||||||
|
check_cxx_source_compiles("
|
||||||
|
#include <xmmintrin.h>
|
||||||
|
int main() {
|
||||||
|
char data = 0;
|
||||||
|
const char* address = &data;
|
||||||
|
_mm_prefetch(address, _MM_HINT_NTA);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
" XGBOOST_MM_PREFETCH_PRESENT)
|
||||||
|
check_cxx_source_compiles("
|
||||||
|
int main() {
|
||||||
|
char data = 0;
|
||||||
|
const char* address = &data;
|
||||||
|
__builtin_prefetch(address, 0, 0);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
" XGBOOST_BUILTIN_PREFETCH_PRESENT)
|
||||||
|
set(XGBOOST_MM_PREFETCH_PRESENT ${XGBOOST_MM_PREFETCH_PRESENT} PARENT_SCOPE)
|
||||||
|
set(XGBOOST_BUILTIN_PREFETCH_PRESENT ${XGBOOST_BUILTIN_PREFETCH_PRESENT} PARENT_SCOPE)
|
||||||
|
endfunction (find_prefetch_intrinsics)
|
||||||
1
cmake/Python_version.in
Normal file
1
cmake/Python_version.in
Normal file
@@ -0,0 +1 @@
|
|||||||
|
@xgboost_VERSION_MAJOR@.@xgboost_VERSION_MINOR@.@xgboost_VERSION_PATCH@rc1
|
||||||
@@ -4,24 +4,29 @@
|
|||||||
# enable_sanitizers("address;leak")
|
# enable_sanitizers("address;leak")
|
||||||
|
|
||||||
# Add flags
|
# Add flags
|
||||||
macro(enable_sanitizer santizer)
|
macro(enable_sanitizer sanitizer)
|
||||||
if(${santizer} MATCHES "address")
|
if(${sanitizer} MATCHES "address")
|
||||||
find_package(ASan REQUIRED)
|
find_package(ASan REQUIRED)
|
||||||
set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=address")
|
set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=address")
|
||||||
link_libraries(${ASan_LIBRARY})
|
link_libraries(${ASan_LIBRARY})
|
||||||
|
|
||||||
elseif(${santizer} MATCHES "thread")
|
elseif(${sanitizer} MATCHES "thread")
|
||||||
find_package(TSan REQUIRED)
|
find_package(TSan REQUIRED)
|
||||||
set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=thread")
|
set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=thread")
|
||||||
link_libraries(${TSan_LIBRARY})
|
link_libraries(${TSan_LIBRARY})
|
||||||
|
|
||||||
elseif(${santizer} MATCHES "leak")
|
elseif(${sanitizer} MATCHES "leak")
|
||||||
find_package(LSan REQUIRED)
|
find_package(LSan REQUIRED)
|
||||||
set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=leak")
|
set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=leak")
|
||||||
link_libraries(${LSan_LIBRARY})
|
link_libraries(${LSan_LIBRARY})
|
||||||
|
|
||||||
|
elseif(${sanitizer} MATCHES "undefined")
|
||||||
|
find_package(UBSan REQUIRED)
|
||||||
|
set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=undefined -fno-sanitize-recover=undefined")
|
||||||
|
link_libraries(${UBSan_LIBRARY})
|
||||||
|
|
||||||
else()
|
else()
|
||||||
message(FATAL_ERROR "Santizer ${santizer} not supported.")
|
message(FATAL_ERROR "Santizer ${sanitizer} not supported.")
|
||||||
endif()
|
endif()
|
||||||
endmacro()
|
endmacro()
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
|
|
||||||
# Automatically set source group based on folder
|
# Automatically set source group based on folder
|
||||||
function(auto_source_group SOURCES)
|
function(auto_source_group SOURCES)
|
||||||
|
|
||||||
@@ -18,6 +17,10 @@ endfunction(auto_source_group)
|
|||||||
function(msvc_use_static_runtime)
|
function(msvc_use_static_runtime)
|
||||||
if(MSVC)
|
if(MSVC)
|
||||||
set(variables
|
set(variables
|
||||||
|
CMAKE_C_FLAGS_DEBUG
|
||||||
|
CMAKE_C_FLAGS_MINSIZEREL
|
||||||
|
CMAKE_C_FLAGS_RELEASE
|
||||||
|
CMAKE_C_FLAGS_RELWITHDEBINFO
|
||||||
CMAKE_CXX_FLAGS_DEBUG
|
CMAKE_CXX_FLAGS_DEBUG
|
||||||
CMAKE_CXX_FLAGS_MINSIZEREL
|
CMAKE_CXX_FLAGS_MINSIZEREL
|
||||||
CMAKE_CXX_FLAGS_RELEASE
|
CMAKE_CXX_FLAGS_RELEASE
|
||||||
@@ -29,25 +32,46 @@ function(msvc_use_static_runtime)
|
|||||||
set(${variable} "${${variable}}" PARENT_SCOPE)
|
set(${variable} "${${variable}}" PARENT_SCOPE)
|
||||||
endif()
|
endif()
|
||||||
endforeach()
|
endforeach()
|
||||||
|
set(variables
|
||||||
|
CMAKE_CUDA_FLAGS
|
||||||
|
CMAKE_CUDA_FLAGS_DEBUG
|
||||||
|
CMAKE_CUDA_FLAGS_MINSIZEREL
|
||||||
|
CMAKE_CUDA_FLAGS_RELEASE
|
||||||
|
CMAKE_CUDA_FLAGS_RELWITHDEBINFO
|
||||||
|
)
|
||||||
|
foreach(variable ${variables})
|
||||||
|
if(${variable} MATCHES "-MD")
|
||||||
|
string(REGEX REPLACE "-MD" "-MT" ${variable} "${${variable}}")
|
||||||
|
set(${variable} "${${variable}}" PARENT_SCOPE)
|
||||||
|
endif()
|
||||||
|
if(${variable} MATCHES "/MD")
|
||||||
|
string(REGEX REPLACE "/MD" "/MT" ${variable} "${${variable}}")
|
||||||
|
set(${variable} "${${variable}}" PARENT_SCOPE)
|
||||||
|
endif()
|
||||||
|
endforeach()
|
||||||
endif()
|
endif()
|
||||||
endfunction(msvc_use_static_runtime)
|
endfunction(msvc_use_static_runtime)
|
||||||
|
|
||||||
# Set output directory of target, ignoring debug or release
|
# Set output directory of target, ignoring debug or release
|
||||||
function(set_output_directory target dir)
|
function(set_output_directory target dir)
|
||||||
set_target_properties(${target} PROPERTIES
|
set_target_properties(${target} PROPERTIES
|
||||||
RUNTIME_OUTPUT_DIRECTORY ${dir}
|
RUNTIME_OUTPUT_DIRECTORY ${dir}
|
||||||
RUNTIME_OUTPUT_DIRECTORY_DEBUG ${dir}
|
RUNTIME_OUTPUT_DIRECTORY_DEBUG ${dir}
|
||||||
RUNTIME_OUTPUT_DIRECTORY_RELEASE ${dir}
|
RUNTIME_OUTPUT_DIRECTORY_RELEASE ${dir}
|
||||||
LIBRARY_OUTPUT_DIRECTORY ${dir}
|
RUNTIME_OUTPUT_DIRECTORY_RELWITHDEBINFO ${dir}
|
||||||
LIBRARY_OUTPUT_DIRECTORY_DEBUG ${dir}
|
RUNTIME_OUTPUT_DIRECTORY_MINSIZEREL ${dir}
|
||||||
LIBRARY_OUTPUT_DIRECTORY_RELEASE ${dir}
|
LIBRARY_OUTPUT_DIRECTORY ${dir}
|
||||||
|
LIBRARY_OUTPUT_DIRECTORY_DEBUG ${dir}
|
||||||
|
LIBRARY_OUTPUT_DIRECTORY_RELEASE ${dir}
|
||||||
|
LIBRARY_OUTPUT_DIRECTORY_RELWITHDEBINFO ${dir}
|
||||||
|
LIBRARY_OUTPUT_DIRECTORY_MINSIZEREL ${dir}
|
||||||
)
|
)
|
||||||
endfunction(set_output_directory)
|
endfunction(set_output_directory)
|
||||||
|
|
||||||
# Set a default build type to release if none was specified
|
# Set a default build type to release if none was specified
|
||||||
function(set_default_configuration_release)
|
function(set_default_configuration_release)
|
||||||
if(CMAKE_CONFIGURATION_TYPES STREQUAL "Debug;Release;MinSizeRel;RelWithDebInfo") # multiconfig generator?
|
if(CMAKE_CONFIGURATION_TYPES STREQUAL "Debug;Release;MinSizeRel;RelWithDebInfo") # multiconfig generator?
|
||||||
set(CMAKE_CONFIGURATION_TYPES Release CACHE STRING "" FORCE)
|
set(CMAKE_CONFIGURATION_TYPES Release CACHE STRING "" FORCE)
|
||||||
elseif(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
|
elseif(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
|
||||||
message(STATUS "Setting build type to 'Release' as none was specified.")
|
message(STATUS "Setting build type to 'Release' as none was specified.")
|
||||||
set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build." FORCE )
|
set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build." FORCE )
|
||||||
@@ -57,9 +81,14 @@ endfunction(set_default_configuration_release)
|
|||||||
# Generate nvcc compiler flags given a list of architectures
|
# Generate nvcc compiler flags given a list of architectures
|
||||||
# Also generates PTX for the most recent architecture for forwards compatibility
|
# Also generates PTX for the most recent architecture for forwards compatibility
|
||||||
function(format_gencode_flags flags out)
|
function(format_gencode_flags flags out)
|
||||||
|
if(CMAKE_CUDA_COMPILER_VERSION MATCHES "^([0-9]+\\.[0-9]+)")
|
||||||
|
set(CUDA_VERSION "${CMAKE_MATCH_1}")
|
||||||
|
endif()
|
||||||
# Set up architecture flags
|
# Set up architecture flags
|
||||||
if(NOT flags)
|
if(NOT flags)
|
||||||
if((CUDA_VERSION_MAJOR EQUAL 9) OR (CUDA_VERSION_MAJOR GREATER 9))
|
if(CUDA_VERSION VERSION_GREATER_EQUAL "10.0")
|
||||||
|
set(flags "35;50;52;60;61;70;75")
|
||||||
|
elseif(CUDA_VERSION VERSION_GREATER_EQUAL "9.0")
|
||||||
set(flags "35;50;52;60;61;70")
|
set(flags "35;50;52;60;61;70")
|
||||||
else()
|
else()
|
||||||
set(flags "35;50;52;60;61")
|
set(flags "35;50;52;60;61")
|
||||||
@@ -67,12 +96,12 @@ function(format_gencode_flags flags out)
|
|||||||
endif()
|
endif()
|
||||||
# Generate SASS
|
# Generate SASS
|
||||||
foreach(ver ${flags})
|
foreach(ver ${flags})
|
||||||
set(${out} "${${out}}-gencode arch=compute_${ver},code=sm_${ver};")
|
set(${out} "${${out}}--generate-code=arch=compute_${ver},code=sm_${ver};")
|
||||||
endforeach()
|
endforeach()
|
||||||
# Generate PTX for last architecture
|
# Generate PTX for last architecture
|
||||||
list(GET flags -1 ver)
|
list(GET flags -1 ver)
|
||||||
set(${out} "${${out}}-gencode arch=compute_${ver},code=compute_${ver};")
|
set(${out} "${${out}}--generate-code=arch=compute_${ver},code=compute_${ver};")
|
||||||
|
|
||||||
set(${out} "${${out}}" PARENT_SCOPE)
|
set(${out} "${${out}}" PARENT_SCOPE)
|
||||||
endfunction(format_gencode_flags flags)
|
endfunction(format_gencode_flags flags)
|
||||||
|
|
||||||
@@ -80,9 +109,13 @@ endfunction(format_gencode_flags flags)
|
|||||||
# if necessary, installs the main R package dependencies;
|
# if necessary, installs the main R package dependencies;
|
||||||
# runs R CMD INSTALL.
|
# runs R CMD INSTALL.
|
||||||
function(setup_rpackage_install_target rlib_target build_dir)
|
function(setup_rpackage_install_target rlib_target build_dir)
|
||||||
|
# backup cmake_install.cmake
|
||||||
|
install(CODE "file(COPY \"${build_dir}/R-package/cmake_install.cmake\"
|
||||||
|
DESTINATION \"${build_dir}/bak\")")
|
||||||
|
|
||||||
install(CODE "file(REMOVE_RECURSE \"${build_dir}/R-package\")")
|
install(CODE "file(REMOVE_RECURSE \"${build_dir}/R-package\")")
|
||||||
install(
|
install(
|
||||||
DIRECTORY "${PROJECT_SOURCE_DIR}/R-package"
|
DIRECTORY "${xgboost_SOURCE_DIR}/R-package"
|
||||||
DESTINATION "${build_dir}"
|
DESTINATION "${build_dir}"
|
||||||
REGEX "src/*" EXCLUDE
|
REGEX "src/*" EXCLUDE
|
||||||
REGEX "R-package/configure" EXCLUDE
|
REGEX "R-package/configure" EXCLUDE
|
||||||
@@ -98,4 +131,8 @@ function(setup_rpackage_install_target rlib_target build_dir)
|
|||||||
install(CODE "execute_process(COMMAND \"${LIBR_EXECUTABLE}\" \"-q\" \"-e\" \"${XGB_DEPS_SCRIPT}\")")
|
install(CODE "execute_process(COMMAND \"${LIBR_EXECUTABLE}\" \"-q\" \"-e\" \"${XGB_DEPS_SCRIPT}\")")
|
||||||
install(CODE "execute_process(COMMAND \"${LIBR_EXECUTABLE}\" CMD INSTALL\
|
install(CODE "execute_process(COMMAND \"${LIBR_EXECUTABLE}\" CMD INSTALL\
|
||||||
\"--no-multiarch\" \"--build\" \"${build_dir}/R-package\")")
|
\"--no-multiarch\" \"--build\" \"${build_dir}/R-package\")")
|
||||||
|
|
||||||
|
# restore cmake_install.cmake
|
||||||
|
install(CODE "file(RENAME \"${build_dir}/bak/cmake_install.cmake\"
|
||||||
|
\"${build_dir}/R-package/cmake_install.cmake\")")
|
||||||
endfunction(setup_rpackage_install_target)
|
endfunction(setup_rpackage_install_target)
|
||||||
|
|||||||
9
cmake/Version.cmake
Normal file
9
cmake/Version.cmake
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
function (write_version)
|
||||||
|
message(STATUS "xgboost VERSION: ${xgboost_VERSION}")
|
||||||
|
configure_file(
|
||||||
|
${xgboost_SOURCE_DIR}/cmake/version_config.h.in
|
||||||
|
${xgboost_SOURCE_DIR}/include/xgboost/version_config.h @ONLY)
|
||||||
|
configure_file(
|
||||||
|
${xgboost_SOURCE_DIR}/cmake/Python_version.in
|
||||||
|
${xgboost_SOURCE_DIR}/python-package/xgboost/VERSION @ONLY)
|
||||||
|
endfunction (write_version)
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
set(ASan_LIB_NAME ASan)
|
set(ASan_LIB_NAME ASan)
|
||||||
|
|
||||||
find_library(ASan_LIBRARY
|
find_library(ASan_LIBRARY
|
||||||
NAMES libasan.so libasan.so.4 libasan.so.3 libasan.so.2 libasan.so.1 libasan.so.0
|
NAMES libasan.so libasan.so.5 libasan.so.4 libasan.so.3 libasan.so.2 libasan.so.1 libasan.so.0
|
||||||
PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib)
|
PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib)
|
||||||
|
|
||||||
include(FindPackageHandleStandardArgs)
|
include(FindPackageHandleStandardArgs)
|
||||||
|
|||||||
23
cmake/modules/FindNVML.cmake
Normal file
23
cmake/modules/FindNVML.cmake
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
if (NVML_LIBRARY)
|
||||||
|
unset(NVML_LIBRARY CACHE)
|
||||||
|
endif(NVML_LIBRARY)
|
||||||
|
|
||||||
|
set(NVML_LIB_NAME nvml)
|
||||||
|
|
||||||
|
find_path(NVML_INCLUDE_DIR
|
||||||
|
NAMES nvml.h
|
||||||
|
PATHS ${CUDA_HOME}/include ${CUDA_INCLUDE} /usr/local/cuda/include)
|
||||||
|
|
||||||
|
find_library(NVML_LIBRARY
|
||||||
|
NAMES nvidia-ml)
|
||||||
|
|
||||||
|
message(STATUS "Using nvml library: ${NVML_LIBRARY}")
|
||||||
|
|
||||||
|
include(FindPackageHandleStandardArgs)
|
||||||
|
find_package_handle_standard_args(NVML DEFAULT_MSG
|
||||||
|
NVML_INCLUDE_DIR NVML_LIBRARY)
|
||||||
|
|
||||||
|
mark_as_advanced(
|
||||||
|
NVML_INCLUDE_DIR
|
||||||
|
NVML_LIBRARY
|
||||||
|
)
|
||||||
@@ -32,20 +32,28 @@
|
|||||||
#
|
#
|
||||||
# This module assumes that the user has already called find_package(CUDA)
|
# This module assumes that the user has already called find_package(CUDA)
|
||||||
|
|
||||||
|
if (NCCL_LIBRARY)
|
||||||
|
# Don't cache NCCL_LIBRARY to enable switching between static and shared.
|
||||||
|
unset(NCCL_LIBRARY CACHE)
|
||||||
|
endif()
|
||||||
|
|
||||||
set(NCCL_LIB_NAME nccl_static)
|
if (BUILD_WITH_SHARED_NCCL)
|
||||||
|
# libnccl.so
|
||||||
|
set(NCCL_LIB_NAME nccl)
|
||||||
|
else ()
|
||||||
|
# libnccl_static.a
|
||||||
|
set(NCCL_LIB_NAME nccl_static)
|
||||||
|
endif (BUILD_WITH_SHARED_NCCL)
|
||||||
|
|
||||||
find_path(NCCL_INCLUDE_DIR
|
find_path(NCCL_INCLUDE_DIR
|
||||||
NAMES nccl.h
|
NAMES nccl.h
|
||||||
PATHS $ENV{NCCL_ROOT}/include ${NCCL_ROOT}/include ${CUDA_INCLUDE_DIRS} /usr/include)
|
PATHS $ENV{NCCL_ROOT}/include ${NCCL_ROOT}/include)
|
||||||
|
|
||||||
find_library(NCCL_LIBRARY
|
find_library(NCCL_LIBRARY
|
||||||
NAMES ${NCCL_LIB_NAME}
|
NAMES ${NCCL_LIB_NAME}
|
||||||
PATHS $ENV{NCCL_ROOT}/lib ${NCCL_ROOT}/lib ${CUDA_INCLUDE_DIRS}/../lib /usr/lib)
|
PATHS $ENV{NCCL_ROOT}/lib/ ${NCCL_ROOT}/lib)
|
||||||
|
|
||||||
if (NCCL_INCLUDE_DIR AND NCCL_LIBRARY)
|
message(STATUS "Using nccl library: ${NCCL_LIBRARY}")
|
||||||
get_filename_component(NCCL_LIBRARY ${NCCL_LIBRARY} PATH)
|
|
||||||
endif ()
|
|
||||||
|
|
||||||
include(FindPackageHandleStandardArgs)
|
include(FindPackageHandleStandardArgs)
|
||||||
find_package_handle_standard_args(Nccl DEFAULT_MSG
|
find_package_handle_standard_args(Nccl DEFAULT_MSG
|
||||||
@@ -54,5 +62,4 @@ find_package_handle_standard_args(Nccl DEFAULT_MSG
|
|||||||
mark_as_advanced(
|
mark_as_advanced(
|
||||||
NCCL_INCLUDE_DIR
|
NCCL_INCLUDE_DIR
|
||||||
NCCL_LIBRARY
|
NCCL_LIBRARY
|
||||||
NCCL_LIB_NAME
|
|
||||||
)
|
)
|
||||||
|
|||||||
13
cmake/modules/FindUBSan.cmake
Normal file
13
cmake/modules/FindUBSan.cmake
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
set(UBSan_LIB_NAME UBSan)
|
||||||
|
|
||||||
|
find_library(UBSan_LIBRARY
|
||||||
|
NAMES libubsan.so libubsan.so.5 libubsan.so.4 libubsan.so.3 libubsan.so.2 libubsan.so.1 libubsan.so.0
|
||||||
|
PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib)
|
||||||
|
|
||||||
|
include(FindPackageHandleStandardArgs)
|
||||||
|
find_package_handle_standard_args(UBSan DEFAULT_MSG
|
||||||
|
UBSan_LIBRARY)
|
||||||
|
|
||||||
|
mark_as_advanced(
|
||||||
|
UBSan_LIBRARY
|
||||||
|
UBSan_LIB_NAME)
|
||||||
11
cmake/version_config.h.in
Normal file
11
cmake/version_config.h.in
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
/*!
|
||||||
|
* Copyright 2019 XGBoost contributors
|
||||||
|
*/
|
||||||
|
#ifndef XGBOOST_VERSION_CONFIG_H_
|
||||||
|
#define XGBOOST_VERSION_CONFIG_H_
|
||||||
|
|
||||||
|
#define XGBOOST_VER_MAJOR @xgboost_VERSION_MAJOR@
|
||||||
|
#define XGBOOST_VER_MINOR @xgboost_VERSION_MINOR@
|
||||||
|
#define XGBOOST_VER_PATCH @xgboost_VERSION_PATCH@
|
||||||
|
|
||||||
|
#endif // XGBOOST_VERSION_CONFIG_H_
|
||||||
5
cmake/xgboost-config.cmake.in
Normal file
5
cmake/xgboost-config.cmake.in
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
@PACKAGE_INIT@
|
||||||
|
|
||||||
|
if(NOT TARGET xgboost::xgboost)
|
||||||
|
include(${CMAKE_CURRENT_LIST_DIR}/XGBoostTargets.cmake)
|
||||||
|
endif()
|
||||||
@@ -119,6 +119,7 @@ If you have particular usecase of xgboost that you would like to highlight.
|
|||||||
Send a PR to add a one sentence description:)
|
Send a PR to add a one sentence description:)
|
||||||
|
|
||||||
- XGBoost is used in [Kaggle Script](https://www.kaggle.com/scripts) to solve data science challenges.
|
- XGBoost is used in [Kaggle Script](https://www.kaggle.com/scripts) to solve data science challenges.
|
||||||
|
- Distribute XGBoost as Rest API server from Jupyter notebook with [BentoML](https://github.com/bentoml/bentoml). [Link to notebook](https://github.com/bentoml/BentoML/blob/master/examples/xgboost-predict-titanic-survival/XGBoost-titanic-survival-prediction.ipynb)
|
||||||
- [Seldon predictive service powered by XGBoost](http://docs.seldon.io/iris-demo.html)
|
- [Seldon predictive service powered by XGBoost](http://docs.seldon.io/iris-demo.html)
|
||||||
- XGBoost Distributed is used in [ODPS Cloud Service by Alibaba](https://yq.aliyun.com/articles/6355) (in Chinese)
|
- XGBoost Distributed is used in [ODPS Cloud Service by Alibaba](https://yq.aliyun.com/articles/6355) (in Chinese)
|
||||||
- XGBoost is incoporated as part of [Graphlab Create](https://dato.com/products/create/) for scalable machine learning.
|
- XGBoost is incoporated as part of [Graphlab Create](https://dato.com/products/create/) for scalable machine learning.
|
||||||
@@ -135,6 +136,7 @@ Send a PR to add a one sentence description:)
|
|||||||
|
|
||||||
## Awards
|
## Awards
|
||||||
- [John Chambers Award](http://stat-computing.org/awards/jmc/winners.html) - 2016 Winner: XGBoost R Package, by Tong He (Simon Fraser University) and Tianqi Chen (University of Washington)
|
- [John Chambers Award](http://stat-computing.org/awards/jmc/winners.html) - 2016 Winner: XGBoost R Package, by Tong He (Simon Fraser University) and Tianqi Chen (University of Washington)
|
||||||
|
- [InfoWorld’s 2019 Technology of the Year Award](https://www.infoworld.com/article/3336072/application-development/infoworlds-2019-technology-of-the-year-award-winners.html)
|
||||||
|
|
||||||
## Windows Binaries
|
## Windows Binaries
|
||||||
Unofficial windows binaries and instructions on how to use them are hosted on [Guido Tapia's blog](http://www.picnet.com.au/blogs/guido/post/2016/09/22/xgboost-windows-x64-binaries-for-download/)
|
Unofficial windows binaries and instructions on how to use them are hosted on [Guido Tapia's blog](http://www.picnet.com.au/blogs/guido/post/2016/09/22/xgboost-windows-x64-binaries-for-download/)
|
||||||
|
|||||||
@@ -62,7 +62,7 @@ test:data = "agaricus.txt.test"
|
|||||||
We use the tree booster and logistic regression objective in our setting. This indicates that we accomplish our task using classic gradient boosting regression tree(GBRT), which is a promising method for binary classification.
|
We use the tree booster and logistic regression objective in our setting. This indicates that we accomplish our task using classic gradient boosting regression tree(GBRT), which is a promising method for binary classification.
|
||||||
|
|
||||||
The parameters shown in the example gives the most common ones that are needed to use xgboost.
|
The parameters shown in the example gives the most common ones that are needed to use xgboost.
|
||||||
If you are interested in more parameter settings, the complete parameter settings and detailed descriptions are [here](../../doc/parameter.md). Besides putting the parameters in the configuration file, we can set them by passing them as arguments as below:
|
If you are interested in more parameter settings, the complete parameter settings and detailed descriptions are [here](../../doc/parameter.rst). Besides putting the parameters in the configuration file, we can set them by passing them as arguments as below:
|
||||||
|
|
||||||
```
|
```
|
||||||
../../xgboost mushroom.conf max_depth=6
|
../../xgboost mushroom.conf max_depth=6
|
||||||
|
|||||||
4
demo/c-api/CMakeLists.txt
Normal file
4
demo/c-api/CMakeLists.txt
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
cmake_minimum_required(VERSION 3.12)
|
||||||
|
find_package(xgboost REQUIRED)
|
||||||
|
add_executable(api-demo c-api-demo.c)
|
||||||
|
target_link_libraries(api-demo xgboost::xgboost)
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user