Compare commits: release_1. ... v1.5.1
1,315 commits
.github/FUNDING.yml (vendored, 1 line changed)
@@ -1 +1,2 @@
open_collective: xgboost
custom: https://xgboost.ai/sponsors
.github/workflows/jvm_tests.yml (vendored, new file, 74 lines)
@@ -0,0 +1,74 @@
name: XGBoost-JVM-Tests

on: [push, pull_request]

jobs:
  test-with-jvm:
    name: Test JVM on OS ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [windows-latest, ubuntu-latest, macos-10.15]

    steps:
      - uses: actions/checkout@v2
        with:
          submodules: 'true'

      - uses: actions/setup-python@v2
        with:
          python-version: '3.8'
          architecture: 'x64'

      - uses: actions/setup-java@v1
        with:
          java-version: 1.8

      - name: Install Python packages
        run: |
          python -m pip install wheel setuptools
          python -m pip install awscli

      - name: Cache Maven packages
        uses: actions/cache@v2
        with:
          path: ~/.m2
          key: ${{ runner.os }}-m2-${{ hashFiles('./jvm-packages/pom.xml') }}
          restore-keys: ${{ runner.os }}-m2

      - name: Test XGBoost4J
        run: |
          cd jvm-packages
          mvn test -B -pl :xgboost4j_2.12

      - name: Extract branch name
        shell: bash
        run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
        id: extract_branch
        if: |
          (github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')) &&
          matrix.os == 'windows-latest'

      - name: Publish artifact xgboost4j.dll to S3
        run: |
          cd lib/
          Rename-Item -Path xgboost4j.dll -NewName xgboost4j_${{ github.sha }}.dll
          dir
          python -m awscli s3 cp xgboost4j_${{ github.sha }}.dll s3://xgboost-nightly-builds/${{ steps.extract_branch.outputs.branch }}/ --acl public-read
        if: |
          (github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')) &&
          matrix.os == 'windows-latest'
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID_IAM_S3_UPLOADER }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY_IAM_S3_UPLOADER }}

      - name: Test XGBoost4J-Spark
        run: |
          rm -rfv build/
          cd jvm-packages
          mvn -B test
        if: matrix.os == 'ubuntu-latest'  # Distributed training doesn't work on Windows
        env:
          RABIT_MOCK: ON
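The XGBoost4J test step above can be reproduced outside CI; a minimal local sketch, assuming JDK 8, Maven, and a checkout that includes the submodules:

    # Run only the core xgboost4j_2.12 module's tests, as the workflow does.
    # -B keeps Maven in non-interactive batch mode.
    cd jvm-packages
    mvn test -B -pl :xgboost4j_2.12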
.github/workflows/main.yml (vendored, new file, 232 lines)
@@ -0,0 +1,232 @@
# This is a basic workflow to help you get started with Actions

name: XGBoost-CI

# Controls when the action will run. Triggers the workflow on push or pull request
# events but only for the master branch
on: [push, pull_request]

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  gtest-cpu:
    name: Test Google C++ test (CPU)
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [macos-10.15]
    steps:
      - uses: actions/checkout@v2
        with:
          submodules: 'true'
      - name: Install system packages
        run: |
          # Use libomp 11.1.0: https://github.com/dmlc/xgboost/issues/7039
          wget https://raw.githubusercontent.com/Homebrew/homebrew-core/679923b4eb48a8dc7ecc1f05d06063cd79b3fc00/Formula/libomp.rb -O $(find $(brew --repository) -name libomp.rb)
          brew install ninja libomp
          brew pin libomp
      - name: Build gtest binary
        run: |
          mkdir build
          cd build
          cmake .. -DGOOGLE_TEST=ON -DUSE_OPENMP=ON -DUSE_DMLC_GTEST=ON -DPLUGIN_DENSE_PARSER=ON -GNinja
          ninja -v
      - name: Run gtest binary
        run: |
          cd build
          ./testxgboost
          ctest -R TestXGBoostCLI --extra-verbose

  gtest-cpu-nonomp:
    name: Test Google C++ unittest (CPU Non-OMP)
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
    steps:
      - uses: actions/checkout@v2
        with:
          submodules: 'true'
      - name: Install system packages
        run: |
          sudo apt-get install -y --no-install-recommends ninja-build
      - name: Build and install XGBoost
        shell: bash -l {0}
        run: |
          mkdir build
          cd build
          cmake .. -GNinja -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON -DUSE_OPENMP=OFF
          ninja -v
      - name: Run gtest binary
        run: |
          cd build
          ctest --extra-verbose

  c-api-demo:
    name: Test installing XGBoost lib + building the C API demo
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: ["ubuntu-latest"]
        python-version: ["3.8"]
    steps:
      - uses: actions/checkout@v2
        with:
          submodules: 'true'
      - name: Install system packages
        run: |
          sudo apt-get install -y --no-install-recommends ninja-build
      - uses: conda-incubator/setup-miniconda@v2
        with:
          auto-update-conda: true
          python-version: ${{ matrix.python-version }}
          activate-environment: test
      - name: Display Conda env
        shell: bash -l {0}
        run: |
          conda info
          conda list
      - name: Build and install XGBoost static library
        shell: bash -l {0}
        run: |
          mkdir build
          cd build
          cmake .. -DBUILD_STATIC_LIB=ON -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX -GNinja
          ninja -v install
          cd -
      - name: Build and run C API demo with static
        shell: bash -l {0}
        run: |
          pushd .
          cd demo/c-api/
          mkdir build
          cd build
          cmake .. -GNinja -DCMAKE_PREFIX_PATH=$CONDA_PREFIX
          ninja -v
          ctest
          cd ..
          rm -rf ./build
          popd
      - name: Build and install XGBoost shared library
        shell: bash -l {0}
        run: |
          cd build
          cmake .. -DBUILD_STATIC_LIB=OFF -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX -GNinja
          ninja -v install
          cd -
      - name: Build and run C API demo with shared
        shell: bash -l {0}
        run: |
          pushd .
          cd demo/c-api/
          mkdir build
          cd build
          cmake .. -GNinja -DCMAKE_PREFIX_PATH=$CONDA_PREFIX
          ninja -v
          ctest
          popd
          ./tests/ci_build/verify_link.sh ./demo/c-api/build/basic/api-demo
          ./tests/ci_build/verify_link.sh ./demo/c-api/build/external-memory/external-memory-demo

  lint:
    runs-on: ubuntu-latest
    name: Code linting for Python and C++
    steps:
      - uses: actions/checkout@v2
        with:
          submodules: 'true'
      - uses: actions/setup-python@v2
        with:
          python-version: '3.7'
          architecture: 'x64'
      - name: Install Python packages
        run: |
          python -m pip install wheel setuptools
          python -m pip install pylint cpplint numpy scipy scikit-learn
      - name: Run lint
        run: |
          make lint

  mypy:
    runs-on: ubuntu-latest
    name: Type checking for Python
    steps:
      - uses: actions/checkout@v2
        with:
          submodules: 'true'
      - uses: actions/setup-python@v2
        with:
          python-version: '3.7'
          architecture: 'x64'
      - name: Install Python packages
        run: |
          python -m pip install wheel setuptools mypy pandas dask[complete] distributed
      - name: Run mypy
        run: |
          make mypy

  doxygen:
    runs-on: ubuntu-latest
    name: Generate C/C++ API doc using Doxygen
    steps:
      - uses: actions/checkout@v2
        with:
          submodules: 'true'
      - uses: actions/setup-python@v2
        with:
          python-version: '3.7'
          architecture: 'x64'
      - name: Install system packages
        run: |
          sudo apt-get install -y --no-install-recommends doxygen graphviz ninja-build
          python -m pip install wheel setuptools
          python -m pip install awscli
      - name: Run Doxygen
        run: |
          mkdir build
          cd build
          cmake .. -DBUILD_C_DOC=ON -GNinja
          ninja -v doc_doxygen
      - name: Extract branch name
        shell: bash
        run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
        id: extract_branch
        if: github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')
      - name: Publish
        run: |
          cd build/
          tar cvjf ${{ steps.extract_branch.outputs.branch }}.tar.bz2 doc_doxygen/
          python -m awscli s3 cp ./${{ steps.extract_branch.outputs.branch }}.tar.bz2 s3://xgboost-docs/doxygen/ --acl public-read
        if: github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID_IAM_S3_UPLOADER }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY_IAM_S3_UPLOADER }}

  sphinx:
    runs-on: ubuntu-latest
    name: Build docs using Sphinx
    steps:
      - uses: actions/checkout@v2
        with:
          submodules: 'true'
      - uses: actions/setup-python@v2
        with:
          python-version: '3.8'
          architecture: 'x64'
      - name: Install system packages
        run: |
          sudo apt-get install -y --no-install-recommends graphviz
          python -m pip install wheel setuptools
          python -m pip install -r doc/requirements.txt
      - name: Extract branch name
        shell: bash
        run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
        id: extract_branch
        if: github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')
      - name: Run Sphinx
        run: |
          make -C doc html
        env:
          SPHINX_GIT_BRANCH: ${{ steps.extract_branch.outputs.branch }}
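The gtest-cpu job above amounts to a standard CMake/Ninja build of the test binary; a local sketch, assuming CMake, Ninja, a C++ toolchain, and on macOS the pinned libomp from the install step:

    mkdir build && cd build
    # GOOGLE_TEST builds the ./testxgboost binary; USE_DMLC_GTEST uses the
    # Google Test copy bundled with the dmlc-core submodule.
    cmake .. -DGOOGLE_TEST=ON -DUSE_OPENMP=ON -DUSE_DMLC_GTEST=ON -DPLUGIN_DENSE_PARSER=ON -GNinja
    ninja -v
    ./testxgboost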
.github/workflows/python_tests.yml (vendored, new file, 125 lines)
@@ -0,0 +1,125 @@
name: XGBoost-Python-Tests

on: [push, pull_request]

jobs:
  python-sdist-test:
    runs-on: ${{ matrix.os }}
    name: Test installing XGBoost Python source package on ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, macos-10.15, windows-latest]
        python-version: ["3.8"]
    steps:
      - uses: actions/checkout@v2
        with:
          submodules: 'true'
      - name: Install osx system dependencies
        if: matrix.os == 'macos-10.15'
        run: |
          # Use libomp 11.1.0: https://github.com/dmlc/xgboost/issues/7039
          wget https://raw.githubusercontent.com/Homebrew/homebrew-core/679923b4eb48a8dc7ecc1f05d06063cd79b3fc00/Formula/libomp.rb -O $(find $(brew --repository) -name libomp.rb)
          brew install ninja libomp
          brew pin libomp
      - name: Install Ubuntu system dependencies
        if: matrix.os == 'ubuntu-latest'
        run: |
          sudo apt-get install -y --no-install-recommends ninja-build
      - uses: conda-incubator/setup-miniconda@v2
        with:
          auto-update-conda: true
          python-version: ${{ matrix.python-version }}
          activate-environment: test
      - name: Display Conda env
        shell: bash -l {0}
        run: |
          conda info
          conda list
      - name: Build and install XGBoost
        shell: bash -l {0}
        run: |
          cd python-package
          python --version
          python setup.py sdist
          pip install -v ./dist/xgboost-*.tar.gz
          cd ..
          python -c 'import xgboost'

  python-tests:
    name: Test XGBoost Python package on ${{ matrix.config.os }}
    runs-on: ${{ matrix.config.os }}
    strategy:
      matrix:
        config:
          - {os: windows-2016, python-version: '3.8'}
          - {os: macos-10.15, python-version: '3.8'}

    steps:
      - uses: actions/checkout@v2
        with:
          submodules: 'true'

      - uses: conda-incubator/setup-miniconda@v2
        with:
          auto-update-conda: true
          python-version: ${{ matrix.config.python-version }}
          activate-environment: win64_test
          environment-file: tests/ci_build/conda_env/win64_cpu_test.yml

      - name: Display Conda env
        shell: bash -l {0}
        run: |
          conda info
          conda list

      - name: Build XGBoost on Windows
        shell: bash -l {0}
        if: matrix.config.os == 'windows-2016'
        run: |
          mkdir build_msvc
          cd build_msvc
          cmake .. -G"Visual Studio 15 2017" -DCMAKE_CONFIGURATION_TYPES="Release" -A x64 -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON
          cmake --build . --config Release --parallel $(nproc)

      - name: Build XGBoost on macos
        if: matrix.config.os == 'macos-10.15'
        run: |
          wget https://raw.githubusercontent.com/Homebrew/homebrew-core/679923b4eb48a8dc7ecc1f05d06063cd79b3fc00/Formula/libomp.rb -O $(find $(brew --repository) -name libomp.rb)
          brew install ninja libomp
          brew pin libomp

          mkdir build
          cd build
          cmake .. -GNinja -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON
          ninja

      - name: Install Python package
        shell: bash -l {0}
        run: |
          cd python-package
          python --version
          python setup.py bdist_wheel --universal
          pip install ./dist/*.whl

      - name: Test Python package
        shell: bash -l {0}
        run: |
          pytest -s -v ./tests/python

      - name: Rename Python wheel
        shell: bash -l {0}
        if: matrix.config.os == 'macos-10.15'
        run: |
          TAG=macosx_10_15_x86_64.macosx_11_0_x86_64.macosx_12_0_x86_64
          python tests/ci_build/rename_whl.py python-package/dist/*.whl ${{ github.sha }} ${TAG}

      - name: Upload Python wheel
        shell: bash -l {0}
        if: |
          (github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')) &&
          matrix.os == 'macos-latest'
        run: |
          python -m awscli s3 cp python-package/dist/*.whl s3://xgboost-nightly-builds/${{ steps.extract_branch.outputs.branch }}/ --acl public-read
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID_IAM_S3_UPLOADER }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY_IAM_S3_UPLOADER }}
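The python-sdist-test job boils down to building the source distribution and installing it; a local sketch, assuming a C++ toolchain and Python 3.8:

    cd python-package
    python setup.py sdist                    # build the source tarball
    pip install -v ./dist/xgboost-*.tar.gz   # compiles libxgboost during install
    cd ..
    python -c 'import xgboost'               # smoke-test the installed package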
.github/workflows/r_nold.yml (vendored, new file, 44 lines)
@@ -0,0 +1,44 @@
# Run R tests with noLD R. Only triggered by a pull request review
# See discussion at https://github.com/dmlc/xgboost/pull/6378

name: XGBoost-R-noLD

on:
  pull_request_review_comment:
    types: [created]

env:
  R_PACKAGES: c('XML', 'igraph', 'data.table', 'ggplot2', 'DiagrammeR', 'Ckmeans.1d.dp', 'vcd', 'testthat', 'lintr', 'knitr', 'rmarkdown', 'e1071', 'cplm', 'devtools', 'float', 'titanic')

jobs:
  test-R-noLD:
    if: github.event.comment.body == '/gha run r-nold-test' && contains('OWNER,MEMBER,COLLABORATOR', github.event.comment.author_association)
    timeout-minutes: 120
    runs-on: ubuntu-latest
    container: rhub/debian-gcc-devel-nold
    steps:
      - name: Install git and system packages
        shell: bash
        run: |
          apt-get update && apt-get install -y git libcurl4-openssl-dev libssl-dev libssh2-1-dev libgit2-dev libxml2-dev

      - uses: actions/checkout@v2
        with:
          submodules: 'true'

      - name: Install dependencies
        shell: bash
        run: |
          cat > install_libs.R <<EOT
          install.packages(${{ env.R_PACKAGES }},
                           repos = 'http://cloud.r-project.org',
                           dependencies = c('Depends', 'Imports', 'LinkingTo'))
          EOT
          /tmp/R-devel/bin/Rscript install_libs.R

      - name: Run R tests
        shell: bash
        run: |
          cd R-package && \
          /tmp/R-devel/bin/R CMD INSTALL . && \
          /tmp/R-devel/bin/R -q -e "library(testthat); setwd('tests'); source('testthat.R')"
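Note the two-part guard on test-R-noLD: the workflow only listens for pull_request_review_comment events, and the if: condition only passes when a repository owner, member, or collaborator posts the exact comment /gha run r-nold-test, so the expensive noLD container run never starts automatically.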
.github/workflows/r_tests.yml (vendored, new file, 149 lines)
@@ -0,0 +1,149 @@
name: XGBoost-R-Tests

on: [push, pull_request]

env:
  R_PACKAGES: c('XML', 'data.table', 'ggplot2', 'DiagrammeR', 'Ckmeans.1d.dp', 'vcd', 'testthat', 'lintr', 'knitr', 'rmarkdown', 'e1071', 'cplm', 'devtools', 'float', 'titanic')
  GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }}

jobs:
  lintr:
    runs-on: ${{ matrix.config.os }}
    name: Run R linters on OS ${{ matrix.config.os }}, R ${{ matrix.config.r }}, Compiler ${{ matrix.config.compiler }}, Build ${{ matrix.config.build }}
    strategy:
      matrix:
        config:
          - {os: windows-latest, r: 'release', compiler: 'mingw', build: 'autotools'}
    env:
      R_REMOTES_NO_ERRORS_FROM_WARNINGS: true
      RSPM: ${{ matrix.config.rspm }}

    steps:
      - uses: actions/checkout@v2
        with:
          submodules: 'true'

      - uses: r-lib/actions/setup-r@master
        with:
          r-version: ${{ matrix.config.r }}

      - name: Cache R packages
        uses: actions/cache@v2
        with:
          path: ${{ env.R_LIBS_USER }}
          key: ${{ runner.os }}-r-${{ matrix.config.r }}-2-${{ hashFiles('R-package/DESCRIPTION') }}
          restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-2-${{ hashFiles('R-package/DESCRIPTION') }}

      - name: Install dependencies
        shell: Rscript {0}
        run: |
          install.packages(${{ env.R_PACKAGES }},
                           repos = 'http://cloud.r-project.org',
                           dependencies = c('Depends', 'Imports', 'LinkingTo'))
      - name: Install igraph on Windows
        shell: Rscript {0}
        if: matrix.config.os == 'windows-latest'
        run: |
          install.packages('igraph', type='binary')

      - name: Run lintr
        run: |
          cd R-package
          R.exe CMD INSTALL .
          Rscript.exe tests/helper_scripts/run_lint.R

  test-with-R:
    runs-on: ${{ matrix.config.os }}
    name: Test R on OS ${{ matrix.config.os }}, R ${{ matrix.config.r }}, Compiler ${{ matrix.config.compiler }}, Build ${{ matrix.config.build }}
    strategy:
      fail-fast: false
      matrix:
        config:
          - {os: windows-2016, r: 'release', compiler: 'mingw', build: 'autotools'}
          - {os: windows-2016, r: 'release', compiler: 'msvc', build: 'cmake'}
          - {os: windows-2016, r: 'release', compiler: 'mingw', build: 'cmake'}
    env:
      R_REMOTES_NO_ERRORS_FROM_WARNINGS: true
      RSPM: ${{ matrix.config.rspm }}

    steps:
      - uses: actions/checkout@v2
        with:
          submodules: 'true'

      - uses: r-lib/actions/setup-r@master
        with:
          r-version: ${{ matrix.config.r }}

      - name: Cache R packages
        uses: actions/cache@v2
        with:
          path: ${{ env.R_LIBS_USER }}
          key: ${{ runner.os }}-r-${{ matrix.config.r }}-2-${{ hashFiles('R-package/DESCRIPTION') }}
          restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-2-${{ hashFiles('R-package/DESCRIPTION') }}

      - name: Install dependencies
        shell: Rscript {0}
        run: |
          install.packages(${{ env.R_PACKAGES }},
                           repos = 'http://cloud.r-project.org',
                           dependencies = c('Depends', 'Imports', 'LinkingTo'))
      - name: Install igraph on Windows
        shell: Rscript {0}
        if: matrix.config.os == 'windows-2016'
        run: |
          install.packages('igraph', type='binary', dependencies = c('Depends', 'Imports', 'LinkingTo'))

      - uses: actions/setup-python@v2
        with:
          python-version: '3.7'
          architecture: 'x64'

      - name: Test R
        run: |
          python tests/ci_build/test_r_package.py --compiler='${{ matrix.config.compiler }}' --build-tool='${{ matrix.config.build }}'

  test-R-CRAN:
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        config:
          - {r: 'release'}

    steps:
      - uses: actions/checkout@v2
        with:
          submodules: 'true'

      - uses: r-lib/actions/setup-r@master
        with:
          r-version: ${{ matrix.config.r }}

      - uses: r-lib/actions/setup-tinytex@master

      - name: Install system packages
        run: |
          sudo apt-get update && sudo apt-get install libcurl4-openssl-dev libssl-dev libssh2-1-dev libgit2-dev pandoc pandoc-citeproc libglpk-dev

      - name: Cache R packages
        uses: actions/cache@v2
        with:
          path: ${{ env.R_LIBS_USER }}
          key: ${{ runner.os }}-r-${{ matrix.config.r }}-2-${{ hashFiles('R-package/DESCRIPTION') }}
          restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-2-${{ hashFiles('R-package/DESCRIPTION') }}

      - name: Install dependencies
        shell: Rscript {0}
        run: |
          install.packages(${{ env.R_PACKAGES }},
                           repos = 'http://cloud.r-project.org',
                           dependencies = c('Depends', 'Imports', 'LinkingTo'))
          install.packages('igraph', repos = 'http://cloud.r-project.org', dependencies = c('Depends', 'Imports', 'LinkingTo'))

      - name: Check R Package
        run: |
          # Print stacktrace upon success or failure
          make Rcheck || tests/ci_build/print_r_stacktrace.sh fail
          tests/ci_build/print_r_stacktrace.sh success
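On a Unix-like machine the lintr job's final step corresponds roughly to the following (a sketch; the Windows runner invokes R.exe and Rscript.exe instead):

    cd R-package
    R CMD INSTALL .
    Rscript tests/helper_scripts/run_lint.R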
.gitignore (vendored, 22 lines changed)
@@ -51,6 +51,7 @@ Debug
#.Rbuildignore
R-package.Rproj
*.cache*
.mypy_cache/
# java
java/xgboost4j/target
java/xgboost4j/tmp
@@ -70,6 +71,7 @@ build
build_plugin
recommonmark/
tags
TAGS
*.class
target
*.swp
@@ -92,6 +94,7 @@ metastore_db
# files from R-package source install
**/config.status
R-package/src/Makevars
*.lib

# Visual Studio Code
/.vscode/
@@ -103,3 +106,22 @@ R-package/src/Makevars

# GDB
.gdb_history

# Python joblib.Memory used in pytest.
cachedir/

# Files from local Dask work
dask-worker-space/

# Jupyter notebook checkpoints
.ipynb_checkpoints/

# credentials and key material
config
credentials
credentials.csv
*.env
*.pem
*.pub
*.rdp
*_rsa
.gitmodules (vendored, 7 lines changed)
@@ -1,9 +1,10 @@
[submodule "dmlc-core"]
    path = dmlc-core
    url = https://github.com/dmlc/dmlc-core
[submodule "rabit"]
    path = rabit
    url = https://github.com/dmlc/rabit
    branch = main
[submodule "cub"]
    path = cub
    url = https://github.com/NVlabs/cub
[submodule "gputreeshap"]
    path = gputreeshap
    url = https://github.com/rapidsai/gputreeshap.git
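Every workflow above checks out with submodules: 'true', and the submodule list here is the reason; the local equivalent is:

    # Clone XGBoost together with its dmlc-core, rabit, and gputreeshap submodules.
    git clone --recursive https://github.com/dmlc/xgboost
    # Or, inside an existing checkout:
    git submodule update --init --recursive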
.travis.yml (46 lines changed)
@@ -1,52 +1,30 @@
# disable sudo for container build.
sudo: required

# Enabling test OS X
os:
  - linux
  - osx

osx_image: xcode10.3
dist: bionic

# Use Build Matrix to do lint and build separately
env:
  matrix:
    # python package test
    - TASK=python_test
    # test installation of Python source distribution
    - TASK=python_sdist_test
    # java package test
    - TASK=java_test
    # cmake test
    - TASK=cmake_test
  global:
    - secure: "lqkL5SCM/CBwgVb1GWoOngpojsa0zCSGcvF0O3/45rBT1EpNYtQ4LRJ1+XcHi126vdfGoim/8i7AQhn5eOgmZI8yAPBeoUZ5zSrejD3RUpXr2rXocsvRRP25Z4mIuAGHD9VAHtvTdhBZRVV818W02pYduSzAeaY61q/lU3xmWsE="
    - secure: "mzms6X8uvdhRWxkPBMwx+mDl3d+V1kUpZa7UgjT+dr4rvZMzvKtjKp/O0JZZVogdgZjUZf444B98/7AvWdSkGdkfz2QdmhWmXzNPfNuHtmfCYMdijsgFIGLuD3GviFL/rBiM2vgn32T3QqFiEJiC5StparnnXimPTc9TpXQRq5c="

matrix:
  exclude:

jobs:
  include:
    - os: linux
      env: TASK=python_test
    - os: linux
      env: TASK=java_test
    - os: linux
      env: TASK=cmake_test
      arch: s390x
      env: TASK=s390x_test

# dependent brew packages
# the dependencies from homebrew are installed manually from the setup script due to the outdated image on Travis.
addons:
  homebrew:
    update: false
  apt:
    packages:
      - cmake
      - libomp
      - graphviz
      - openssl
      - libgit2
      - wget
      - r
    update: true
      - unzip

before_install:
  - source tests/travis/travis_setup_env.sh
  - if [ "${TASK}" != "python_sdist_test" ]; then export PYTHONPATH=${PYTHONPATH}:${PWD}/python-package; fi
  - echo "MAVEN_OPTS='-Xmx2g -XX:MaxPermSize=1024m -XX:ReservedCodeCacheSize=512m -Dorg.slf4j.simpleLogger.defaultLogLevel=error'" > ~/.mavenrc

install:
  - source tests/travis/setup.sh
205
CMakeLists.txt
205
CMakeLists.txt
@@ -1,9 +1,10 @@
|
||||
cmake_minimum_required(VERSION 3.13)
|
||||
project(xgboost LANGUAGES CXX C VERSION 1.1.0)
|
||||
cmake_minimum_required(VERSION 3.14 FATAL_ERROR)
|
||||
project(xgboost LANGUAGES CXX C VERSION 1.5.1)
|
||||
include(cmake/Utils.cmake)
|
||||
list(APPEND CMAKE_MODULE_PATH "${xgboost_SOURCE_DIR}/cmake/modules")
|
||||
cmake_policy(SET CMP0022 NEW)
|
||||
cmake_policy(SET CMP0079 NEW)
|
||||
set(CMAKE_POLICY_DEFAULT_CMP0063 NEW)
|
||||
cmake_policy(SET CMP0063 NEW)
|
||||
|
||||
if ((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUAL 3.13))
|
||||
@@ -23,17 +24,23 @@ write_version()
|
||||
set_default_configuration_release()
|
||||
|
||||
#-- Options
|
||||
## User options
|
||||
option(BUILD_C_DOC "Build documentation for C APIs using Doxygen." OFF)
|
||||
option(USE_OPENMP "Build with OpenMP support." ON)
|
||||
option(BUILD_STATIC_LIB "Build static library" OFF)
|
||||
option(RABIT_BUILD_MPI "Build MPI" OFF)
|
||||
## Bindings
|
||||
option(JVM_BINDINGS "Build JVM bindings" OFF)
|
||||
option(R_LIB "Build shared library for R package" OFF)
|
||||
## Dev
|
||||
option(USE_DEBUG_OUTPUT "Dump internal training results like gradients and predictions to stdout.
|
||||
Should only be used for debugging." OFF)
|
||||
option(FORCE_COLORED_OUTPUT "Force colored output from compilers, useful when ninja is used instead of make." OFF)
|
||||
option(ENABLE_ALL_WARNINGS "Enable all compiler warnings. Only effective for GCC/Clang" OFF)
|
||||
option(LOG_CAPI_INVOCATION "Log all C API invocations for debugging" OFF)
|
||||
option(GOOGLE_TEST "Build google tests" OFF)
|
||||
option(USE_DMLC_GTEST "Use google tests bundled with dmlc-core submodule" OFF)
|
||||
option(USE_DEVICE_DEBUG "Generate CUDA device debug info." OFF)
|
||||
option(USE_NVTX "Build with cuda profiling annotations. Developers only." OFF)
|
||||
set(NVTX_HEADER_DIR "" CACHE PATH "Path to the stand-alone nvtx header")
|
||||
option(RABIT_MOCK "Build rabit with mock" OFF)
|
||||
@@ -42,6 +49,7 @@ option(HIDE_CXX_SYMBOLS "Build shared library and hide all C++ symbols" OFF)
|
||||
option(USE_CUDA "Build with GPU acceleration" OFF)
|
||||
option(USE_NCCL "Build with NCCL to enable distributed GPU support." OFF)
|
||||
option(BUILD_WITH_SHARED_NCCL "Build with shared NCCL library." OFF)
|
||||
option(BUILD_WITH_CUDA_CUB "Build with cub in CUDA installation" OFF)
|
||||
set(GPU_COMPUTE_VER "" CACHE STRING
|
||||
"Semicolon separated list of compute versions to be built against, e.g. '35;61'")
|
||||
## Copied From dmlc
|
||||
@@ -55,8 +63,11 @@ set(ENABLED_SANITIZERS "address" "leak" CACHE STRING
|
||||
"Semicolon separated list of sanitizer names. E.g 'address;leak'. Supported sanitizers are
|
||||
address, leak, undefined and thread.")
|
||||
## Plugins
|
||||
option(PLUGIN_LZ4 "Build lz4 plugin" OFF)
|
||||
option(PLUGIN_DENSE_PARSER "Build dense parser plugin" OFF)
|
||||
option(PLUGIN_RMM "Build with RAPIDS Memory Manager (RMM)" OFF)
|
||||
## TODO: 1. Add check if DPC++ compiler is used for building
|
||||
option(PLUGIN_UPDATER_ONEAPI "DPC++ updater" OFF)
|
||||
option(ADD_PKGCONFIG "Add xgboost.pc into system." ON)
|
||||
|
||||
#-- Checks for building XGBoost
if (USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug)))
@@ -65,6 +76,9 @@ endif (USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug)))
if (USE_NCCL AND NOT (USE_CUDA))
message(SEND_ERROR "`USE_NCCL` must be enabled with `USE_CUDA` flag.")
endif (USE_NCCL AND NOT (USE_CUDA))
if (USE_DEVICE_DEBUG AND NOT (USE_CUDA))
message(SEND_ERROR "`USE_DEVICE_DEBUG` must be enabled with `USE_CUDA` flag.")
endif (USE_DEVICE_DEBUG AND NOT (USE_CUDA))
if (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
message(SEND_ERROR "Build XGBoost with -DUSE_NCCL=ON to enable BUILD_WITH_SHARED_NCCL.")
endif (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
@@ -78,6 +92,29 @@ endif (R_LIB AND GOOGLE_TEST)
if (USE_AVX)
message(SEND_ERROR "The option 'USE_AVX' is deprecated as experimental AVX features have been removed from XGBoost.")
endif (USE_AVX)
if (PLUGIN_LZ4)
message(SEND_ERROR "The option 'PLUGIN_LZ4' is removed from XGBoost.")
endif (PLUGIN_LZ4)
if (PLUGIN_RMM AND NOT (USE_CUDA))
message(SEND_ERROR "`PLUGIN_RMM` must be enabled with `USE_CUDA` flag.")
endif (PLUGIN_RMM AND NOT (USE_CUDA))
if (PLUGIN_RMM AND NOT ((CMAKE_CXX_COMPILER_ID STREQUAL "Clang") OR (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")))
message(SEND_ERROR "`PLUGIN_RMM` must be used with GCC or Clang compiler.")
endif (PLUGIN_RMM AND NOT ((CMAKE_CXX_COMPILER_ID STREQUAL "Clang") OR (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")))
if (PLUGIN_RMM AND NOT (CMAKE_SYSTEM_NAME STREQUAL "Linux"))
message(SEND_ERROR "`PLUGIN_RMM` must be used with Linux.")
endif (PLUGIN_RMM AND NOT (CMAKE_SYSTEM_NAME STREQUAL "Linux"))
if (ENABLE_ALL_WARNINGS)
if ((NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang") AND (NOT CMAKE_CXX_COMPILER_ID STREQUAL "GNU"))
message(SEND_ERROR "ENABLE_ALL_WARNINGS is only available for Clang and GCC.")
endif ((NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang") AND (NOT CMAKE_CXX_COMPILER_ID STREQUAL "GNU"))
endif (ENABLE_ALL_WARNINGS)
if (BUILD_STATIC_LIB AND (R_LIB OR JVM_BINDINGS))
message(SEND_ERROR "Cannot build a static library libxgboost.a when R or JVM packages are enabled.")
endif (BUILD_STATIC_LIB AND (R_LIB OR JVM_BINDINGS))
if (PLUGIN_RMM AND (NOT BUILD_WITH_CUDA_CUB))
message(SEND_ERROR "Cannot build with RMM using cub submodule.")
endif (PLUGIN_RMM AND (NOT BUILD_WITH_CUDA_CUB))

#-- Sanitizer
if (USE_SANITIZER)
@@ -86,17 +123,30 @@ if (USE_SANITIZER)
endif (USE_SANITIZER)

if (USE_CUDA)
SET(USE_OPENMP ON CACHE BOOL "CUDA requires OpenMP" FORCE)
set(USE_OPENMP ON CACHE BOOL "CUDA requires OpenMP" FORCE)
# `export CXX=' is ignored by CMake CUDA.
set(CMAKE_CUDA_HOST_COMPILER ${CMAKE_CXX_COMPILER})
message(STATUS "Configured CUDA host compiler: ${CMAKE_CUDA_HOST_COMPILER}")

enable_language(CUDA)
if (${CMAKE_CUDA_COMPILER_VERSION} VERSION_LESS 10.1)
message(FATAL_ERROR "CUDA version must be at least 10.1!")
endif()
set(GEN_CODE "")
format_gencode_flags("${GPU_COMPUTE_VER}" GEN_CODE)
message(STATUS "CUDA GEN_CODE: ${GEN_CODE}")
add_subdirectory(${PROJECT_SOURCE_DIR}/gputreeshap)

if ((${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 11.4) AND (NOT BUILD_WITH_CUDA_CUB))
message(SEND_ERROR "`BUILD_WITH_CUDA_CUB` should be set to `ON` for CUDA >= 11.4")
endif ()
endif (USE_CUDA)

if (FORCE_COLORED_OUTPUT AND (CMAKE_GENERATOR STREQUAL "Ninja") AND
((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") OR
(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")))
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-color=always")
endif()

find_package(Threads REQUIRED)

if (USE_OPENMP)
@@ -108,90 +158,82 @@ if (USE_OPENMP)
find_package(OpenMP REQUIRED)
endif (USE_OPENMP)

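Putting the CUDA checks above together: USE_NCCL and USE_DEVICE_DEBUG require USE_CUDA, the toolkit must be at least 10.1, and for CUDA 11.4+ the cub shipped with the toolkit must be used. A plausible GPU configure line under those constraints (illustrative only, not a command from this diff) is:

    # Hedged sketch: '75' is an example compute capability for GPU_COMPUTE_VER.
    cmake .. -DUSE_CUDA=ON -DUSE_NCCL=ON -DGPU_COMPUTE_VER="75" -DBUILD_WITH_CUDA_CUB=ON
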
if (USE_NCCL)
find_package(Nccl REQUIRED)
endif (USE_NCCL)

# dmlc-core
msvc_use_static_runtime()
add_subdirectory(${xgboost_SOURCE_DIR}/dmlc-core)
set_target_properties(dmlc PROPERTIES
CXX_STANDARD 11
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON)
list(APPEND LINKED_LIBRARIES_PRIVATE dmlc)

if (MSVC)
if (TARGET dmlc_unit_tests)
target_compile_options(dmlc_unit_tests PRIVATE
-D_CRT_SECURE_NO_WARNINGS -D_CRT_SECURE_NO_DEPRECATE)
endif (TARGET dmlc_unit_tests)
endif (MSVC)

# rabit
set(RABIT_BUILD_DMLC OFF)
set(DMLC_ROOT ${xgboost_SOURCE_DIR}/dmlc-core)
set(RABIT_WITH_R_LIB ${R_LIB})
add_subdirectory(rabit)
if (RABIT_BUILD_MPI)
find_package(MPI REQUIRED)
endif (RABIT_BUILD_MPI)

if (RABIT_MOCK)
list(APPEND LINKED_LIBRARIES_PRIVATE rabit_mock_static)
else()
list(APPEND LINKED_LIBRARIES_PRIVATE rabit)
endif(RABIT_MOCK)
foreach(lib rabit rabit_base rabit_empty rabit_mock rabit_mock_static)
# Explicitly link dmlc to rabit, so that configured header (build_config.h)
# from dmlc is correctly applied to rabit.
if (TARGET ${lib})
target_link_libraries(${lib} dmlc ${CMAKE_THREAD_LIBS_INIT})
if (HIDE_CXX_SYMBOLS) # Hide all C++ symbols from Rabit
set_target_properties(${lib} PROPERTIES CXX_VISIBILITY_PRESET hidden)
endif (HIDE_CXX_SYMBOLS)
endif (TARGET ${lib})
endforeach()
# core xgboost
add_subdirectory(${xgboost_SOURCE_DIR}/src)
target_link_libraries(objxgboost PUBLIC dmlc)

# Exports some R specific definitions and objects
if (R_LIB)
add_subdirectory(${xgboost_SOURCE_DIR}/R-package)
endif (R_LIB)

# core xgboost
list(APPEND LINKED_LIBRARIES_PRIVATE Threads::Threads ${CMAKE_THREAD_LIBS_INIT})
add_subdirectory(${xgboost_SOURCE_DIR}/plugin)
add_subdirectory(${xgboost_SOURCE_DIR}/src)
target_link_libraries(objxgboost PUBLIC dmlc)
set(XGBOOST_OBJ_SOURCES "${XGBOOST_OBJ_SOURCES};$<TARGET_OBJECTS:objxgboost>")

#-- library
if (BUILD_STATIC_LIB)
add_library(xgboost STATIC ${XGBOOST_OBJ_SOURCES})
else (BUILD_STATIC_LIB)
add_library(xgboost SHARED ${XGBOOST_OBJ_SOURCES})
endif (BUILD_STATIC_LIB)

#-- Hide all C++ symbols
if (HIDE_CXX_SYMBOLS)
set_target_properties(objxgboost PROPERTIES CXX_VISIBILITY_PRESET hidden)
set_target_properties(xgboost PROPERTIES CXX_VISIBILITY_PRESET hidden)
endif (HIDE_CXX_SYMBOLS)

target_include_directories(xgboost
INTERFACE
$<INSTALL_INTERFACE:${CMAKE_INSTALL_PREFIX}/include>
$<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}/include>)
target_link_libraries(xgboost PRIVATE ${LINKED_LIBRARIES_PRIVATE})

# This creates its own shared library `xgboost4j'.
if (JVM_BINDINGS)
add_subdirectory(${xgboost_SOURCE_DIR}/jvm-packages)
endif (JVM_BINDINGS)

# Plugin
add_subdirectory(${xgboost_SOURCE_DIR}/plugin)

#-- library
if (BUILD_STATIC_LIB)
add_library(xgboost STATIC)
else (BUILD_STATIC_LIB)
add_library(xgboost SHARED)
endif (BUILD_STATIC_LIB)
target_link_libraries(xgboost PRIVATE objxgboost)
target_include_directories(xgboost
INTERFACE
$<INSTALL_INTERFACE:$<INSTALL_PREFIX>/include>
$<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}/include>)
#-- End shared library

#-- CLI for xgboost
add_executable(runxgboost ${xgboost_SOURCE_DIR}/src/cli_main.cc ${XGBOOST_OBJ_SOURCES})

add_executable(runxgboost ${xgboost_SOURCE_DIR}/src/cli_main.cc)
target_link_libraries(runxgboost PRIVATE objxgboost)
target_include_directories(runxgboost
PRIVATE
${xgboost_SOURCE_DIR}/include
${xgboost_SOURCE_DIR}/dmlc-core/include
${xgboost_SOURCE_DIR}/rabit/include)
target_link_libraries(runxgboost PRIVATE ${LINKED_LIBRARIES_PRIVATE})
set_target_properties(
runxgboost PROPERTIES
OUTPUT_NAME xgboost
CXX_STANDARD 11
CXX_STANDARD_REQUIRED ON)
${xgboost_SOURCE_DIR}/rabit/include
)
set_target_properties(runxgboost PROPERTIES OUTPUT_NAME xgboost)
#-- End CLI for xgboost

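Because set_output_directory() below places runxgboost at the source root under the name xgboost, the CLI can be invoked directly after a build. A minimal sketch (the config file name here is a placeholder, not a file defined in this diff):

    # 'train.conf' is hypothetical; the CLI reads key=value training configuration files.
    ./xgboost train.conf
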
# Common setup for all targets
foreach(target xgboost objxgboost dmlc runxgboost)
xgboost_target_properties(${target})
xgboost_target_link_libraries(${target})
xgboost_target_defs(${target})
endforeach()

if (JVM_BINDINGS)
xgboost_target_properties(xgboost4j)
xgboost_target_link_libraries(xgboost4j)
xgboost_target_defs(xgboost4j)
endif (JVM_BINDINGS)

set_output_directory(runxgboost ${xgboost_SOURCE_DIR})
set_output_directory(xgboost ${xgboost_SOURCE_DIR}/lib)
# Ensure these two targets do not build simultaneously, as they produce outputs with conflicting names
@@ -199,11 +241,12 @@ add_dependencies(xgboost runxgboost)

#-- Installing XGBoost
if (R_LIB)
include(cmake/RPackageInstallTargetSetup.cmake)
set_target_properties(xgboost PROPERTIES PREFIX "")
if (APPLE)
set_target_properties(xgboost PROPERTIES SUFFIX ".so")
endif (APPLE)
setup_rpackage_install_target(xgboost ${CMAKE_CURRENT_BINARY_DIR})
setup_rpackage_install_target(xgboost "${CMAKE_CURRENT_BINARY_DIR}/R-package-install")
set(CMAKE_INSTALL_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/dummy_inst")
endif (R_LIB)
if (MINGW)
@@ -215,12 +258,27 @@ if (BUILD_C_DOC)
run_doxygen()
endif (BUILD_C_DOC)

include(CPack)

include(GNUInstallDirs)
# Install all headers. Please note that currently the C++ headers do not form an "API".
install(DIRECTORY ${xgboost_SOURCE_DIR}/include/xgboost
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})

install(TARGETS xgboost runxgboost
# Install libraries. If `xgboost` is a static lib, specify `objxgboost` also, to avoid the
# following error:
#
# > install(EXPORT ...) includes target "xgboost" which requires target "objxgboost" that is not
# > in any export set.
#
# https://github.com/dmlc/xgboost/issues/6085
if (BUILD_STATIC_LIB)
set(INSTALL_TARGETS xgboost runxgboost objxgboost dmlc)
else (BUILD_STATIC_LIB)
set(INSTALL_TARGETS xgboost runxgboost)
endif (BUILD_STATIC_LIB)

install(TARGETS ${INSTALL_TARGETS}
EXPORT XGBoostTargets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
@@ -250,12 +308,18 @@ install(
if (GOOGLE_TEST)
enable_testing()
# Unittests.
add_executable(testxgboost)
target_link_libraries(testxgboost PRIVATE objxgboost)
xgboost_target_properties(testxgboost)
xgboost_target_link_libraries(testxgboost)
xgboost_target_defs(testxgboost)

add_subdirectory(${xgboost_SOURCE_DIR}/tests/cpp)

add_test(
NAME TestXGBoostLib
COMMAND testxgboost
WORKING_DIRECTORY ${xgboost_BINARY_DIR})

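With GOOGLE_TEST enabled, the registered TestXGBoostLib test can be driven either through CTest or by running the gtest binary directly, much as the CI scripts later in this diff do. A minimal sketch:

    cd build
    ctest --extra-verbose   # runs TestXGBoostLib among the registered tests
    ./testxgboost           # or invoke the gtest executable directly
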
# CLI tests
configure_file(
${xgboost_SOURCE_DIR}/tests/cli/machine.conf.in
@@ -274,3 +338,12 @@ endif (GOOGLE_TEST)
# replace /MD with /MT. See https://github.com/dmlc/xgboost/issues/4462
# for issues caused by mixing of /MD and /MT flags
msvc_use_static_runtime()

# Add xgboost.pc
if (ADD_PKGCONFIG)
configure_file(${xgboost_SOURCE_DIR}/cmake/xgboost.pc.in ${xgboost_BINARY_DIR}/xgboost.pc @ONLY)

install(
FILES ${xgboost_BINARY_DIR}/xgboost.pc
DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
endif (ADD_PKGCONFIG)

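Once xgboost.pc is installed into ${CMAKE_INSTALL_LIBDIR}/pkgconfig, downstream builds can discover the library through pkg-config. An illustrative compile line (assuming the install prefix is on PKG_CONFIG_PATH; my_app.cc is a placeholder source file):

    g++ my_app.cc $(pkg-config --cflags --libs xgboost)
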
@@ -10,8 +10,8 @@ The Project Management Committee(PMC) consists group of active committers that m
- Tianqi is a Ph.D. student working on large-scale machine learning. He is the creator of the project.
* [Michael Benesty](https://github.com/pommedeterresautee)
- Michael is a lawyer and data scientist in France. He is the creator of XGBoost interactive analysis module in R.
* [Yuan Tang](https://github.com/terrytangyuan), Ant Financial
- Yuan is a software engineer in Ant Financial. He contributed mostly in R and Python packages.
* [Yuan Tang](https://github.com/terrytangyuan), Ant Group
- Yuan is a software engineer in Ant Group. He contributed mostly in R and Python packages.
* [Nan Zhu](https://github.com/CodingCat), Uber
- Nan is a software engineer in Uber. He contributed mostly in JVM packages.
* [Jiaming Yuan](https://github.com/trivialfis)
@@ -37,6 +37,8 @@ Committers are people who have made substantial contribution to the project and
- Sergei is a software engineer in Criteo. He contributed mostly in JVM packages.
* [Scott Lundberg](http://scottlundberg.com/), University of Washington
- Scott is a Ph.D. student at University of Washington. He is the creator of SHAP, a unified approach to explain the output of machine learning models such as decision tree ensembles. He also helps maintain the XGBoost Julia package.
* [Egor Smirnov](https://github.com/SmirnovEgorRu), Intel
- Egor has led a major effort to improve the performance of XGBoost on multi-core CPUs.


Become a Committer
@@ -57,7 +59,7 @@ List of Contributors
* [Skipper Seabold](https://github.com/jseabold)
- Skipper is the major contributor to the scikit-learn module of XGBoost.
* [Zygmunt Zając](https://github.com/zygmuntz)
- Zygmunt is the master behind the early stopping feature frequently used by kagglers.
- Zygmunt is the master behind the early stopping feature frequently used by Kagglers.
* [Ajinkya Kale](https://github.com/ajkl)
* [Boliang Chen](https://github.com/cblsjtu)
* [Yangqing Men](https://github.com/yanqingmen)
@@ -89,7 +91,7 @@ List of Contributors
* [Henry Gouk](https://github.com/henrygouk)
* [Pierre de Sahb](https://github.com/pdesahb)
* [liuliang01](https://github.com/liuliang01)
- liuliang01 added support for the qid column for LibSVM input format. This makes the ranking task easier in a distributed setting.
- liuliang01 added support for the qid column for LIBSVM input format. This makes the ranking task easier in a distributed setting.
* [Andrew Thia](https://github.com/BlueTea88)
- Andrew Thia implemented feature interaction constraints.
* [Wei Tian](https://github.com/weitian)
365
Jenkinsfile
vendored
@@ -6,6 +6,9 @@
// Command to run command inside a docker container
dockerRun = 'tests/ci_build/ci_build.sh'

// Which CUDA version to use when building reference distribution wheel
ref_cuda_ver = '10.1'

import groovy.transform.Field

@Field
@@ -31,29 +34,19 @@ pipeline {

// Build stages
stages {
stage('Jenkins Linux: Get sources') {
agent { label 'linux && cpu' }
stage('Jenkins Linux: Initialize') {
agent { label 'job_initializer' }
steps {
script {
def buildNumber = env.BUILD_NUMBER as int
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)

checkoutSrcs()
commit_id = "${GIT_COMMIT}"
}
sh 'python3 tests/jenkins_get_approval.py'
stash name: 'srcs'
milestone ordinal: 1
}
}
stage('Jenkins Linux: Formatting Check') {
agent none
steps {
script {
parallel ([
'clang-tidy': { ClangTidy() },
'lint': { Lint() },
'sphinx-doc': { SphinxDoc() },
'doxygen': { Doxygen() }
])
}
milestone ordinal: 2
}
}
stage('Jenkins Linux: Build') {
@@ -61,17 +54,21 @@ pipeline {
steps {
script {
parallel ([
'clang-tidy': { ClangTidy() },
'build-cpu': { BuildCPU() },
'build-cpu-arm64': { BuildCPUARM64() },
'build-cpu-rabit-mock': { BuildCPUMock() },
'build-cpu-non-omp': { BuildCPUNonOmp() },
'build-gpu-cuda9.0': { BuildCUDA(cuda_version: '9.0') },
'build-gpu-cuda10.0': { BuildCUDA(cuda_version: '10.0') },
// Build reference, distribution-ready Python wheel with CUDA 10.1
// using CentOS 7 image
'build-gpu-cuda10.1': { BuildCUDA(cuda_version: '10.1') },
'build-jvm-packages': { BuildJVMPackages(spark_version: '2.4.3') },
// The build-gpu-* builds below use Ubuntu image
'build-gpu-cuda11.0': { BuildCUDA(cuda_version: '11.0', build_rmm: true) },
'build-gpu-rpkg': { BuildRPackageWithCUDA(cuda_version: '10.1') },
'build-jvm-packages-gpu-cuda10.1': { BuildJVMPackagesWithCUDA(spark_version: '3.0.0', cuda_version: '11.0') },
'build-jvm-packages': { BuildJVMPackages(spark_version: '3.0.0') },
'build-jvm-doc': { BuildJVMDoc() }
])
}
milestone ordinal: 3
}
}
stage('Jenkins Linux: Test') {
@@ -80,19 +77,17 @@ pipeline {
script {
parallel ([
'test-python-cpu': { TestPythonCPU() },
'test-python-gpu-cuda9.0': { TestPythonGPU(cuda_version: '9.0') },
'test-python-gpu-cuda10.0': { TestPythonGPU(cuda_version: '10.0') },
'test-python-gpu-cuda10.1': { TestPythonGPU(cuda_version: '10.1') },
'test-python-mgpu-cuda10.1': { TestPythonGPU(cuda_version: '10.1', multi_gpu: true) },
'test-cpp-gpu': { TestCppGPU(cuda_version: '10.1') },
'test-cpp-mgpu': { TestCppGPU(cuda_version: '10.1', multi_gpu: true) },
'test-jvm-jdk8': { CrossTestJVMwithJDK(jdk_version: '8', spark_version: '2.4.3') },
'test-python-cpu-arm64': { TestPythonCPUARM64() },
// artifact_cuda_version doesn't apply to RMM tests; RMM tests will always match CUDA version between artifact and host env
'test-python-gpu-cuda11.0-cross': { TestPythonGPU(artifact_cuda_version: '10.1', host_cuda_version: '11.0', test_rmm: true) },
'test-python-gpu-cuda11.0': { TestPythonGPU(artifact_cuda_version: '11.0', host_cuda_version: '11.0') },
'test-python-mgpu-cuda11.0': { TestPythonGPU(artifact_cuda_version: '10.1', host_cuda_version: '11.0', multi_gpu: true, test_rmm: true) },
'test-cpp-gpu-cuda11.0': { TestCppGPU(artifact_cuda_version: '11.0', host_cuda_version: '11.0', test_rmm: true) },
'test-jvm-jdk8': { CrossTestJVMwithJDK(jdk_version: '8', spark_version: '3.0.0') },
'test-jvm-jdk11': { CrossTestJVMwithJDK(jdk_version: '11') },
'test-jvm-jdk12': { CrossTestJVMwithJDK(jdk_version: '12') },
'test-r-3.5.3': { TestR(use_r35: true) }
'test-jvm-jdk12': { CrossTestJVMwithJDK(jdk_version: '12') }
])
}
milestone ordinal: 4
}
}
stage('Jenkins Linux: Deploy') {
@@ -100,10 +95,9 @@ pipeline {
steps {
script {
parallel ([
'deploy-jvm-packages': { DeployJVMPackages(spark_version: '2.4.3') }
'deploy-jvm-packages': { DeployJVMPackages(spark_version: '3.0.0') }
])
}
milestone ordinal: 5
}
}
}
@@ -124,13 +118,17 @@ def checkoutSrcs() {
}
}

def GetCUDABuildContainerType(cuda_version) {
return (cuda_version == ref_cuda_ver) ? 'gpu_build_centos7' : 'gpu_build'
}

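Every build and test stage below funnels through the dockerRun wrapper declared at the top of this file, which takes a container type, a docker binary, optional build arguments, and the command to run inside the container. For example, the Lint() step below amounts to the following local invocation (a sketch derived from the sh steps in this file):

    tests/ci_build/ci_build.sh cpu docker make lint
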
def ClangTidy() {
node('linux && cpu') {
node('linux && cpu_build') {
unstash name: 'srcs'
echo "Running clang-tidy job..."
def container_type = "clang_tidy"
def docker_binary = "docker"
def dockerArgs = "--build-arg CUDA_VERSION=10.1"
def dockerArgs = "--build-arg CUDA_VERSION_ARG=10.1"
sh """
${dockerRun} ${container_type} ${docker_binary} ${dockerArgs} python3 tests/ci_build/tidy.py
"""
@@ -138,48 +136,6 @@ def ClangTidy() {
}
}

def Lint() {
node('linux && cpu') {
unstash name: 'srcs'
echo "Running lint..."
def container_type = "cpu"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} make lint
"""
deleteDir()
}
}

def SphinxDoc() {
node('linux && cpu') {
unstash name: 'srcs'
echo "Running sphinx-doc..."
def container_type = "cpu"
def docker_binary = "docker"
def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='-e SPHINX_GIT_BRANCH=${BRANCH_NAME}'"
sh """#!/bin/bash
${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} make -C doc html
"""
deleteDir()
}
}

def Doxygen() {
node('linux && cpu') {
unstash name: 'srcs'
echo "Running doxygen..."
def container_type = "cpu"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/doxygen.sh ${BRANCH_NAME}
"""
echo 'Uploading doc...'
s3Upload file: "build/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "doxygen/${BRANCH_NAME}.tar.bz2"
deleteDir()
}
}

def BuildCPU() {
node('linux && cpu') {
unstash name: 'srcs'
@@ -191,15 +147,15 @@ def BuildCPU() {
# This step is not necessary, but here we include it, to ensure that DMLC_CORE_USE_CMAKE flag is correctly propagated
# We want to make sure that we use the configured header build/dmlc/build_config.h instead of include/dmlc/build_config_default.h.
# See discussion at https://github.com/dmlc/xgboost/issues/5510
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh
${dockerRun} ${container_type} ${docker_binary} build/testxgboost
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh -DPLUGIN_DENSE_PARSER=ON
${dockerRun} ${container_type} ${docker_binary} bash -c "cd build && ctest --extra-verbose"
"""
// Sanitizer test
def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='-e ASAN_SYMBOLIZER_PATH=/usr/bin/llvm-symbolizer -e ASAN_OPTIONS=symbolize=1 -e UBSAN_OPTIONS=print_stacktrace=1:log_path=ubsan_error.log --cap-add SYS_PTRACE'"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak;undefined" \
-DCMAKE_BUILD_TYPE=Debug -DSANITIZER_PATH=/usr/lib/x86_64-linux-gnu/
${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} build/testxgboost
${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} bash -c "cd build && ctest --exclude-regex AllTestsInDMLCUnitTests --extra-verbose"
"""

stash name: 'xgboost_cli', includes: 'xgboost'
@@ -207,6 +163,35 @@ def BuildCPU() {
}
}

def BuildCPUARM64() {
node('linux && arm64') {
unstash name: 'srcs'
echo "Build CPU ARM64"
def container_type = "aarch64"
def docker_binary = "docker"
def wheel_tag = "manylinux2014_aarch64"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh --conda-env=aarch64_test -DOPEN_MP:BOOL=ON -DHIDE_CXX_SYMBOL=ON
${dockerRun} ${container_type} ${docker_binary} bash -c "cd build && ctest --extra-verbose"
${dockerRun} ${container_type} ${docker_binary} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal"
${dockerRun} ${container_type} ${docker_binary} python tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} ${wheel_tag}
${dockerRun} ${container_type} ${docker_binary} bash -c "auditwheel repair --plat ${wheel_tag} python-package/dist/*.whl && python tests/ci_build/rename_whl.py wheelhouse/*.whl ${commit_id} ${wheel_tag}"
mv -v wheelhouse/*.whl python-package/dist/
# Make sure that libgomp.so is vendored in the wheel
${dockerRun} ${container_type} ${docker_binary} bash -c "unzip -l python-package/dist/*.whl | grep libgomp || exit -1"
"""
echo 'Stashing Python wheel...'
stash name: "xgboost_whl_arm64_cpu", includes: 'python-package/dist/*.whl'
if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) {
echo 'Uploading Python wheel...'
path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
}
stash name: 'xgboost_cli_arm64', includes: 'xgboost'
deleteDir()
}
}

def BuildCPUMock() {
node('linux && cpu') {
unstash name: 'srcs'
@@ -222,48 +207,101 @@
}
}

def BuildCPUNonOmp() {
node('linux && cpu') {
def BuildCUDA(args) {
node('linux && cpu_build') {
unstash name: 'srcs'
echo "Build CPU without OpenMP"
def container_type = "cpu"
echo "Build with CUDA ${args.cuda_version}"
def container_type = GetCUDABuildContainerType(args.cuda_version)
def docker_binary = "docker"
def docker_args = "--build-arg CUDA_VERSION_ARG=${args.cuda_version}"
def arch_flag = ""
if (env.BRANCH_NAME != 'master' && !(env.BRANCH_NAME.startsWith('release'))) {
arch_flag = "-DGPU_COMPUTE_VER=75"
}
def wheel_tag = "manylinux2014_x86_64"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh -DUSE_OPENMP=OFF
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_CUDA=ON -DUSE_NCCL=ON -DOPEN_MP:BOOL=ON -DHIDE_CXX_SYMBOLS=ON ${arch_flag}
${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal"
${dockerRun} ${container_type} ${docker_binary} ${docker_args} python tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} ${wheel_tag}
"""
echo "Running Non-OpenMP C++ test..."
if (args.cuda_version == ref_cuda_ver) {
sh """
${dockerRun} ${container_type} ${docker_binary} build/testxgboost
${dockerRun} auditwheel_x86_64 ${docker_binary} auditwheel repair --plat ${wheel_tag} python-package/dist/*.whl
${dockerRun} ${container_type} ${docker_binary} ${docker_args} python tests/ci_build/rename_whl.py wheelhouse/*.whl ${commit_id} ${wheel_tag}
mv -v wheelhouse/*.whl python-package/dist/
# Make sure that libgomp.so is vendored in the wheel
${dockerRun} auditwheel_x86_64 ${docker_binary} bash -c "unzip -l python-package/dist/*.whl | grep libgomp || exit -1"
"""
}
echo 'Stashing Python wheel...'
stash name: "xgboost_whl_cuda${args.cuda_version}", includes: 'python-package/dist/*.whl'
if (args.cuda_version == ref_cuda_ver && (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release'))) {
echo 'Uploading Python wheel...'
path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
}
echo 'Stashing C++ test executable (testxgboost)...'
stash name: "xgboost_cpp_tests_cuda${args.cuda_version}", includes: 'build/testxgboost'
if (args.build_rmm) {
echo "Build with CUDA ${args.cuda_version} and RMM"
container_type = "rmm"
docker_binary = "docker"
docker_args = "--build-arg CUDA_VERSION_ARG=${args.cuda_version}"
sh """
rm -rf build/
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh --conda-env=gpu_test -DUSE_CUDA=ON -DUSE_NCCL=ON -DPLUGIN_RMM=ON -DBUILD_WITH_CUDA_CUB=ON ${arch_flag}
${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal"
${dockerRun} ${container_type} ${docker_binary} ${docker_args} python tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} manylinux2014_x86_64
"""
echo 'Stashing Python wheel...'
stash name: "xgboost_whl_rmm_cuda${args.cuda_version}", includes: 'python-package/dist/*.whl'
echo 'Stashing C++ test executable (testxgboost)...'
stash name: "xgboost_cpp_tests_rmm_cuda${args.cuda_version}", includes: 'build/testxgboost'
}
deleteDir()
}
}

def BuildCUDA(args) {
node('linux && cpu') {
def BuildRPackageWithCUDA(args) {
node('linux && cpu_build') {
unstash name: 'srcs'
echo "Build with CUDA ${args.cuda_version}"
def container_type = "gpu_build"
def container_type = 'gpu_build_r_centos7'
def docker_binary = "docker"
def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
def docker_args = "--build-arg CUDA_VERSION_ARG=${args.cuda_version}"
if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) {
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_CUDA=ON -DUSE_NCCL=ON -DOPEN_MP:BOOL=ON -DHIDE_CXX_SYMBOLS=ON
${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal"
${dockerRun} ${container_type} ${docker_binary} ${docker_args} python3 tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} manylinux2010_x86_64
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_r_pkg_with_cuda.sh ${commit_id}
"""
// Stash wheel for CUDA 9.0 target
if (args.cuda_version == '9.0') {
echo 'Stashing Python wheel...'
stash name: 'xgboost_whl_cuda9', includes: 'python-package/dist/*.whl'
echo 'Uploading R tarball...'
path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
echo 'Stashing C++ test executable (testxgboost)...'
stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost'
s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', includePathPattern:'xgboost_r_gpu_linux_*.tar.gz'
}
deleteDir()
}
}

def BuildJVMPackagesWithCUDA(args) {
node('linux && mgpu') {
unstash name: 'srcs'
echo "Build XGBoost4J-Spark with Spark ${args.spark_version}, CUDA ${args.cuda_version}"
def container_type = "jvm_gpu_build"
def docker_binary = "nvidia-docker"
def docker_args = "--build-arg CUDA_VERSION_ARG=${args.cuda_version}"
def arch_flag = ""
if (env.BRANCH_NAME != 'master' && !(env.BRANCH_NAME.startsWith('release'))) {
arch_flag = "-DGPU_COMPUTE_VER=75"
}
// Use only 4 CPU cores
def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='--cpuset-cpus 0-3'"
sh """
${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_jvm_packages.sh ${args.spark_version} -Duse.cuda=ON $arch_flag
"""
echo "Stashing XGBoost4J JAR with CUDA ${args.cuda_version} ..."
stash name: 'xgboost4j_jar_gpu', includes: "jvm-packages/xgboost4j-gpu/target/*.jar,jvm-packages/xgboost4j-spark-gpu/target/*.jar"
deleteDir()
}
}

def BuildJVMPackages(args) {
node('linux && cpu') {
unstash name: 'srcs'
@@ -290,15 +328,17 @@ def BuildJVMDoc() {
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_doc.sh ${BRANCH_NAME}
"""
if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) {
echo 'Uploading doc...'
s3Upload file: "jvm-packages/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "${BRANCH_NAME}.tar.bz2"
}
deleteDir()
}
}

def TestPythonCPU() {
node('linux && cpu') {
unstash name: 'xgboost_whl_cuda9'
unstash name: "xgboost_whl_cuda${ref_cuda_ver}"
unstash name: 'srcs'
unstash name: 'xgboost_cli'
echo "Test Python CPU"
@@ -306,78 +346,72 @@
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu-py35
"""
deleteDir()
}
}

def TestPythonCPUARM64() {
node('linux && arm64') {
unstash name: "xgboost_whl_arm64_cpu"
unstash name: 'srcs'
unstash name: 'xgboost_cli_arm64'
echo "Test Python CPU ARM64"
def container_type = "aarch64"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/test_python.sh cpu-arm64
"""
deleteDir()
}
}

def TestPythonGPU(args) {
nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu'
def nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu'
def artifact_cuda_version = (args.artifact_cuda_version) ?: ref_cuda_ver
node(nodeReq) {
unstash name: 'xgboost_whl_cuda9'
unstash name: "xgboost_whl_cuda${artifact_cuda_version}"
unstash name: "xgboost_cpp_tests_cuda${artifact_cuda_version}"
unstash name: 'srcs'
echo "Test Python GPU: CUDA ${args.cuda_version}"
echo "Test Python GPU: CUDA ${args.host_cuda_version}"
def container_type = "gpu"
def docker_binary = "nvidia-docker"
def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
if (args.multi_gpu) {
echo "Using multiple GPUs"
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh mgpu
"""
if (args.cuda_version != '9.0') {
echo "Running tests with cuDF..."
sh """
${dockerRun} cudf ${docker_binary} ${docker_args} tests/ci_build/test_python.sh mgpu-cudf
"""
def docker_args = "--build-arg CUDA_VERSION_ARG=${args.host_cuda_version}"
def mgpu_indicator = (args.multi_gpu) ? 'mgpu' : 'gpu'
// Allocate extra space in /dev/shm to enable NCCL
def docker_extra_params = (args.multi_gpu) ? "CI_DOCKER_EXTRA_PARAMS_INIT='--shm-size=4g'" : ''
sh "${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh ${mgpu_indicator}"
if (args.test_rmm) {
sh "rm -rfv build/ python-package/dist/"
unstash name: "xgboost_whl_rmm_cuda${args.host_cuda_version}"
unstash name: "xgboost_cpp_tests_rmm_cuda${args.host_cuda_version}"
sh "${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh ${mgpu_indicator} --use-rmm-pool"
}
} else {
echo "Using a single GPU"
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh gpu
"""
if (args.cuda_version != '9.0') {
echo "Running tests with cuDF..."
sh """
${dockerRun} cudf ${docker_binary} ${docker_args} tests/ci_build/test_python.sh cudf
"""
}
}
// For CUDA 10.0 target, run cuDF tests too
deleteDir()
}
}

def TestCppRabit() {
node(nodeReq) {
unstash name: 'xgboost_rabit_tests'
unstash name: 'srcs'
echo "Test C++, rabit mock on"
def container_type = "cpu"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/runxgb.sh xgboost tests/ci_build/approx.conf.in
"""
deleteDir()
}
}

def TestCppGPU(args) {
nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu'
def nodeReq = 'linux && mgpu'
def artifact_cuda_version = (args.artifact_cuda_version) ?: ref_cuda_ver
node(nodeReq) {
unstash name: 'xgboost_cpp_tests'
unstash name: "xgboost_cpp_tests_cuda${artifact_cuda_version}"
unstash name: 'srcs'
echo "Test C++, CUDA ${args.cuda_version}"
echo "Test C++, CUDA ${args.host_cuda_version}"
def container_type = "gpu"
def docker_binary = "nvidia-docker"
def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
if (args.multi_gpu) {
echo "Using multiple GPUs"
sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost --gtest_filter=*.MGPU_*"
} else {
echo "Using a single GPU"
sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost --gtest_filter=-*.MGPU_*"
def docker_args = "--build-arg CUDA_VERSION_ARG=${args.host_cuda_version}"
sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost"
if (args.test_rmm) {
sh "rm -rfv build/"
unstash name: "xgboost_cpp_tests_rmm_cuda${args.host_cuda_version}"
echo "Test C++, CUDA ${args.host_cuda_version} with RMM"
container_type = "rmm"
docker_binary = "nvidia-docker"
docker_args = "--build-arg CUDA_VERSION_ARG=${args.host_cuda_version}"
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "source activate gpu_test && build/testxgboost --use-rmm-pool --gtest_filter=-*DeathTest.*"
"""
}
deleteDir()
}
@@ -405,30 +439,13 @@ def CrossTestJVMwithJDK(args) {
}
}

def TestR(args) {
node('linux && cpu') {
unstash name: 'srcs'
echo "Test R package"
def container_type = "rproject"
def docker_binary = "docker"
def use_r35_flag = (args.use_r35) ? "1" : "0"
def docker_args = "--build-arg USE_R35=${use_r35_flag}"
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_test_rpkg.sh || tests/ci_build/print_r_stacktrace.sh
"""
deleteDir()
}
}

def DeployJVMPackages(args) {
node('linux && cpu') {
unstash name: 'srcs'
if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) {
echo 'Deploying to xgboost-maven-repo S3 repo...'
def container_type = "jvm"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/deploy_jvm_packages.sh ${args.spark_version}
${dockerRun} jvm_gpu_build docker --build-arg CUDA_VERSION_ARG=10.1 tests/ci_build/deploy_jvm_packages.sh ${args.spark_version}
"""
}
deleteDir()

@@ -10,17 +10,29 @@ def commit_id // necessary to pass a variable from one stage to another

pipeline {
agent none

// Setup common job properties
options {
timestamps()
timeout(time: 240, unit: 'MINUTES')
buildDiscarder(logRotator(numToKeepStr: '10'))
preserveStashes()
}

// Build stages
stages {
stage('Jenkins Win64: Get sources') {
agent { label 'win64 && build' }
stage('Jenkins Win64: Initialize') {
agent { label 'job_initializer' }
steps {
script {
def buildNumber = env.BUILD_NUMBER as int
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
checkoutSrcs()
commit_id = "${GIT_COMMIT}"
}
sh 'python3 tests/jenkins_get_approval.py'
stash name: 'srcs'
milestone ordinal: 1
}
}
stage('Jenkins Win64: Build') {
@@ -28,10 +40,10 @@ pipeline {
steps {
script {
parallel ([
'build-win64-cuda9.0': { BuildWin64() }
'build-win64-cuda10.1': { BuildWin64() },
'build-rpkg-win64-cuda10.1': { BuildRPackageWithCUDAWin64() }
])
}
milestone ordinal: 2
}
}
stage('Jenkins Win64: Test') {
@@ -39,13 +51,9 @@ pipeline {
steps {
script {
parallel ([
'test-win64-cpu': { TestWin64CPU() },
'test-win64-gpu-cuda9.0': { TestWin64GPU(cuda_target: 'cuda9') },
'test-win64-gpu-cuda10.0': { TestWin64GPU(cuda_target: 'cuda10_0') },
'test-win64-gpu-cuda10.1': { TestWin64GPU(cuda_target: 'cuda10_1') }
'test-win64-cuda10.1': { TestWin64() },
])
}
milestone ordinal: 3
}
}
}
@@ -67,14 +75,19 @@ def checkoutSrcs() {
}

def BuildWin64() {
node('win64 && build') {
node('win64 && cuda10_unified') {
deleteDir()
unstash name: 'srcs'
echo "Building XGBoost for Windows AMD64 target..."
bat "nvcc --version"
def arch_flag = ""
if (env.BRANCH_NAME != 'master' && !(env.BRANCH_NAME.startsWith('release'))) {
arch_flag = "-DGPU_COMPUTE_VER=75"
}
bat """
mkdir build
cd build
cmake .. -G"Visual Studio 15 2017 Win64" -DUSE_CUDA=ON -DCMAKE_VERBOSE_MAKEFILE=ON -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON
cmake .. -G"Visual Studio 15 2017 Win64" -DUSE_CUDA=ON -DCMAKE_VERBOSE_MAKEFILE=ON -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON ${arch_flag} -DCMAKE_UNITY_BUILD=ON
"""
bat """
cd build
@@ -92,8 +105,11 @@
"""
echo 'Stashing Python wheel...'
stash name: 'xgboost_whl', includes: 'python-package/dist/*.whl'
if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) {
echo 'Uploading Python wheel...'
path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
}
echo 'Stashing C++ test executable (testxgboost)...'
stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost.exe'
stash name: 'xgboost_cli', includes: 'xgboost.exe'
@@ -101,51 +117,47 @@
}
}

def TestWin64CPU() {
node('win64 && cpu') {
def BuildRPackageWithCUDAWin64() {
node('win64 && cuda10_unified') {
deleteDir()
unstash name: 'srcs'
unstash name: 'xgboost_whl'
unstash name: 'xgboost_cli'
echo "Test Win64 CPU"
echo "Installing Python wheel..."
bat "conda activate && (python -m pip uninstall -y xgboost || cd .)"
bat "nvcc --version"
if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) {
bat """
conda activate && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i"
bash tests/ci_build/build_r_pkg_with_cuda_win64.sh ${commit_id}
"""
echo "Installing Python dependencies..."
bat """
conda activate && conda upgrade scikit-learn pandas numpy
"""
echo "Running Python tests..."
bat "conda activate && python -m pytest -v -s --fulltrace tests\\python"
bat "conda activate && python -m pip uninstall -y xgboost"
echo 'Uploading R tarball...'
path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', includePathPattern:'xgboost_r_gpu_win64_*.tar.gz'
}
deleteDir()
}
}

def TestWin64GPU(args) {
node("win64 && gpu && ${args.cuda_target}") {
def TestWin64() {
node('win64 && cuda10_unified') {
deleteDir()
unstash name: 'srcs'
unstash name: 'xgboost_whl'
unstash name: 'xgboost_cli'
unstash name: 'xgboost_cpp_tests'
echo "Test Win64 GPU (${args.cuda_target})"
echo "Test Win64"
bat "nvcc --version"
echo "Running C++ tests..."
bat "build\\testxgboost.exe"
echo "Installing Python wheel..."
bat "conda activate && (python -m pip uninstall -y xgboost || cd .)"
bat """
conda activate && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i"
"""
echo "Installing Python dependencies..."
def env_name = 'win64_' + UUID.randomUUID().toString().replaceAll('-', '')
bat "conda activate && mamba env create -n ${env_name} --file=tests/ci_build/conda_env/win64_test.yml"
echo "Installing Python wheel..."
bat """
conda activate && conda upgrade scikit-learn pandas numpy
conda activate ${env_name} && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i"
"""
echo "Running Python tests..."
bat "conda activate ${env_name} && python -m pytest -v -s -rxXs --fulltrace tests\\python"
bat """
conda activate && python -m pytest -v -s --fulltrace -m "(not slow) and (not mgpu)" tests\\python-gpu
conda activate ${env_name} && python -m pytest -v -s -rxXs --fulltrace -m "(not slow) and (not mgpu)" tests\\python-gpu
"""
bat "conda activate && python -m pip uninstall -y xgboost"
bat "conda env remove --name ${env_name}"
deleteDir()
}
}

24
Makefile
@@ -44,7 +44,7 @@ export CXX = g++
endif
endif

export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++11 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS)
export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++14 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS)
CFLAGS += -I$(DMLC_CORE)/include -I$(RABIT)/include -I$(GTEST_PATH)/include

ifeq ($(TEST_COVER), 1)
@@ -86,6 +86,20 @@ cover: check
)
endif


# The dask checks are required to pass; the others are not.
# If any of the dask tests fail, the contributor won't see the other errors.
mypy:
	cd python-package; \
	mypy ./xgboost/dask.py && \
	mypy ./xgboost/rabit.py && \
	mypy ../demo/guide-python/external_memory.py && \
	mypy ../tests/python-gpu/test_gpu_with_dask.py && \
	mypy ../tests/python/test_data_iterator.py && \
	mypy ../tests/python-gpu/test_gpu_data_iterator.py && \
	mypy ./xgboost/sklearn.py || exit 1; \
	mypy . || true ;

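The mypy target chains the required checks with && and ends with `mypy . || true`, so the run fails fast on the required files while the final repository-wide check stays advisory. It is invoked from the repository root:

    make mypy
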
clean:
	$(RM) -rf build lib bin *~ */*~ */*/*~ */*/*/*~ */*.o */*/*.o */*/*/*.o #xgboost
	$(RM) -rf build_tests *.gcov tests/cpp/xgboost_test
@@ -134,14 +148,18 @@ Rpack: clean_all
	sed -i -e 's/@OPENMP_LIB@//g' xgboost/src/Makevars.win
	rm -f xgboost/src/Makevars.win-e # OS X sed creates this extra file; remove it
	bash R-package/remove_warning_suppression_pragma.sh
	bash xgboost/remove_warning_suppression_pragma.sh
	rm xgboost/remove_warning_suppression_pragma.sh
	rm -rfv xgboost/tests/helper_scripts/

R ?= R

Rbuild: Rpack
	R CMD build --no-build-vignettes xgboost
	$(R) CMD build xgboost
	rm -rf xgboost

Rcheck: Rbuild
	R CMD check xgboost*.tar.gz
	$(R) CMD check --as-cran xgboost*.tar.gz

-include build/*.d
-include build/*/*.d

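With the new `R ?= R` variable, the R targets can be pointed at a specific interpreter. A sketch (the path below is hypothetical):

    make Rcheck R=/opt/R/4.1.0/bin/R   # defaults to plain 'R' when R is unset
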
890
NEWS.md
@@ -3,6 +3,880 @@ XGBoost Change Log

This file records the changes in xgboost library in reverse chronological order.

## v1.4.2 (2021.05.13)
This is a patch release for the Python package with the following fixes:

* Handle the latest version of cupy.ndarray in inplace_predict. (#6933)
* Ensure output array from predict_leaf is (n_samples, ) when there's only 1 tree. 1.4.0 outputs (n_samples, 1). (#6889)
* Fix empty dataset handling with multi-class AUC. (#6947)
* Handle object type from pandas in inplace_predict. (#6927)


## v1.4.1 (2021.04.20)
This is a bug fix release.

* Fix GPU implementation of AUC on some large datasets. (#6866)

## v1.4.0 (2021.04.12)

### Introduction of pre-built binary package for R, with GPU support
Starting with release 1.4.0, users now have the option of installing `{xgboost}` without
having to build it from the source. This is particularly advantageous for users who want
to take advantage of the GPU algorithm (`gpu_hist`), as previously they'd have to build
`{xgboost}` from the source using CMake and NVCC. Now installing `{xgboost}` with GPU
support is as easy as: `R CMD INSTALL ./xgboost_r_gpu_linux.tar.gz`. (#6827)

See the instructions at https://xgboost.readthedocs.io/en/latest/build.html

### Improvements on prediction functions
XGBoost has many prediction types including shap value computation and inplace prediction.
In 1.4 we overhauled the underlying prediction functions for C API and Python API with a
unified interface. (#6777, #6693, #6653, #6662, #6648, #6668, #6804)
* Starting with 1.4, sklearn interface prediction will use inplace predict by default when
input data is supported.
* Users can use inplace predict with `dart` booster and enable GPU acceleration just
like `gbtree`.
* Also all prediction functions with tree models are now thread-safe. Inplace predict is
improved with `base_margin` support.
* A new set of C predict functions are exposed in the public interface.
* A user-visible change is a newly added parameter called `strict_shape`. See
https://xgboost.readthedocs.io/en/latest/prediction.html for more details.


### Improvement on Dask interface
* Starting with 1.4, the Dask interface is considered to be feature-complete, which means
all of the models found in the single node Python interface are now supported in Dask,
including but not limited to ranking and random forest. Also, the prediction function
is significantly faster and supports shap value computation.
- Most of the parameters found in single node sklearn interface are supported by
Dask interface. (#6471, #6591)
- Implements learning to rank. On the Dask interface, we use the newly added support of
query ID to enable group structure. (#6576)
- The Dask interface has Python type hints support. (#6519)
- All models can be safely pickled. (#6651)
- Random forest estimators are now supported. (#6602)
- Shap value computation is now supported. (#6575, #6645, #6614)
- Evaluation result is printed on the scheduler process. (#6609)
- `DaskDMatrix` (and device quantile dmatrix) now accepts all meta-information. (#6601)

* Prediction optimization. We enhanced and sped up the prediction function for the
Dask interface. See the latest Dask tutorial page in our document for an overview of
how you can optimize it even further. (#6650, #6645, #6648, #6668)

* Bug fixes
- If you are using the latest Dask and distributed where `distributed.MultiLock` is
present, XGBoost supports training multiple models on the same cluster in
parallel. (#6743)
- A bug fix for when using `dask.client` to launch async task, XGBoost might use a
different client object internally. (#6722)

* Other improvements on documents, blogs, tutorials, and demos. (#6389, #6366, #6687,
#6699, #6532, #6501)

### Python package
With changes from Dask and general improvement on prediction, we have made some
enhancements on the general Python interface and IO for booster information. Starting
from 1.4, booster feature names and types can be saved into the JSON model. Also some
model attributes like `best_iteration`, `best_score` are restored upon model load. On
sklearn interface, some attributes are now implemented as Python object property with
better documents.

* Breaking change: All `data` parameters in prediction functions are renamed to `X`
for better compliance with sklearn estimator interface guidelines.
* Breaking change: XGBoost used to generate some pseudo feature names with `DMatrix`
when inputs like `np.ndarray` don't have column names. The procedure is removed to
avoid conflict with other inputs. (#6605)
* Early stopping with training continuation is now supported. (#6506)
* Optional import for Dask and cuDF are now lazy. (#6522)
* As mentioned in the prediction improvement summary, the sklearn interface uses inplace
prediction whenever possible. (#6718)
* Booster information like feature names and feature types are now saved into the JSON
model file. (#6605)
* All `DMatrix` interfaces including `DeviceQuantileDMatrix` and counterparts in Dask
interface (as mentioned in the Dask changes summary) now accept all the meta-information
like `group` and `qid` in their constructor for better consistency. (#6601)
* Booster attributes are restored upon model load so users don't have to call `attr`
manually. (#6593)
* On sklearn interface, all models accept `base_margin` for evaluation datasets. (#6591)
* Improvements over the setup script including smaller sdist size and faster installation
if the C++ library is already built (#6611, #6694, #6565).

* Bug fixes for Python package:
- Don't validate feature when number of rows is 0. (#6472)
- Move metric configuration into booster. (#6504)
- Calling XGBModel.fit() should clear the Booster by default (#6562)
- Support `_estimator_type`. (#6582)
- [dask, sklearn] Fix predict proba. (#6566, #6817)
- Restore unknown data support. (#6595)
- Fix learning rate scheduler with cv. (#6720)
- Fixes small typo in sklearn documentation (#6717)
- [python-package] Fix class Booster: feature_types = None (#6705)
- Fix divide by 0 in feature importance when no split is found. (#6676)


### JVM package
* [jvm-packages] fix early stopping doesn't work even without custom_eval setting (#6738)
* fix potential TaskFailedListener's callback won't be called (#6612)
* [jvm] Add ability to load booster direct from byte array (#6655)
* [jvm-packages] JVM library loader extensions (#6630)

### R package
* R documentation: Make construction of DMatrix consistent.
* Fix R documentation for xgb.train. (#6764)

### ROC-AUC
We re-implemented the ROC-AUC metric in XGBoost. The new implementation supports
multi-class classification and has better support for learning to rank tasks that are not
binary. Also, it has a better-defined average on distributed environments with additional
handling for invalid datasets. (#6749, #6747, #6797)

### Global configuration
Starting from 1.4, XGBoost's Python, R and C interfaces support a new global configuration
model where users can specify some global parameters. Currently, supported parameters are
`verbosity` and `use_rmm`. The latter is experimental, see rmm plugin demo and
related README file for details. (#6414, #6656)

### Other new features
* Better handling for input data types that support `__array_interface__`. For some
data types including GPU inputs and `scipy.sparse.csr_matrix`, XGBoost employs
`__array_interface__` for processing the underlying data. Starting from 1.4, XGBoost
can accept arbitrary array strides (which means column-major is supported) without
making data copies, potentially reducing a significant amount of memory consumption.
Also version 3 of `__cuda_array_interface__` is now supported. (#6776, #6765, #6459,
#6675)
* Improved parameter validation, now feeding XGBoost with parameters that contain
whitespace will trigger an error. (#6769)
* For Python and R packages, file paths containing the home indicator `~` are supported.
* As mentioned in the Python changes summary, the JSON model can now save feature
information of the trained booster. The JSON schema is updated accordingly. (#6605)
* Development of categorical data support is continued. Newly added weighted data support
and `dart` booster support. (#6508, #6693)
* As mentioned in Dask change summary, ranking now supports the `qid` parameter for
query groups. (#6576)
* `DMatrix.slice` can now consume a numpy array. (#6368)

### Other breaking changes
* Aside from the feature name generation, there are 2 breaking changes:
- Drop saving binary format for memory snapshot. (#6513, #6640)
- Change default evaluation metric for binary:logitraw objective to logloss (#6647)

### CPU Optimization
* Aside from the general changes on predict function, some optimizations are applied on
CPU implementation. (#6683, #6550, #6696, #6700)
* Also performance for sampling initialization in `hist` is improved. (#6410)

### Notable fixes in the core library
|
||||
These fixes do not reside in particular language bindings:
|
||||
* Fixes for gamma regression. This includes checking for invalid input values, fixes for
|
||||
gamma deviance metric, and better floating point guard for gamma negative log-likelihood
|
||||
metric. (#6778, #6537, #6761)
|
||||
* Random forest with `gpu_hist` might generate low accuracy in previous versions. (#6755)
|
||||
* Fix a bug in GPU sketching when data size exceeds limit of 32-bit integer. (#6826)
|
||||
* Memory consumption fix for row-major adapters (#6779)
|
||||
* Don't estimate sketch batch size when rmm is used. (#6807) (#6830)
|
||||
* Fix in-place predict with missing value. (#6787)
|
||||
* Re-introduce double buffer in UpdatePosition, to fix perf regression in gpu_hist (#6757)
|
||||
* Pass correct split_type to GPU predictor (#6491)
|
||||
* Fix DMatrix feature names/types IO. (#6507)
|
||||
* Use view for `SparsePage` exclusively to avoid some data access races. (#6590)
|
||||
* Check for invalid data. (#6742)
|
||||
* Fix relocatable include in CMakeList (#6734) (#6737)
|
||||
* Fix DMatrix slice with feature types. (#6689)

### Other deprecation notices
* This release will be the last release to support CUDA 10.0. (#6642)
* Starting in the next release, the Python package will require Pip 19.3+ due to the use
of the manylinux2014 tag. Also, CentOS 6, RHEL 6 and other old distributions will not be
supported.

### Known issues
MacOS builds of the JVM packages don't support multi-threading out of the box. To enable
multi-threading with the JVM packages, MacOS users will need to build them from
source. See https://xgboost.readthedocs.io/en/latest/jvm/index.html#installation-from-source

### Doc
* Dedicated page for `tree_method` parameter is added. (#6564, #6633)
* [doc] Add FLAML as a fast tuning tool for XGBoost (#6770)
* Add document for tests directory. [skip ci] (#6760)
* Fix doc string of config.py to use correct `versionadded` (#6458)
* Update demo for prediction. (#6789)
* [Doc] Document that AUCPR is for binary classification/ranking (#5899)
* Update the C API comments (#6457)
* Fix document. [skip ci] (#6669)

### Maintenance: Testing, continuous integration
* Use CPU input for test_boost_from_prediction. (#6818)
* [CI] Upload xgboost4j.dll to S3 (#6781)
* Update dmlc-core submodule (#6745)
* [CI] Use manylinux2010_x86_64 container to vendor libgomp (#6485)
* Add conda-forge badge (#6502)
* Fix merge conflict. (#6512)
* [CI] Split up main.yml, add mypy. (#6515)
* [Breaking] Upgrade cuDF and RMM to 0.18 nightlies; require RMM 0.18+ for RMM plugin (#6510)
* "featue_map" typo changed to "feature_map" (#6540)
* Add script for generating release tarball. (#6544)
* Add credentials to .gitignore (#6559)
* Remove warnings in tests. (#6554)
* Update dmlc-core submodule and conform to new API (#6431)
* Suppress hypothesis health check for dask client. (#6589)
* Fix pylint. (#6714)
* [CI] Clear R package cache (#6746)
* Exclude dmlc test on github action. (#6625)
* Tests for regression metrics with weights. (#6729)
* Add helper script and doc for releasing pip package. (#6613)
* Support pylint 2.7.0 (#6726)
* Remove R cache in github action. (#6695)
* [CI] Do not mix up stashed executable built for ARM and x86_64 platforms (#6646)
* [CI] Add ARM64 test to Jenkins pipeline (#6643)
* Disable s390x and arm64 tests on travis for now. (#6641)
* Move sdist test to action. (#6635)
* [dask] Rework base margin test. (#6627)

### Maintenance: Refactor code for legibility and maintainability
* Improve OpenMP exception handling (#6680)
* Improve string view to reduce string allocation. (#6644)
* Simplify Span checks. (#6685)
* Use generic dispatching routine for array interface. (#6672)

## v1.3.0 (2020.12.08)

### XGBoost4J-Spark: Exceptions should cancel jobs gracefully instead of killing SparkContext (#6019)
* By default, exceptions in XGBoost4J-Spark cause the whole SparkContext to shut down, necessitating a restart of the Spark cluster. This behavior is often a major inconvenience.
* Starting from the 1.3.0 release, XGBoost adds a new parameter `killSparkContextOnWorkerFailure` to optionally prevent killing SparkContext. If this parameter is set, exceptions will gracefully cancel training jobs instead of killing SparkContext.

### GPUTreeSHAP: GPU acceleration of the TreeSHAP algorithm (#6038, #6064, #6087, #6099, #6163, #6281, #6332)
* [SHAP (SHapley Additive exPlanations)](https://github.com/slundberg/shap) is a game-theoretic approach to explain predictions of machine learning models. It computes feature importance scores for individual examples, establishing how each feature influences a particular prediction. TreeSHAP is an optimized SHAP algorithm specifically designed for decision tree ensembles.
* Starting with the 1.3.0 release, it is now possible to leverage CUDA-capable GPUs to accelerate the TreeSHAP algorithm. Check out [the demo notebook](https://github.com/dmlc/xgboost/blob/master/demo/gpu_acceleration/shap.ipynb).
* The CUDA implementation of the TreeSHAP algorithm is hosted at [rapidsai/GPUTreeSHAP](https://github.com/rapidsai/gputreeshap). XGBoost imports it as a Git submodule.

### New style Python callback API (#6199, #6270, #6320, #6348, #6376, #6399, #6441)
* The XGBoost Python package now offers a re-designed callback API. The new callback API lets you design various extensions of training in idiomatic Python. In addition, the new callback API allows you to use early stopping with the native Dask API (`xgboost.dask`). Check out [the tutorial](https://xgboost.readthedocs.io/en/release_1.3.0/python/callbacks.html) and [the demo](https://github.com/dmlc/xgboost/blob/master/demo/guide-python/callbacks.py), as well as the sketch below.
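
A minimal sketch of the new API (the data is synthetic and the callback name is illustrative): a custom callback subclasses `xgboost.callback.TrainingCallback`, and built-in callbacks such as `EarlyStopping` are passed through the `callbacks` argument.

```
import numpy as np
import xgboost as xgb

X, y = np.random.rand(100, 10), np.random.randint(2, size=100)
dtrain = xgb.DMatrix(X, label=y)

class LogLossLogger(xgb.callback.TrainingCallback):
    def after_iteration(self, model, epoch, evals_log):
        # evals_log looks like {"train": {"logloss": [...]}}
        print(epoch, evals_log["train"]["logloss"][-1])
        return False  # returning True would stop training

booster = xgb.train(
    {"objective": "binary:logistic"},
    dtrain,
    num_boost_round=10,
    evals=[(dtrain, "train")],
    callbacks=[LogLossLogger(), xgb.callback.EarlyStopping(rounds=3)],
)
```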

### Enable the use of `DeviceQuantileDMatrix` / `DaskDeviceQuantileDMatrix` with large data (#6201, #6229, #6234)
* `DeviceQuantileDMatrix` can achieve memory savings by avoiding extra copies of the training data, and the savings are bigger for large data. Unfortunately, large data with more than 2^31 elements was triggering integer overflow bugs in CUB and Thrust. Tracking issue: #6228.
* This release contains a series of work-arounds to allow the use of `DeviceQuantileDMatrix` with large data:
  - Loop over `copy_if` (#6201)
  - Loop over `thrust::reduce` (#6229)
  - Implement the inclusive scan algorithm in-house, to handle large offsets (#6234)

### Support slicing of tree models (#6302)
* Accessing the best iteration of a model after the application of early stopping used to be error-prone, as users needed to manually pass the `ntree_limit` argument to the `predict()` function.
* Now we provide a simple interface to slice tree models by specifying a range of boosting rounds. The tree ensemble can be split into multiple sub-ensembles via the slicing interface. Check out [an example](https://xgboost.readthedocs.io/en/release_1.3.0/python/model.html); a short sketch also follows below.
* In addition, the early stopping callback now supports the `save_best` option. When enabled, XGBoost will save (persist) the model at the best boosting round and discard the trees that were fit subsequent to the best round.
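
A minimal sketch of the slicing syntax in the Python package (synthetic data for illustration):

```
import numpy as np
import xgboost as xgb

X, y = np.random.rand(200, 5), np.random.rand(200)
dtrain = xgb.DMatrix(X, label=y)
booster = xgb.train({"objective": "reg:squarederror"}, dtrain, num_boost_round=10)

sliced = booster[3:7]           # sub-ensemble with trees from rounds 3-6
preds = sliced.predict(dtrain)  # predictions use only the selected trees
```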

### Weighted subsampling of features (columns) (#5962)
* It is now possible to sample features (columns) via weighted subsampling, in which features with higher weights are more likely to be selected in the sample. Weighted subsampling allows you to encode domain knowledge by emphasizing a particular set of features in the choice of tree splits. In addition, you can prevent particular features from being used in any splits, by assigning them zero weights.
* Check out [the demo](https://github.com/dmlc/xgboost/blob/master/demo/guide-python/feature_weights.py); a short sketch follows below.
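
A minimal sketch (the weights are illustrative); note that weighted sampling only takes effect when column subsampling is enabled:

```
import numpy as np
import xgboost as xgb

X, y = np.random.rand(200, 5), np.random.rand(200)
dtrain = xgb.DMatrix(X, label=y)

# Higher weight -> more likely to be sampled for splits;
# a zero weight excludes the feature from splitting entirely.
dtrain.set_info(feature_weights=np.array([1.0, 2.0, 4.0, 1.0, 0.0]))

booster = xgb.train(
    {"tree_method": "hist", "colsample_bynode": 0.5},
    dtrain,
    num_boost_round=10,
)
```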

### Improved integration with Dask
* Support reverse-proxy environments such as Google Kubernetes Engine (#6343, #6475)
* An XGBoost training job will no longer use all available workers. Instead, it will only use the workers that contain input data (#6343).
* The new callback API works well with the Dask training API.
* The `predict()` and `fit()` functions of `DaskXGBClassifier` and `DaskXGBRegressor` now accept a base margin (#6155).
* Support more meta data in the Dask API (#6130, #6132, #6333).
* Allow passing extra keyword arguments as `kwargs` in `predict()` (#6117)
* Fix typo in dask interface: `sample_weights` -> `sample_weight` (#6240)
* Allow empty data matrix in AFT survival, as Dask may produce empty partitions (#6379)
* Speed up prediction by overlapping prediction jobs in all workers (#6412)

### Experimental support for direct splits with categorical features (#6028, #6128, #6137, #6140, #6164, #6165, #6166, #6179, #6194, #6219)
* Currently, XGBoost requires users to one-hot-encode categorical variables. This has adverse performance implications, as the creation of many dummy variables results in higher memory consumption and may require fitting deeper trees to achieve equivalent model accuracy.
* The 1.3.0 release of XGBoost contains experimental support for direct handling of categorical variables in test nodes. Each test node will have a condition of the form `feature_value \in match_set`, where the `match_set` on the right-hand side contains one or more matching categories. The matching categories in `match_set` represent the condition for traversing to the right child node. Currently, XGBoost will only generate categorical splits with only a single matching category ("one-vs-rest split"). In a future release, we plan to remove this restriction and produce splits with multiple matching categories in `match_set`.
* The categorical split requires the use of JSON model serialization. The legacy binary serialization method cannot be used to save (persist) models with categorical splits.
* Note. This feature is currently highly experimental. Use it at your own risk. See the detailed list of limitations at [#5949](https://github.com/dmlc/xgboost/pull/5949). A short sketch of the Python interface follows below.
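
A minimal sketch of the experimental Python interface, assuming a CUDA-capable GPU (the column names and data are illustrative):

```
import numpy as np
import pandas as pd
import xgboost as xgb

# Categorical columns must use pandas' `category` dtype.
df = pd.DataFrame({
    "color": pd.Categorical(np.random.choice(["red", "green", "blue"], 200)),
    "size": np.random.rand(200),
})
y = np.random.rand(200)

# `enable_categorical` opts in to the experimental split type; only
# `gpu_hist` supports it in 1.3.0, and models must be saved as JSON.
dtrain = xgb.DMatrix(df, label=y, enable_categorical=True)
booster = xgb.train({"tree_method": "gpu_hist"}, dtrain, num_boost_round=10)
booster.save_model("categorical_model.json")
```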

### Experimental plugin for RAPIDS Memory Manager (#5873, #6131, #6146, #6150, #6182)
* The RAPIDS Memory Manager library ([rapidsai/rmm](https://github.com/rapidsai/rmm)) provides a collection of efficient memory allocators for NVIDIA GPUs. It is now possible to use XGBoost with memory allocators provided by RMM, by enabling the RMM integration plugin. With this plugin, XGBoost is now able to share a common GPU memory pool with other applications using RMM, such as the RAPIDS data science packages.
* See [the demo](https://github.com/dmlc/xgboost/blob/master/demo/rmm_plugin/README.md) for a working example, as well as directions for building XGBoost with the RMM plugin; a short sketch follows below.
* The plugin will soon be considered non-experimental, once #6297 is resolved.
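
A minimal sketch of using the plugin from Python, assuming XGBoost was built with the RMM plugin enabled (`-DPLUGIN_RMM=ON`); see the demo README above for the authoritative directions:

```
import numpy as np
import rmm
import xgboost as xgb

# Let RMM manage a pooled GPU memory allocator that can be shared
# with other RAPIDS libraries.
rmm.reinitialize(pool_allocator=True)

# Subsequent GPU training allocates through RMM.
X, y = np.random.rand(200, 5), np.random.rand(200)
booster = xgb.train(
    {"tree_method": "gpu_hist"},
    xgb.DMatrix(X, label=y),
    num_boost_round=10,
)
```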

### Experimental plugin for the oneAPI programming model (#5825)
* oneAPI is a programming interface developed by Intel aimed at providing one programming model for many types of hardware, such as CPUs, GPUs, FPGAs and other hardware accelerators.
* XGBoost now includes an experimental plugin for using oneAPI for the predictor and objective functions. The plugin is hosted in the directory `plugin/updater_oneapi`.
* Roadmap: #5442

### Pickling the XGBoost model will now trigger JSON serialization (#6027)
* The pickle will now contain the JSON string representation of the XGBoost model, as well as related configuration.

### Performance improvements
* Various performance improvements on multi-core CPUs
  - Optimize DMatrix build time by up to 3.7x. (#5877)
  - CPU predict performance improvement, by up to 3.6x. (#6127)
  - Optimize CPU sketch allreduce for sparse data (#6009)
  - Thread-local memory allocation for BuildHist, leading to a speedup of up to 1.7x. (#6358)
  - Disable hyperthreading for DMatrix creation (#6386). This speeds up DMatrix creation by up to 2x.
  - Simple fix for static schedule in predict (#6357)
* Unify thread configuration, to make it easy to utilize all CPU cores (#6186)
* [jvm-packages] Clean the way deterministic partitioning is computed (#6033)
* Speed up JSON serialization by implementing an intrusive pointer class (#6129). It leads to a 1.5x-2x performance boost.

### API additions
* [R] Add SHAP summary plot using ggplot2 (#5882)
* Modin DataFrame can now be used as input (#6055)
* [jvm-packages] Add `getNumFeature` method (#6075)
* Add MAPE metric (#6119)
* Implement GPU predict leaf. (#6187)
* Enable cuDF/cuPy inputs in `XGBClassifier` (#6269)
* Document tree method for feature weights. (#6312)
* Add `fail_on_invalid_gpu_id` parameter, which will cause XGBoost to terminate upon seeing an invalid value of `gpu_id` (#6342)

### Breaking: the default evaluation metric for classification is changed to `logloss` / `mlogloss` (#6183)
* The default metric used to be accuracy, and it is not statistically consistent to perform early stopping with the accuracy metric when we are really optimizing the log loss for the `binary:logistic` objective.
* For statistical consistency, the default metric for classification has been changed to `logloss`. Users may choose to preserve the old behavior by explicitly specifying `eval_metric`, as in the sketch below.
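
For example, to keep the pre-1.3.0 behavior (a minimal sketch with synthetic data):

```
import numpy as np
import xgboost as xgb

X, y = np.random.rand(100, 5), np.random.randint(2, size=100)
dtrain = xgb.DMatrix(X, label=y)

# Explicitly request the old default metric (classification error)
# instead of the new default, logloss.
params = {"objective": "binary:logistic", "eval_metric": "error"}
booster = xgb.train(params, dtrain, num_boost_round=10,
                    evals=[(dtrain, "train")])
```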

### Breaking: `skmaker` is now removed (#5971)
* The `skmaker` updater was never documented or tested.

### Breaking: the JSON model format no longer stores the leaf child count (#6094)
* The leaf child count field has been deprecated and is not used anywhere in the XGBoost codebase.

### Breaking: XGBoost now requires MacOS 10.14 (Mojave) and later
* Homebrew has dropped support for MacOS 10.13 (High Sierra), so we are not able to install the OpenMP runtime (`libomp`) from Homebrew on MacOS 10.13. Please use MacOS 10.14 (Mojave) or later.

### Deprecation notices
* The use of `LabelEncoder` in `XGBClassifier` is now deprecated and will be removed in the next minor release (#6269). The deprecation is necessary to support multiple types of inputs, such as cuDF data frames or cuPy arrays.
* The use of certain positional arguments in the Python interface is deprecated (#6365). Users will see deprecation warnings when using positional arguments for certain function parameters. New code should use keyword arguments as much as possible. We have not yet decided when we will fully require the use of keyword arguments.

### Bug-fixes
* On big-endian arch, swap the byte order in the binary serializer to enable loading models that were produced by a little-endian machine (#5813).
* [jvm-packages] Fix deterministic partitioning with dataset containing Double.NaN (#5996)
* Limit tree depth for GPU hist to 31 to prevent integer overflow (#6045)
* [jvm-packages] Set `maxBins` to 256 to align with the default value in the C++ code (#6066)
* [R] Fix CRAN check (#6077)
* Add back support for `scipy.sparse.coo_matrix` (#6162)
* Handle duplicated values in sketching. (#6178)
* Catch all standard exceptions in C API. (#6220)
* Fix linear GPU input (#6255)
* Fix inplace prediction interval. (#6259)
* [R] allow `xgb.plot.importance()` calls to fill a grid (#6294)
* Lazy import dask libraries. (#6309)
* Deterministic data partitioning for external memory (#6317)
* Avoid resetting seed for every configuration. (#6349)
* Fix label errors in graph visualization (#6369)
* [jvm-packages] fix potential unit test suites aborted issue due to race condition (#6373)
* [R] Fix warnings from `R check --as-cran` (#6374)
* [R] Fix a crash that occurs with noLD R (#6378)
* [R] Do not convert continuous labels to factors (#6380)
* [R] remove uses of `exists()` (#6387)
* Propagate parameters to the underlying `Booster` handle from `XGBClassifier.set_param` / `XGBRegressor.set_param`. (#6416)
* [R] Fix R package installation via CMake (#6423)
* Enforce row-major order in cuPy array (#6459)
* Fix filtering callable objects in the parameters passed to the scikit-learn API. (#6466)

### Maintenance: Testing, continuous integration, build system
* [CI] Improve JVM test in GitHub Actions (#5930)
* Refactor plotting test so that it can run independently (#6040)
* [CI] Cancel builds on subsequent pushes (#6011)
* Fix Dask Pytest fixture (#6024)
* [CI] Migrate linters to GitHub Actions (#6035)
* [CI] Remove win2016 JVM test from GitHub Actions (#6042)
* Fix CMake build with `BUILD_STATIC_LIB` option (#6090)
* Don't link imported target in CMake (#6093)
* Work around a compiler bug in MacOS AppleClang 11 (#6103)
* [CI] Fix CTest by running it in a correct directory (#6104)
* [R] Check warnings explicitly for model compatibility tests (#6114)
* [jvm-packages] add xgboost4j-gpu/xgboost4j-spark-gpu module to facilitate release (#6136)
* [CI] Time GPU tests. (#6141)
* [R] remove warning in configure.ac (#6152)
* [CI] Upgrade cuDF and RMM to 0.16 nightlies; upgrade to Ubuntu 18.04 (#6157)
* [CI] Test C API demo (#6159)
* Option for generating device debug info. (#6168)
* Update `.gitignore` (#6175, #6193, #6346)
* Hide C++ symbols from dmlc-core (#6188)
* [CI] Added arm64 job in Travis-CI (#6200)
* [CI] Fix Docker build for CUDA 11 (#6202)
* [CI] Move non-OpenMP gtest to GitHub Actions (#6210)
* [jvm-packages] Fix up build for xgboost4j-gpu, xgboost4j-spark-gpu (#6216)
* Add more tests for categorical data support (#6219)
* [dask] Test for data initialization. (#6226)
* Bump junit from 4.11 to 4.13.1 in /jvm-packages/xgboost4j (#6230)
* Bump junit from 4.11 to 4.13.1 in /jvm-packages/xgboost4j-gpu (#6233)
* [CI] Reduce testing load with RMM (#6249)
* [CI] Build a Python wheel for aarch64 platform (#6253)
* [CI] Time the CPU tests on Jenkins. (#6257)
* [CI] Skip Dask tests on ARM. (#6267)
* Fix a typo in `is_arm()` in testing.py (#6271)
* [CI] replace `egrep` with `grep -E` (#6287)
* Support unity build. (#6295)
* [CI] Mark flaky tests as XFAIL (#6299)
* [CI] Use separate Docker cache for each CUDA version (#6305)
* Added `USE_NCCL_LIB_PATH` option to enable user to set `NCCL_LIBRARY` during build (#6310)
* Fix flaky data initialization test. (#6318)
* Add a badge for GitHub Actions (#6321)
* Optional `find_package` for sanitizers. (#6329)
* Use pytest conventions consistently in Python tests (#6337)
* Fix missing space in warning message (#6340)
* Update `custom_metric_obj.rst` (#6367)
* [CI] Run R check with `--as-cran` flag on GitHub Actions (#6371)
* [CI] Remove R check from Jenkins (#6372)
* Mark GPU external memory test as XFAIL. (#6381)
* [CI] Add noLD R test (#6382)
* Fix MPI build. (#6403)
* [CI] Upgrade to MacOS Mojave image (#6406)
* Fix flaky sparse page dmatrix test. (#6417)
* [CI] Upgrade cuDF and RMM to 0.17 nightlies (#6434)
* [CI] Fix CentOS 6 Docker images (#6467)
* [CI] Vendor libgomp in the manylinux Python wheel (#6461)
* [CI] Hot fix for libgomp vendoring (#6482)

### Maintenance: Clean up and merge the Rabit submodule (#6023, #6095, #6096, #6105, #6110, #6262, #6275, #6290)
* The Rabit submodule is now maintained as part of the XGBoost codebase.
* Tests for Rabit are now part of the test suites of XGBoost.
* Rabit can now be built on the Windows platform.
* We made various code re-formatting changes to the C++ code with clang-tidy.
* Public headers of XGBoost no longer depend on Rabit headers.
* Unused CMake targets for Rabit were removed.
* Single-point model recovery has been dropped and removed from Rabit, simplifying the Rabit code greatly. The single-point model recovery feature had not been adequately maintained over the years.
* We removed the parts of Rabit that were not useful for XGBoost.

### Maintenance: Refactor code for legibility and maintainability
* Unify CPU hist sketching (#5880)
* [R] fix uses of 1:length(x) and other small things (#5992)
* Unify evaluation functions. (#6037)
* Make binary bin search reusable. (#6058)
* Unify set index data. (#6062)
* [R] Remove `stringi` dependency (#6109)
* Merge extract cuts into QuantileContainer. (#6125)
* Reduce C++ compiler warnings (#6197, #6198, #6213, #6286, #6325)
* Cleanup Python code. (#6223)
* Small cleanup to evaluator. (#6400)

### Usability Improvements, Documentation
* [jvm-packages] add example to handle missing value other than 0 (#5677)
* Add DMatrix usage examples to the C API demo (#5854)
* List `DaskDeviceQuantileDMatrix` in the doc. (#5975)
* Update Python custom objective demo. (#5981)
* Update the JSON model schema to document more objective functions. (#5982)
* [Python] Fix warning when `missing` field is not used. (#5969)
* Fix typo in tracker logging (#5994)
* Move a warning about empty dataset, so that it's shown for all objectives and metrics (#5998)
* Fix the instructions for installing the nightly build. (#6004)
* [Doc] Add dtreeviz as a showcase example of integration with 3rd-party software (#6013)
* [jvm-packages] [doc] Update install doc for JVM packages (#6051)
* Fix typo in `xgboost.callback.early_stop` docstring (#6071)
* Add cache suffix to the files used in the external memory demo. (#6088)
* [Doc] Document the parameter `kill_spark_context_on_worker_failure` (#6097)
* Fix link to the demo for custom objectives (#6100)
* Update Dask doc. (#6108)
* Validate weights are positive values. (#6115)
* Document the updated CMake version requirement. (#6123)
* Add demo for `DaskDeviceQuantileDMatrix`. (#6156)
* Cosmetic fixes in `faq.rst` (#6161)
* Fix error message. (#6176)
* [Doc] Add list of winning solutions in data science competitions using XGBoost (#6177)
* Fix a comment in demo to use correct reference (#6190)
* Update the list of winning solutions using XGBoost (#6192)
* Consistent style for build status badge (#6203)
* [Doc] Add info on GPU compiler (#6204)
* Update the list of winning solutions (#6222, #6254)
* Add link to XGBoost's Twitter handle (#6244)
* Fix minor typos in XGBClassifier methods' docstrings (#6247)
* Add sponsors link to FUNDING.yml (#6252)
* Group CLI demo into subdirectory. (#6258)
* Reduce warning messages from `gbtree`. (#6273)
* Create a tutorial for using the C API in a C/C++ application (#6285)
* Update plugin instructions for CMake build (#6289)
* [doc] make Dask distributed example copy-pastable (#6345)
* [Python] Add option to use `libxgboost.so` from the system path (#6362)
* Fixed a few grammatical mistakes in doc (#6393)
* Fix broken link in CLI doc (#6396)
* Improve documentation for the Dask API (#6413)
* Revise misleading exception information: no such param of `allow_non_zero_missing` (#6418)
* Fix CLI ranking demo. (#6439)
* Fix broken links. (#6455)

### Acknowledgement
**Contributors**: Nan Zhu (@CodingCat), @FelixYBW, Jack Dunn (@JackDunnNZ), Jean Lescut-Muller (@JeanLescut), Boris Feld (@Lothiraldan), Nikhil Choudhary (@Nikhil1O1), Rory Mitchell (@RAMitchell), @ShvetsKS, Anthony D'Amato (@Totoketchup), @Wittty-Panda, neko (@akiyamaneko), Alexander Gugel (@alexanderGugel), @dependabot[bot], DIVYA CHAUHAN (@divya661), Daniel Steinberg (@dstein64), Akira Funahashi (@funasoul), Philip Hyunsu Cho (@hcho3), Tong He (@hetong007), Hristo Iliev (@hiliev), Honza Sterba (@honzasterba), @hzy001, Igor Moura (@igormp), @jameskrach, James Lamb (@jameslamb), Naveed Ahmed Saleem Janvekar (@janvekarnaveed), Kyle Nicholson (@kylejn27), lacrosse91 (@lacrosse91), Christian Lorentzen (@lorentzenchr), Manikya Bardhan (@manikyabard), @nabokovas, John Quitto-Graham (@nvidia-johnq), @odidev, Qi Zhang (@qzhang90), Sergio Gavilán (@sgavil), Tanuja Kirthi Doddapaneni (@tanuja3), Cuong Duong (@tcuongd), Yuan Tang (@terrytangyuan), Jiaming Yuan (@trivialfis), vcarpani (@vcarpani), Vladislav Epifanov (@vepifanov), Vitalie Spinu (@vspinu), Bobby Wang (@wbo4958), Zeno Gantner (@zenogantner), zhang_jf (@zuston)

**Reviewers**: Nan Zhu (@CodingCat), John Zedlewski (@JohnZed), Rory Mitchell (@RAMitchell), @ShvetsKS, Egor Smirnov (@SmirnovEgorRu), Anthony D'Amato (@Totoketchup), @Wittty-Panda, Alexander Gugel (@alexanderGugel), Codecov Comments Bot (@codecov-commenter), Codecov (@codecov-io), DIVYA CHAUHAN (@divya661), Devin Robison (@drobison00), Geoffrey Blake (@geoffreyblake), Mark Harris (@harrism), Philip Hyunsu Cho (@hcho3), Honza Sterba (@honzasterba), Igor Moura (@igormp), @jakirkham, @jameskrach, James Lamb (@jameslamb), Janakarajan Natarajan (@janaknat), Jake Hemstad (@jrhemstad), Keith Kraus (@kkraus14), Kyle Nicholson (@kylejn27), Christian Lorentzen (@lorentzenchr), Michael Mayer (@mayer79), Nikolay Petrov (@napetrov), @odidev, PSEUDOTENSOR / Jonathan McKinney (@pseudotensor), Qi Zhang (@qzhang90), Sergio Gavilán (@sgavil), Scott Lundberg (@slundberg), Cuong Duong (@tcuongd), Yuan Tang (@terrytangyuan), Jiaming Yuan (@trivialfis), vcarpani (@vcarpani), Vladislav Epifanov (@vepifanov), Vincent Nijs (@vnijs), Vitalie Spinu (@vspinu), Bobby Wang (@wbo4958), William Hicks (@wphicks)

## v1.2.0 (2020.08.22)

### XGBoost4J-Spark now supports the GPU algorithm (#5171)
* Now XGBoost4J-Spark is able to leverage NVIDIA GPU hardware to speed up training.
* There is on-going work for accelerating the rest of the data pipeline with NVIDIA GPUs (#5950, #5972).

### XGBoost now supports CUDA 11 (#5808)
* It is now possible to build XGBoost with CUDA 11. Note that we do not yet distribute pre-built binaries built with CUDA 11; all current distributions use CUDA 10.0.

### Better guidance for persisting XGBoost models in an R environment (#5940, #5964)
* Users are strongly encouraged to use `xgb.save()` and `xgb.save.raw()` instead of `saveRDS()`. This is so that the persisted models can be accessed with future releases of XGBoost.
* The previous release (1.1.0) had problems loading models that were saved with `saveRDS()`. This release adds a compatibility layer to restore access to the old RDS files. Note that this is meant to be a temporary measure; users are advised to stop using `saveRDS()` and migrate to `xgb.save()` and `xgb.save.raw()`.

### New objectives and metrics
* The pseudo-Huber loss `reg:pseudohubererror` is added (#5647). The corresponding metric is `mphe`. Right now, the slope is hard-coded to 1.
* The Accelerated Failure Time objective for survival analysis (`survival:aft`) is now accelerated on GPUs (#5714, #5716). The survival metrics `aft-nloglik` and `interval-regression-accuracy` are also accelerated on GPUs.

### Improved integration with scikit-learn
* Added the `n_features_in_` attribute to the scikit-learn interface to store the number of features used (#5780). This is useful for integrating with some scikit-learn features such as `StackingClassifier`. See [this link](https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep010/proposal.html) for more details.
* `XGBoostError` now inherits `ValueError`, which conforms to scikit-learn's exception requirement (#5696).

### Improved integration with Dask
* The XGBoost Dask API now exposes an asynchronous interface (#5862). See [the document](https://xgboost.readthedocs.io/en/latest/tutorials/dask.html#working-with-asyncio) for details.
* Zero-copy ingestion of GPU arrays via `DaskDeviceQuantileDMatrix` (#5623, #5799, #5800, #5803, #5837, #5874, #5901): Previously, the Dask interface had to make 2 data copies: one for concatenating the Dask partition/block into a single block and another for the internal representation. To save memory, we introduce `DaskDeviceQuantileDMatrix`. As long as Dask partitions are resident in the GPU memory, `DaskDeviceQuantileDMatrix` is able to ingest them directly without making copies. This matrix type wraps `DeviceQuantileDMatrix` (see the sketch after this list).
* The prediction function now returns a GPU Series type if the input is from Dask-cuDF (#5710). This is to preserve the input data type.
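
A minimal sketch of the zero-copy path, assuming a CUDA cluster and a hypothetical Parquet file with a `label` column:

```
import xgboost as xgb
import dask_cudf
from dask.distributed import Client
from dask_cuda import LocalCUDACluster

with Client(LocalCUDACluster()) as client:
    df = dask_cudf.read_parquet("train.parquet")
    X, y = df.drop(columns=["label"]), df["label"]

    # Partitions already resident on the GPU are ingested without copies.
    dtrain = xgb.dask.DaskDeviceQuantileDMatrix(client, X, y)
    output = xgb.dask.train(
        client, {"tree_method": "gpu_hist"}, dtrain, num_boost_round=100
    )
    booster = output["booster"]
```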

### Robust handling of external data types (#5689, #5893)
* As we support more and more external data types, the handling logic has proliferated all over the code base and became hard to keep track of. It also became unclear how missing values and threads are handled. We refactored the Python package code to collect all data handling logic in a central location, and now we have an explicit list of all supported data types.

### Improvements in GPU-side data matrix (`DeviceQuantileDMatrix`)
* The GPU-side data matrix now implements its own quantile sketching logic, so that data don't have to be transported back to the main memory (#5700, #5747, #5760, #5846, #5870, #5898). The GK sketching algorithm is also now better documented.
  - Now we can load extremely sparse datasets like URL, although performance is still sub-optimal.
* The GPU-side data matrix now exposes an iterative interface (#5783), so that users are able to construct a matrix from a data iterator. See the [Python demo](https://github.com/dmlc/xgboost/blob/release_1.2.0/demo/guide-python/data_iterator.py).

### New language binding: Swift (#5728)
* Visit https://github.com/kongzii/SwiftXGBoost for more details.

### Robust model serialization with JSON (#5772, #5804, #5831, #5857, #5934)
* We continue efforts from the 1.0.0 release to adopt JSON as the format to save and load models robustly.
* JSON model IO is significantly faster and produces smaller model files.
* Round-trip reproducibility is guaranteed, via the introduction of an efficient float-to-string conversion algorithm known as [the Ryū algorithm](https://dl.acm.org/doi/10.1145/3192366.3192369). The conversion is locale-independent, producing consistent numeric representation regardless of the locale setting of the user's machine.
* We fixed an issue in loading large JSON files to memory.
* It is now possible to load a JSON file from a remote source such as S3.

### Performance improvements
* CPU hist tree method optimization
  - Skip missing lookup in hist row partitioning if data is dense. (#5644)
  - Specialize training procedures for CPU hist tree method on distributed environment. (#5557)
  - Add single point histogram for CPU hist. Previously, the gradient histogram for CPU hist was hard-coded to be 64-bit; now users can specify the parameter `single_precision_histogram` to use a 32-bit histogram instead for faster training performance. (#5624, #5811)
* GPU hist tree method optimization
  - Removed some unnecessary synchronizations and better memory allocation pattern. (#5707)
  - Optimize GPU hist for wide datasets. Previously, for wide datasets the atomic operation was performed on global memory; now it can run on shared memory for faster histogram building. But there's a known small regression on GeForce cards with dense data. (#5795, #5926, #5948, #5631)

### API additions
* Support passing fmap to importance plot (#5719). Now the importance plot can show actual names of features instead of default ones.
* Support 64-bit seed. (#5643)
* A new C API `XGBoosterGetNumFeature` is added for getting the number of features in the booster (#5856).
* Feature names and feature types are now stored in the C++ core and saved in binary DMatrix (#5858).

### Breaking: The `predict()` method of `DaskXGBClassifier` now produces class predictions (#5986). Use `predict_proba()` to obtain probability predictions.
* Previously, `DaskXGBClassifier.predict()` produced probability predictions. This is inconsistent with the behavior of other scikit-learn classifiers, where `predict()` returns class predictions. We make a breaking change in the 1.2.0 release so that `DaskXGBClassifier.predict()` now correctly produces class predictions and thus behaves like other scikit-learn classifiers. Furthermore, we introduce the `predict_proba()` method for obtaining probability predictions, again to be in line with other scikit-learn classifiers. The sketch below illustrates the new behavior.
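
A minimal sketch of the new behavior (synthetic data for illustration):

```
import dask.array as da
import xgboost as xgb
from dask.distributed import Client, LocalCluster

with Client(LocalCluster()) as client:
    X = da.random.random((1000, 10), chunks=100)
    y = da.random.randint(0, 2, size=(1000,), chunks=100)

    clf = xgb.dask.DaskXGBClassifier()
    clf.fit(X, y)

    labels = clf.predict(X)       # class labels, as in scikit-learn
    probs = clf.predict_proba(X)  # per-class probabilities
```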

### Breaking: Custom evaluation metric now receives raw prediction (#5954)
* Previously, the custom evaluation metric received a transformed prediction result when used with a classifier. Now the custom metric will receive a raw (untransformed) prediction and will need to transform the prediction itself, as in the sketch below. See [demo/guide-python/custom\_softmax.py](https://github.com/dmlc/xgboost/blob/release_1.2.0/demo/guide-python/custom_softmax.py) for an example.
* This change is to make the custom metric behave consistently with the custom objective, which already receives raw prediction (#5564).
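
A minimal sketch for a binary classifier: the custom metric now receives raw margin scores and applies the sigmoid itself (the function and metric names are illustrative):

```
import numpy as np
import xgboost as xgb

def logloss_from_margin(predt, dtrain):
    # `predt` is now the raw margin; transform it before scoring.
    y = dtrain.get_label()
    prob = np.clip(1.0 / (1.0 + np.exp(-predt)), 1e-15, 1 - 1e-15)
    loss = -np.mean(y * np.log(prob) + (1 - y) * np.log(1 - prob))
    return "my-logloss", float(loss)

# Pass the metric through `feval` as before; only its input changed.
# booster = xgb.train(params, dtrain, feval=logloss_from_margin)
```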

### Breaking: XGBoost4J-Spark now requires Spark 3.0 and Scala 2.12 (#5836, #5890)
* Starting with version 3.0, Spark can manage GPU resources and allocate them among executors.
* Spark 3.0 dropped support for Scala 2.11 and now only supports Scala 2.12. Thus, XGBoost4J-Spark also only supports Scala 2.12.

### Breaking: XGBoost Python package now requires Python 3.6 and later (#5715)
* Python 3.6 has many useful features such as f-strings.

### Breaking: XGBoost now adopts the C++14 standard (#5664)
* Make sure to use a sufficiently modern C++ compiler that supports C++14, such as Visual Studio 2017, GCC 5.0+, and Clang 3.4+.

### Bug-fixes
* Fix a data race in the prediction function (#5853). As a byproduct, the prediction function now uses a thread-local data store and became thread-safe.
* Restore capability to run prediction when the test input has fewer features than the training data (#5955). This capability is necessary to support predicting with LIBSVM inputs. The previous release (1.1) had broken this capability, so we restore it in this version with better tests.
* Fix OpenMP build with CMake for R package, to support CMake 3.13 (#5895).
* Fix Windows 2016 build (#5902, #5918).
* Fix edge cases in scikit-learn interface with Pandas input by disabling feature validation. (#5953)
* [R] Enable weighted learning to rank (#5945)
* [R] Fix early stopping with custom objective (#5923)
* Fix NDK Build (#5886)
* Add missing explicit template specializations for greater portability (#5921)
* Handle empty rows in data iterators correctly (#5929). This bug affects the file loader and JVM data frames.
* Fix `IsDense` (#5702)
* [jvm-packages] Fix wrong method name `setAllowZeroForMissingValue` (#5740)
* Fix shape inference for Dask predict (#5989)

### Usability Improvements, Documentation
* [Doc] Document that CUDA 10.0 is required (#5872)
* Refactored command line interface (CLI). Now the CLI is able to handle user errors and output basic documentation. (#5574)
* Better error handling in Python: use `raise from` syntax to preserve full stacktrace (#5787).
* The JSON model dump now has a formal schema (#5660, #5818). The benefit is to prevent the `dump_model()` function from breaking. See [this document](https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html#difference-between-saving-model-and-dumping-model) to understand the difference between saving and dumping models.
* Add a reference to the GPU external memory paper (#5684)
* Document more objective parameters in the R package (#5682)
* Document the existence of pre-built binary wheels for MacOS (#5711)
* Remove `max.depth` in the R gblinear example. (#5753)
* Added conda environment file for building docs (#5773)
* Mention the Dask blog post in the doc, which introduces using Dask with GPU and some internal workings. (#5789)
* Fix rendering of Markdown docs (#5821)
* Document new objectives and metrics available on GPUs (#5909)
* Better message when no GPU is found. (#5594)
* Remove the use of `silent` parameter from R demos. (#5675)
* Don't use masked array in array interface. (#5730)
* Update affiliation of @terrytangyuan: Ant Financial -> Ant Group (#5827)
* Move the Dask tutorial closer to other distributed tutorials (#5613)
* Update XGBoost + Dask overview documentation (#5961)
* Show `n_estimators` in the docstring of the scikit-learn interface (#6041)
* Fix a typo in a docstring of the scikit-learn interface (#5980)

### Maintenance: testing, continuous integration, build system
* [CI] Remove CUDA 9.0 from CI (#5674, #5745)
* Require CUDA 10.0+ in CMake build (#5718)
* [R] Remove dependency on gendef for Visual Studio builds (fixes #5608) (#5764). This enables building XGBoost with GPU support with R 4.x.
* [R-package] Reduce duplication in configure.ac (#5693)
* Bump com.esotericsoftware to 4.0.2 (#5690)
* Migrate some tests from AppVeyor to GitHub Actions to speed up the tests. (#5911, #5917, #5919, #5922, #5928)
* Reduce cost of the Jenkins CI server (#5884, #5904, #5892). We now enforce a daily budget via an automated monitor. We also dramatically reduced the workload for the Windows platform, since the cloud VM cost is vastly greater for Windows.
* [R] Set up automated R linter (#5944)
* [R] replace uses of T and F with TRUE and FALSE (#5778)
* Update Docker container 'CPU' (#5956)
* Simplify CMake build with modern CMake techniques (#5871)
* Use `hypothesis` package for testing (#5759, #5835, #5849).
* Define `_CRT_SECURE_NO_WARNINGS` to remove unneeded warnings in MSVC (#5434)
* Run all Python demos in CI, to ensure that they don't break (#5651)
* Enhance nvtx support (#5636). Now we can use unified timer between CPU and GPU. Also CMake is able to find nvtx automatically.
* Speed up python test. (#5752)
* Add helper for generating batches of data. (#5756)
* Add c-api-demo to .gitignore (#5855)
* Add option to enable all compiler warnings in GCC/Clang (#5897)
* Make Python model compatibility test runnable locally (#5941)
* Add cupy to Windows CI (#5797)
* [CI] Fix cuDF install; merge 'gpu' and 'cudf' test suite (#5814)
* Update rabit submodule (#5680, #5876)
* Force colored output for Ninja build. (#5959)
* [CI] Assign larger /dev/shm to NCCL (#5966)
* Add missing Pytest marks to AsyncIO unit test (#5968)
* [CI] Use latest cuDF and dask-cudf (#6048)
* Add CMake flag to log C API invocations, to aid debugging (#5925)
* Fix a unit test on CLI, to handle RC versions (#6050)
* [CI] Use mgpu machine to run gpu hist unit tests (#6050)
* [CI] Build GPU-enabled JAR artifact and deploy to xgboost-maven-repo (#6050)

### Maintenance: Refactor code for legibility and maintainability
* Remove dead code in DMatrix initialization. (#5635)
* Catch dmlc error by ref. (#5678)
* Refactor the `gpu_hist` split evaluation in preparation for batched nodes enumeration. (#5610)
* Remove column major specialization. (#5755)
* Remove unused imports in Python (#5776)
* Avoid including `c_api.h` in header files. (#5782)
* Remove unweighted GK quantile, which is unused. (#5816)
* Add Python binding for rabit ops. (#5743)
* Implement `Empty` method for host device vector. (#5781)
* Remove print (#5867)
* Enforce tree order in JSON (#5974)

### Acknowledgement
**Contributors**: Nan Zhu (@CodingCat), @LionOrCatThatIsTheQuestion, Dmitry Mottl (@Mottl), Rory Mitchell (@RAMitchell), @ShvetsKS, Alex Wozniakowski (@a-wozniakowski), Alexander Gugel (@alexanderGugel), @anttisaukko, @boxdot, Andy Adinets (@canonizer), Ram Rachum (@cool-RR), Elliot Hershberg (@elliothershberg), Jason E. Aten, Ph.D. (@glycerine), Philip Hyunsu Cho (@hcho3), @jameskrach, James Lamb (@jameslamb), James Bourbeau (@jrbourbeau), Peter Jung (@kongzii), Lorenz Walthert (@lorenzwalthert), Oleksandr Kuvshynov (@okuvshynov), Rong Ou (@rongou), Shaochen Shi (@shishaochen), Yuan Tang (@terrytangyuan), Jiaming Yuan (@trivialfis), Bobby Wang (@wbo4958), Zhang Zhang (@zhangzhang10)

**Reviewers**: Nan Zhu (@CodingCat), @LionOrCatThatIsTheQuestion, Hao Yang (@QuantHao), Rory Mitchell (@RAMitchell), @ShvetsKS, Egor Smirnov (@SmirnovEgorRu), Alex Wozniakowski (@a-wozniakowski), Amit Kumar (@aktech), Avinash Barnwal (@avinashbarnwal), @boxdot, Andy Adinets (@canonizer), Chandra Shekhar Reddy (@chandrureddy), Ram Rachum (@cool-RR), Cristiano Goncalves (@cristianogoncalves), Elliot Hershberg (@elliothershberg), Jason E. Aten, Ph.D. (@glycerine), Philip Hyunsu Cho (@hcho3), Tong He (@hetong007), James Lamb (@jameslamb), James Bourbeau (@jrbourbeau), Lee Drake (@leedrake5), DougM (@mengdong), Oleksandr Kuvshynov (@okuvshynov), Rong Ou (@rongou), Shaochen Shi (@shishaochen), Xu Xiao (@sperlingxx), Yuan Tang (@terrytangyuan), Theodore Vasiloudis (@thvasilo), Jiaming Yuan (@trivialfis), Bobby Wang (@wbo4958), Zhang Zhang (@zhangzhang10)

## v1.1.1 (2020.06.06)
This patch release applies the following patches to the 1.1.0 release:

* CPU performance improvement in the PyPI wheels (#5720)
* Fix loading old model (#5724)
* Install pkg-config file (#5744)

## v1.1.0 (2020.05.17)

### Better performance on multi-core CPUs (#5244, #5334, #5522)
* Poor performance scaling of the `hist` algorithm for multi-core CPUs has been under investigation (#3810). #5244 concludes the ongoing effort to improve performance scaling on multi-core CPUs, in particular Intel CPUs. Roadmap: #5104
* #5334 makes steps toward reducing memory consumption for the `hist` tree method on CPU.
* #5522 optimizes random number generation for data sampling.

### Deterministic GPU algorithm for regression and classification (#5361)
* The GPU algorithm for regression and classification tasks is now deterministic.
* Roadmap: #5023. Currently only single-GPU training is deterministic. Distributed training with multiple GPUs is not yet deterministic.

### Improve external memory support on GPUs (#5093, #5365)
* Starting from the 1.0.0 release, we added support for external memory on GPUs to enable training with larger datasets. Gradient-based sampling (#5093) speeds up the external memory algorithm by intelligently sampling a subset of the training data to copy into the GPU memory. [Learn more about out-of-core GPU gradient boosting.](https://arxiv.org/abs/2005.09148)
* GPU-side data sketching now works with data from external memory (#5365).

### Parameter validation: detection of unused or incorrect parameters (#5477, #5569, #5508)
* A mis-spelled training parameter is a common user mistake. In previous versions of XGBoost, mis-spelled parameters were silently ignored. Starting with the 1.0.0 release, XGBoost will produce a warning message if there are any unused training parameters. The 1.1.0 release makes parameter validation available to the scikit-learn interface (#5477) and the R binding (#5569).

### Thread-safe, in-place prediction method (#5389, #5512)
* Previously, the prediction method was not thread-safe (#5339). This release adds a new API function `inplace_predict()` that is thread-safe. It is now possible to serve concurrent requests for prediction using a shared model object.
* It is now possible to compute prediction in-place for selected data formats (`numpy.ndarray` / `scipy.sparse.csr_matrix` / `cupy.ndarray` / `cudf.DataFrame` / `pd.DataFrame`) without creating a `DMatrix` object, as in the sketch below.
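
A minimal sketch with synthetic data:

```
import numpy as np
import xgboost as xgb

X, y = np.random.rand(500, 8), np.random.rand(500)
booster = xgb.train({"objective": "reg:squarederror"},
                    xgb.DMatrix(X, label=y), num_boost_round=10)

# Predict directly on a numpy array: no DMatrix is constructed, and the
# call is thread-safe, so a shared booster can serve concurrent requests.
preds = booster.inplace_predict(np.random.rand(10, 8))
```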

### Addition of Accelerated Failure Time objective for survival analysis (#4763, #5473, #5486, #5552, #5553)
* Survival analysis (regression) models the time it takes for an event of interest to occur. The target label is potentially censored, i.e. the label is a range rather than a single number. We added a new objective `survival:aft` to support survival analysis. Also added is a new API to specify the ranged labels. Check out [the tutorial](https://xgboost.readthedocs.io/en/release_1.1.0/tutorials/aft_survival_analysis.html) and the [demos](https://github.com/dmlc/xgboost/tree/release_1.1.0/demo/aft_survival), as well as the sketch below.
* GPU support is work in progress (#5714).
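
A minimal sketch of the ranged-label API, following the tutorial (synthetic, partly right-censored labels):

```
import numpy as np
import xgboost as xgb

X = np.random.rand(100, 4)
y_lower = np.random.rand(100) * 10
# Right-censored rows have an infinite upper bound.
y_upper = np.where(np.random.rand(100) < 0.3, np.inf, y_lower)

dtrain = xgb.DMatrix(X)
dtrain.set_float_info("label_lower_bound", y_lower)
dtrain.set_float_info("label_upper_bound", y_upper)

params = {
    "objective": "survival:aft",
    "eval_metric": "aft-nloglik",
    "aft_loss_distribution": "normal",
    "aft_loss_distribution_scale": 1.0,
}
booster = xgb.train(params, dtrain, num_boost_round=10)
```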

### Improved installation experience on Mac OSX (#5597, #5602, #5606, #5701)
* It only takes two commands to install the XGBoost Python package: `brew install libomp` followed by `pip install xgboost`. The installed XGBoost will use all CPU cores. Even better, starting with this release, we distribute pre-compiled binary wheels targeting Mac OSX. Now the install command `pip install xgboost` finishes instantly, as it no longer compiles the C++ source of XGBoost. The last three Mac versions (High Sierra, Mojave, Catalina) are supported.
* R package: the 1.1.0 release fixes the error `Initializing libomp.dylib, but found libomp.dylib already initialized` (#5701)

### Ranking metrics are now accelerated on GPUs (#5380, #5387, #5398)

### GPU-side data matrix to ingest data directly from other GPU libraries (#5420, #5465)
* Previously, data on GPU memory had to be copied back to the main memory before it could be used by XGBoost. Starting with the 1.1.0 release, XGBoost provides a dedicated interface (`DeviceQuantileDMatrix`) so that it can ingest data from GPU memory directly. The result is that XGBoost interoperates better with GPU-accelerated data science libraries, such as cuDF, cuPy, and PyTorch.
* Set device in device dmatrix. (#5596)

### Robust model serialization with JSON (#5123, #5217)
* We continue efforts from the 1.0.0 release to adopt JSON as the format to save and load models robustly. Refer to the release note for 1.0.0 to learn more.
* It is now possible to store the internal configuration of the trained model (`Booster`) object in R as a JSON string (#5123, #5217).

### Improved integration with Dask
* Pass through `verbose` parameter for dask fit (#5413)
* Use `DMLC_TASK_ID`. (#5415)
* Order the prediction result. (#5416)
* Honor `nthreads` from dask worker. (#5414)
* Enable grid searching with scikit-learn. (#5417)
* Check non-equal when setting threads. (#5421)
* Accept other inputs for prediction. (#5428)
* Fix missing value for scikit-learn interface. (#5435)

### XGBoost4J-Spark: Check number of columns in the data iterator (#5202, #5303)
* Before, the native layer in XGBoost did not know the number of columns (features) ahead of time and had to guess it by counting the feature index when ingesting data. This method has a failure mode in the distributed setting: if the training data is highly sparse, some features may be completely missing in one or more worker partitions. Thus, one or more workers may deduce an incorrect data shape, leading to crashes or silently wrong models.
* Enforce the correct data shape by passing the number of columns explicitly from the JVM layer into the native layer.

### Major refactoring of the `DMatrix` class
* Continued from the 1.0.0 release.
* Remove update prediction cache from predictors. (#5312)
* Predict on Ellpack. (#5327)
* Partial rewrite of EllpackPage (#5352)
* Use ellpack for prediction only when sparsepage doesn't exist. (#5504)
* RFC: #4354, Roadmap: #5143

### Breaking: XGBoost Python package now requires Pip 19.0 and higher (#5589)
* Your Linux machine may have an old version of Pip and may attempt to install a source package, leading to long installation time. This is because we are now using the `manylinux2010` tag in the binary wheel release. Ensure you have Pip 19.0 or newer by running `python3 -m pip -V` to check the version. Upgrade Pip with the command
```
python3 -m pip install --upgrade pip
```
Upgrading to the latest pip allows us to depend on newer versions of system libraries. [TensorFlow](https://www.tensorflow.org/install/pip) also requires Pip 19.0+.

### Breaking: GPU algorithm now requires CUDA 10.0 and higher (#5649)
* CUDA 10.0 is necessary to make the GPU algorithm deterministic (#5361).

### Breaking: `silent` parameter is now removed (#5476)
* Please use `verbosity` instead.

### Breaking: Set `output_margin` to True for custom objectives (#5564)
* Now custom objectives in both the R and Python interfaces get un-transformed (raw) prediction outputs.

### Breaking: `Makefile` is now removed. We use CMake exclusively to build XGBoost (#5513)
* Exception: the R package uses Autotools, as the CRAN ecosystem has not yet adopted CMake widely.

### Breaking: `distcol` updater is now removed (#5507)
* The `distcol` updater has long been broken, and we currently lack the resources to implement a working replacement from scratch.

### Deprecation notices
* **Python 3.5**. This release is the last release to support Python 3.5. The following release (1.2.0) will require Python 3.6.
* **Scala 2.11**. Currently XGBoost4J supports Scala 2.11. However, if a future release of XGBoost adopts Spark 3, it will not support Scala 2.11, as Spark 3 requires Scala 2.12+. We do not yet know which XGBoost release will adopt Spark 3.

### Known limitations
* (Python package) When early stopping is activated with `early_stopping_rounds` at training time, the prediction method (`xgb.predict()`) behaves in a surprising way. If XGBoost runs for M rounds and chooses iteration N (N < M) as the best iteration, then the prediction method will use M trees by default. To use the best iteration (N trees), users will need to manually take the best iteration field `bst.best_iteration` and pass it as the `ntree_limit` argument to `xgb.predict()`. See #5209 and #4052 for additional context.
* The GPU ranking objective is currently not deterministic (#5561).
* When the training parameter `reg_lambda` is set to zero, some leaf nodes may be assigned a NaN value. (See [discussion](https://discuss.xgboost.ai/t/still-getting-unexplained-nans-new-replication-code/1383/9).) For now, please set `reg_lambda` to a nonzero value.

### Community and Governance
* The XGBoost Project Management Committee (PMC) is pleased to announce a new committer: Egor Smirnov (@SmirnovEgorRu). He has led a major initiative to improve the performance of XGBoost on multi-core CPUs.

### Bug-fixes
* Improved compatibility with scikit-learn (#5255, #5505, #5538)
* Remove f-string, since it's not supported by Python 3.5 (#5330). Note that Python 3.5 support is deprecated and scheduled to be dropped in the upcoming release (1.2.0).
* Fix the pruner so that it doesn't prune the same branch twice (#5335)
* Enforce only major version in JSON model schema (#5336). Any major revision of the model schema would bump up the major version.
* Fix a small typo in sklearn.py that broke multiple eval metrics (#5341)
* Restore loading model from a memory buffer (#5360)
* Define lazy isinstance for Python compat (#5364)
* [R] fixed uses of `class()` (#5426)
* Force compressed buffer to be 4 bytes aligned, to keep cuda-memcheck happy (#5441)
* Remove warning for calling host function (`std::max`) on a GPU device (#5453)
* Fix uninitialized value bug in xgboost callback (#5463)
* Fix model dump in CLI (#5485)
* Fix out-of-bound array access in `WQSummary::SetPrune()` (#5493)
* Ensure that configured `dmlc/build_config.h` is picked up by Rabit and XGBoost, to fix build on Alpine (#5514)
* Fix a misspelled method, made in a git merge (#5509)
* Fix a bug in binary model serialization (#5532)
* Fix CLI model IO (#5535)
* Don't use `uint` for threads (#5542)
* Fix R interaction constraints to handle more than 100000 features (#5543)
* [jvm-packages] XGBoost Spark should deal with NaN when parsing evaluation output (#5546)
* GPU-side data sketching is now aware of query groups in learning-to-rank data (#5551)
* Fix DMatrix slicing for newly added fields (#5552)
* Fix configuration status with loading binary model (#5562)
* Fix build when OpenMP is disabled (#5566)
* R compatibility patches (#5577, #5600)
* gpu\_hist performance fixes (#5558)
* Don't set seed on CLI interface (#5563)
* [R] When serializing model, preserve model attributes related to early stopping (#5573)
* Avoid rabit calls in learner configuration (#5581)
* Hide C++ symbols in libxgboost.so when building Python wheel (#5590). This fixes apache/incubator-tvm#4953.
* Fix compilation on Mac OSX High Sierra (10.13) (#5597)
* Fix build on big endian CPUs (#5617)
* Resolve crash due to use of `vector<bool>::iterator` (#5642)
* Validation of JSON model dump using JSON schema (#5660)

### Performance improvements
* Wide dataset quantile performance improvement (#5306)
* Reduce memory usage of GPU-side data sketching (#5407)
* Reduce span check overhead (#5464)
* Serialise booster after training to free up GPU memory (#5484)
* Use the maximum amount of GPU shared memory available to speed up the histogram kernel (#5491)
* Use non-synchronising scan in Thrust (#5560)
* Use `cudaDeviceGetAttribute()` instead of `cudaGetDeviceProperties()` for speed (#5570)

### API changes
* Support importing data from a Pandas SparseArray (#5431)
* `HostDeviceVector` (vector shared between CPU and GPU memory) now exposes `HostSpan` interface, to enable access on the CPU side with bound check (#5459)
* Accept other gradient types for `SplitEntry` (#5467)

### Usability Improvements, Documentation
* Add `JVM_CHECK_CALL` to prevent C++ exceptions from leaking into the JVM layer (#5199)
* Updated Windows build docs (#5283)
* Update affiliation of @hcho3 (#5292)
* Display Sponsor button, link to OpenCollective (#5325)
* Update docs for GPU external memory (#5332)
* Add link to GPU documentation (#5437)
* Small updates to GPU documentation (#5483)
* Edits on tutorial for XGBoost job on Kubernetes (#5487)
* Add reference to GPU external memory (#5490)
* Fix typos (#5346, #5371, #5384, #5399, #5482, #5515)
* Update Python doc (#5517)
* Add Neptune and Optuna to list of examples (#5528)
* Raise error if the number of data weights doesn't match the number of data sets (#5540)
* Add a note about GPU ranking (#5572)
* Clarify meaning of `training` parameter in the C API function `XGBoosterPredict()` (#5604)
* Better error handling for situations where existing trees cannot be modified (#5406, #5418). This feature is enabled when `process_type` is set to `update`.
|
||||
|
||||
### Maintenance: testing, continuous integration, build system
|
||||
* Add C++ test coverage for data sketching (#5251)
|
||||
* Ignore gdb\_history (#5257)
|
||||
* Rewrite setup.py. (#5271, #5280)
|
||||
* Use `scikit-learn` in extra dependencies (#5310)
|
||||
* Add CMake option to build static library (#5397)
|
||||
* [R] changed FindLibR to take advantage of CMake cache (#5427)
|
||||
* [R] fixed inconsistency in R -e calls in FindLibR.cmake (#5438)
|
||||
* Refactor tests with data generator (#5439)
|
||||
* Resolve failing Travis CI (#5445)
|
||||
* Update dmlc-core. (#5466)
|
||||
* [CI] Use clang-tidy 10 (#5469)
|
||||
* De-duplicate code for checking maximum number of nodes (#5497)
|
||||
* [CI] Use Ubuntu 18.04 LTS in JVM CI, because 19.04 is EOL (#5537)
|
||||
* [jvm-packages] [CI] Create a Maven repository to host SNAPSHOT JARs (#5533)
|
||||
* [jvm-packages] [CI] Publish XGBoost4J JARs with Scala 2.11 and 2.12 (#5539)
|
||||
* [CI] Use Vault repository to re-gain access to devtoolset-4 (#5589)
|
||||
|
||||
### Maintenance: Refactor code for legibility and maintainability
|
||||
* Move prediction cache to Learner (#5220, #5302)
|
||||
* Remove SimpleCSRSource (#5315)
|
||||
* Refactor SparsePageSource, delete cache files after use (#5321)
|
||||
* Remove unnecessary DMatrix methods (#5324)
|
||||
* Split up `LearnerImpl` (#5350)
|
||||
* Move segment sorter to common (#5378)
|
||||
* Move thread local entry into Learner (#5396)
|
||||
* Split up test helpers header (#5455)
|
||||
* Require setting leaf stat when expanding tree (#5501)
|
||||
* Purge `device_helpers.cuh` (#5534)
|
||||
* Use thrust functions instead of custom functions (#5544)
|
||||
|
||||
### Acknowledgement
|
||||
**Contributors**: Nan Zhu (@CodingCat), Rory Mitchell (@RAMitchell), @ShvetsKS, Egor Smirnov (@SmirnovEgorRu), Andrew Kane (@ankane), Avinash Barnwal (@avinashbarnwal), Bart Broere (@bartbroere), Andy Adinets (@canonizer), Chen Qin (@chenqin), Daiki Katsuragawa (@daikikatsuragawa), David Díaz Vico (@daviddiazvico), Darius Kharazi (@dkharazi), Darby Payne (@dpayne), Jason E. Aten, Ph.D. (@glycerine), Philip Hyunsu Cho (@hcho3), James Lamb (@jameslamb), Jan Borchmann (@jborchma), Kamil A. Kaczmarek (@kamil-kaczmarek), Melissa Kohl (@mjkohl32), Nicolas Scozzaro (@nscozzaro), Paul Kaefer (@paulkaefer), Rong Ou (@rongou), Samrat Pandiri (@samratp), Sriram Chandramouli (@sriramch), Yuan Tang (@terrytangyuan), Jiaming Yuan (@trivialfis), Liang-Chi Hsieh (@viirya), Bobby Wang (@wbo4958), Zhang Zhang (@zhangzhang10)
|
||||
|
||||
**Reviewers**: Nan Zhu (@CodingCat), @LeZhengThu, Rory Mitchell (@RAMitchell), @ShvetsKS, Egor Smirnov (@SmirnovEgorRu), Steve Bronder (@SteveBronder), Nikita Titov (@StrikerRUS), Andrew Kane (@ankane), Avinash Barnwal (@avinashbarnwal), @brydag, Andy Adinets (@canonizer), Chandra Shekhar Reddy (@chandrureddy), Chen Qin (@chenqin), Codecov (@codecov-io), David Díaz Vico (@daviddiazvico), Darby Payne (@dpayne), Jason E. Aten, Ph.D. (@glycerine), Philip Hyunsu Cho (@hcho3), James Lamb (@jameslamb), @johnny-cat, Mu Li (@mli), Mate Soos (@msoos), @rnyak, Rong Ou (@rongou), Sriram Chandramouli (@sriramch), Toby Dylan Hocking (@tdhock), Yuan Tang (@terrytangyuan), Oleksandr Pryimak (@trams), Jiaming Yuan (@trivialfis), Liang-Chi Hsieh (@viirya), Bobby Wang (@wbo4958)
|
||||
|
||||
## v1.0.2 (2020.03.03)
|
||||
This patch release applies the following patches to the 1.0.0 release:
|
||||
|
||||
* Fix a small typo in sklearn.py that broke multiple eval metrics (#5341)
|
||||
* Restore loading model from buffer (#5360)
|
||||
* Use type name for data type check (#5364)
|
||||
|
||||
## v1.0.1 (2020.02.21)
|
||||
This release is identical to the 1.0.0 release, except that it fixes a small bug that rendered 1.0.0 incompatible with Python 3.5. See #5328.
|
||||
|
||||
## v1.0.0 (2020.02.19)
|
||||
This release marks a major milestone for the XGBoost project.
|
||||
|
||||
@@ -240,7 +1114,7 @@ This release marks a major milestone for the XGBoost project.
|
||||
* Specify version macro in CMake. (#4730)
|
||||
* Include dmlc-tracker into XGBoost Python package (#4731)
|
||||
* [CI] Use long key ID for Ubuntu repository fingerprints. (#4783)
|
||||
* Remove plugin, CUDA related code in automake & autoconf files (#4789)
|
||||
* Skip related tests when scikit-learn is not installed. (#4791)
|
||||
* Ignore vscode and clion files (#4866)
|
||||
* Use bundled Google Test by default (#4900)
|
||||
@@ -271,7 +1145,7 @@ This release marks a major milestone for the XGBoost project.
|
||||
### Usability Improvements, Documentation
|
||||
* Add Random Forest API to Python API doc (#4500)
|
||||
* Fix Python demo and doc. (#4545)
|
||||
* Remove doc about not supporting CUDA 10.1 (#4578)
|
||||
* Address some sphinx warnings and errors, add doc for building doc. (#4589)
|
||||
* Add instruction to run formatting checks locally (#4591)
|
||||
* Fix docstring for `XGBModel.predict()` (#4592)
|
||||
@@ -286,7 +1160,7 @@ This release marks a major milestone for the XGBoost project.
|
||||
* Update XGBoost4J-Spark doc (#4804)
|
||||
* Regular formatting for evaluation metrics (#4803)
|
||||
* [jvm-packages] Refine documentation for handling missing values in XGBoost4J-Spark (#4805)
|
||||
* Monitor for distributed environment (#4829). This is useful for identifying performance bottlenecks.
|
||||
* Add check for length of weights and produce a good error message (#4872)
|
||||
* Fix DMatrix doc (#4884)
|
||||
* Export C++ headers in CMake installation (#4897)
|
||||
@@ -758,7 +1632,7 @@ This release is packed with many new features and bug fixes.
|
||||
### Known issues
|
||||
* Quantile sketcher fails to produce any quantile for some edge cases (#2943)
|
||||
* The `hist` algorithm leaks memory when used with learning rate decay callback (#3579)
|
||||
* Using custom evaluation function together with early stopping causes assertion failure in XGBoost4J-Spark (#3595)
|
||||
* Early stopping doesn't work with `gblinear` learner (#3789)
|
||||
* Label and weight vectors are not reshared upon the change in number of GPUs (#3794). To get around this issue, delete the `DMatrix` object and re-load.
|
||||
* The `DMatrix` Python objects are initialized with incorrect values when given array slices (#3841)
|
||||
@@ -852,7 +1726,7 @@ This version is only applicable for the Python package. The content is identical
|
||||
- Add scripts to cross-build and deploy artifacts (#3276, #3307)
|
||||
- Fix a compilation error for Scala 2.10 (#3332)
|
||||
* BREAKING CHANGES
|
||||
- `XGBClassifier.predict_proba()` no longer accepts parameter `output_margin`. The parameter makes no sense for `predict_proba()` because the method is to predict class probabilities, not raw margin scores.
|
||||
|
||||
## v0.71 (2018.04.11)
|
||||
* This is a minor release, mainly motivated by issues concerning `pip install`, e.g. #2426, #3189, #3118, and #3194.
|
||||
@@ -868,7 +1742,7 @@ This version is only applicable for the Python package. The content is identical
|
||||
- AUC-PR metric for ranking task (#3172)
|
||||
- Monotonic constraints for 'hist' algorithm (#3085)
|
||||
* GPU support
|
||||
- Create an abstract 1D vector class that moves data seamlessly between the main and GPU memory (#2935, #3116, #3068). This eliminates unnecessary PCIe data transfer during training time.
|
||||
- Fix minor bugs (#3051, #3217)
|
||||
- Fix compatibility error for CUDA 9.1 (#3218)
|
||||
* Python package:
|
||||
@@ -896,7 +1770,7 @@ This version is only applicable for the Python package. The content is identical
|
||||
* Refactored gbm to allow more friendly cache strategy
|
||||
- Specialized some prediction routine
|
||||
* Robust `DMatrix` construction from a sparse matrix
|
||||
* Faster construction of `DMatrix` from 2D NumPy matrices: elide copies, use of multiple threads
|
||||
* Automatically remove nan from input data when it is sparse.
|
||||
- This can solve some user-reported problems where istart != hist.size
|
||||
* Fix the single-instance prediction function to obtain correct predictions
|
||||
@@ -924,7 +1798,7 @@ This version is only applicable for the Python package. The content is identical
|
||||
- Faster, histogram-based tree algorithm (`tree_method='hist'`).
|
||||
- GPU/CUDA accelerated tree algorithms (`tree_method='gpu_hist'` or `'gpu_exact'`), including the GPU-based predictor.
|
||||
- Monotonic constraints: when other features are fixed, force the prediction to be monotonically increasing with respect to a certain specified feature (a short sketch follows this list).
|
||||
- Faster gradient calculation using AVX SIMD
|
||||
- Ability to export models in JSON format
|
||||
- Support for Tweedie regression
|
||||
- Additional dropout options for DART: binomial+1, epsilon
|
||||
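A minimal R sketch of the monotonic-constraint feature listed above, on synthetic data; the `monotone_constraints` vector uses the documented encoding (+1 increasing, -1 decreasing, 0 unconstrained):

```r
library(xgboost)
set.seed(1)
x <- matrix(runif(200 * 2), ncol = 2)
y <- x[, 1] + rnorm(200, sd = 0.01)     # response rises with feature 1
dtrain <- xgb.DMatrix(x, label = y)
bst <- xgb.train(
  params = list(tree_method = "hist",
                # predictions must be non-decreasing in feature 1; feature 2 free
                monotone_constraints = c(1, 0)),
  data = dtrain, nrounds = 20
)
```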
|
||||
@@ -6,8 +6,11 @@ file(GLOB_RECURSE R_SOURCES
|
||||
${CMAKE_CURRENT_LIST_DIR}/src/*.c)
|
||||
# Use object library to expose symbols
|
||||
add_library(xgboost-r OBJECT ${R_SOURCES})
|
||||
|
||||
if (ENABLE_ALL_WARNINGS)
|
||||
target_compile_options(xgboost-r PRIVATE -Wall -Wextra)
|
||||
endif (ENABLE_ALL_WARNINGS)
|
||||
target_compile_definitions(xgboost-r
|
||||
PUBLIC
|
||||
-DXGBOOST_STRICT_R_MODE=1
|
||||
-DXGBOOST_CUSTOMIZE_GLOBAL_PRNG=1
|
||||
-DDMLC_LOG_BEFORE_THROW=0
|
||||
@@ -15,24 +18,27 @@ set(R_DEFINITIONS
|
||||
-DDMLC_LOG_CUSTOMIZE=1
|
||||
-DRABIT_CUSTOMIZE_MSG_
|
||||
-DRABIT_STRICT_CXX98_)
|
||||
target_include_directories(xgboost-r
|
||||
PRIVATE
|
||||
${LIBR_INCLUDE_DIRS}
|
||||
${PROJECT_SOURCE_DIR}/include
|
||||
${PROJECT_SOURCE_DIR}/dmlc-core/include
|
||||
${PROJECT_SOURCE_DIR}/rabit/include)
|
||||
target_link_libraries(xgboost-r PUBLIC ${LIBR_CORE_LIBRARY})
|
||||
if (USE_OPENMP)
|
||||
find_package(OpenMP REQUIRED)
|
||||
target_link_libraries(xgboost-r PUBLIC OpenMP::OpenMP_CXX OpenMP::OpenMP_C)
|
||||
endif (USE_OPENMP)
|
||||
set_target_properties(
|
||||
xgboost-r PROPERTIES
|
||||
CXX_STANDARD 14
|
||||
CXX_STANDARD_REQUIRED ON
|
||||
POSITION_INDEPENDENT_CODE ON)
|
||||
|
||||
# Get compilation and link flags of xgboost-r and propagate to objxgboost
|
||||
target_link_libraries(objxgboost PUBLIC xgboost-r)
|
||||
# Add all objects of xgboost-r to objxgboost
|
||||
target_sources(objxgboost INTERFACE $<TARGET_OBJECTS:xgboost-r>)
|
||||
|
||||
set(LIBR_HOME "${LIBR_HOME}" PARENT_SCOPE)
|
||||
set(LIBR_EXECUTABLE "${LIBR_EXECUTABLE}" PARENT_SCOPE)
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
Package: xgboost
|
||||
Type: Package
|
||||
Title: Extreme Gradient Boosting
|
||||
Version: 1.5.1.1
|
||||
Date: 2021-10-13
|
||||
Authors@R: c(
|
||||
person("Tianqi", "Chen", role = c("aut"),
|
||||
email = "tianqi.tchen@gmail.com"),
|
||||
@@ -53,15 +53,15 @@ Suggests:
|
||||
testthat,
|
||||
lintr,
|
||||
igraph (>= 1.0.1),
|
||||
float,
|
||||
crayon,
|
||||
titanic
|
||||
Depends:
|
||||
R (>= 3.3.0)
|
||||
Imports:
|
||||
Matrix (>= 1.1-0),
|
||||
methods,
|
||||
data.table (>= 1.9.6),
|
||||
jsonlite (>= 1.0),
|
||||
RoxygenNote: 7.1.1
|
||||
SystemRequirements: GNU make, C++14
|
||||
|
||||
@@ -36,8 +36,10 @@ export(xgb.create.features)
|
||||
export(xgb.cv)
|
||||
export(xgb.dump)
|
||||
export(xgb.gblinear.history)
|
||||
export(xgb.get.config)
|
||||
export(xgb.ggplot.deepness)
|
||||
export(xgb.ggplot.importance)
|
||||
export(xgb.ggplot.shap.summary)
|
||||
export(xgb.importance)
|
||||
export(xgb.load)
|
||||
export(xgb.load.raw)
|
||||
@@ -46,10 +48,12 @@ export(xgb.plot.deepness)
|
||||
export(xgb.plot.importance)
|
||||
export(xgb.plot.multi.trees)
|
||||
export(xgb.plot.shap)
|
||||
export(xgb.plot.shap.summary)
|
||||
export(xgb.plot.tree)
|
||||
export(xgb.save)
|
||||
export(xgb.save.raw)
|
||||
export(xgb.serialize)
|
||||
export(xgb.set.config)
|
||||
export(xgb.train)
|
||||
export(xgb.unserialize)
|
||||
export(xgboost)
|
||||
@@ -76,14 +80,10 @@ importFrom(graphics,lines)
|
||||
importFrom(graphics,par)
|
||||
importFrom(graphics,points)
|
||||
importFrom(graphics,title)
|
||||
importFrom(jsonlite,fromJSON)
|
||||
importFrom(jsonlite,toJSON)
|
||||
importFrom(stats,median)
|
||||
importFrom(stats,predict)
|
||||
importFrom(utils,head)
|
||||
importFrom(utils,object.size)
|
||||
importFrom(utils,str)
|
||||
|
||||
@@ -188,7 +188,7 @@ cb.reset.parameters <- function(new_params) {
|
||||
pnames <- gsub("\\.", "_", names(new_params))
|
||||
nrounds <- NULL
|
||||
|
||||
# run some checks in the beginning
|
||||
init <- function(env) {
|
||||
nrounds <<- env$end_iteration - env$begin_iteration + 1
|
||||
|
||||
@@ -263,10 +263,7 @@ cb.reset.parameters <- function(new_params) {
|
||||
#' \itemize{
|
||||
#' \item \code{best_score} the evaluation score at the best iteration
|
||||
#' \item \code{best_iteration} at which boosting iteration the best score has occurred (1-based index)
|
||||
#' }
|
||||
#'
|
||||
#' The same values are also stored as xgb-attributes:
|
||||
#' \itemize{
|
||||
#' \item \code{best_iteration} is stored as a 0-based iteration index (for interoperability of binary models)
|
||||
@@ -351,13 +348,19 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
|
||||
|
||||
finalizer <- function(env) {
|
||||
if (!is.null(env$bst)) {
|
||||
attr_best_score <- as.numeric(xgb.attr(env$bst$handle, 'best_score'))
|
||||
if (best_score != attr_best_score) {
|
||||
# If the difference is too big, throw an error
|
||||
if (abs(best_score - attr_best_score) >= 1e-14) {
|
||||
stop("Inconsistent 'best_score' values between the closure state: ", best_score,
|
||||
" and the xgb.attr: ", attr_best_score)
|
||||
}
|
||||
# If the difference is due to floating-point truncation, update best_score
|
||||
best_score <- attr_best_score
|
||||
}
|
||||
env$bst$best_iteration <- best_iteration
|
||||
env$bst$best_ntreelimit <- best_ntreelimit
|
||||
env$bst$best_score <- best_score
|
||||
} else {
|
||||
env$basket$best_iteration <- best_iteration
|
||||
env$basket$best_ntreelimit <- best_ntreelimit
|
||||
@@ -372,7 +375,7 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
|
||||
return(finalizer(env))
|
||||
|
||||
i <- env$iteration
|
||||
score <- env$bst_evaluation[metric_idx]
|
||||
|
||||
if ((maximize && score > best_score) ||
|
||||
(!maximize && score < best_score)) {
|
||||
@@ -492,13 +495,12 @@ cb.cv.predict <- function(save_models = FALSE) {
|
||||
rep(NA_real_, N)
|
||||
}
|
||||
|
||||
iterationrange <- c(1, NVL(env$basket$best_iteration, env$end_iteration) + 1)
|
||||
if (NVL(env$params[['booster']], '') == 'gblinear') {
|
||||
iterationrange <- c(1, 1) # must be 0 for gblinear
|
||||
}
|
||||
for (fd in env$bst_folds) {
|
||||
pr <- predict(fd$bst, fd$watchlist[[2]], iterationrange = iterationrange, reshape = TRUE)
|
||||
if (is.matrix(pred)) {
|
||||
pred[fd$index, ] <- pr
|
||||
} else {
|
||||
@@ -527,7 +529,7 @@ cb.cv.predict <- function(save_models = FALSE) {
|
||||
#' Callback closure for collecting the model coefficients history of a gblinear booster
|
||||
#' during its training.
|
||||
#'
|
||||
#' @param sparse when set to FALSE/TRUE, a dense/sparse matrix is used to store the result.
|
||||
#' Sparse format is useful when one expects only a subset of coefficients to be non-zero,
|
||||
#' when using the "thrifty" feature selector with fairly small number of top features
|
||||
#' selected per iteration.
|
||||
@@ -554,7 +556,6 @@ cb.cv.predict <- function(save_models = FALSE) {
|
||||
#' #
|
||||
#' # In the iris dataset, it is hard to linearly separate Versicolor class from the rest
|
||||
#' # without considering the 2nd order interactions:
|
||||
#' x <- model.matrix(Species ~ .^2, iris)[,-1]
|
||||
#' colnames(x)
|
||||
#' dtrain <- xgb.DMatrix(scale(x), label = 1*(iris$Species == "versicolor"))
|
||||
@@ -575,7 +576,7 @@ cb.cv.predict <- function(save_models = FALSE) {
|
||||
#' bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 200, eta = 0.8,
|
||||
#' updater = 'coord_descent', feature_selector = 'thrifty', top_k = 1,
|
||||
#' callbacks = list(cb.gblinear.history()))
|
||||
#' matplot(xgb.gblinear.history(bst), type = 'l')
|
||||
#' # Componentwise boosting is known to have similar effect to Lasso regularization.
|
||||
#' # Try experimenting with various values of top_k, eta, nrounds,
|
||||
#' # as well as different feature_selectors.
|
||||
@@ -584,7 +585,7 @@ cb.cv.predict <- function(save_models = FALSE) {
|
||||
#' bst <- xgb.cv(param, dtrain, nfold = 5, nrounds = 100, eta = 0.8,
|
||||
#' callbacks = list(cb.gblinear.history()))
|
||||
#' # coefficients in the CV fold #3
|
||||
#' matplot(xgb.gblinear.history(bst)[[3]], type = 'l')
|
||||
#'
|
||||
#'
|
||||
#' #### Multiclass classification:
|
||||
@@ -597,15 +598,15 @@ cb.cv.predict <- function(save_models = FALSE) {
|
||||
#' bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 70, eta = 0.5,
|
||||
#' callbacks = list(cb.gblinear.history()))
|
||||
#' # Will plot the coefficient paths separately for each class:
|
||||
#' matplot(xgb.gblinear.history(bst, class_index = 0), type = 'l')
|
||||
#' matplot(xgb.gblinear.history(bst, class_index = 1), type = 'l')
|
||||
#' matplot(xgb.gblinear.history(bst, class_index = 2), type = 'l')
|
||||
#'
|
||||
#' # CV:
|
||||
#' bst <- xgb.cv(param, dtrain, nfold = 5, nrounds = 70, eta = 0.5,
|
||||
#' callbacks = list(cb.gblinear.history(FALSE)))
|
||||
#' # 1st fold of 1st class
|
||||
#' matplot(xgb.gblinear.history(bst, class_index = 0)[[1]], type = 'l')
|
||||
#'
|
||||
#' @export
|
||||
cb.gblinear.history <- function(sparse=FALSE) {
|
||||
@@ -613,9 +614,7 @@ cb.gblinear.history <- function(sparse=FALSE) {
|
||||
|
||||
init <- function(env) {
|
||||
if (!is.null(env$bst)) { # xgb.train:
|
||||
} else if (!is.null(env$bst_folds)) { # xgb.cv:
|
||||
} else stop("Parent frame has neither 'bst' nor 'bst_folds'")
|
||||
}
|
||||
|
||||
@@ -638,9 +637,14 @@ cb.gblinear.history <- function(sparse=FALSE) {
|
||||
if (!is.null(env$bst)) { # # xgb.train:
|
||||
coefs <<- list2mat(coefs)
|
||||
} else { # xgb.cv:
|
||||
# second lapply transposes the list
|
||||
coefs <<- lapply(
|
||||
X = lapply(
|
||||
X = seq_along(coefs[[1]]),
|
||||
FUN = function(i) lapply(coefs, "[[", i)
|
||||
),
|
||||
FUN = list2mat
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#
|
||||
# This file is for the low level reusable utility functions
|
||||
# that are not supposed to be visible to a user.
|
||||
#
|
||||
|
||||
#
|
||||
@@ -20,6 +20,12 @@ NVL <- function(x, val) {
|
||||
stop("typeof(x) == ", typeof(x), " is not supported by NVL")
|
||||
}
|
||||
|
||||
# List of classification and ranking objectives
|
||||
.CLASSIFICATION_OBJECTIVES <- function() {
|
||||
return(c('binary:logistic', 'binary:logitraw', 'binary:hinge', 'multi:softmax',
|
||||
'multi:softprob', 'rank:pairwise', 'rank:ndcg', 'rank:map'))
|
||||
}
|
||||
|
||||
|
||||
#
|
||||
# Low-level functions for boosting --------------------------------------------
|
||||
@@ -69,9 +75,9 @@ check.booster.params <- function(params, ...) {
|
||||
|
||||
if (!is.null(params[['monotone_constraints']]) &&
|
||||
typeof(params[['monotone_constraints']]) != "character") {
|
||||
vec2str <- paste(params[['monotone_constraints']], collapse = ',')
|
||||
vec2str <- paste0('(', vec2str, ')')
|
||||
params[['monotone_constraints']] <- vec2str
|
||||
}
|
||||
|
||||
# interaction constraints parser (convert from list of column indices to string)
|
||||
@@ -145,7 +151,8 @@ xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {
|
||||
if (is.null(obj)) {
|
||||
.Call(XGBoosterUpdateOneIter_R, booster_handle, as.integer(iter), dtrain)
|
||||
} else {
|
||||
pred <- predict(booster_handle, dtrain, outputmargin = TRUE, training = TRUE,
|
||||
ntreelimit = 0)
|
||||
gpair <- obj(pred, dtrain)
|
||||
.Call(XGBoosterBoostOneIter_R, booster_handle, dtrain, gpair$grad, gpair$hess)
|
||||
}
|
||||
@@ -166,13 +173,13 @@ xgb.iter.eval <- function(booster_handle, watchlist, iter, feval = NULL) {
|
||||
evnames <- names(watchlist)
|
||||
if (is.null(feval)) {
|
||||
msg <- .Call(XGBoosterEvalOneIter_R, booster_handle, as.integer(iter), watchlist, as.list(evnames))
|
||||
mat <- matrix(strsplit(msg, '\\s+|:')[[1]][-1], nrow = 2)
|
||||
res <- structure(as.numeric(mat[2, ]), names = mat[1, ])
|
||||
} else {
|
||||
res <- sapply(seq_along(watchlist), function(j) {
|
||||
w <- watchlist[[j]]
|
||||
## predict using all trees
|
||||
preds <- predict(booster_handle, w, outputmargin = TRUE, iterationrange = c(1, 1))
|
||||
eval_res <- feval(preds, w)
|
||||
out <- eval_res$value
|
||||
names(out) <- paste0(evnames[j], "-", eval_res$metric)
|
||||
@@ -187,13 +194,23 @@ xgb.iter.eval <- function(booster_handle, watchlist, iter, feval = NULL) {
|
||||
# Helper functions for cross validation ---------------------------------------
|
||||
#
|
||||
|
||||
# Possibly convert the labels into factors, depending on the objective.
|
||||
# The labels are converted into factors only when the given objective refers to the classification
|
||||
# or ranking tasks.
|
||||
convert.labels <- function(labels, objective_name) {
|
||||
if (objective_name %in% .CLASSIFICATION_OBJECTIVES()) {
|
||||
return(as.factor(labels))
|
||||
} else {
|
||||
return(labels)
|
||||
}
|
||||
}
|
||||
|
||||
# Generates random (stratified if needed) CV folds
|
||||
generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
|
||||
|
||||
# cannot do it for rank
|
||||
objective <- params$objective
|
||||
if (is.character(objective) && strtrim(objective, 5) == 'rank:') {
|
||||
stop("\n\tAutomatic generation of CV-folds is not implemented for ranking!\n",
|
||||
"\tConsider providing pre-computed CV-folds through the 'folds=' parameter.\n")
|
||||
}
|
||||
@@ -206,20 +223,17 @@ generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
|
||||
# - For classification, need to convert y labels to factor before making the folds,
|
||||
# and then do stratification by factor levels.
|
||||
# - For regression, leave y numeric and do stratification by quantiles.
|
||||
if (is.character(objective)) {
|
||||
y <- convert.labels(y, params$objective)
|
||||
} else {
|
||||
# If no 'objective' given in params, it means that user either wants to
|
||||
# use the default 'reg:squarederror' objective or has provided a custom
|
||||
# obj function. Here, assume classification setting when y has 5 or less
|
||||
# unique values:
|
||||
if (length(unique(y)) <= 5) {
|
||||
y <- factor(y)
|
||||
}
|
||||
}
|
||||
folds <- xgb.createFolds(y, nfold)
|
||||
} else {
|
||||
# make simple non-stratified folds
|
||||
@@ -271,7 +285,7 @@ xgb.createFolds <- function(y, k = 10)
|
||||
for (i in seq_along(numInClass)) {
|
||||
## create a vector of integers from 1:k as many times as possible without
|
||||
## going over the number of samples in the class. Note that if the number
|
||||
## of samples in a class is less than k, nothing is produced here.
|
||||
seqVector <- rep(seq_len(k), numInClass[i] %/% k)
|
||||
## add enough random integers to get length(seqVector) == numInClass[i]
|
||||
if (numInClass[i] %% k > 0) seqVector <- c(seqVector, sample.int(k, numInClass[i] %% k))
|
||||
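The fold-assignment trick above is easier to see in isolation. A standalone sketch of the same idea, not the package's code path:

```r
# Stratified assignment of one class's samples to k folds: repeat 1..k as many
# whole times as fits, top up with random fold ids, then shuffle within the class.
k <- 5
n_in_class <- 12
fold_ids <- rep(seq_len(k), n_in_class %/% k)               # ten ids: 1..5 twice
if (n_in_class %% k > 0) {
  fold_ids <- c(fold_ids, sample.int(k, n_in_class %% k))   # two random extras
}
fold_ids <- sample(fold_ids)    # shuffle assignments within the class
table(fold_ids)                 # each fold receives 2 or 3 of the 12 samples
```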
@@ -307,6 +321,68 @@ xgb.createFolds <- function(y, k = 10)
|
||||
#' @name xgboost-deprecated
|
||||
NULL
|
||||
|
||||
#' Do not use \code{\link[base]{saveRDS}} or \code{\link[base]{save}} for long-term archival of
|
||||
#' models. Instead, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}}.
|
||||
#'
|
||||
#' It is a common practice to use the built-in \code{\link[base]{saveRDS}} function (or
|
||||
#' \code{\link[base]{save}}) to persist R objects to the disk. While it is possible to persist
|
||||
#' \code{xgb.Booster} objects using \code{\link[base]{saveRDS}}, it is not advisable to do so if
|
||||
#' the model is to be accessed in the future. If you train a model with the current version of
|
||||
#' XGBoost and persist it with \code{\link[base]{saveRDS}}, the model is not guaranteed to be
|
||||
#' accessible in later releases of XGBoost. To ensure that your model can be accessed in future
|
||||
#' releases of XGBoost, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}} instead.
|
||||
#'
|
||||
#' @details
|
||||
#' Use \code{\link{xgb.save}} to save the XGBoost model as a stand-alone file. You may opt into
|
||||
#' the JSON format by specifying the JSON extension. To read the model back, use
|
||||
#' \code{\link{xgb.load}}.
|
||||
#'
|
||||
#' Use \code{\link{xgb.save.raw}} to save the XGBoost model as a sequence (vector) of raw bytes
|
||||
#' in a future-proof manner. Future releases of XGBoost will be able to read the raw bytes and
|
||||
#' re-construct the corresponding model. To read the model back, use \code{\link{xgb.load.raw}}.
|
||||
#' The \code{\link{xgb.save.raw}} function is useful if you'd like to persist the XGBoost model
|
||||
#' as part of another R object.
|
||||
#'
|
||||
#' Note: Do not use \code{\link{xgb.serialize}} to store models long-term. It persists not only the
|
||||
#' model but also internal configurations and parameters, and its format is not stable across
|
||||
#' multiple XGBoost versions. Use \code{\link{xgb.serialize}} only for checkpointing.
|
||||
#'
|
||||
#' For more details and explanation about model persistence and archival, consult the page
|
||||
#' \url{https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html}.
|
||||
#'
|
||||
#' @examples
|
||||
#' data(agaricus.train, package='xgboost')
|
||||
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
|
||||
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
|
||||
#'
|
||||
#' # Save as a stand-alone file; load it with xgb.load()
|
||||
#' xgb.save(bst, 'xgb.model')
|
||||
#' bst2 <- xgb.load('xgb.model')
|
||||
#'
|
||||
#' # Save as a stand-alone file (JSON); load it with xgb.load()
|
||||
#' xgb.save(bst, 'xgb.model.json')
|
||||
#' bst2 <- xgb.load('xgb.model.json')
|
||||
#' if (file.exists('xgb.model.json')) file.remove('xgb.model.json')
|
||||
#'
|
||||
#' # Save as a raw byte vector; load it with xgb.load.raw()
|
||||
#' xgb_bytes <- xgb.save.raw(bst)
|
||||
#' bst2 <- xgb.load.raw(xgb_bytes)
|
||||
#'
|
||||
#' # Persist XGBoost model as part of another R object
|
||||
#' obj <- list(xgb_model_bytes = xgb.save.raw(bst), description = "My first XGBoost model")
|
||||
#' # Persist the R object. Here, saveRDS() is okay, since it doesn't persist
|
||||
#' # xgb.Booster directly. What's being persisted is the future-proof byte representation
|
||||
#' # as given by xgb.save.raw().
|
||||
#' saveRDS(obj, 'my_object.rds')
|
||||
#' # Read back the R object
|
||||
#' obj2 <- readRDS('my_object.rds')
|
||||
#' # Re-construct xgb.Booster object from the bytes
|
||||
#' bst2 <- xgb.load.raw(obj2$xgb_model_bytes)
|
||||
#' if (file.exists('my_object.rds')) file.remove('my_object.rds')
|
||||
#'
|
||||
#' @name a-compatibility-note-for-saveRDS-save
|
||||
NULL
|
||||
|
||||
# Lookup table for the deprecated parameters bookkeeping
|
||||
depr_par_lut <- matrix(c(
|
||||
'print.every.n', 'print_every_n',
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# Construct an internal xgboost Booster and return a handle to it.
|
||||
# internal utility function
|
||||
xgb.Booster.handle <- function(params = list(), cachelist = list(),
|
||||
modelfile = NULL, handle = NULL) {
|
||||
if (typeof(cachelist) != "list" ||
|
||||
!all(vapply(cachelist, inherits, logical(1), what = 'xgb.DMatrix'))) {
|
||||
stop("cachelist must be a list of xgb.DMatrix objects")
|
||||
@@ -10,6 +11,7 @@ xgb.Booster.handle <- function(params = list(), cachelist = list(), modelfile =
|
||||
if (typeof(modelfile) == "character") {
|
||||
## A filename
|
||||
handle <- .Call(XGBoosterCreate_R, cachelist)
|
||||
modelfile <- path.expand(modelfile)
|
||||
.Call(XGBoosterLoadModel_R, handle, modelfile[1])
|
||||
class(handle) <- "xgb.Booster.handle"
|
||||
if (length(params) > 0) {
|
||||
@@ -18,7 +20,7 @@ xgb.Booster.handle <- function(params = list(), cachelist = list(), modelfile =
|
||||
return(handle)
|
||||
} else if (typeof(modelfile) == "raw") {
|
||||
## A memory buffer
|
||||
bst <- xgb.unserialize(modelfile, handle)
|
||||
xgb.parameters(bst) <- params
|
||||
return (bst)
|
||||
} else if (inherits(modelfile, "xgb.Booster")) {
|
||||
@@ -62,8 +64,8 @@ is.null.handle <- function(handle) {
|
||||
return(FALSE)
|
||||
}
|
||||
|
||||
# Return a verified to be valid handle out of either xgb.Booster.handle or
|
||||
# xgb.Booster internal utility function
|
||||
xgb.get.handle <- function(object) {
|
||||
if (inherits(object, "xgb.Booster")) {
|
||||
handle <- object$handle
|
||||
@@ -110,6 +112,8 @@ xgb.get.handle <- function(object) {
|
||||
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
|
||||
#' saveRDS(bst, "xgb.model.rds")
|
||||
#'
|
||||
#' # Warning: The resulting RDS file is only compatible with the current XGBoost version.
|
||||
#' # Refer to the section titled "a-compatibility-note-for-saveRDS-save".
|
||||
#' bst1 <- readRDS("xgb.model.rds")
|
||||
#' if (file.exists("xgb.model.rds")) file.remove("xgb.model.rds")
|
||||
#' # the handle is invalid:
|
||||
@@ -125,7 +129,7 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
|
||||
stop("argument type must be xgb.Booster")
|
||||
|
||||
if (is.null.handle(object$handle)) {
|
||||
object$handle <- xgb.Booster.handle(modelfile = object$raw, handle = object$handle)
|
||||
} else {
|
||||
if (is.null(object$raw) && saveraw) {
|
||||
object$raw <- xgb.serialize(object$handle)
|
||||
@@ -164,8 +168,7 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
|
||||
#' @param outputmargin whether the prediction should be returned in the for of original untransformed
|
||||
#' sum of predictions from boosting iterations' results. E.g., setting \code{outputmargin=TRUE} for
|
||||
#' logistic regression would result in predictions for log-odds instead of probabilities.
|
||||
#' @param ntreelimit Deprecated, use \code{iterationrange} instead.
|
||||
#' @param predleaf whether predict leaf index.
|
||||
#' @param predcontrib whether to return feature contributions to individual predictions (see Details).
|
||||
#' @param approxcontrib whether to use a fast approximation for feature contributions (see Details).
|
||||
@@ -175,16 +178,19 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
|
||||
#' or predinteraction flags is TRUE.
|
||||
#' @param training whether is the prediction result used for training. For dart booster,
|
||||
#' training predicting will perform dropout.
|
||||
#' @param iterationrange Specifies which layers of trees are used in prediction. For
|
||||
#' example, if a random forest is trained with 100 rounds and \code{iterationrange = c(1, 21)}
|
||||
#' is specified, then only the forests built during rounds [1, 21) (a half-open interval)
|
||||
#' are used in this prediction. The index is 1-based, just like an R vector. When set
|
||||
#' to \code{c(1, 1)}, XGBoost will use all trees.
|
||||
#' @param strict_shape Default is \code{FALSE}. When it's set to \code{TRUE}, output
|
||||
#' type and shape of prediction are invariant to model type.
|
||||
#'
|
||||
#' @param ... Parameters passed to \code{predict.xgb.Booster}
|
||||
#'
|
||||
#' @details
|
||||
#' Note that \code{iterationrange} would currently do nothing for predictions from gblinear,
|
||||
#' since gblinear doesn't keep its boosting history.
|
||||
#'
|
||||
#' One possible practical application of the \code{predleaf} option is to use the model
|
||||
@@ -205,7 +211,8 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
|
||||
#' of the most important features first. See below about the format of the returned results.
|
||||
#'
|
||||
#' @return
|
||||
#' The return type is different depending on whether \code{strict_shape} is set to \code{TRUE}. By default,
|
||||
#' for regression or binary classification, it returns a vector of length \code{nrows(newdata)}.
|
||||
#' For multiclass classification, either a \code{num_class * nrows(newdata)} vector or
|
||||
#' a \code{(nrows(newdata), num_class)} dimension matrix is returned, depending on
|
||||
#' the \code{reshape} value.
|
||||
@@ -227,6 +234,13 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
|
||||
#' For a multiclass case, a list of \code{num_class} elements is returned, where each element is
|
||||
#' such an array.
|
||||
#'
|
||||
#' When \code{strict_shape} is set to \code{TRUE}, the output is always an array. For
|
||||
#' normal prediction, the output is a 2-dimension array \code{(num_class, nrow(newdata))}.
|
||||
#'
|
||||
#' For \code{predcontrib = TRUE}, output is \code{(ncol(newdata) + 1, num_class, nrow(newdata))}
|
||||
#' For \code{predinteraction = TRUE}, output is \code{(ncol(newdata) + 1, ncol(newdata) + 1, num_class, nrow(newdata))}
|
||||
#' For \code{predleaf = TRUE}, output is \code{(n_trees_in_forest, num_class, n_iterations, nrow(newdata))}
|
||||
#'
|
||||
#' @seealso
|
||||
#' \code{\link{xgb.train}}.
|
||||
#'
|
||||
@@ -249,7 +263,7 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
|
||||
#' # use all trees by default
|
||||
#' pred <- predict(bst, test$data)
|
||||
#' # use only the 1st tree
|
||||
#' pred1 <- predict(bst, test$data, iterationrange = c(1, 2))
|
||||
#'
|
||||
#' # Predicting tree leafs:
|
||||
#' # the result is an nsamples X ntrees matrix
|
||||
@@ -301,31 +315,14 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
|
||||
#' all.equal(pred, pred_labels)
|
||||
#' # prediction from using only 5 iterations should result
|
||||
#' # in the same error as seen in iteration 5:
|
||||
#' pred5 <- predict(bst, as.matrix(iris[, -5]), iterationrange=c(1, 6))
|
||||
#' sum(pred5 != lb)/length(lb)
|
||||
#'
|
||||
#'
|
||||
#' ## random forest-like model of 25 trees for binary classification:
|
||||
#'
|
||||
#' set.seed(11)
|
||||
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 5,
|
||||
#' nthread = 2, nrounds = 1, objective = "binary:logistic",
|
||||
#' num_parallel_tree = 25, subsample = 0.6, colsample_bytree = 0.1)
|
||||
#' # Inspect the prediction error vs number of trees:
|
||||
#' lb <- test$label
|
||||
#' dtest <- xgb.DMatrix(test$data, label=lb)
|
||||
#' err <- sapply(1:25, function(n) {
|
||||
#' pred <- predict(bst, dtest, ntreelimit=n)
|
||||
#' sum((pred > 0.5) != lb)/length(lb)
|
||||
#' })
|
||||
#' plot(err, type='l', ylim=c(0,0.1), xlab='#trees')
|
||||
#'
|
||||
#' @rdname predict.xgb.Booster
|
||||
#' @export
|
||||
predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FALSE, ntreelimit = NULL,
|
||||
predleaf = FALSE, predcontrib = FALSE, approxcontrib = FALSE, predinteraction = FALSE,
|
||||
reshape = FALSE, training = FALSE, iterationrange = NULL, strict_shape = FALSE, ...) {
|
||||
object <- xgb.Booster.complete(object, saveraw = FALSE)
|
||||
if (!inherits(newdata, "xgb.DMatrix"))
|
||||
newdata <- xgb.DMatrix(newdata, missing = missing)
|
||||
@@ -333,62 +330,134 @@ predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FA
|
||||
!is.null(colnames(newdata)) &&
|
||||
!identical(object[["feature_names"]], colnames(newdata)))
|
||||
stop("Feature names stored in `object` and `newdata` are different!")
|
||||
|
||||
if (NVL(object$params[['booster']], '') == 'gblinear' || is.null(ntreelimit))
|
||||
ntreelimit <- 0
|
||||
if (ntreelimit < 0)
|
||||
stop("ntreelimit cannot be negative")
|
||||
|
||||
if (ntreelimit != 0 && is.null(iterationrange)) {
|
||||
## only ntreelimit, initialize iteration range
|
||||
iterationrange <- c(0, 0)
|
||||
} else if (ntreelimit == 0 && !is.null(iterationrange)) {
|
||||
## only iteration range, handle 1-based indexing
|
||||
iterationrange <- c(iterationrange[1] - 1, iterationrange[2] - 1)
|
||||
} else if (ntreelimit != 0 && !is.null(iterationrange)) {
|
||||
## both are specified, let libxgboost throw an error
|
||||
} else {
|
||||
## no limit is supplied, use best
|
||||
if (is.null(object$best_iteration)) {
|
||||
iterationrange <- c(0, 0)
|
||||
} else {
|
||||
## We don't need to + 1 as R is 1-based index.
|
||||
iterationrange <- c(0, as.integer(object$best_iteration))
|
||||
}
|
||||
}
|
||||
## Handle the 0 length values.
|
||||
box <- function(val) {
|
||||
if (length(val) == 0) {
|
||||
cval <- vector(, 1)
|
||||
cval[0] <- val
|
||||
return(cval)
|
||||
}
|
||||
return (val)
|
||||
}
|
||||
|
||||
## We set strict_shape to TRUE then drop the dimensions conditionally
|
||||
args <- list(
|
||||
training = box(training),
|
||||
strict_shape = box(TRUE),
|
||||
iteration_begin = box(as.integer(iterationrange[1])),
|
||||
iteration_end = box(as.integer(iterationrange[2])),
|
||||
ntree_limit = box(as.integer(ntreelimit)),
|
||||
type = box(as.integer(0))
|
||||
)
|
||||
|
||||
set_type <- function(type) {
|
||||
if (args$type != 0) {
|
||||
stop("One type of prediction at a time.")
|
||||
}
|
||||
return(box(as.integer(type)))
|
||||
}
|
||||
if (outputmargin) {
|
||||
args$type <- set_type(1)
|
||||
}
|
||||
if (predcontrib) {
|
||||
args$type <- set_type(if (approxcontrib) 3 else 2)
|
||||
}
|
||||
if (predinteraction) {
|
||||
args$type <- set_type(if (approxcontrib) 5 else 4)
|
||||
}
|
||||
if (predleaf) {
|
||||
args$type <- set_type(6)
|
||||
}
|
||||
|
||||
predts <- .Call(
|
||||
XGBoosterPredictFromDMatrix_R, object$handle, newdata, jsonlite::toJSON(args, auto_unbox = TRUE)
|
||||
)
|
||||
names(predts) <- c("shape", "results")
|
||||
shape <- predts$shape
|
||||
ret <- predts$results
|
||||
|
||||
n_ret <- length(ret)
|
||||
n_row <- nrow(newdata)
|
||||
npred_per_case <- n_ret / n_row
|
||||
if (n_row != shape[1]) {
|
||||
stop("Incorrect predict shape.")
|
||||
}
|
||||
|
||||
if (n_ret %% n_row != 0)
|
||||
stop("prediction length ", n_ret, " is not multiple of nrows(newdata) ", n_row)
|
||||
arr <- array(data = ret, dim = rev(shape))
|
||||
|
||||
cnames <- if (!is.null(colnames(newdata))) c(colnames(newdata), "BIAS") else NULL
|
||||
n_groups <- shape[2]
|
||||
|
||||
## Needed regardless of whether strict shape is being used.
|
||||
if (predcontrib) {
|
||||
dimnames(arr) <- list(cnames, NULL, NULL)
|
||||
} else if (predinteraction) {
|
||||
dimnames(arr) <- list(cnames, cnames, NULL, NULL)
|
||||
}
|
||||
if (strict_shape) {
|
||||
return(arr) # strict shape is calculated by libxgboost uniformly.
|
||||
}
|
||||
|
||||
if (predleaf) {
|
||||
## Predict leaf
|
||||
arr <- if (n_ret == n_row) {
|
||||
matrix(arr, ncol = 1)
|
||||
} else {
|
||||
matrix(arr, nrow = n_row, byrow = TRUE)
|
||||
}
|
||||
} else if (predcontrib) {
|
||||
## Predict contribution
|
||||
arr <- aperm(a = arr, perm = c(2, 3, 1)) # [group, row, col]
|
||||
arr <- if (n_ret == n_row) {
|
||||
matrix(arr, ncol = 1, dimnames = list(NULL, cnames))
|
||||
} else if (n_groups != 1) {
|
||||
## turns array into list of matrices
|
||||
lapply(seq_len(n_groups), function(g) arr[g, , ])
|
||||
} else {
|
||||
## remove the first axis (group)
|
||||
as.matrix(arr[1, , ])
|
||||
}
|
||||
} else if (predinteraction) {
|
||||
## Predict interaction
|
||||
arr <- aperm(a = arr, perm = c(3, 4, 1, 2)) # [group, row, col, col]
|
||||
arr <- if (n_ret == n_row) {
|
||||
matrix(arr, ncol = 1, dimnames = list(NULL, cnames))
|
||||
} else if (n_groups != 1) {
|
||||
## turns array into list of matrices
|
||||
lapply(seq_len(n_groups), function(g) arr[g, , , ])
|
||||
} else {
|
||||
## remove the first axis (group)
|
||||
arr[1, , , ]
|
||||
}
|
||||
} else {
|
||||
## Normal prediction
|
||||
arr <- if (reshape && n_groups != 1) {
|
||||
matrix(arr, ncol = n_groups, byrow = TRUE)
|
||||
} else {
|
||||
as.vector(ret)
|
||||
}
|
||||
}
|
||||
return(arr)
|
||||
}
|
||||
|
||||
#' @rdname predict.xgb.Booster
|
||||
|
||||
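Taken together, the reworked predictor deprecates `ntreelimit` in favour of the 1-based, half-open `iterationrange`, and adds `strict_shape`. A hedged usage sketch (assumes a hypothetical multiclass booster `bst` and feature matrix `x`):

```r
p_all  <- predict(bst, x, iterationrange = c(1, 1))   # c(1, 1) means all trees
p_head <- predict(bst, x, iterationrange = c(1, 6))   # boosting rounds 1..5 only
a      <- predict(bst, x, strict_shape = TRUE)        # shape-invariant array
dim(a)                                                # (num_class, nrow(x))
```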
@@ -1,7 +1,7 @@
|
||||
#' Construct xgb.DMatrix object
|
||||
#'
|
||||
#' Construct xgb.DMatrix object from either a dense matrix, a sparse matrix, or a local file.
|
||||
#' Supported input file formats are either a LIBSVM text file or a binary file that was created previously by
|
||||
#' \code{\link{xgb.DMatrix.save}}).
|
||||
#'
|
||||
#' @param data a \code{matrix} object (either numeric or integer), a \code{dgCMatrix} object, or a character
|
||||
@@ -11,25 +11,26 @@
|
||||
#' @param missing a float value to represents missing values in data (used only when input is a dense matrix).
|
||||
#' It is useful when a 0 or some other extreme value represents missing values in data.
|
||||
#' @param silent whether to suppress printing an informational message after loading from a file.
|
||||
#' @param nthread Number of threads used for creating DMatrix.
|
||||
#' @param ... the \code{info} data could be passed directly as parameters, without creating an \code{info} list.
|
||||
#'
|
||||
#' @examples
|
||||
#' data(agaricus.train, package='xgboost')
|
||||
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))
|
||||
#' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
|
||||
#' dtrain <- xgb.DMatrix('xgb.DMatrix.data')
|
||||
#' if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
|
||||
#' @export
|
||||
xgb.DMatrix <- function(data, info = list(), missing = NA, silent = FALSE, nthread = NULL, ...) {
|
||||
cnames <- NULL
|
||||
if (typeof(data) == "character") {
|
||||
if (length(data) > 1)
|
||||
stop("'data' has class 'character' and length ", length(data),
|
||||
".\n 'data' accepts either a numeric matrix or a single filename.")
|
||||
data <- path.expand(data)
|
||||
handle <- .Call(XGDMatrixCreateFromFile_R, data, as.integer(silent))
|
||||
} else if (is.matrix(data)) {
|
||||
handle <- .Call(XGDMatrixCreateFromMat_R, data, missing, as.integer(NVL(nthread, -1)))
|
||||
cnames <- colnames(data)
|
||||
} else if (inherits(data, "dgCMatrix")) {
|
||||
handle <- .Call(XGDMatrixCreateFromCSC_R, data@p, data@i, data@x, nrow(data))
|
||||
@@ -51,12 +52,12 @@ xgb.DMatrix <- function(data, info = list(), missing = NA, silent = FALSE, ...)
|
||||
|
||||
# get dmatrix from data, label
|
||||
# internal helper method
|
||||
xgb.get.DMatrix <- function(data, label = NULL, missing = NA, weight = NULL, nthread = NULL) {
|
||||
if (inherits(data, "dgCMatrix") || is.matrix(data)) {
|
||||
if (is.null(label)) {
|
||||
stop("label must be provided when data is a matrix")
|
||||
}
|
||||
dtrain <- xgb.DMatrix(data, label = label, missing = missing, nthread = nthread)
|
||||
if (!is.null(weight)){
|
||||
setinfo(dtrain, "weight", weight)
|
||||
}
|
||||
@@ -65,6 +66,7 @@ xgb.get.DMatrix <- function(data, label = NULL, missing = NA, weight = NULL) {
|
||||
warning("xgboost: label will be ignored.")
|
||||
}
|
||||
if (is.character(data)) {
|
||||
data <- path.expand(data)
|
||||
dtrain <- xgb.DMatrix(data[1])
|
||||
} else if (inherits(data, "xgb.DMatrix")) {
|
||||
dtrain <- data
|
||||
@@ -160,9 +162,9 @@ dimnames.xgb.DMatrix <- function(x) {
|
||||
#' The \code{name} field can be one of the following:
|
||||
#'
|
||||
#' \itemize{
|
||||
#' \item \code{label}: label XGBoost learn from ;
|
||||
#' \item \code{weight}: to do a weight rescale ;
|
||||
#' \item \code{base_margin}: base margin is the base prediction XGBoost will boost from ;
|
||||
#' \item \code{nrow}: number of rows of the \code{xgb.DMatrix}.
|
||||
#'
|
||||
#' }
|
||||
@@ -171,8 +173,7 @@ dimnames.xgb.DMatrix <- function(x) {
|
||||
#'
|
||||
#' @examples
|
||||
#' data(agaricus.train, package='xgboost')
|
||||
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))
|
||||
#'
|
||||
#' labels <- getinfo(dtrain, 'label')
|
||||
#' setinfo(dtrain, 'label', 1-labels)
|
||||
@@ -216,16 +217,15 @@ getinfo.xgb.DMatrix <- function(object, name, ...) {
|
||||
#' The \code{name} field can be one of the following:
|
||||
#'
|
||||
#' \itemize{
|
||||
#' \item \code{label}: label XGBoost learn from ;
|
||||
#' \item \code{weight}: to do a weight rescale ;
|
||||
#' \item \code{base_margin}: base margin is the base prediction XGBoost will boost from ;
|
||||
#' \item \code{group}: number of rows in each group (to use with \code{rank:pairwise} objective).
|
||||
#' }
|
||||
#'
|
||||
#' @examples
|
||||
#' data(agaricus.train, package='xgboost')
|
||||
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))
|
||||
#'
|
||||
#' labels <- getinfo(dtrain, 'label')
|
||||
#' setinfo(dtrain, 'label', 1-labels)
|
||||
@@ -257,8 +257,6 @@ setinfo.xgb.DMatrix <- function(object, name, info, ...) {
|
||||
return(TRUE)
|
||||
}
|
||||
if (name == "weight") {
|
||||
.Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
|
||||
return(TRUE)
|
||||
}
|
||||
@@ -292,8 +290,7 @@ setinfo.xgb.DMatrix <- function(object, name, info, ...) {
|
||||
#'
|
||||
#' @examples
|
||||
#' data(agaricus.train, package='xgboost')
|
||||
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))
|
||||
#'
|
||||
#' dsub <- slice(dtrain, 1:42)
|
||||
#' labels1 <- getinfo(dsub, 'label')
|
||||
@@ -349,8 +346,7 @@ slice.xgb.DMatrix <- function(object, idxset, ...) {
|
||||
#'
|
||||
#' @examples
|
||||
#' data(agaricus.train, package='xgboost')
|
||||
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))
|
||||
#'
|
||||
#' dtrain
|
||||
#' print(dtrain, verbose=TRUE)
|
||||
@@ -359,7 +355,7 @@ slice.xgb.DMatrix <- function(object, idxset, ...) {
|
||||
#' @export
|
||||
print.xgb.DMatrix <- function(x, verbose = FALSE, ...) {
|
||||
cat('xgb.DMatrix dim:', nrow(x), 'x', ncol(x), ' info: ')
|
||||
infos <- character(0)
|
||||
if (length(getinfo(x, 'label')) > 0) infos <- 'label'
|
||||
if (length(getinfo(x, 'weight')) > 0) infos <- c(infos, 'weight')
|
||||
if (length(getinfo(x, 'base_margin')) > 0) infos <- c(infos, 'base_margin')
|
||||
|
||||
@@ -7,8 +7,7 @@
|
||||
#'
|
||||
#' @examples
|
||||
#' data(agaricus.train, package='xgboost')
|
||||
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))
|
||||
#' xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
|
||||
#' dtrain <- xgb.DMatrix('xgb.DMatrix.data')
|
||||
#' if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')
|
||||
@@ -19,6 +18,7 @@ xgb.DMatrix.save <- function(dmatrix, fname) {
|
||||
if (!inherits(dmatrix, "xgb.DMatrix"))
|
||||
stop("dmatrix must be xgb.DMatrix")
|
||||
|
||||
fname <- path.expand(fname)
|
||||
.Call(XGDMatrixSaveBinary_R, dmatrix, fname[1], 0L)
|
||||
return(TRUE)
|
||||
}
|
||||
|
||||
38
R-package/R/xgb.config.R
Normal file
@@ -0,0 +1,38 @@
#' Global configuration consists of a collection of parameters that can be applied in the global
#' scope. See \url{https://xgboost.readthedocs.io/en/stable/parameter.html} for the full list of
#' parameters supported in the global configuration. Use \code{xgb.set.config} to update the
#' values of one or more global-scope parameters. Use \code{xgb.get.config} to fetch the current
#' values of all global-scope parameters (listed in
#' \url{https://xgboost.readthedocs.io/en/stable/parameter.html}).
#'
#' @rdname xgbConfig
#' @title Set and get global configuration
#' @name xgb.set.config, xgb.get.config
#' @export xgb.set.config xgb.get.config
#' @param ... List of parameters to be set, as keyword arguments
#' @return
#' \code{xgb.set.config} returns \code{TRUE} to signal success. \code{xgb.get.config} returns
#' a list containing all global-scope parameters and their values.
#'
#' @examples
#' # Set verbosity level to silent (0)
#' xgb.set.config(verbosity = 0)
#' # Now global verbosity level is 0
#' config <- xgb.get.config()
#' print(config$verbosity)
#' # Set verbosity level to warning (1)
#' xgb.set.config(verbosity = 1)
#' # Now global verbosity level is 1
#' config <- xgb.get.config()
#' print(config$verbosity)
xgb.set.config <- function(...) {
new_config <- list(...)
.Call(XGBSetGlobalConfig_R, jsonlite::toJSON(new_config, auto_unbox = TRUE))
return(TRUE)
}

#' @rdname xgbConfig
xgb.get.config <- function() {
config <- .Call(XGBGetGlobalConfig_R)
return(jsonlite::fromJSON(config))
}
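The new `xgb.set.config` hands the C++ core a JSON document built by jsonlite from the keyword arguments, and `xgb.get.config` decodes the JSON it gets back. A minimal sketch of that round-trip, using only jsonlite (no booster needed):

```r
library(jsonlite)

# keyword arguments are collected via list(...), as in xgb.set.config
new_config <- list(verbosity = 0)

# auto_unbox = TRUE renders length-1 vectors as JSON scalars, not arrays
json <- toJSON(new_config, auto_unbox = TRUE)
print(json)     # {"verbosity":0}

# xgb.get.config performs the inverse conversion on the core's reply
fromJSON(json)  # a named list: $verbosity == 0
```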
@@ -48,8 +48,8 @@
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label)
#' dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label)
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))
#' dtest <- with(agaricus.test, xgb.DMatrix(data, label = label))
#'
#' param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic')
#' nrounds = 4
@@ -83,5 +83,5 @@ xgb.create.features <- function(model, data, ...){
check.deprecation(...)
pred_with_leaf <- predict(model, data, predleaf = TRUE)
cols <- lapply(as.data.frame(pred_with_leaf), factor)
cbind(data, sparse.model.matrix( ~ . -1, cols))
cbind(data, sparse.model.matrix(~ . -1, cols)) # nolint
}

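`xgb.create.features` one-hot encodes the per-tree leaf indices by turning each column of the leaf-prediction matrix into a factor and calling `sparse.model.matrix` with an intercept-free formula. A minimal sketch of that encoding step in isolation, using the Matrix package and made-up leaf indices (the values here are illustrative, not real predictions):

```r
library(Matrix)

# pretend predict(model, data, predleaf = TRUE) returned these leaf indices
pred_with_leaf <- matrix(c(1, 2, 2, 3), nrow = 2,
                         dimnames = list(NULL, c("tree1", "tree2")))
cols <- lapply(as.data.frame(pred_with_leaf), factor)

# ~ . -1 drops the intercept, so the factors expand into sparse 0/1 columns
sparse.model.matrix(~ . - 1, cols)
```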
@@ -2,12 +2,15 @@
#'
#' The cross validation function of xgboost
#'
#' @param params the list of parameters. Commonly used ones are:
#' @param params the list of parameters. The complete list of parameters is
#' available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below
#' is a shorter summary:
#' \itemize{
#' \item \code{objective} objective function, common ones are
#' \itemize{
#' \item \code{reg:squarederror} Regression with squared loss
#' \item \code{binary:logistic} logistic regression for classification
#' \item \code{reg:squarederror} Regression with squared loss.
#' \item \code{binary:logistic} logistic regression for classification.
#' \item See \code{\link[=xgb.train]{xgb.train}()} for complete list of objectives.
#' }
#' \item \code{eta} step size of each boosting step
#' \item \code{max_depth} maximum depth of the tree
@@ -33,6 +36,8 @@
#' \item \code{error} binary classification error rate
#' \item \code{rmse} Rooted mean square error
#' \item \code{logloss} negative log-likelihood function
#' \item \code{mae} Mean absolute error
#' \item \code{mape} Mean absolute percentage error
#' \item \code{auc} Area under curve
#' \item \code{aucpr} Area under PR curve
#' \item \code{merror} Exact matching error, used to evaluate multi-class classification
@@ -76,7 +81,7 @@
#'
#' All observations are used for both training and validation.
#'
#' Adapted from \url{http://en.wikipedia.org/wiki/Cross-validation_\%28statistics\%29#k-fold_cross-validation}
#' Adapted from \url{https://en.wikipedia.org/wiki/Cross-validation_\%28statistics\%29}
#'
#' @return
#' An object of class \code{xgb.cv.synchronous} with the following elements:
@@ -96,9 +101,7 @@
#' parameter or randomly generated.
#' \item \code{best_iteration} iteration number with the best evaluation metric value
#' (only available with early stopping).
#' \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
#' which could further be used in \code{predict} method
#' (only available with early stopping).
#' \item \code{best_ntreelimit} and the \code{ntreelimit} Deprecated attributes, use \code{best_iteration} instead.
#' \item \code{pred} CV prediction values available when \code{prediction} is set.
#' It is either vector or matrix (see \code{\link{cb.cv.predict}}).
#' \item \code{models} a list of the CV folds' models. It is only available with the explicit
@@ -107,7 +110,7 @@
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))
#' cv <- xgb.cv(data = dtrain, nrounds = 3, nthread = 2, nfold = 5, metrics = list("rmse","auc"),
#' max_depth = 3, eta = 1, objective = "binary:logistic")
#' print(cv)
@@ -140,9 +143,9 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
} else if (inherits(data, 'xgb.DMatrix')) {
if (!is.null(label))
warning("xgb.cv: label will be ignored, since data is of type xgb.DMatrix")
cv_label = getinfo(data, 'label')
cv_label <- getinfo(data, 'label')
} else {
cv_label = label
cv_label <- label
}

# CV folds
@@ -205,8 +208,8 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
basket <- list()

# extract parameters that can affect the relationship b/w #trees and #iterations
num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1)
num_parallel_tree <- max(as.numeric(NVL(params[['num_parallel_tree']], 1)), 1)
num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1) # nolint
num_parallel_tree <- max(as.numeric(NVL(params[['num_parallel_tree']], 1)), 1) # nolint

# those are fixed for CV (no training continuation)
begin_iteration <- 1
@@ -223,7 +226,7 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
})
msg <- simplify2array(msg)
bst_evaluation <- rowMeans(msg)
bst_evaluation_err <- sqrt(rowMeans(msg^2) - bst_evaluation^2)
bst_evaluation_err <- sqrt(rowMeans(msg^2) - bst_evaluation^2) # nolint

for (f in cb$post_iter) f()

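The `bst_evaluation_err` line computes the across-fold standard deviation of each metric from its first two moments. A small check of that identity on a toy vector of fold scores (note the population form, hence the n rather than n - 1 denominator):

```r
x <- c(0.71, 0.69, 0.74, 0.70, 0.72)  # metric values from 5 CV folds
n <- length(x)

err <- sqrt(mean(x^2) - mean(x)^2)          # same moment trick as xgb.cv
all.equal(err, sd(x) * sqrt((n - 1) / n))   # TRUE: it is the population SD
```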
@@ -56,16 +56,17 @@ xgb.dump <- function(model, fname = NULL, fmap = "", with_stats=FALSE,
as.character(dump_format))

if (is.null(fname))
model_dump <- stri_replace_all_regex(model_dump, '\t', '')
model_dump <- gsub('\t', '', model_dump, fixed = TRUE)

if (dump_format == "text")
model_dump <- unlist(stri_split_regex(model_dump, '\n'))
model_dump <- unlist(strsplit(model_dump, '\n', fixed = TRUE))

model_dump <- grep('^\\s*$', model_dump, invert = TRUE, value = TRUE)

if (is.null(fname)) {
return(model_dump)
} else {
fname <- path.expand(fname)
writeLines(model_dump, fname[1])
return(TRUE)
}

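The stringi calls are swapped for base-R equivalents here; with `fixed = TRUE` the patterns are treated as literal strings, and since `\t` and `\n` carry no extra regex meaning the behaviour is unchanged. A quick sketch of the equivalence, in base R only:

```r
dump_line <- "0:[f29<2] yes=1,no=2\tgain=20.6\tcover=1628"

# literal replacement; same result as stri_replace_all_regex(x, '\t', '')
gsub('\t', '', dump_line, fixed = TRUE)

# literal split; same result as stri_split_regex(x, '\n')
unlist(strsplit("line1\nline2", '\n', fixed = TRUE))
```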
@@ -99,13 +99,91 @@ xgb.ggplot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med
}
}

#' @rdname xgb.plot.shap.summary
#' @export
xgb.ggplot.shap.summary <- function(data, shap_contrib = NULL, features = NULL, top_n = 10, model = NULL,
trees = NULL, target_class = NULL, approxcontrib = FALSE, subsample = NULL) {
data_list <- xgb.shap.data(
data = data,
shap_contrib = shap_contrib,
features = features,
top_n = top_n,
model = model,
trees = trees,
target_class = target_class,
approxcontrib = approxcontrib,
subsample = subsample,
max_observations = 10000 # 10,000 samples per feature.
)
p_data <- prepare.ggplot.shap.data(data_list, normalize = TRUE)
# Reverse factor levels so that the first level is at the top of the plot
p_data[, "feature" := factor(feature, rev(levels(feature)))]
p <- ggplot2::ggplot(p_data, ggplot2::aes(x = feature, y = p_data$shap_value, colour = p_data$feature_value)) +
ggplot2::geom_jitter(alpha = 0.5, width = 0.1) +
ggplot2::scale_colour_viridis_c(limits = c(-3, 3), option = "plasma", direction = -1) +
ggplot2::geom_abline(slope = 0, intercept = 0, colour = "darkgrey") +
ggplot2::coord_flip()

p
}

#' Combine and melt feature values and SHAP contributions for sample
#' observations.
#'
#' Conforms to data format required for ggplot functions.
#'
#' Internal utility function.
#'
#' @param data_list List containing 'data' and 'shap_contrib' returned by
#' \code{xgb.shap.data()}.
#' @param normalize Whether to standardize feature values to have mean 0 and
#' standard deviation 1 (useful for comparing multiple features on the same
#' plot). Default \code{FALSE}.
#'
#' @return A data.table containing the observation ID, the feature name, the
#' feature value (normalized if specified), and the SHAP contribution value.
prepare.ggplot.shap.data <- function(data_list, normalize = FALSE) {
data <- data_list[["data"]]
shap_contrib <- data_list[["shap_contrib"]]

data <- data.table::as.data.table(as.matrix(data))
if (normalize) {
data[, (names(data)) := lapply(.SD, normalize)]
}
data[, "id" := seq_len(nrow(data))]
data_m <- data.table::melt.data.table(data, id.vars = "id", variable.name = "feature", value.name = "feature_value")

shap_contrib <- data.table::as.data.table(as.matrix(shap_contrib))
shap_contrib[, "id" := seq_len(nrow(shap_contrib))]
shap_contrib_m <- data.table::melt.data.table(shap_contrib, id.vars = "id", variable.name = "feature", value.name = "shap_value")

p_data <- data.table::merge.data.table(data_m, shap_contrib_m, by = c("id", "feature"))

p_data
}

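A minimal sketch of the reshaping that `prepare.ggplot.shap.data` performs: melt a tiny feature matrix and a matching SHAP matrix to long format, then join them on observation id and feature name (data.table only, toy values):

```r
library(data.table)

data <- data.table(x1 = c(0.2, 0.9), x2 = c(1.5, -0.3))
shap <- data.table(x1 = c(0.05, -0.12), x2 = c(0.30, 0.08))
data[, id := .I]
shap[, id := .I]

data_m <- melt(data, id.vars = "id", variable.name = "feature", value.name = "feature_value")
shap_m <- melt(shap, id.vars = "id", variable.name = "feature", value.name = "shap_value")

# one row per (observation, feature) pair, ready for the ggplot jitter plot
merge(data_m, shap_m, by = c("id", "feature"))
```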
#' Scale feature value to have mean 0, standard deviation 1
#'
#' This is used to compare multiple features on the same plot.
#' Internal utility function
#'
#' @param x Numeric vector
#'
#' @return Numeric vector with mean 0 and sd 1.
normalize <- function(x) {
loc <- mean(x, na.rm = TRUE)
scale <- stats::sd(x, na.rm = TRUE)

(x - loc) / scale
}

# Plot multiple ggplot graph aligned by rows and columns.
# ... the plots
# cols number of columns
# internal utility function
multiplot <- function(..., cols = 1) {
plots <- list(...)
num_plots = length(plots)
num_plots <- length(plots)

layout <- matrix(seq(1, cols * ceiling(num_plots / cols)),
ncol = cols, nrow = ceiling(num_plots / cols))
@@ -131,5 +209,5 @@ multiplot <- function(..., cols = 1) {

globalVariables(c(
"Cluster", "ggplot", "aes", "geom_bar", "coord_flip", "xlab", "ylab", "ggtitle", "theme",
"element_blank", "element_text", "V1", "Weight"
"element_blank", "element_text", "V1", "Weight", "feature"
))

@@ -96,41 +96,44 @@ xgb.importance <- function(feature_names = NULL, model = NULL, trees = NULL,
if (!(is.null(feature_names) || is.character(feature_names)))
stop("feature_names: Has to be a character vector")

model_text_dump <- xgb.dump(model = model, with_stats = TRUE)

# linear model
if(model_text_dump[2] == "bias:"){
weights <- which(model_text_dump == "weight:") %>%
{model_text_dump[(. + 1):length(model_text_dump)]} %>%
as.numeric

num_class <- NVL(model$params$num_class, 1)
if(is.null(feature_names))
feature_names <- seq(to = length(weights) / num_class) - 1
if (length(feature_names) * num_class != length(weights))
stop("feature_names length does not match the number of features used in the model")

result <- if (num_class == 1) {
data.table(Feature = feature_names, Weight = weights)[order(-abs(Weight))]
model <- xgb.Booster.complete(model)
config <- jsonlite::fromJSON(xgb.config(model))
if (config$learner$gradient_booster$name == "gblinear") {
args <- list(importance_type = "weight", feature_names = feature_names)
results <- .Call(
XGBoosterFeatureScore_R, model$handle, jsonlite::toJSON(args, auto_unbox = TRUE, null = "null")
)
names(results) <- c("features", "shape", "weight")
n_classes <- if (length(results$shape) == 2) { results$shape[2] } else { 0 }
importance <- if (n_classes == 0) {
data.table(Feature = results$features, Weight = results$weight)[order(-abs(Weight))]
} else {
data.table(Feature = rep(feature_names, each = num_class),
Weight = weights,
Class = seq_len(num_class) - 1)[order(Class, -abs(Weight))]
data.table(
Feature = rep(results$features, each = n_classes), Weight = results$weight, Class = seq_len(n_classes) - 1
)[order(Class, -abs(Weight))]
}
} else {
# tree model
result <- xgb.model.dt.tree(feature_names = feature_names,
text = model_text_dump,
trees = trees)[
Feature != "Leaf", .(Gain = sum(Quality),
Cover = sum(Cover),
Frequency = .N), by = Feature][
,`:=`(Gain = Gain / sum(Gain),
Cover = Cover / sum(Cover),
Frequency = Frequency / sum(Frequency))][
order(Gain, decreasing = TRUE)]
concatenated <- list()
output_names <- vector()
for (importance_type in c("weight", "total_gain", "total_cover")) {
args <- list(importance_type = importance_type, feature_names = feature_names, tree_idx = trees)
results <- .Call(
XGBoosterFeatureScore_R, model$handle, jsonlite::toJSON(args, auto_unbox = TRUE, null = "null")
)
names(results) <- c("features", "shape", importance_type)
concatenated[
switch(importance_type, "weight" = "Frequency", "total_gain" = "Gain", "total_cover" = "Cover")
] <- results[importance_type]
output_names <- results$features
}
result
importance <- data.table(
Feature = output_names,
Gain = concatenated$Gain / sum(concatenated$Gain),
Cover = concatenated$Cover / sum(concatenated$Cover),
Frequency = concatenated$Frequency / sum(concatenated$Frequency)
)[order(Gain, decreasing = TRUE)]
}
importance
}

# Avoid error messages during CRAN check.

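The call pattern is unchanged by this rewrite; for a tree booster the returned table still carries Gain, Cover and Frequency, now computed natively through `XGBoosterFeatureScore` instead of re-parsing the text dump. A minimal usage sketch on the agaricus demo data, as in the package examples:

```r
library(xgboost)

data(agaricus.train, package = 'xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
               max_depth = 2, eta = 1, nrounds = 2,
               nthread = 2, objective = "binary:logistic")

# columns Feature / Gain / Cover / Frequency, sorted by Gain
head(xgb.importance(model = bst))
```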
@@ -87,11 +87,11 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
}

if (length(text) < 2 ||
sum(stri_detect_regex(text, 'yes=(\\d+),no=(\\d+)')) < 1) {
sum(grepl('yes=(\\d+),no=(\\d+)', text)) < 1) {
stop("Non-tree model detected! This function can only be used with tree models.")
}

position <- which(!is.na(stri_match_first_regex(text, "booster")))
position <- which(grepl("booster", text, fixed = TRUE))

add.tree.id <- function(node, tree) if (use_int_id) node else paste(tree, node, sep = "-")

@@ -108,9 +108,9 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
}
td <- td[Tree %in% trees & !grepl('^booster', t)]

td[, Node := stri_match_first_regex(t, "(\\d+):")[,2] %>% as.integer ]
td[, Node := as.integer(sub("^([0-9]+):.*", "\\1", t))]
if (!use_int_id) td[, ID := add.tree.id(Node, Tree)]
td[, isLeaf := !is.na(stri_match_first_regex(t, "leaf"))]
td[, isLeaf := grepl("leaf", t, fixed = TRUE)]

# parse branch lines
branch_rx <- paste0("f(\\d+)<(", anynumber_regex, ")\\] yes=(\\d+),no=(\\d+),missing=(\\d+),",
@@ -118,10 +118,11 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
branch_cols <- c("Feature", "Split", "Yes", "No", "Missing", "Quality", "Cover")
td[isLeaf == FALSE,
(branch_cols) := {
matches <- regmatches(t, regexec(branch_rx, t))
# skip some indices with spurious capture groups from anynumber_regex
xtr <- stri_match_first_regex(t, branch_rx)[, c(2,3,5,6,7,8,10), drop = FALSE]
xtr <- do.call(rbind, matches)[, c(2, 3, 5, 6, 7, 8, 10), drop = FALSE]
xtr[, 3:5] <- add.tree.id(xtr[, 3:5], Tree)
lapply(seq_len(ncol(xtr)), function(i) xtr[,i])
as.data.table(xtr)
}]
# assign feature_names when available
if (!is.null(feature_names)) {
@@ -135,8 +136,9 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
leaf_cols <- c("Feature", "Quality", "Cover")
td[isLeaf == TRUE,
(leaf_cols) := {
xtr <- stri_match_first_regex(t, leaf_rx)[, c(2,4)]
c("Leaf", lapply(seq_len(ncol(xtr)), function(i) xtr[,i]))
matches <- regmatches(t, regexec(leaf_rx, t))
xtr <- do.call(rbind, matches)[, c(2, 4)]
c("Leaf", as.data.table(xtr))
}]

# convert some columns to numeric

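The parsing now relies on base R's `regexec`/`regmatches` instead of `stri_match_first_regex`: `regexec` reports the full match plus every capture group, so row-binding the per-line results reproduces stringi's match matrix. A small sketch on one branch line, with a simplified pattern standing in for `branch_rx`:

```r
line <- "0:[f29<2.00001] yes=1,no=2,missing=1"
rx <- "f(\\d+)<([0-9.]+)\\] yes=(\\d+),no=(\\d+),missing=(\\d+)"

matches <- regmatches(line, regexec(rx, line))
# element 1 of each match is the full hit, elements 2.. are the capture groups
do.call(rbind, matches)
```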
@@ -99,21 +99,20 @@ xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure
}

if (plot) {
op <- par(no.readonly = TRUE)
mar <- op$mar
original_mar <- par()$mar

# reset margins so this function doesn't have side effects
on.exit({par(mar = original_mar)})

mar <- original_mar
if (!is.null(left_margin))
mar[2] <- left_margin
par(mar = mar)

# reverse the order of rows to have the highest ranked at the top
importance_matrix[nrow(importance_matrix):1,
importance_matrix[rev(seq_len(nrow(importance_matrix))),
barplot(Importance, horiz = TRUE, border = NA, cex.names = cex,
names.arg = Feature, las = 1, ...)]
grid(NULL, NA)
# redraw over the grid
importance_matrix[nrow(importance_matrix):1,
barplot(Importance, horiz = TRUE, border = NA, add = TRUE)]
par(op)
}

invisible(importance_matrix)

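The margin handling above now restores `par("mar")` through `on.exit`, so the device is reset even if plotting errors out partway. The general pattern in isolation, as a tiny base-graphics sketch:

```r
plot_with_wide_margin <- function() {
  original_mar <- par()$mar
  on.exit(par(mar = original_mar))  # runs on normal return and on error

  mar <- original_mar
  mar[2] <- 10                      # widen only the left margin, only here
  par(mar = mar)
  barplot(c(3, 1, 2), horiz = TRUE, names.arg = c("a", "b", "c"), las = 1)
}
```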
@@ -67,7 +67,7 @@ xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5,

# first number of the path represents the tree, then the following numbers are related to the path to follow
# root init
root.nodes <- tree.matrix[stri_detect_regex(ID, "\\d+-0"), ID]
root.nodes <- tree.matrix[Node == 0, ID]
tree.matrix[ID %in% root.nodes, abs.node.position := root.nodes]

precedent.nodes <- root.nodes
@@ -75,8 +75,8 @@ xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5,
while (tree.matrix[, sum(is.na(abs.node.position))] > 0) {
yes.row.nodes <- tree.matrix[abs.node.position %in% precedent.nodes & !is.na(Yes)]
no.row.nodes <- tree.matrix[abs.node.position %in% precedent.nodes & !is.na(No)]
yes.nodes.abs.pos <- yes.row.nodes[, abs.node.position] %>% paste0("_0")
no.nodes.abs.pos <- no.row.nodes[, abs.node.position] %>% paste0("_1")
yes.nodes.abs.pos <- paste0(yes.row.nodes[, abs.node.position], "_0")
no.nodes.abs.pos <- paste0(no.row.nodes[, abs.node.position], "_1")

tree.matrix[ID %in% yes.row.nodes[, Yes], abs.node.position := yes.nodes.abs.pos]
tree.matrix[ID %in% no.row.nodes[, No], abs.node.position := no.nodes.abs.pos]
@@ -86,28 +86,34 @@ xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5,
tree.matrix[!is.na(Yes), Yes := paste0(abs.node.position, "_0")]
tree.matrix[!is.na(No), No := paste0(abs.node.position, "_1")]

remove.tree <- . %>% stri_replace_first_regex(pattern = "^\\d+-", replacement = "")

tree.matrix[,`:=`(abs.node.position = remove.tree(abs.node.position),
Yes = remove.tree(Yes),
No = remove.tree(No))]
for (nm in c("abs.node.position", "Yes", "No"))
data.table::set(tree.matrix, j = nm, value = sub("^\\d+-", "", tree.matrix[[nm]]))

nodes.dt <- tree.matrix[
, .(Quality = sum(Quality))
, by = .(abs.node.position, Feature)
][, .(Text = paste0(Feature[1:min(length(Feature), features_keep)],
][, .(Text = paste0(
paste0(
Feature[1:min(length(Feature), features_keep)],
" (",
format(Quality[1:min(length(Quality), features_keep)], digits = 5),
")") %>%
paste0(collapse = "\n"))
, by = abs.node.position]
")"
),
collapse = "\n"
)
)
, by = abs.node.position
]

edges.dt <- tree.matrix[Feature != "Leaf", .(abs.node.position, Yes)] %>%
list(tree.matrix[Feature != "Leaf",.(abs.node.position, No)]) %>%
rbindlist() %>%
setnames(c("From", "To")) %>%
.[, .N, .(From, To)] %>%
.[, N:=NULL]
edges.dt <- data.table::rbindlist(
l = list(
tree.matrix[Feature != "Leaf", .(abs.node.position, Yes)],
tree.matrix[Feature != "Leaf", .(abs.node.position, No)]
)
)
data.table::setnames(edges.dt, c("From", "To"))
edges.dt <- edges.dt[, .N, .(From, To)]
edges.dt[, N := NULL]

nodes <- DiagrammeR::create_node_df(
n = nrow(nodes.dt),
@@ -123,21 +129,25 @@ xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5,
nodes_df = nodes,
edges_df = edges,
attr_theme = NULL
) %>%
DiagrammeR::add_global_graph_attrs(
)
graph <- DiagrammeR::add_global_graph_attrs(
graph = graph,
attr_type = "graph",
attr = c("layout", "rankdir"),
value = c("dot", "LR")
) %>%
DiagrammeR::add_global_graph_attrs(
)
graph <- DiagrammeR::add_global_graph_attrs(
graph = graph,
attr_type = "node",
attr = c("color", "fillcolor", "style", "shape", "fontname"),
value = c("DimGray", "beige", "filled", "rectangle", "Helvetica")
) %>%
DiagrammeR::add_global_graph_attrs(
)
graph <- DiagrammeR::add_global_graph_attrs(
graph = graph,
attr_type = "edge",
attr = c("color", "arrowsize", "arrowhead", "fontname"),
value = c("DimGray", "1.5", "vee", "Helvetica"))
value = c("DimGray", "1.5", "vee", "Helvetica")
)

if (!render) return(invisible(graph))

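The `%>%` chains are unrolled into explicit assignments as part of dropping the magrittr import; the two forms are interchangeable because `x %>% f(a)` is simply `f(x, a)`. For instance (magrittr loaded only for the left-hand form):

```r
library(magrittr)

x <- c("0-1", "0-2")

piped    <- x %>% paste0("_0")  # piped form, as in the old code
explicit <- paste0(x, "_0")     # explicit form, as in the new code

identical(piped, explicit)      # TRUE
```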
@@ -33,7 +33,7 @@
#' @param col_loess a color to use for the loess curves.
#' @param span_loess the \code{span} parameter in \code{\link[stats]{loess}}'s call.
#' @param which whether to do univariate or bivariate plotting. NOTE: only 1D is implemented so far.
#' @param plot whether a plot should be drawn. If FALSE, only a lits of matrices is returned.
#' @param plot whether a plot should be drawn. If FALSE, only a list of matrices is returned.
#' @param ... other parameters passed to \code{plot}.
#'
#' @details
@@ -81,6 +81,7 @@
#' xgb.plot.shap(agaricus.test$data, model = bst, features = "odor=none")
#' contr <- predict(bst, agaricus.test$data, predcontrib = TRUE)
#' xgb.plot.shap(agaricus.test$data, contr, model = bst, top_n = 12, n_col = 3)
#' xgb.ggplot.shap.summary(agaricus.test$data, contr, model = bst, top_n = 12) # Summary plot
#'
#' # multiclass example - plots for each class separately:
#' nclass <- 3
@@ -99,6 +100,7 @@
#' n_col = 2, col = col, pch = 16, pch_NA = 17)
#' xgb.plot.shap(x, model = mbst, trees = trees0 + 2, target_class = 2, top_n = 4,
#' n_col = 2, col = col, pch = 16, pch_NA = 17)
#' xgb.ggplot.shap.summary(x, model = mbst, target_class = 0, top_n = 4) # Summary plot
#'
#' @rdname xgb.plot.shap
#' @export
@@ -109,69 +111,33 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
plot_NA = TRUE, col_NA = rgb(0.7, 0, 1, 0.6), pch_NA = '.', pos_NA = 1.07,
plot_loess = TRUE, col_loess = 2, span_loess = 0.5,
which = c("1d", "2d"), plot = TRUE, ...) {

if (!is.matrix(data) && !inherits(data, "dgCMatrix"))
stop("data: must be either matrix or dgCMatrix")

if (is.null(shap_contrib) && (is.null(model) || !inherits(model, "xgb.Booster")))
stop("when shap_contrib is not provided, one must provide an xgb.Booster model")

if (is.null(features) && (is.null(model) || !inherits(model, "xgb.Booster")))
stop("when features are not provided, one must provide an xgb.Booster model to rank the features")

if (!is.null(shap_contrib) &&
(!is.matrix(shap_contrib) || nrow(shap_contrib) != nrow(data) || ncol(shap_contrib) != ncol(data) + 1))
stop("shap_contrib is not compatible with the provided data")

nsample <- if (is.null(subsample)) min(100000, nrow(data)) else as.integer(subsample * nrow(data))
idx <- sample(1:nrow(data), nsample)
data <- data[idx,]

if (is.null(shap_contrib)) {
shap_contrib <- predict(model, data, predcontrib = TRUE, approxcontrib = approxcontrib)
} else {
shap_contrib <- shap_contrib[idx,]
}
data_list <- xgb.shap.data(
data = data,
shap_contrib = shap_contrib,
features = features,
top_n = top_n,
model = model,
trees = trees,
target_class = target_class,
approxcontrib = approxcontrib,
subsample = subsample,
max_observations = 100000
)
data <- data_list[["data"]]
shap_contrib <- data_list[["shap_contrib"]]
features <- colnames(data)

which <- match.arg(which)
if (which == "2d")
stop("2D plots are not implemented yet")

if (is.null(features)) {
imp <- xgb.importance(model = model, trees = trees)
top_n <- as.integer(top_n[1])
if (top_n < 1 && top_n > 100)
stop("top_n: must be an integer within [1, 100]")
features <- imp$Feature[1:min(top_n, NROW(imp))]
}

if (is.character(features)) {
if (is.null(colnames(data)))
stop("Either provide `data` with column names or provide `features` as column indices")
features <- match(features, colnames(data))
}

if (n_col > length(features)) n_col <- length(features)

if (is.list(shap_contrib)) { # multiclass: either choose a class or merge
shap_contrib <- if (!is.null(target_class)) shap_contrib[[target_class + 1]]
else Reduce("+", lapply(shap_contrib, abs))
}

shap_contrib <- shap_contrib[, features, drop = FALSE]
data <- data[, features, drop = FALSE]
cols <- colnames(data)
if (is.null(cols)) cols <- colnames(shap_contrib)
if (is.null(cols)) cols <- paste0('X', 1:ncol(data))
colnames(data) <- cols
colnames(shap_contrib) <- cols

if (plot && which == "1d") {
op <- par(mfrow = c(ceiling(length(features) / n_col), n_col),
oma = c(0, 0, 0, 0) + 0.2,
mar = c(3.5, 3.5, 0, 0) + 0.1,
mgp = c(1.7, 0.6, 0))
for (f in cols) {
for (f in features) {
ord <- order(data[, f])
x <- data[, f][ord]
y <- shap_contrib[, f][ord]
@@ -191,7 +157,7 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
plot(x2plot, y, pch = pch, xlab = f, col = col, xlim = x_lim, ylim = y_lim, ylab = ylab, ...)
grid()
if (plot_loess) {
# compress x to 3 digits, and mean-aggredate y
# compress x to 3 digits, and mean-aggregate y
zz <- data.table(x = signif(x, 3), y)[, .(.N, y = mean(y)), x]
if (nrow(zz) <= 5) {
lines(zz$x, zz$y, col = col_loess)
@@ -216,3 +182,108 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
}
invisible(list(data = data, shap_contrib = shap_contrib))
}

#' SHAP contribution dependency summary plot
#'
#' Compare SHAP contributions of different features.
#'
#' A point plot (each point representing one sample from \code{data}) is
#' produced for each feature, with the points plotted on the SHAP value axis.
#' Each point (observation) is coloured based on its feature value. The plot
#' hence allows us to see which features have a negative / positive contribution
#' on the model prediction, and whether the contribution is different for larger
#' or smaller values of the feature. We effectively try to replicate the
#' \code{summary_plot} function from https://github.com/slundberg/shap.
#'
#' @inheritParams xgb.plot.shap
#'
#' @return A \code{ggplot2} object.
#' @export
#'
#' @examples # See \code{\link{xgb.plot.shap}}.
#' @seealso \code{\link{xgb.plot.shap}}, \code{\link{xgb.ggplot.shap.summary}},
#' \url{https://github.com/slundberg/shap}
xgb.plot.shap.summary <- function(data, shap_contrib = NULL, features = NULL, top_n = 10, model = NULL,
trees = NULL, target_class = NULL, approxcontrib = FALSE, subsample = NULL) {
# Only ggplot implementation is available.
xgb.ggplot.shap.summary(data, shap_contrib, features, top_n, model, trees, target_class, approxcontrib, subsample)
}

#' Prepare data for SHAP plots. To be used in xgb.plot.shap, xgb.plot.shap.summary, etc.
#' Internal utility function.
#'
#' @inheritParams xgb.plot.shap
#' @keywords internal
#'
#' @return A list containing: 'data', a matrix containing sample observations
#' and their feature values; 'shap_contrib', a matrix containing the SHAP contribution
#' values for these observations.
xgb.shap.data <- function(data, shap_contrib = NULL, features = NULL, top_n = 1, model = NULL,
trees = NULL, target_class = NULL, approxcontrib = FALSE,
subsample = NULL, max_observations = 100000) {
if (!is.matrix(data) && !inherits(data, "dgCMatrix"))
stop("data: must be either matrix or dgCMatrix")

if (is.null(shap_contrib) && (is.null(model) || !inherits(model, "xgb.Booster")))
stop("when shap_contrib is not provided, one must provide an xgb.Booster model")

if (is.null(features) && (is.null(model) || !inherits(model, "xgb.Booster")))
stop("when features are not provided, one must provide an xgb.Booster model to rank the features")

if (!is.null(shap_contrib) &&
(!is.matrix(shap_contrib) || nrow(shap_contrib) != nrow(data) || ncol(shap_contrib) != ncol(data) + 1))
stop("shap_contrib is not compatible with the provided data")

if (is.character(features) && is.null(colnames(data)))
stop("either provide `data` with column names or provide `features` as column indices")

if (is.null(model$feature_names) && model$nfeatures != ncol(data))
stop("if model has no feature_names, columns in `data` must match features in model")

if (!is.null(subsample)) {
idx <- sample(x = seq_len(nrow(data)), size = as.integer(subsample * nrow(data)), replace = FALSE)
} else {
idx <- seq_len(min(nrow(data), max_observations))
}
data <- data[idx, ]
if (is.null(colnames(data))) {
colnames(data) <- paste0("X", seq_len(ncol(data)))
}

if (!is.null(shap_contrib)) {
if (is.list(shap_contrib)) { # multiclass: either choose a class or merge
shap_contrib <- if (!is.null(target_class)) shap_contrib[[target_class + 1]] else Reduce("+", lapply(shap_contrib, abs))
}
shap_contrib <- shap_contrib[idx, ]
if (is.null(colnames(shap_contrib))) {
colnames(shap_contrib) <- paste0("X", seq_len(ncol(data)))
}
} else {
shap_contrib <- predict(model, newdata = data, predcontrib = TRUE, approxcontrib = approxcontrib)
if (is.list(shap_contrib)) { # multiclass: either choose a class or merge
shap_contrib <- if (!is.null(target_class)) shap_contrib[[target_class + 1]] else Reduce("+", lapply(shap_contrib, abs))
}
}

if (is.null(features)) {
if (!is.null(model$feature_names)) {
imp <- xgb.importance(model = model, trees = trees)
} else {
imp <- xgb.importance(model = model, trees = trees, feature_names = colnames(data))
}
top_n <- top_n[1]
if (top_n < 1 | top_n > 100) stop("top_n: must be an integer within [1, 100]")
features <- imp$Feature[1:min(top_n, NROW(imp))]
}
if (is.character(features)) {
features <- match(features, colnames(data))
}

shap_contrib <- shap_contrib[, features, drop = FALSE]
data <- data[, features, drop = FALSE]

list(
data = data,
shap_contrib = shap_contrib
)
}

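For multiclass boosters, `predict(..., predcontrib = TRUE)` returns one contribution matrix per class, and `xgb.shap.data` either picks `target_class` or merges the classes by summing absolute contributions. The merge step in isolation, with toy matrices standing in for real predictions:

```r
# pretend per-class SHAP matrices from a 2-class model
shap_contrib <- list(
  matrix(c(0.1, -0.4, 0.2, 0.3), nrow = 2),
  matrix(c(-0.2, 0.1, -0.5, 0.0), nrow = 2)
)

target_class <- NULL
merged <- if (!is.null(target_class)) {
  shap_contrib[[target_class + 1]]        # classes are 0-based
} else {
  Reduce("+", lapply(shap_contrib, abs))  # magnitude summed over classes
}
merged
```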
@@ -99,33 +99,41 @@ xgb.plot.tree <- function(feature_names = NULL, model = NULL, trees = NULL, plot
fontcolor = "black")

edges <- DiagrammeR::create_edge_df(
from = match(dt[Feature != "Leaf", c(ID)] %>% rep(2), dt$ID),
from = match(rep(dt[Feature != "Leaf", c(ID)], 2), dt$ID),
to = match(dt[Feature != "Leaf", c(Yes, No)], dt$ID),
label = dt[Feature != "Leaf", paste("<", Split)] %>%
c(rep("", nrow(dt[Feature != "Leaf"]))),
style = dt[Feature != "Leaf", ifelse(Missing == Yes, "bold", "solid")] %>%
c(dt[Feature != "Leaf", ifelse(Missing == No, "bold", "solid")]),
label = c(
dt[Feature != "Leaf", paste("<", Split)],
rep("", nrow(dt[Feature != "Leaf"]))
),
style = c(
dt[Feature != "Leaf", ifelse(Missing == Yes, "bold", "solid")],
dt[Feature != "Leaf", ifelse(Missing == No, "bold", "solid")]
),
rel = "leading_to")

graph <- DiagrammeR::create_graph(
nodes_df = nodes,
edges_df = edges,
attr_theme = NULL
) %>%
DiagrammeR::add_global_graph_attrs(
)
graph <- DiagrammeR::add_global_graph_attrs(
graph = graph,
attr_type = "graph",
attr = c("layout", "rankdir"),
value = c("dot", "LR")
) %>%
DiagrammeR::add_global_graph_attrs(
)
graph <- DiagrammeR::add_global_graph_attrs(
graph = graph,
attr_type = "node",
attr = c("color", "style", "fontname"),
value = c("DimGray", "filled", "Helvetica")
) %>%
DiagrammeR::add_global_graph_attrs(
)
graph <- DiagrammeR::add_global_graph_attrs(
graph = graph,
attr_type = "edge",
attr = c("color", "arrowsize", "arrowhead", "fontname"),
value = c("DimGray", "1.5", "vee", "Helvetica"))
value = c("DimGray", "1.5", "vee", "Helvetica")
)

if (!render) return(invisible(graph))

@@ -13,7 +13,11 @@
#'
#' Note: a model can also be saved as an R-object (e.g., by using \code{\link[base]{readRDS}}
#' or \code{\link[base]{save}}). However, it would then only be compatible with R, and
#' corresponding R-methods would need to be used to load it.
#' corresponding R-methods would need to be used to load it. Moreover, persisting the model with
#' \code{\link[base]{readRDS}} or \code{\link[base]{save}} will cause compatibility problems in
#' future versions of XGBoost. Consult \code{\link{a-compatibility-note-for-saveRDS-save}} to learn
#' how to persist models in a future-proof way, i.e. to make the model accessible in future
#' releases of XGBoost.
#'
#' @seealso
#' \code{\link{xgb.load}}, \code{\link{xgb.Booster.complete}}.
@@ -38,6 +42,7 @@ xgb.save <- function(model, fname) {
if (inherits(model, "xgb.DMatrix")) " Use xgb.DMatrix.save to save an xgb.DMatrix object." else "")
}
model <- xgb.Booster.complete(model, saveraw = FALSE)
fname <- path.expand(fname)
.Call(XGBoosterSaveModel_R, model$handle, fname[1])
return(TRUE)
}

@@ -3,9 +3,9 @@
#' \code{xgb.train} is an advanced interface for training an xgboost model.
#' The \code{xgboost} function is a simpler wrapper for \code{xgb.train}.
#'
#' @param params the list of parameters.
#' The complete list of parameters is available at \url{http://xgboost.readthedocs.io/en/latest/parameter.html}.
#' Below is a shorter summary:
#' @param params the list of parameters. The complete list of parameters is
#' available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below
#' is a shorter summary:
#'
#' 1. General Parameters
#'
@@ -15,7 +15,7 @@
#'
#' 2. Booster Parameters
#'
#' 2.1. Parameter for Tree Booster
#' 2.1. Parameters for Tree Booster
#'
#' \itemize{
#' \item \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model more robust to overfitting but slower to compute. Default: 0.3
@@ -24,12 +24,14 @@
#' \item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
#' \item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
#' \item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
#' \item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through Xgboost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
#' \item \code{lambda} L2 regularization term on weights. Default: 1
#' \item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
#' \item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through XGBoost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
#' \item \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length equals to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
#' \item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave argument unspecified for no interaction constraints.
#' }
#'
#' 2.2. Parameter for Linear Booster
#' 2.2. Parameters for Linear Booster
#'
#' \itemize{
#' \item \code{lambda} L2 regularization term on weights. Default: 0
@@ -43,13 +45,23 @@
#' \item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
#' \itemize{
#' \item \code{reg:squarederror} Regression with squared loss (Default).
#' \item \code{reg:squaredlogerror}: regression with squared log loss \eqn{1/2 * (log(pred + 1) - log(label + 1))^2}. All inputs are required to be greater than -1. Also, see metric rmsle for possible issue with this objective.
#' \item \code{reg:logistic} logistic regression.
#' \item \code{reg:pseudohubererror}: regression with Pseudo Huber loss, a twice differentiable alternative to absolute loss.
#' \item \code{binary:logistic} logistic regression for binary classification. Output probability.
#' \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
#' \item \code{num_class} set the number of classes. To use only with multiclass objectives.
#' \item \code{binary:hinge}: hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities.
#' \item \code{count:poisson}: Poisson regression for count data, output mean of Poisson distribution. \code{max_delta_step} is set to 0.7 by default in poisson regression (used to safeguard optimization).
#' \item \code{survival:cox}: Cox regression for right censored survival time data (negative values are considered right censored). Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional hazard function \code{h(t) = h0(t) * HR)}.
#' \item \code{survival:aft}: Accelerated failure time model for censored survival time data. See \href{https://xgboost.readthedocs.io/en/latest/tutorials/aft_survival_analysis.html}{Survival Analysis with Accelerated Failure Time} for details.
#' \item \code{aft_loss_distribution}: Probability Density Function used by \code{survival:aft} and \code{aft-nloglik} metric.
#' \item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}.
#' \item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
#' \item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss.
#' \item \code{rank:ndcg}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Discounted_cumulative_gain}{Normalized Discounted Cumulative Gain (NDCG)} is maximized.
#' \item \code{rank:map}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision}{Mean Average Precision (MAP)} is maximized.
#' \item \code{reg:gamma}: gamma regression with log-link. Output is a mean of gamma distribution. It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Gamma_distribution#Applications}{gamma-distributed}.
#' \item \code{reg:tweedie}: Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Tweedie_distribution#Applications}{Tweedie-distributed}.
#' }
#' \item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
#' \item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: metric will be assigned according to objective(rmse for regression, and error for classification, mean average precision for ranking). List is provided in detail section.
@@ -114,22 +126,24 @@
#' Parallelization is automatically enabled if \code{OpenMP} is present.
#' Number of threads can also be manually specified via \code{nthread} parameter.
#'
#' The evaluation metric is chosen automatically by Xgboost (according to the objective)
#' The evaluation metric is chosen automatically by XGBoost (according to the objective)
#' when the \code{eval_metric} parameter is not provided.
#' User may set one or several \code{eval_metric} parameters.
#' Note that when using a customized metric, only this single metric can be used.
#' The following is the list of built-in metrics for which Xgboost provides optimized implementation:
#' The following is the list of built-in metrics for which XGBoost provides optimized implementation:
#' \itemize{
#' \item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
#' \item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
#' \item \code{mlogloss} multiclass logloss. \url{http://wiki.fast.ai/index.php/Log_Loss}
#' \item \code{rmse} root mean square error. \url{https://en.wikipedia.org/wiki/Root_mean_square_error}
#' \item \code{logloss} negative log-likelihood. \url{https://en.wikipedia.org/wiki/Log-likelihood}
#' \item \code{mlogloss} multiclass logloss. \url{https://scikit-learn.org/stable/modules/generated/sklearn.metrics.log_loss.html}
#' \item \code{error} Binary classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
#' By default, it uses the 0.5 threshold for predicted values to define negative and positive instances.
#' Different threshold (e.g., 0.) could be specified as "error@0."
#' \item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
#' \item \code{auc} Area under the curve. \url{http://en.wikipedia.org/wiki/Receiver_operating_characteristic#'Area_under_curve} for ranking evaluation.
#' \item \code{mae} Mean absolute error
#' \item \code{mape} Mean absolute percentage error
#' \item \code{auc} Area under the curve. \url{https://en.wikipedia.org/wiki/Receiver_operating_characteristic#'Area_under_curve} for ranking evaluation.
#' \item \code{aucpr} Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation.
#' \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{http://en.wikipedia.org/wiki/NDCG}
#' \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{https://en.wikipedia.org/wiki/NDCG}
#' }
#'
#' The following callbacks are automatically created when certain parameters are set:
@@ -157,9 +171,6 @@
#' explicitly passed.
#' \item \code{best_iteration} iteration number with the best evaluation metric value
#' (only available with early stopping).
#' \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
#' which could further be used in \code{predict} method
#' (only available with early stopping).
#' \item \code{best_score} the best evaluation metric value during early stopping.
#' (only available with early stopping).
#' \item \code{feature_names} names of the training dataset features
@@ -181,8 +192,8 @@
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#'
#' dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
#' dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
#' dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))
#' dtest <- with(agaricus.test, xgb.DMatrix(data, label = label))
#' watchlist <- list(train = dtrain, eval = dtest)
#'
#' ## A simple xgb.train example:
@@ -321,9 +332,6 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
if (is_update && nrounds > niter_init)
stop("nrounds cannot be larger than ", niter_init, " (nrounds of xgb_model)")

# TODO: distributed code
rank <- 0

niter_skip <- ifelse(is_update, 0, niter_init)
begin_iteration <- niter_skip + 1
end_iteration <- niter_skip + nrounds
@@ -335,7 +343,6 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),

xgb.iter.update(bst$handle, dtrain, iteration - 1, obj)

bst_evaluation <- numeric(0)
if (length(watchlist) > 0)
bst_evaluation <- xgb.iter.eval(bst$handle, watchlist, iteration - 1, feval)

@@ -350,7 +357,7 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
bst <- xgb.Booster.complete(bst, saveraw = TRUE)

# store the total number of boosting iterations
bst$niter = end_iteration
bst$niter <- end_iteration

# store the evaluation results
if (length(evaluation_log) > 0 &&

@@ -1,12 +1,41 @@
#' Load the instance back from \code{\link{xgb.serialize}}
#'
#' @param buffer the buffer containing booster instance saved by \code{\link{xgb.serialize}}
#' @param handle An \code{xgb.Booster.handle} object which will be overwritten with
#' the new deserialized object. Must be a null handle (e.g. when loading the model through
#' `readRDS`). If not provided, a new handle will be created.
#' @return An \code{xgb.Booster.handle} object.
#'
#' @export
xgb.unserialize <- function(buffer) {
xgb.unserialize <- function(buffer, handle = NULL) {
cachelist <- list()
if (is.null(handle)) {
handle <- .Call(XGBoosterCreate_R, cachelist)
.Call(XGBoosterUnserializeFromBuffer_R, handle, buffer)
} else {
if (!is.null.handle(handle))
stop("'handle' is not null/empty. Cannot overwrite existing handle.")
.Call(XGBoosterCreateInEmptyObj_R, cachelist, handle)
}
tryCatch(
.Call(XGBoosterUnserializeFromBuffer_R, handle, buffer),
error = function(e) {
error_msg <- conditionMessage(e)
m <- regexec("(src[\\\\/]learner.cc:[0-9]+): Check failed: (header == serialisation_header_)",
error_msg, perl = TRUE)
groups <- regmatches(error_msg, m)[[1]]
if (length(groups) == 3) {
warning(paste("The model had been generated by XGBoost version 1.0.0 or earlier and was ",
"loaded from a RDS file. We strongly ADVISE AGAINST using saveRDS() ",
"function, to ensure that your model can be read in current and upcoming ",
"XGBoost releases. Please use xgb.save() instead to preserve models for the ",
"long term. For more details and explanation, see ",
"https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html",
sep = ""))
.Call(XGBoosterLoadModelFromRaw_R, handle, buffer)
} else {
stop(e)
}
})
class(handle) <- "xgb.Booster.handle"
return (handle)
}

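A minimal round-trip sketch for the extended `xgb.unserialize`, exercising the default `handle = NULL` path that creates a fresh handle (agaricus demo data, as elsewhere in the package):

```r
library(xgboost)

data(agaricus.train, package = 'xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
               max_depth = 2, eta = 1, nrounds = 2,
               nthread = 2, objective = "binary:logistic")

buffer <- xgb.serialize(bst)       # raw vector holding model plus config
handle <- xgb.unserialize(buffer)  # fresh handle created internally
class(handle)                      # "xgb.Booster.handle"
```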
@@ -9,8 +9,8 @@ xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL,
early_stopping_rounds = NULL, maximize = NULL,
save_period = NULL, save_name = "xgboost.model",
xgb_model = NULL, callbacks = list(), ...) {

dtrain <- xgb.get.DMatrix(data, label, missing, weight)
merged <- check.booster.params(params, ...)
dtrain <- xgb.get.DMatrix(data, label, missing, weight, nthread = merged$nthread)

watchlist <- list(train = dtrain)

@@ -90,12 +90,8 @@ NULL
#' @importFrom data.table setkey
#' @importFrom data.table setkeyv
#' @importFrom data.table setnames
#' @importFrom magrittr %>%
#' @importFrom stringi stri_detect_regex
#' @importFrom stringi stri_match_first_regex
#' @importFrom stringi stri_replace_first_regex
#' @importFrom stringi stri_replace_all_regex
#' @importFrom stringi stri_split_regex
#' @importFrom jsonlite fromJSON
#' @importFrom jsonlite toJSON
#' @importFrom utils object.size str tail
#' @importFrom stats predict
#' @importFrom stats median

@@ -30,4 +30,4 @@ Examples
Development
-----------

* See the [R Package section](https://xgboost.readthedocs.io/en/latest/contribute.html#r-package) of the contributors guide.
* See the [R Package section](https://xgboost.readthedocs.io/en/latest/contrib/coding_guide.html#r-coding-guideline) of the contributors guide.

@@ -1,4 +1,3 @@
#!/bin/sh

rm -f src/Makevars
rm -f CMakeLists.txt

20
R-package/configure
vendored
@@ -613,6 +613,7 @@ infodir
docdir
oldincludedir
includedir
runstatedir
localstatedir
sharedstatedir
sysconfdir
@@ -682,6 +683,7 @@ datadir='${datarootdir}'
sysconfdir='${prefix}/etc'
sharedstatedir='${prefix}/com'
localstatedir='${prefix}/var'
runstatedir='${localstatedir}/run'
includedir='${prefix}/include'
oldincludedir='/usr/include'
docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
@@ -934,6 +936,15 @@ do
| -silent | --silent | --silen | --sile | --sil)
silent=yes ;;

-runstatedir | --runstatedir | --runstatedi | --runstated \
| --runstate | --runstat | --runsta | --runst | --runs \
| --run | --ru | --r)
ac_prev=runstatedir ;;
-runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
| --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
| --run=* | --ru=* | --r=*)
runstatedir=$ac_optarg ;;

-sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
ac_prev=sbindir ;;
-sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
@@ -1071,7 +1082,7 @@ fi
for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
datadir sysconfdir sharedstatedir localstatedir includedir \
oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
libdir localedir mandir
libdir localedir mandir runstatedir
do
eval ac_val=\$$ac_var
# Remove trailing slashes.
@@ -1224,6 +1235,7 @@ Fine tuning of the installation directories:
--sysconfdir=DIR read-only single-machine data [PREFIX/etc]
--sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
--localstatedir=DIR modifiable single-machine data [PREFIX/var]
--runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run]
--libdir=DIR object code libraries [EPREFIX/lib]
--includedir=DIR C header files [PREFIX/include]
--oldincludedir=DIR C header files for non-gcc [/usr/include]
@@ -2698,7 +2710,7 @@ fi
if test `uname -s` = "Darwin"
then
OPENMP_CXXFLAGS='-Xclang -fopenmp'
OPENMP_LIB='/usr/local/lib/libomp.dylib'
OPENMP_LIB='-lomp'
ac_pkg_openmp=no
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether OpenMP will work in a package" >&5
$as_echo_n "checking whether OpenMP will work in a package... " >&6; }
@@ -2713,14 +2725,14 @@ main ()
return 0;
}
_ACEOF
${CC} -o conftest conftest.c /usr/local/lib/libomp.dylib -Xclang -fopenmp 2>/dev/null && ./conftest && ac_pkg_openmp=yes
${CC} -o conftest conftest.c ${OPENMP_LIB} ${OPENMP_CXXFLAGS} 2>/dev/null && ./conftest && ac_pkg_openmp=yes
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${ac_pkg_openmp}" >&5
$as_echo "${ac_pkg_openmp}" >&6; }
if test "${ac_pkg_openmp}" = no; then
OPENMP_CXXFLAGS=''
OPENMP_LIB=''
echo '*****************************************************************************************'
echo 'WARNING: OpenMP is unavailable on this Mac OSX system. Training speed may be suboptimal.'
echo '         OpenMP is unavailable on this Mac OSX system. Training speed may be suboptimal.'
echo '         To use all CPU cores for training jobs, you should install OpenMP by running\n'
echo '             brew install libomp'
echo '*****************************************************************************************'

@@ -1,6 +1,6 @@
|
||||
### configure.ac -*- Autoconf -*-
|
||||
|
||||
AC_PREREQ(2.62)
|
||||
AC_PREREQ(2.69)
|
||||
|
||||
AC_INIT([xgboost],[0.6-3],[],[xgboost],[])
|
||||
|
||||
@@ -29,17 +29,17 @@ fi
|
||||
if test `uname -s` = "Darwin"
|
||||
then
|
||||
OPENMP_CXXFLAGS='-Xclang -fopenmp'
|
||||
OPENMP_LIB='/usr/local/lib/libomp.dylib'
|
||||
OPENMP_LIB='-lomp'
|
||||
ac_pkg_openmp=no
|
||||
AC_MSG_CHECKING([whether OpenMP will work in a package])
|
||||
AC_LANG_CONFTEST([AC_LANG_PROGRAM([[#include <omp.h>]], [[ return (omp_get_max_threads() <= 1); ]])])
|
||||
${CC} -o conftest conftest.c /usr/local/lib/libomp.dylib -Xclang -fopenmp 2>/dev/null && ./conftest && ac_pkg_openmp=yes
|
||||
${CC} -o conftest conftest.c ${OPENMP_LIB} ${OPENMP_CXXFLAGS} 2>/dev/null && ./conftest && ac_pkg_openmp=yes
|
||||
AC_MSG_RESULT([${ac_pkg_openmp}])
|
||||
if test "${ac_pkg_openmp}" = no; then
|
||||
OPENMP_CXXFLAGS=''
|
||||
OPENMP_LIB=''
|
||||
echo '*****************************************************************************************'
|
||||
echo 'WARNING: OpenMP is unavailable on this Mac OSX system. Training speed may be suboptimal.'
|
||||
echo ' OpenMP is unavailable on this Mac OSX system. Training speed may be suboptimal.'
|
||||
echo ' To use all CPU cores for training jobs, you should install OpenMP by running\n'
|
||||
echo ' brew install libomp'
|
||||
echo '*****************************************************************************************'
|
||||
@@ -52,4 +52,3 @@ AC_SUBST(ENDIAN_FLAG)
|
||||
AC_SUBST(BACKTRACE_LIB)
|
||||
AC_CONFIG_FILES([src/Makevars])
|
||||
AC_OUTPUT
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
basic_walkthrough Basic feature walkthrough
caret_wrapper Use xgboost to train in caret library
custom_objective Cutomize loss function, and evaluation metric
custom_objective Customize loss function, and evaluation metric
boost_from_prediction Boosting from existing prediction
predict_first_ntree Predicting using first n trees
generalized_linear_model Generalized Linear Model
@@ -8,8 +8,8 @@ cross_validation Cross validation
create_sparse_matrix Create Sparse Matrix
predict_leaf_indices Predicting the corresponding leaves
early_stopping Early Stop in training
poisson_regression Poisson Regression on count data
tweedie_regression Tweddie Regression
poisson_regression Poisson regression on count data
tweedie_regression Tweedie regression
gpu_accelerated GPU-accelerated tree building algorithms
interaction_constraints Interaction constraints among features

@@ -2,7 +2,7 @@ XGBoost R Feature Walkthrough
====
* [Basic walkthrough of wrappers](basic_walkthrough.R)
* [Train a xgboost model from caret library](caret_wrapper.R)
* [Cutomize loss function, and evaluation metric](custom_objective.R)
* [Customize loss function, and evaluation metric](custom_objective.R)
* [Boosting from existing prediction](boost_from_prediction.R)
* [Predicting using first n trees](predict_first_ntree.R)
* [Generalized Linear Model](generalized_linear_model.R)

@@ -40,7 +40,7 @@ print("Train xgboost with verbose 2, also print information about tree")
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nrounds = 2,
nthread = 2, objective = "binary:logistic", verbose = 2)

# you can also specify data as file path to a LibSVM format input
# you can also specify data as file path to a LIBSVM format input
# since we do not have this file with us, the following line is just for illustration
# bst <- xgboost(data = 'agaricus.train.svm', max_depth = 2, eta = 1, nrounds = 2,objective = "binary:logistic")

@@ -61,7 +61,7 @@ pred2 <- predict(bst2, test$data)
print(paste("sum(abs(pred2-pred))=", sum(abs(pred2 - pred))))

# save model to R's raw vector
raw = xgb.save.raw(bst)
raw <- xgb.save.raw(bst)
# load binary model to R
bst3 <- xgb.load(raw)
pred3 <- predict(bst3, test$data)
@@ -93,14 +93,14 @@ dtrain2 <- xgb.DMatrix("dtrain.buffer")
bst <- xgb.train(data = dtrain2, max_depth = 2, eta = 1, nrounds = 2, watchlist = watchlist,
nthread = 2, objective = "binary:logistic")
# information can be extracted from xgb.DMatrix using getinfo
label = getinfo(dtest, "label")
label <- getinfo(dtest, "label")
pred <- predict(bst, dtest)
err <- as.numeric(sum(as.integer(pred > 0.5) != label)) / length(label)
print(paste("test-error=", err))

# You can dump the tree you learned using xgb.dump into a text file
dump_path = file.path(tempdir(), 'dump.raw.txt')
xgb.dump(bst, dump_path, with_stats = T)
dump_path <- file.path(tempdir(), 'dump.raw.txt')
xgb.dump(bst, dump_path, with_stats = TRUE)

# Finally, you can check which features are the most important.
print("Most important features (look at column Gain):")

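For readers reproducing the walkthrough outside this diff, here is a minimal, self-contained sketch of the dump-and-inspect step above (assuming only the agaricus data bundled with the package; the actual demo file contains more context than shown here):

library(xgboost)
data(agaricus.train, package = 'xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
               max_depth = 2, eta = 1, nrounds = 2, nthread = 2,
               objective = "binary:logistic")
dump_path <- file.path(tempdir(), 'dump.raw.txt')
xgb.dump(bst, dump_path, with_stats = TRUE)  # writes one line per tree node
head(readLines(dump_path))
print(xgb.importance(model = bst))           # the Gain column ranks features
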
@@ -11,7 +11,7 @@ watchlist <- list(eval = dtest, train = dtrain)
#
print('start running example to start from a initial prediction')
# train xgboost for 1 round
param <- list(max_depth=2, eta=1, nthread = 2, silent=1, objective='binary:logistic')
param <- list(max_depth = 2, eta = 1, nthread = 2, objective = 'binary:logistic')
bst <- xgb.train(param, dtrain, 1, watchlist)
# Note: we need the margin value instead of transformed prediction in set_base_margin
# do predict with output_margin=TRUE, will always give you margin values before logistic transformation

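The hunk stops just before the demo applies the margin; a sketch of the idea it leads up to (not the exact file contents), assuming dtrain, dtest, param and watchlist as defined above:

# margin (pre-logistic) predictions from the 1-round model
ptrain <- predict(bst, dtrain, outputmargin = TRUE)
ptest <- predict(bst, dtest, outputmargin = TRUE)
# continue boosting from these margins instead of from zero
setinfo(dtrain, "base_margin", ptrain)
setinfo(dtest, "base_margin", ptest)
bst2 <- xgb.train(param, dtrain, nrounds = 1, watchlist)
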
@@ -9,7 +9,7 @@ require(e1071)
# Load Arthritis dataset in memory.
data(Arthritis)
# Create a copy of the dataset with data.table package (data.table is 100% compliant with R dataframe but its syntax is a lot more consistent and its performance are really good).
df <- data.table(Arthritis, keep.rownames = F)
df <- data.table(Arthritis, keep.rownames = FALSE)

# Let's add some new categorical features to see if it helps. Of course these feature are highly correlated to the Age feature. Usually it's not a good thing in ML, but Tree algorithms (including boosted trees) are able to select the best features, even in case of highly correlated features.
# For the first feature we create groups of age by rounding the real age. Note that we transform it to factor (categorical data) so the algorithm treat them as independant values.

@@ -2,36 +2,36 @@ require(xgboost)
require(Matrix)
require(data.table)
if (!require(vcd)) {
install.packages('vcd') #Available in Cran. Used for its dataset with categorical values.
install.packages('vcd') #Available in CRAN. Used for its dataset with categorical values.
require(vcd)
}
# According to its documentation, Xgboost works only on numbers.
# According to its documentation, XGBoost works only on numbers.
# Sometimes the dataset we have to work on have categorical data.
# A categorical variable is one which have a fixed number of values. By example, if for each observation a variable called "Colour" can have only "red", "blue" or "green" as value, it is a categorical variable.
#
# In R, categorical variable is called Factor.
# Type ?factor in console for more information.
#
# In this demo we will see how to transform a dense dataframe with categorical variables to a sparse matrix before analyzing it in Xgboost.
# In this demo we will see how to transform a dense dataframe with categorical variables to a sparse matrix before analyzing it in XGBoost.
# The method we are going to see is usually called "one hot encoding".

#load Arthritis dataset in memory.
data(Arthritis)

# create a copy of the dataset with data.table package (data.table is 100% compliant with R dataframe but its syntax is a lot more consistent and its performance are really good).
df <- data.table(Arthritis, keep.rownames = F)
df <- data.table(Arthritis, keep.rownames = FALSE)

# Let's have a look to the data.table
cat("Print the dataset\n")
print(df)

# 2 columns have factor type, one has ordinal type (ordinal variable is a categorical variable with values wich can be ordered, here: None > Some > Marked).
# 2 columns have factor type, one has ordinal type (ordinal variable is a categorical variable with values which can be ordered, here: None > Some > Marked).
cat("Structure of the dataset\n")
str(df)

# Let's add some new categorical features to see if it helps. Of course these feature are highly correlated to the Age feature. Usually it's not a good thing in ML, but Tree algorithms (including boosted trees) are able to select the best features, even in case of highly correlated features.

# For the first feature we create groups of age by rounding the real age. Note that we transform it to factor (categorical data) so the algorithm treat them as independant values.
# For the first feature we create groups of age by rounding the real age. Note that we transform it to factor (categorical data) so the algorithm treat them as independent values.
df[, AgeDiscret := as.factor(round(Age / 10, 0))]

# Here is an even stronger simplification of the real age with an arbitrary split at 30 years old. I choose this value based on nothing. We will see later if simplifying the information based on arbitrary values is a good strategy (I am sure you already have an idea of how well it will work!).
@@ -52,7 +52,7 @@ print(levels(df[,Treatment]))
#
# Formulae Improved~.-1 used below means transform all categorical features but column Improved to binary values.
# Column Improved is excluded because it will be our output column, the one we want to predict.
sparse_matrix = sparse.model.matrix(Improved~.-1, data = df)
sparse_matrix <- sparse.model.matrix(Improved ~ . - 1, data = df)

cat("Encoding of the sparse Matrix\n")
print(sparse_matrix)
@@ -61,7 +61,7 @@ print(sparse_matrix)
# 1. Set, for all rows, field in Y column to 0;
# 2. set Y to 1 when Improved == Marked;
# 3. Return Y column
output_vector = df[,Y:=0][Improved == "Marked",Y:=1][,Y]
output_vector <- df[, Y := 0][Improved == "Marked", Y := 1][, Y]

# Following is the same process as other demo
cat("Learning...\n")

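Condensing the one-hot-encoding flow commented above into a runnable sketch (assuming the vcd package is installed; the nrounds and max_depth values are illustrative):

library(xgboost)
library(Matrix)
library(data.table)
data(Arthritis, package = 'vcd')
df <- data.table(Arthritis, keep.rownames = FALSE)
# one-hot encode every categorical column except the target, Improved
sparse_matrix <- sparse.model.matrix(Improved ~ . - 1, data = df)
output_vector <- as.numeric(df$Improved == "Marked")
bst <- xgboost(data = sparse_matrix, label = output_vector,
               max_depth = 4, eta = 1, nrounds = 10,
               objective = "binary:logistic")
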
@@ -6,7 +6,7 @@ dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)

nrounds <- 2
param <- list(max_depth=2, eta=1, silent=1, nthread=2, objective='binary:logistic')
param <- list(max_depth = 2, eta = 1, nthread = 2, objective = 'binary:logistic')

cat('running cross validation\n')
# do cross validation, this will print result out as
@@ -22,10 +22,10 @@ xgb.cv(param, dtrain, nrounds, nfold=5,
metrics = 'error', showsd = FALSE)

###
# you can also do cross validation with cutomized loss function
# you can also do cross validation with customized loss function
# See custom_objective.R
##
print ('running cross validation, with cutomsized loss function')
print ('running cross validation, with customized loss function')

logregobj <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
@@ -40,7 +40,7 @@ evalerror <- function(preds, dtrain) {
return(list(metric = "error", value = err))
}

param <- list(max_depth=2, eta=1, silent=1,
param <- list(max_depth = 2, eta = 1,
objective = logregobj, eval_metric = evalerror)
# train with customized objective
xgb.cv(params = param, data = dtrain, nrounds = nrounds, nfold = 5)

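The hunks above elide the bodies of logregobj and evalerror; the sketch below shows what such a pair typically looks like for logistic loss (gradient and hessian of the log-loss), close to, but not guaranteed identical to, the demo file:

logregobj <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  preds <- 1 / (1 + exp(-preds))    # transform margin to probability
  grad <- preds - labels            # first derivative of the log-loss
  hess <- preds * (1 - preds)       # second derivative of the log-loss
  list(grad = grad, hess = hess)
}
evalerror <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  err <- sum(labels != (preds > 0)) / length(labels)  # preds are margins here
  list(metric = "error", value = err)
}
param <- list(max_depth = 2, eta = 1, objective = logregobj, eval_metric = evalerror)
xgb.cv(params = param, data = dtrain, nrounds = 2, nfold = 5)
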
@@ -23,9 +23,9 @@ logregobj <- function(preds, dtrain) {

# user defined evaluation function, return a pair metric_name, result
# NOTE: when you do customized loss function, the default prediction value is margin
# this may make buildin evalution metric not function properly
# this may make builtin evaluation metric not function properly
# for example, we are doing logistic loss, the prediction is score before logistic transformation
# the buildin evaluation error assumes input is after logistic transformation
# the builtin evaluation error assumes input is after logistic transformation
# Take this in mind when you use the customization, and maybe you need write customized evaluation function
evalerror <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")

@@ -21,9 +21,9 @@ logregobj <- function(preds, dtrain) {
}
# user defined evaluation function, return a pair metric_name, result
# NOTE: when you do customized loss function, the default prediction value is margin
# this may make buildin evalution metric not function properly
# this may make builtin evaluation metric not function properly
# for example, we are doing logistic loss, the prediction is score before logistic transformation
# the buildin evaluation error assumes input is after logistic transformation
# the builtin evaluation error assumes input is after logistic transformation
# Take this in mind when you use the customization, and maybe you need write customized evaluation function
evalerror <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")

@@ -31,4 +31,3 @@ bst <- xgb.train(param, dtrain, num_round, watchlist)
ypred <- predict(bst, dtest)
labels <- getinfo(dtest, 'label')
cat('error of preds=', mean(as.numeric(ypred > 0.5) != labels), '\n')

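The NOTE repeated in both hunks is easy to miss: with a customized objective, predict() returns the raw margin rather than a probability. A two-line sketch of the transformation that the builtin error metric would otherwise assume has already happened:

margin <- predict(bst, dtest)     # margin scores when trained with a custom objective
prob <- 1 / (1 + exp(-margin))    # apply the logistic transformation yourself
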
@@ -5,7 +5,9 @@ set.seed(1024)

# Function to obtain a list of interactions fitted in trees, requires input of maximum depth
treeInteractions <- function(input_tree, input_max_depth) {
trees <- copy(input_tree) # copy tree input to prevent overwriting
ID_merge <- i.id <- i.feature <- NULL # Suppress warning "no visible binding for global variable"

trees <- data.table::copy(input_tree) # copy tree input to prevent overwriting
if (input_max_depth < 2) return(list()) # no interactions if max depth < 2
if (nrow(input_tree) == 1) return(list())

@@ -15,23 +17,26 @@ treeInteractions <- function(input_tree, input_max_depth){
parents_left <- trees[!is.na(Split), list(i.id = ID, i.feature = Feature, ID_merge = Yes)]
parents_right <- trees[!is.na(Split), list(i.id = ID, i.feature = Feature, ID_merge = No)]

setorderv(trees, 'ID_merge')
setorderv(parents_left, 'ID_merge')
setorderv(parents_right, 'ID_merge')
data.table::setorderv(trees, 'ID_merge')
data.table::setorderv(parents_left, 'ID_merge')
data.table::setorderv(parents_right, 'ID_merge')

trees <- merge(trees, parents_left, by='ID_merge', all.x=T)
trees[!is.na(i.id), c(paste0('parent_', i-1), paste0('parent_feat_', i-1)):=list(i.id, i.feature)]
trees <- merge(trees, parents_left, by = 'ID_merge', all.x = TRUE)
trees[!is.na(i.id), c(paste0('parent_', i - 1), paste0('parent_feat_', i - 1))
:= list(i.id, i.feature)]
trees[, c('i.id', 'i.feature') := NULL]

trees <- merge(trees, parents_right, by='ID_merge', all.x=T)
trees[!is.na(i.id), c(paste0('parent_', i-1), paste0('parent_feat_', i-1)):=list(i.id, i.feature)]
trees <- merge(trees, parents_right, by = 'ID_merge', all.x = TRUE)
trees[!is.na(i.id), c(paste0('parent_', i - 1), paste0('parent_feat_', i - 1))
:= list(i.id, i.feature)]
trees[, c('i.id', 'i.feature') := NULL]
}

# Extract nodes with interactions
interaction_trees <- trees[!is.na(Split) & !is.na(parent_1),
c('Feature',paste0('parent_feat_',1:(input_max_depth-1))), with=F]
interaction_trees_split <- split(interaction_trees, 1:nrow(interaction_trees))
c('Feature', paste0('parent_feat_', 1:(input_max_depth - 1))),
with = FALSE]
interaction_trees_split <- split(interaction_trees, seq_len(nrow(interaction_trees)))
interaction_list <- lapply(interaction_trees_split, as.character)

# Remove NAs (no parent interaction)
@@ -48,13 +53,14 @@ treeInteractions <- function(input_tree, input_max_depth){
# Generate sample data
x <- list()
for (i in 1:10) {
x[[i]] = i*rnorm(1000, 10)
x[[i]] <- i * rnorm(1000, 10)
}
x <- as.data.table(x)

y = -1*x[, rowSums(.SD)] + x[['V1']]*x[['V2']] + x[['V3']]*x[['V4']]*x[['V5']] + rnorm(1000, 0.001) + 3*sin(x[['V7']])
y <- -1 * x[, rowSums(.SD)] + x[['V1']] * x[['V2']] + x[['V3']] * x[['V4']] * x[['V5']]
+ rnorm(1000, 0.001) + 3 * sin(x[['V7']])

train = as.matrix(x)
train <- as.matrix(x)

# Interaction constraint list (column names form)
interaction_list <- list(c('V1', 'V2'), c('V3', 'V4', 'V5'))
@@ -65,38 +71,40 @@ cols2ids <- function(object, col_names) {
names(LUT) <- col_names
rapply(object, function(x) LUT[x], classes = "character", how = "replace")
}
interaction_list_fid = cols2ids(interaction_list, colnames(train))
interaction_list_fid <- cols2ids(interaction_list, colnames(train))

# Fit model with interaction constraints
bst = xgboost(data = train, label = y, max_depth = 4,
bst <- xgboost(data = train, label = y, max_depth = 4,
eta = 0.1, nthread = 2, nrounds = 1000,
interaction_constraints = interaction_list_fid)

bst_tree <- xgb.model.dt.tree(colnames(train), bst)
bst_interactions <- treeInteractions(bst_tree, 4) # interactions constrained to combinations of V1*V2 and V3*V4*V5
bst_interactions <- treeInteractions(bst_tree, 4)
# interactions constrained to combinations of V1*V2 and V3*V4*V5

# Fit model without interaction constraints
bst2 = xgboost(data = train, label = y, max_depth = 4,
bst2 <- xgboost(data = train, label = y, max_depth = 4,
eta = 0.1, nthread = 2, nrounds = 1000)

bst2_tree <- xgb.model.dt.tree(colnames(train), bst2)
bst2_interactions <- treeInteractions(bst2_tree, 4) # much more interactions

# Fit model with both interaction and monotonicity constraints
bst3 = xgboost(data = train, label = y, max_depth = 4,
bst3 <- xgboost(data = train, label = y, max_depth = 4,
eta = 0.1, nthread = 2, nrounds = 1000,
interaction_constraints = interaction_list_fid,
monotone_constraints = c(-1, 0, 0, 0, 0, 0, 0, 0, 0, 0))

bst3_tree <- xgb.model.dt.tree(colnames(train), bst3)
bst3_interactions <- treeInteractions(bst3_tree, 4) # interactions still constrained to combinations of V1*V2 and V3*V4*V5
bst3_interactions <- treeInteractions(bst3_tree, 4)
# interactions still constrained to combinations of V1*V2 and V3*V4*V5

# Show monotonic constraints still apply by checking scores after incrementing V1
x1 <- sort(unique(x[['V1']]))
for (i in 1:length(x1)){
testdata <- copy(x[, -c('V1')])
for (i in seq_along(x1)){
testdata <- copy(x[, - ('V1')])
testdata[['V1']] <- x1[i]
testdata <- testdata[, paste0('V',1:10), with=F]
testdata <- testdata[, paste0('V', 1:10), with = FALSE]
pred <- predict(bst3, as.matrix(testdata))

# Should not print out anything due to monotonic constraints

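Note that interaction_constraints takes 0-based feature indices, which is what cols2ids produces above; a minimal sketch under that assumption (the index values shown are what the V1..V10 columns would map to):

# 'V1','V2' -> 0,1 and 'V3','V4','V5' -> 2,3,4 after cols2ids()
bst <- xgboost(data = train, label = y, max_depth = 4,
               eta = 0.1, nthread = 2, nrounds = 100,
               interaction_constraints = list(c(0, 1), c(2, 3, 4)))
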
@@ -1,7 +1,6 @@
data(mtcars)
head(mtcars)
bst = xgboost(data=as.matrix(mtcars[,-11]),label=mtcars[,11],
bst <- xgboost(data = as.matrix(mtcars[, -11]), label = mtcars[, 11],
objective = 'count:poisson', nrounds = 5)
pred = predict(bst,as.matrix(mtcars[,-11]))
pred <- predict(bst, as.matrix(mtcars[, -11]))
sqrt(mean((pred - mtcars[, 11]) ^ 2))

@@ -5,19 +5,19 @@ data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)

param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic')
param <- list(max_depth = 2, eta = 1, objective = 'binary:logistic')
watchlist <- list(eval = dtest, train = dtrain)
nrounds = 2
nrounds <- 2

# training the model for two rounds
bst = xgb.train(param, dtrain, nrounds, nthread = 2, watchlist)
bst <- xgb.train(param, dtrain, nrounds, nthread = 2, watchlist)
cat('start testing prediction from first n trees\n')
labels <- getinfo(dtest, 'label')

### predict using first 1 tree
ypred1 = predict(bst, dtest, ntreelimit=1)
ypred1 <- predict(bst, dtest, ntreelimit = 1)
# by default, we predict using all the trees
ypred2 = predict(bst, dtest)
ypred2 <- predict(bst, dtest)

cat('error of ypred1=', mean(as.numeric(ypred1 > 0.5) != labels), '\n')
cat('error of ypred2=', mean(as.numeric(ypred2 > 0.5) != labels), '\n')

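As the predict.xgb.Booster hunks further below document, ntreelimit is deprecated in this release in favour of iterationrange; for a single-tree-per-round binary model such as this one, the two calls should agree:

ypred1 <- predict(bst, dtest, ntreelimit = 1)            # old spelling
ypred1b <- predict(bst, dtest, iterationrange = c(1, 2)) # first iteration, half-open range
all.equal(ypred1, ypred1b)
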
@@ -10,18 +10,18 @@ data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label)

param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic')
nrounds = 4
param <- list(max_depth = 2, eta = 1, objective = 'binary:logistic')
nrounds <- 4

# training the model for two rounds
bst = xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)
bst <- xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)

# Model accuracy without new features
accuracy.before <- sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)
accuracy.before <- (sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label)
/ length(agaricus.test$label))

# by default, we predict using all the trees

pred_with_leaf = predict(bst, dtest, predleaf = TRUE)
pred_with_leaf <- predict(bst, dtest, predleaf = TRUE)
head(pred_with_leaf)

create.new.tree.features <- function(model, original.features){
@@ -47,7 +47,9 @@ watchlist <- list(train = new.dtrain)
bst <- xgb.train(params = param, data = new.dtrain, nrounds = nrounds, nthread = 2)

# Model accuracy with new features
accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)
accuracy.after <- (sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label)
/ length(agaricus.test$label))

# Here the accuracy was already good and is now perfect.
cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now", accuracy.after, "!\n"))
cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now",
accuracy.after, "!\n"))

@@ -1,14 +1,14 @@
# running all scripts in demo folder
demo(basic_walkthrough)
demo(custom_objective)
demo(boost_from_prediction)
demo(predict_first_ntree)
demo(generalized_linear_model)
demo(cross_validation)
demo(create_sparse_matrix)
demo(predict_leaf_indices)
demo(early_stopping)
demo(poisson_regression)
demo(caret_wrapper)
demo(tweedie_regression)
#demo(gpu_accelerated) # can only run when built with GPU support
demo(basic_walkthrough, package = 'xgboost')
demo(custom_objective, package = 'xgboost')
demo(boost_from_prediction, package = 'xgboost')
demo(predict_first_ntree, package = 'xgboost')
demo(generalized_linear_model, package = 'xgboost')
demo(cross_validation, package = 'xgboost')
demo(create_sparse_matrix, package = 'xgboost')
demo(predict_leaf_indices, package = 'xgboost')
demo(early_stopping, package = 'xgboost')
demo(poisson_regression, package = 'xgboost')
demo(caret_wrapper, package = 'xgboost')
demo(tweedie_regression, package = 'xgboost')
#demo(gpu_accelerated, package = 'xgboost') # can only run when built with GPU support

2  R-package/demo/tweedie_regression.R  Executable file → Normal file
@@ -13,7 +13,7 @@ exclude <- c('POLICYNO', 'PLCYDATE', 'CLM_FREQ5', 'CLM_AMT5', 'CLM_FLAG', 'IN_Y
# retains the missing values
# NOTE: this dataset is comes ready out of the box
options(na.action = 'na.pass')
x <- sparse.model.matrix(~ . - 1, data = dt[, -exclude, with = F])
x <- sparse.model.matrix(~ . - 1, data = dt[, -exclude, with = FALSE])
options(na.action = 'na.omit')

# response

96  R-package/inst/make-r-def.R  Normal file
@@ -0,0 +1,96 @@
# [description]
#     Create a definition file (.def) from a .dll file, using objdump. This
#     is used by FindLibR.cmake when building the R package with MSVC.
#
# [usage]
#
#     Rscript make-r-def.R something.dll something.def
#
# [references]
#     * https://www.cs.colorado.edu/~main/cs1300/doc/mingwfaq.html

args <- commandArgs(trailingOnly = TRUE)

IN_DLL_FILE <- args[[1L]]
OUT_DEF_FILE <- args[[2L]]
DLL_BASE_NAME <- basename(IN_DLL_FILE)

message(sprintf("Creating '%s' from '%s'", OUT_DEF_FILE, IN_DLL_FILE))

# system() will not raise an R exception if the process called
# fails. Wrapping it here to get that behavior.
#
# system() introduces a lot of overhead, at least on Windows,
# so trying processx if it is available
.pipe_shell_command_to_stdout <- function(command, args, out_file) {
  has_processx <- suppressMessages({
    suppressWarnings({
      require("processx")  # nolint
    })
  })
  if (has_processx) {
    p <- processx::process$new(
      command = command
      , args = args
      , stdout = out_file
      , windows_verbatim_args = FALSE
    )
    invisible(p$wait())
  } else {
    message(paste0(
      "Using system2() to run shell commands. Installing "
      , "'processx' with install.packages('processx') might "
      , "make this faster."
    ))
    exit_code <- system2(
      command = command
      , args = shQuote(args)
      , stdout = out_file
    )
    if (exit_code != 0L) {
      stop(paste0("Command failed with exit code: ", exit_code))
    }
  }
  return(invisible(NULL))
}

# use objdump to dump all the symbols
OBJDUMP_FILE <- "objdump-out.txt"
.pipe_shell_command_to_stdout(
  command = "objdump"
  , args = c("-p", IN_DLL_FILE)
  , out_file = OBJDUMP_FILE
)

objdump_results <- readLines(OBJDUMP_FILE)
result <- file.remove(OBJDUMP_FILE)

# Only one table in the objdump results matters for our purposes,
# see https://www.cs.colorado.edu/~main/cs1300/doc/mingwfaq.html
start_index <- which(
  grepl(
    pattern = "[Ordinal/Name Pointer] Table"
    , x = objdump_results
    , fixed = TRUE
  )
)
empty_lines <- which(objdump_results == "")
end_of_table <- empty_lines[empty_lines > start_index][1L]

# Read the contents of the table
exported_symbols <- objdump_results[(start_index + 1L):end_of_table]
exported_symbols <- gsub("\t", "", exported_symbols)
exported_symbols <- gsub(".*\\] ", "", exported_symbols)
exported_symbols <- gsub(" ", "", exported_symbols)

# Write R.def file
writeLines(
  text = c(
    paste0("LIBRARY \"", DLL_BASE_NAME, "\"")
    , "EXPORTS"
    , exported_symbols
  )
  , con = OUT_DEF_FILE
  , sep = "\n"
)
message(sprintf("Successfully created '%s'", OUT_DEF_FILE))
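To see what the three gsub() calls at the end of make-r-def.R accomplish, here is a tiny worked example on one made-up objdump table row (the symbol name is hypothetical):

line <- "\t[   5] R_GlobalEnv"       # hypothetical '[Ordinal/Name Pointer] Table' row
line <- gsub("\t", "", line)         # drop tab characters
line <- gsub(".*\\] ", "", line)     # drop the '[ ordinal]' prefix
line <- gsub(" ", "", line)          # drop any remaining spaces
print(line)                          # "R_GlobalEnv"
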
64  R-package/man/a-compatibility-note-for-saveRDS-save.Rd  Normal file
@@ -0,0 +1,64 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{a-compatibility-note-for-saveRDS-save}
\alias{a-compatibility-note-for-saveRDS-save}
\title{Do not use \code{\link[base]{saveRDS}} or \code{\link[base]{save}} for long-term archival of
models. Instead, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}}.}
\description{
It is a common practice to use the built-in \code{\link[base]{saveRDS}} function (or
\code{\link[base]{save}}) to persist R objects to the disk. While it is possible to persist
\code{xgb.Booster} objects using \code{\link[base]{saveRDS}}, it is not advisable to do so if
the model is to be accessed in the future. If you train a model with the current version of
XGBoost and persist it with \code{\link[base]{saveRDS}}, the model is not guaranteed to be
accessible in later releases of XGBoost. To ensure that your model can be accessed in future
releases of XGBoost, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}} instead.
}
\details{
Use \code{\link{xgb.save}} to save the XGBoost model as a stand-alone file. You may opt into
the JSON format by specifying the JSON extension. To read the model back, use
\code{\link{xgb.load}}.

Use \code{\link{xgb.save.raw}} to save the XGBoost model as a sequence (vector) of raw bytes
in a future-proof manner. Future releases of XGBoost will be able to read the raw bytes and
re-construct the corresponding model. To read the model back, use \code{\link{xgb.load.raw}}.
The \code{\link{xgb.save.raw}} function is useful if you'd like to persist the XGBoost model
as part of another R object.

Note: Do not use \code{\link{xgb.serialize}} to store models long-term. It persists not only the
model but also internal configurations and parameters, and its format is not stable across
multiple XGBoost versions. Use \code{\link{xgb.serialize}} only for checkpointing.

For more details and explanation about model persistence and archival, consult the page
\url{https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html}.
}
\examples{
data(agaricus.train, package='xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")

# Save as a stand-alone file; load it with xgb.load()
xgb.save(bst, 'xgb.model')
bst2 <- xgb.load('xgb.model')

# Save as a stand-alone file (JSON); load it with xgb.load()
xgb.save(bst, 'xgb.model.json')
bst2 <- xgb.load('xgb.model.json')
if (file.exists('xgb.model.json')) file.remove('xgb.model.json')

# Save as a raw byte vector; load it with xgb.load.raw()
xgb_bytes <- xgb.save.raw(bst)
bst2 <- xgb.load.raw(xgb_bytes)

# Persist XGBoost model as part of another R object
obj <- list(xgb_model_bytes = xgb.save.raw(bst), description = "My first XGBoost model")
# Persist the R object. Here, saveRDS() is okay, since it doesn't persist
# xgb.Booster directly. What's being persisted is the future-proof byte representation
# as given by xgb.save.raw().
saveRDS(obj, 'my_object.rds')
# Read back the R object
obj2 <- readRDS('my_object.rds')
# Re-construct xgb.Booster object from the bytes
bst2 <- xgb.load.raw(obj2$xgb_model_bytes)
if (file.exists('my_object.rds')) file.remove('my_object.rds')

}

@@ -38,10 +38,7 @@ The following additional fields are assigned to the model's R object:
\itemize{
\item \code{best_score} the evaluation score at the best iteration
\item \code{best_iteration} at which boosting iteration the best score has occurred (1-based index)
\item \code{best_ntreelimit} to use with the \code{ntreelimit} parameter in \code{predict}.
It differs from \code{best_iteration} in multiclass or random forest settings.
}

The Same values are also stored as xgb-attributes:
\itemize{
\item \code{best_iteration} is stored as a 0-based iteration index (for interoperability of binary models)

@@ -8,7 +8,7 @@ during its training.}
cb.gblinear.history(sparse = FALSE)
}
\arguments{
\item{sparse}{when set to FALSE/TURE, a dense/sparse matrix is used to store the result.
\item{sparse}{when set to FALSE/TRUE, a dense/sparse matrix is used to store the result.
Sparse format is useful when one expects only a subset of coefficients to be non-zero,
when using the "thrifty" feature selector with fairly small number of top features
selected per iteration.}
@@ -36,7 +36,6 @@ Callback function expects the following values to be set in its calling frame:
#
# In the iris dataset, it is hard to linearly separate Versicolor class from the rest
# without considering the 2nd order interactions:
require(magrittr)
x <- model.matrix(Species ~ .^2, iris)[,-1]
colnames(x)
dtrain <- xgb.DMatrix(scale(x), label = 1*(iris$Species == "versicolor"))
@@ -57,7 +56,7 @@ matplot(coef_path, type = 'l')
bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 200, eta = 0.8,
updater = 'coord_descent', feature_selector = 'thrifty', top_k = 1,
callbacks = list(cb.gblinear.history()))
xgb.gblinear.history(bst) \%>\% matplot(type = 'l')
matplot(xgb.gblinear.history(bst), type = 'l')
# Componentwise boosting is known to have similar effect to Lasso regularization.
# Try experimenting with various values of top_k, eta, nrounds,
# as well as different feature_selectors.
@@ -66,7 +65,7 @@ xgb.gblinear.history(bst) \%>\% matplot(type = 'l')
bst <- xgb.cv(param, dtrain, nfold = 5, nrounds = 100, eta = 0.8,
callbacks = list(cb.gblinear.history()))
# coefficients in the CV fold #3
xgb.gblinear.history(bst)[[3]] \%>\% matplot(type = 'l')
matplot(xgb.gblinear.history(bst)[[3]], type = 'l')

#### Multiclass classification:
@@ -79,15 +78,15 @@ param <- list(booster = "gblinear", objective = "multi:softprob", num_class = 3,
bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 70, eta = 0.5,
callbacks = list(cb.gblinear.history()))
# Will plot the coefficient paths separately for each class:
xgb.gblinear.history(bst, class_index = 0) \%>\% matplot(type = 'l')
xgb.gblinear.history(bst, class_index = 1) \%>\% matplot(type = 'l')
xgb.gblinear.history(bst, class_index = 2) \%>\% matplot(type = 'l')
matplot(xgb.gblinear.history(bst, class_index = 0), type = 'l')
matplot(xgb.gblinear.history(bst, class_index = 1), type = 'l')
matplot(xgb.gblinear.history(bst, class_index = 2), type = 'l')

# CV:
bst <- xgb.cv(param, dtrain, nfold = 5, nrounds = 70, eta = 0.5,
callbacks = list(cb.gblinear.history(FALSE)))
# 1st forld of 1st class
xgb.gblinear.history(bst, class_index = 0)[[1]] \%>\% matplot(type = 'l')
# 1st fold of 1st class
matplot(xgb.gblinear.history(bst, class_index = 0)[[1]], type = 'l')

}
\seealso{

@@ -23,9 +23,9 @@ Get information of an xgb.DMatrix object
The \code{name} field can be one of the following:

\itemize{
\item \code{label}: label Xgboost learn from ;
\item \code{label}: label XGBoost learn from ;
\item \code{weight}: to do a weight rescale ;
\item \code{base_margin}: base margin is the base prediction Xgboost will boost from ;
\item \code{base_margin}: base margin is the base prediction XGBoost will boost from ;
\item \code{nrow}: number of rows of the \code{xgb.DMatrix}.

}
@@ -34,8 +34,7 @@ The \code{name} field can be one of the following:
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))

labels <- getinfo(dtrain, 'label')
setinfo(dtrain, 'label', 1-labels)

18  R-package/man/normalize.Rd  Normal file
@@ -0,0 +1,18 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.ggplot.R
\name{normalize}
\alias{normalize}
\title{Scale feature value to have mean 0, standard deviation 1}
\usage{
normalize(x)
}
\arguments{
\item{x}{Numeric vector}
}
\value{
Numeric vector with mean 0 and sd 1.
}
\description{
This is used to compare multiple features on the same plot.
Internal utility function
}
@@ -17,6 +17,8 @@
predinteraction = FALSE,
reshape = FALSE,
training = FALSE,
iterationrange = NULL,
strict_shape = FALSE,
...
)

@@ -34,8 +36,7 @@ missing values in data (e.g., sometimes 0 or some other extreme value is used).}
sum of predictions from boosting iterations' results. E.g., setting \code{outputmargin=TRUE} for
logistic regression would result in predictions for log-odds instead of probabilities.}

\item{ntreelimit}{limit the number of model's trees or boosting iterations used in prediction (see Details).
It will use all the trees by default (\code{NULL} value).}
\item{ntreelimit}{Deprecated, use \code{iterationrange} instead.}

\item{predleaf}{whether predict leaf index.}

@@ -52,10 +53,20 @@ or predinteraction flags is TRUE.}
\item{training}{whether is the prediction result used for training. For dart booster,
training predicting will perform dropout.}

\item{iterationrange}{Specifies which layer of trees are used in prediction. For
example, if a random forest is trained with 100 rounds. Specifying
`iteration_range=(1, 21)`, then only the forests built during [1, 21) (half open set)
rounds are used in this prediction. It's 1-based index just like R vector. When set
to \code{c(1, 1)} XGBoost will use all trees.}

\item{strict_shape}{Default is \code{FALSE}. When it's set to \code{TRUE}, output
type and shape of prediction are invariant to model type.}

\item{...}{Parameters passed to \code{predict.xgb.Booster}}
}
\value{
For regression or binary classification, it returns a vector of length \code{nrows(newdata)}.
The return type is different depending whether \code{strict_shape} is set to \code{TRUE}. By default,
for regression or binary classification, it returns a vector of length \code{nrows(newdata)}.
For multiclass classification, either a \code{num_class * nrows(newdata)} vector or
a \code{(nrows(newdata), num_class)} dimension matrix is returned, depending on
the \code{reshape} value.
@@ -76,18 +87,19 @@ two dimensions. The "+ 1" columns corresponds to bias. Summing this array along
produce practically the same result as predict with \code{predcontrib = TRUE}.
For a multiclass case, a list of \code{num_class} elements is returned, where each element is
such an array.

When \code{strict_shape} is set to \code{TRUE}, the output is always an array. For
normal prediction, the output is a 2-dimension array \code{(num_class, nrow(newdata))}.

For \code{predcontrib = TRUE}, output is \code{(ncol(newdata) + 1, num_class, nrow(newdata))}
For \code{predinteraction = TRUE}, output is \code{(ncol(newdata) + 1, ncol(newdata) + 1, num_class, nrow(newdata))}
For \code{predleaf = TRUE}, output is \code{(n_trees_in_forest, num_class, n_iterations, nrow(newdata))}
}
\description{
Predicted values based on either xgboost model or model handle object.
}
\details{
Note that \code{ntreelimit} is not necessarily equal to the number of boosting iterations
and it is not necessarily equal to the number of trees in a model.
E.g., in a random forest-like model, \code{ntreelimit} would limit the number of trees.
But for multiclass classification, while there are multiple trees per iteration,
\code{ntreelimit} limits the number of boosting iterations.

Also note that \code{ntreelimit} would currently do nothing for predictions from gblinear,
Note that \code{iterationrange} would currently do nothing for predictions from gblinear,
since gblinear doesn't keep its boosting history.

One possible practical applications of the \code{predleaf} option is to use the model
@@ -120,7 +132,7 @@ bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
# use all trees by default
pred <- predict(bst, test$data)
# use only the 1st tree
pred1 <- predict(bst, test$data, ntreelimit = 1)
pred1 <- predict(bst, test$data, iterationrange = c(1, 2))

# Predicting tree leafs:
# the result is an nsamples X ntrees matrix
@@ -172,25 +184,9 @@ str(pred)
all.equal(pred, pred_labels)
# prediction from using only 5 iterations should result
# in the same error as seen in iteration 5:
pred5 <- predict(bst, as.matrix(iris[, -5]), ntreelimit=5)
pred5 <- predict(bst, as.matrix(iris[, -5]), iterationrange=c(1, 6))
sum(pred5 != lb)/length(lb)

## random forest-like model of 25 trees for binary classification:

set.seed(11)
bst <- xgboost(data = train$data, label = train$label, max_depth = 5,
nthread = 2, nrounds = 1, objective = "binary:logistic",
num_parallel_tree = 25, subsample = 0.6, colsample_bytree = 0.1)
# Inspect the prediction error vs number of trees:
lb <- test$label
dtest <- xgb.DMatrix(test$data, label=lb)
err <- sapply(1:25, function(n) {
pred <- predict(bst, dtest, ntreelimit=n)
sum((pred > 0.5) != lb)/length(lb)
})
plot(err, type='l', ylim=c(0,0.1), xlab='#trees')

}
\references{
Scott M. Lundberg, Su-In Lee, "A Unified Approach to Interpreting Model Predictions", NIPS Proceedings 2017, \url{https://arxiv.org/abs/1705.07874}

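A short sketch of the iterationrange semantics documented above, assuming a booster bst trained for 10 rounds and a compatible dtest:

pred_all <- predict(bst, dtest)                             # all iterations
pred_first <- predict(bst, dtest, iterationrange = c(1, 2)) # iteration 1 only: [1, 2)
pred_half <- predict(bst, dtest, iterationrange = c(1, 6))  # iterations 1..5: [1, 6)
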
27  R-package/man/prepare.ggplot.shap.data.Rd  Normal file
@@ -0,0 +1,27 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.ggplot.R
\name{prepare.ggplot.shap.data}
\alias{prepare.ggplot.shap.data}
\title{Combine and melt feature values and SHAP contributions for sample
observations.}
\usage{
prepare.ggplot.shap.data(data_list, normalize = FALSE)
}
\arguments{
\item{data_list}{List containing 'data' and 'shap_contrib' returned by
\code{xgb.shap.data()}.}

\item{normalize}{Whether to standardize feature values to have mean 0 and
standard deviation 1 (useful for comparing multiple features on the same
plot). Default \code{FALSE}.}
}
\value{
A data.table containing the observation ID, the feature name, the
feature value (normalized if specified), and the SHAP contribution value.
}
\description{
Conforms to data format required for ggplot functions.
}
\details{
Internal utility function.
}
@@ -19,8 +19,7 @@ Currently it displays dimensions and presence of info-fields and colnames.
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))

dtrain
print(dtrain, verbose=TRUE)

@@ -25,16 +25,15 @@ Set information of an xgb.DMatrix object
The \code{name} field can be one of the following:

\itemize{
\item \code{label}: label Xgboost learn from ;
\item \code{label}: label XGBoost learn from ;
\item \code{weight}: to do a weight rescale ;
\item \code{base_margin}: base margin is the base prediction Xgboost will boost from ;
\item \code{base_margin}: base margin is the base prediction XGBoost will boost from ;
\item \code{group}: number of rows in each group (to use with \code{rank:pairwise} objective).
}
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))

labels <- getinfo(dtrain, 'label')
setinfo(dtrain, 'label', 1-labels)

@@ -28,8 +28,7 @@ original xgb.DMatrix object
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))

dsub <- slice(dtrain, 1:42)
labels1 <- getinfo(dsub, 'label')

@@ -38,6 +38,8 @@ bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_dep
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
saveRDS(bst, "xgb.model.rds")

# Warning: The resulting RDS file is only compatible with the current XGBoost version.
# Refer to the section titled "a-compatibility-note-for-saveRDS-save".
bst1 <- readRDS("xgb.model.rds")
if (file.exists("xgb.model.rds")) file.remove("xgb.model.rds")
# the handle is invalid:

@@ -4,7 +4,14 @@
\alias{xgb.DMatrix}
\title{Construct xgb.DMatrix object}
\usage{
xgb.DMatrix(data, info = list(), missing = NA, silent = FALSE, ...)
xgb.DMatrix(
  data,
  info = list(),
  missing = NA,
  silent = FALSE,
  nthread = NULL,
  ...
)
}
\arguments{
\item{data}{a \code{matrix} object (either numeric or integer), a \code{dgCMatrix} object, or a character
@@ -18,17 +25,18 @@ It is useful when a 0 or some other extreme value represents missing values in d

\item{silent}{whether to suppress printing an informational message after loading from a file.}

\item{nthread}{Number of threads used for creating DMatrix.}

\item{...}{the \code{info} data could be passed directly as parameters, without creating an \code{info} list.}
}
\description{
Construct xgb.DMatrix object from either a dense matrix, a sparse matrix, or a local file.
Supported input file formats are either a libsvm text file or a binary file that was created previously by
Supported input file formats are either a LIBSVM text file or a binary file that was created previously by
\code{\link{xgb.DMatrix.save}}).
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))
xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
dtrain <- xgb.DMatrix('xgb.DMatrix.data')
if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')

@@ -16,8 +16,7 @@ Save xgb.DMatrix object to binary file
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train
dtrain <- xgb.DMatrix(train$data, label=train$label)
dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))
xgb.DMatrix.save(dtrain, 'xgb.DMatrix.data')
dtrain <- xgb.DMatrix('xgb.DMatrix.data')
if (file.exists('xgb.DMatrix.data')) file.remove('xgb.DMatrix.data')

@@ -59,8 +59,8 @@ a rule on certain features."
\examples{
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label)
dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))
dtest <- with(agaricus.test, xgb.DMatrix(data, label = label))

param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic')
nrounds = 4

@@ -28,12 +28,15 @@ xgb.cv(
)
}
\arguments{
\item{params}{the list of parameters. Commonly used ones are:
\item{params}{the list of parameters. The complete list of parameters is
available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below
is a shorter summary:
\itemize{
\item \code{objective} objective function, common ones are
\itemize{
\item \code{reg:squarederror} Regression with squared loss
\item \code{binary:logistic} logistic regression for classification
\item \code{reg:squarederror} Regression with squared loss.
\item \code{binary:logistic} logistic regression for classification.
\item See \code{\link[=xgb.train]{xgb.train}()} for complete list of objectives.
}
\item \code{eta} step size of each boosting step
\item \code{max_depth} maximum depth of the tree
@@ -67,6 +70,8 @@ from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callb
\item \code{error} binary classification error rate
\item \code{rmse} Rooted mean square error
\item \code{logloss} negative log-likelihood function
\item \code{mae} Mean absolute error
\item \code{mape} Mean absolute percentage error
\item \code{auc} Area under curve
\item \code{aucpr} Area under PR curve
\item \code{merror} Exact matching error, used to evaluate multi-class classification
@@ -130,9 +135,7 @@ An object of class \code{xgb.cv.synchronous} with the following elements:
parameter or randomly generated.
\item \code{best_iteration} iteration number with the best evaluation metric value
(only available with early stopping).
\item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
which could further be used in \code{predict} method
(only available with early stopping).
\item \code{best_ntreelimit} and the \code{ntreelimit} Deprecated attributes, use \code{best_iteration} instead.
\item \code{pred} CV prediction values available when \code{prediction} is set.
It is either vector or matrix (see \code{\link{cb.cv.predict}}).
\item \code{models} a list of the CV folds' models. It is only available with the explicit
@@ -151,11 +154,11 @@ The cross-validation process is then repeated \code{nrounds} times, with each of

All observations are used for both training and validation.

Adapted from \url{http://en.wikipedia.org/wiki/Cross-validation_\%28statistics\%29#k-fold_cross-validation}
Adapted from \url{https://en.wikipedia.org/wiki/Cross-validation_\%28statistics\%29}
}
\examples{
data(agaricus.train, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))
cv <- xgb.cv(data = dtrain, nrounds = 3, nthread = 2, nfold = 5, metrics = list("rmse","auc"),
max_depth = 3, eta = 1, objective = "binary:logistic")
print(cv)

@@ -87,7 +87,7 @@ more than 5 distinct values.}

\item{which}{whether to do univariate or bivariate plotting. NOTE: only 1D is implemented so far.}

\item{plot}{whether a plot should be drawn. If FALSE, only a lits of matrices is returned.}
\item{plot}{whether a plot should be drawn. If FALSE, only a list of matrices is returned.}

\item{...}{other parameters passed to \code{plot}.}
}
@@ -131,6 +131,7 @@ bst <- xgboost(agaricus.train$data, agaricus.train$label, nrounds = 50,
xgb.plot.shap(agaricus.test$data, model = bst, features = "odor=none")
contr <- predict(bst, agaricus.test$data, predcontrib = TRUE)
xgb.plot.shap(agaricus.test$data, contr, model = bst, top_n = 12, n_col = 3)
xgb.ggplot.shap.summary(agaricus.test$data, contr, model = bst, top_n = 12) # Summary plot

# multiclass example - plots for each class separately:
nclass <- 3
@@ -149,6 +150,7 @@ xgb.plot.shap(x, model = mbst, trees = trees0 + 1, target_class = 1, top_n = 4,
n_col = 2, col = col, pch = 16, pch_NA = 17)
xgb.plot.shap(x, model = mbst, trees = trees0 + 2, target_class = 2, top_n = 4,
n_col = 2, col = col, pch = 16, pch_NA = 17)
xgb.ggplot.shap.summary(x, model = mbst, target_class = 0, top_n = 4) # Summary plot

}
\references{

78  R-package/man/xgb.plot.shap.summary.Rd  Normal file
@@ -0,0 +1,78 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.ggplot.R, R/xgb.plot.shap.R
\name{xgb.ggplot.shap.summary}
\alias{xgb.ggplot.shap.summary}
\alias{xgb.plot.shap.summary}
\title{SHAP contribution dependency summary plot}
\usage{
xgb.ggplot.shap.summary(
  data,
  shap_contrib = NULL,
  features = NULL,
  top_n = 10,
  model = NULL,
  trees = NULL,
  target_class = NULL,
  approxcontrib = FALSE,
  subsample = NULL
)

xgb.plot.shap.summary(
  data,
  shap_contrib = NULL,
  features = NULL,
  top_n = 10,
  model = NULL,
  trees = NULL,
  target_class = NULL,
  approxcontrib = FALSE,
  subsample = NULL
)
}
\arguments{
\item{data}{data as a \code{matrix} or \code{dgCMatrix}.}

\item{shap_contrib}{a matrix of SHAP contributions that was computed earlier for the above
\code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.}

\item{features}{a vector of either column indices or of feature names to plot. When it is NULL,
feature importance is calculated, and \code{top_n} high ranked features are taken.}

\item{top_n}{when \code{features} is NULL, top_n [1, 100] most important features in a model are taken.}

\item{model}{an \code{xgb.Booster} model. It has to be provided when either \code{shap_contrib}
or \code{features} is missing.}

\item{trees}{passed to \code{\link{xgb.importance}} when \code{features = NULL}.}

\item{target_class}{is only relevant for multiclass models. When it is set to a 0-based class index,
only SHAP contributions for that specific class are used.
If it is not set, SHAP importances are averaged over all classes.}

\item{approxcontrib}{passed to \code{\link{predict.xgb.Booster}} when \code{shap_contrib = NULL}.}

\item{subsample}{a random fraction of data points to use for plotting. When it is NULL,
it is set so that up to 100K data points are used.}
}
\value{
A \code{ggplot2} object.
}
\description{
Compare SHAP contributions of different features.
}
\details{
A point plot (each point representing one sample from \code{data}) is
produced for each feature, with the points plotted on the SHAP value axis.
Each point (observation) is coloured based on its feature value. The plot
hence allows us to see which features have a negative / positive contribution
on the model prediction, and whether the contribution is different for larger
or smaller values of the feature. We effectively try to replicate the
\code{summary_plot} function from https://github.com/slundberg/shap.
}
\examples{
# See \code{\link{xgb.plot.shap}}.
}
\seealso{
\code{\link{xgb.plot.shap}}, \code{\link{xgb.ggplot.shap.summary}},
\url{https://github.com/slundberg/shap}
}

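For quick orientation, a minimal end-to-end sketch of the new summary plot, reusing the agaricus example data from the xgb.plot.shap examples above (training settings here are illustrative):

library(xgboost)
data(agaricus.train, package = 'xgboost')
bst <- xgboost(agaricus.train$data, agaricus.train$label, nrounds = 50,
               eta = 0.1, max_depth = 3, subsample = 0.5,
               objective = "binary:logistic", nthread = 2, verbose = 0)
# shap_contrib may be precomputed; when NULL it is derived from `model` and `data`
contr <- predict(bst, agaricus.train$data, predcontrib = TRUE)
xgb.ggplot.shap.summary(agaricus.train$data, contr, model = bst, top_n = 12)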
@@ -22,7 +22,11 @@ of \code{\link{xgb.train}}.

Note: a model can also be saved as an R-object (e.g., by using \code{\link[base]{readRDS}}
or \code{\link[base]{save}}). However, it would then only be compatible with R, and
corresponding R-methods would need to be used to load it.
corresponding R-methods would need to be used to load it. Moreover, persisting the model with
\code{\link[base]{readRDS}} or \code{\link[base]{save}} will cause compatibility problems in
future versions of XGBoost. Consult \code{\link{a-compatibility-note-for-saveRDS-save}} to learn
how to persist models in a future-proof way, i.e. to make the model accessible in future
releases of XGBoost.
}
\examples{
data(agaricus.train, package='xgboost')

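To make the recommendation above concrete, a short sketch contrasting the two persistence routes (file names are placeholders):

bst <- xgboost(agaricus.train$data, agaricus.train$label, nrounds = 10,
               objective = "binary:logistic", nthread = 2, verbose = 0)
xgb.save(bst, 'xgb.model')     # future-proof: XGBoost's own format
# saveRDS(bst, 'xgb.rds')      # R-only, and may break in future XGBoost releases
bst2 <- xgb.load('xgb.model')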
55
R-package/man/xgb.shap.data.Rd
Normal file
@@ -0,0 +1,55 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.plot.shap.R
\name{xgb.shap.data}
\alias{xgb.shap.data}
\title{Prepare data for SHAP plots. To be used in xgb.plot.shap, xgb.plot.shap.summary, etc.
Internal utility function.}
\usage{
xgb.shap.data(
  data,
  shap_contrib = NULL,
  features = NULL,
  top_n = 1,
  model = NULL,
  trees = NULL,
  target_class = NULL,
  approxcontrib = FALSE,
  subsample = NULL,
  max_observations = 1e+05
)
}
\arguments{
\item{data}{data as a \code{matrix} or \code{dgCMatrix}.}

\item{shap_contrib}{a matrix of SHAP contributions that was computed earlier for the above
\code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.}

\item{features}{a vector of either column indices or of feature names to plot. When it is NULL,
feature importance is calculated, and \code{top_n} high ranked features are taken.}

\item{top_n}{when \code{features} is NULL, top_n [1, 100] most important features in a model are taken.}

\item{model}{an \code{xgb.Booster} model. It has to be provided when either \code{shap_contrib}
or \code{features} is missing.}

\item{trees}{passed to \code{\link{xgb.importance}} when \code{features = NULL}.}

\item{target_class}{is only relevant for multiclass models. When it is set to a 0-based class index,
only SHAP contributions for that specific class are used.
If it is not set, SHAP importances are averaged over all classes.}

\item{approxcontrib}{passed to \code{\link{predict.xgb.Booster}} when \code{shap_contrib = NULL}.}

\item{subsample}{a random fraction of data points to use for plotting. When it is NULL,
it is set so that up to 100K data points are used.}
}
\value{
A list containing: 'data', a matrix containing sample observations
and their feature values; 'shap_contrib', a matrix containing the SHAP contribution
values for these observations.
}
\description{
Prepare data for SHAP plots. To be used in xgb.plot.shap, xgb.plot.shap.summary, etc.
Internal utility function.
}
\keyword{internal}
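Because xgb.shap.data is internal, it is reached with the triple-colon operator, the same pattern the helper scripts in this diff use for unexported symbols; a sketch, with bst a booster trained as in the examples above:

prep <- xgboost:::xgb.shap.data(agaricus.train$data, model = bst, top_n = 5)
str(prep$data)          # sampled observations, selected features only
str(prep$shap_contrib)  # matching SHAP contribution matrix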
@@ -42,9 +42,9 @@ xgboost(
)
}
\arguments{
\item{params}{the list of parameters.
The complete list of parameters is available at \url{http://xgboost.readthedocs.io/en/latest/parameter.html}.
Below is a shorter summary:
\item{params}{the list of parameters. The complete list of parameters is
available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below
is a shorter summary:

1. General Parameters

@@ -54,7 +54,7 @@ xgboost(

2. Booster Parameters

2.1. Parameter for Tree Booster
2.1. Parameters for Tree Booster

\itemize{
  \item \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model more robust to overfitting but slower to compute. Default: 0.3
@@ -63,12 +63,14 @@ xgboost(
  \item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
  \item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
  \item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
  \item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through Xgboost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
  \item \code{lambda} L2 regularization term on weights. Default: 1
  \item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
  \item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through XGBoost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
  \item \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length equals to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
  \item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave argument unspecified for no interaction constraints.
}

2.2. Parameter for Linear Booster
2.2. Parameters for Linear Booster

\itemize{
  \item \code{lambda} L2 regularization term on weights. Default: 0
@@ -82,13 +84,23 @@ xgboost(
  \item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
  \itemize{
    \item \code{reg:squarederror} Regression with squared loss (Default).
    \item \code{reg:squaredlogerror}: regression with squared log loss \eqn{1/2 * (log(pred + 1) - log(label + 1))^2}. All inputs are required to be greater than -1. Also, see metric rmsle for possible issue with this objective.
    \item \code{reg:logistic} logistic regression.
    \item \code{reg:pseudohubererror}: regression with Pseudo Huber loss, a twice differentiable alternative to absolute loss.
    \item \code{binary:logistic} logistic regression for binary classification. Output probability.
    \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
    \item \code{num_class} set the number of classes. To use only with multiclass objectives.
    \item \code{binary:hinge}: hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities.
    \item \code{count:poisson}: Poisson regression for count data, output mean of Poisson distribution. \code{max_delta_step} is set to 0.7 by default in poisson regression (used to safeguard optimization).
    \item \code{survival:cox}: Cox regression for right censored survival time data (negative values are considered right censored). Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional hazard function \code{h(t) = h0(t) * HR)}.
    \item \code{survival:aft}: Accelerated failure time model for censored survival time data. See \href{https://xgboost.readthedocs.io/en/latest/tutorials/aft_survival_analysis.html}{Survival Analysis with Accelerated Failure Time} for details.
    \item \code{aft_loss_distribution}: Probability Density Function used by \code{survival:aft} and \code{aft-nloglik} metric.
    \item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}.
    \item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
    \item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss.
    \item \code{rank:ndcg}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Discounted_cumulative_gain}{Normalized Discounted Cumulative Gain (NDCG)} is maximized.
    \item \code{rank:map}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision}{Mean Average Precision (MAP)} is maximized.
    \item \code{reg:gamma}: gamma regression with log-link. Output is a mean of gamma distribution. It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Gamma_distribution#Applications}{gamma-distributed}.
    \item \code{reg:tweedie}: Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Tweedie_distribution#Applications}{Tweedie-distributed}.
  }
  \item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
  \item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: metric will be assigned according to objective (rmse for regression, and error for classification, mean average precision for ranking). List is provided in detail section.
@@ -175,9 +187,6 @@ An object of class \code{xgb.Booster} with the following elements:
  explicitly passed.
  \item \code{best_iteration} iteration number with the best evaluation metric value
        (only available with early stopping).
  \item \code{best_ntreelimit} the \code{ntreelimit} value corresponding to the best iteration,
        which could further be used in \code{predict} method
        (only available with early stopping).
  \item \code{best_score} the best evaluation metric value during early stopping.
        (only available with early stopping).
  \item \code{feature_names} names of the training dataset features
@@ -199,22 +208,24 @@ than the \code{xgboost} interface.
Parallelization is automatically enabled if \code{OpenMP} is present.
Number of threads can also be manually specified via \code{nthread} parameter.

The evaluation metric is chosen automatically by Xgboost (according to the objective)
The evaluation metric is chosen automatically by XGBoost (according to the objective)
when the \code{eval_metric} parameter is not provided.
User may set one or several \code{eval_metric} parameters.
Note that when using a customized metric, only this single metric can be used.
The following is the list of built-in metrics for which Xgboost provides optimized implementation:
The following is the list of built-in metrics for which XGBoost provides optimized implementation:
\itemize{
  \item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
  \item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
  \item \code{mlogloss} multiclass logloss. \url{http://wiki.fast.ai/index.php/Log_Loss}
  \item \code{rmse} root mean square error. \url{https://en.wikipedia.org/wiki/Root_mean_square_error}
  \item \code{logloss} negative log-likelihood. \url{https://en.wikipedia.org/wiki/Log-likelihood}
  \item \code{mlogloss} multiclass logloss. \url{https://scikit-learn.org/stable/modules/generated/sklearn.metrics.log_loss.html}
  \item \code{error} Binary classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
        By default, it uses the 0.5 threshold for predicted values to define negative and positive instances.
        Different threshold (e.g., 0.) could be specified as "error@0."
  \item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
  \item \code{auc} Area under the curve. \url{http://en.wikipedia.org/wiki/Receiver_operating_characteristic#'Area_under_curve} for ranking evaluation.
  \item \code{mae} Mean absolute error
  \item \code{mape} Mean absolute percentage error
  \item \code{auc} Area under the curve. \url{https://en.wikipedia.org/wiki/Receiver_operating_characteristic#'Area_under_curve} for ranking evaluation.
  \item \code{aucpr} Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation.
  \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{http://en.wikipedia.org/wiki/NDCG}
  \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{https://en.wikipedia.org/wiki/NDCG}
}

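Tying the parameter, objective and metric lists together, a minimal training sketch (values are illustrative; dtrain is the DMatrix built in the example lines below):

params <- list(
  booster = "gbtree",
  eta = 0.1,                      # lower eta, more conservative boosting
  max_depth = 4,
  subsample = 0.8,
  objective = "binary:logistic",
  eval_metric = "error"           # overrides the objective's default metric
)
bst <- xgb.train(params, dtrain, nrounds = 50, watchlist = list(train = dtrain))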
The following callbacks are automatically created when certain parameters are set:
@@ -230,8 +241,8 @@ The following callbacks are automatically created when certain parameters are se
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')

dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
dtrain <- with(agaricus.train, xgb.DMatrix(data, label = label))
dtest <- with(agaricus.test, xgb.DMatrix(data, label = label))
watchlist <- list(train = dtrain, eval = dtest)

## A simple xgb.train example:

@@ -4,10 +4,17 @@
\alias{xgb.unserialize}
\title{Load the instance back from \code{\link{xgb.serialize}}}
\usage{
xgb.unserialize(buffer)
xgb.unserialize(buffer, handle = NULL)
}
\arguments{
\item{buffer}{the buffer containing booster instance saved by \code{\link{xgb.serialize}}}

\item{handle}{An \code{xgb.Booster.handle} object which will be overwritten with
the new deserialized object. Must be a null handle (e.g. when loading the model through
`readRDS`). If not provided, a new handle will be created.}
}
\value{
An \code{xgb.Booster.handle} object.
}
\description{
Load the instance back from \code{\link{xgb.serialize}}

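A round-trip sketch of the serialization pair described here; the handle argument only matters when restoring into an existing null handle (e.g. a booster brought back by readRDS):

buf <- xgb.serialize(bst)     # raw vector with the complete booster state
h <- xgb.unserialize(buf)     # handle left NULL: a new handle is created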
39
R-package/man/xgbConfig.Rd
Normal file
@@ -0,0 +1,39 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.config.R
\name{xgb.set.config, xgb.get.config}
\alias{xgb.set.config, xgb.get.config}
\alias{xgb.set.config}
\alias{xgb.get.config}
\title{Set and get global configuration}
\usage{
xgb.set.config(...)

xgb.get.config()
}
\arguments{
\item{...}{List of parameters to be set, as keyword arguments}
}
\value{
\code{xgb.set.config} returns \code{TRUE} to signal success. \code{xgb.get.config} returns
a list containing all global-scope parameters and their values.
}
\description{
Global configuration consists of a collection of parameters that can be applied in the global
scope. See \url{https://xgboost.readthedocs.io/en/stable/parameter.html} for the full list of
parameters supported in the global configuration. Use \code{xgb.set.config} to update the
values of one or more global-scope parameters. Use \code{xgb.get.config} to fetch the current
values of all global-scope parameters (listed in
\url{https://xgboost.readthedocs.io/en/stable/parameter.html}).
}
\examples{
# Set verbosity level to silent (0)
xgb.set.config(verbosity = 0)
# Now global verbosity level is 0
config <- xgb.get.config()
print(config$verbosity)
# Set verbosity level to warning (1)
xgb.set.config(verbosity = 1)
# Now global verbosity level is 1
config <- xgb.get.config()
print(config$verbosity)
}
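A common pattern with this API is to snapshot the configuration, change it temporarily, and restore it afterwards; a sketch using only the documented calls:

old_config <- xgb.get.config()
xgb.set.config(verbosity = 0)    # silence a noisy section
# ... work ...
do.call(xgb.set.config, old_config)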
@@ -3,12 +3,12 @@ PKGROOT=../../
ENABLE_STD_THREAD=1
# _*_ mode: Makefile; _*_

CXX_STD = CXX11
CXX_STD = CXX14

XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\
             -DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) -DDMLC_DISABLE_STDIN=1\
             -DDMLC_LOG_CUSTOMIZE=1 -DXGBOOST_CUSTOMIZE_LOGGER=1\
             -DRABIT_CUSTOMIZE_MSG_ -DRABIT_STRICT_CXX98_
             -DRABIT_CUSTOMIZE_MSG_

# disable the use of thread_local for 32 bit windows:
ifeq ($(R_OSTYPE)$(WIN),windows)
@@ -17,8 +17,9 @@ endif
$(foreach v, $(XGB_RFLAGS), $(warning $(v)))

PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
PKG_CXXFLAGS= @OPENMP_CXXFLAGS@ @ENDIAN_FLAG@ -pthread
PKG_CXXFLAGS= @OPENMP_CXXFLAGS@ @ENDIAN_FLAG@ -pthread $(CXX_VISIBILITY)
PKG_LIBS = @OPENMP_CXXFLAGS@ @OPENMP_LIB@ @ENDIAN_FLAG@ @BACKTRACE_LIB@ -pthread
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o \
         $(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o \
         $(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o
         $(PKGROOT)/rabit/src/engine.o $(PKGROOT)/rabit/src/rabit_c_api.o \
         $(PKGROOT)/rabit/src/allreduce_base.o

@@ -3,7 +3,7 @@ PKGROOT=./
ENABLE_STD_THREAD=0
# _*_ mode: Makefile; _*_

# This file is only used for windows compilation from github
# This file is only used for Windows compilation from GitHub
# It will be replaced with Makevars.in for the CRAN version
.PHONY: all xgblib
all: $(SHLIB)
@@ -15,12 +15,12 @@ xgblib:
	cp -r ../../include .
	cp -r ../../amalgamation .

CXX_STD = CXX11
CXX_STD = CXX14

XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\
             -DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) -DDMLC_DISABLE_STDIN=1\
             -DDMLC_LOG_CUSTOMIZE=1 -DXGBOOST_CUSTOMIZE_LOGGER=1\
             -DRABIT_CUSTOMIZE_MSG_ -DRABIT_STRICT_CXX98_
             -DRABIT_CUSTOMIZE_MSG_

# disable the use of thread_local for 32 bit windows:
ifeq ($(R_OSTYPE)$(WIN),windows)
@@ -33,6 +33,7 @@ PKG_CXXFLAGS= $(SHLIB_OPENMP_CXXFLAGS) $(SHLIB_PTHREAD_FLAGS)
PKG_LIBS = $(SHLIB_OPENMP_CXXFLAGS) $(SHLIB_PTHREAD_FLAGS)
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o \
         $(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o \
         $(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o
         $(PKGROOT)/rabit/src/engine.o $(PKGROOT)/rabit/src/rabit_c_api.o \
         $(PKGROOT)/rabit/src/allreduce_base.o

$(OBJECTS) : xgblib

@@ -9,6 +9,7 @@
#include <Rinternals.h>
#include <stdlib.h>
#include <R_ext/Rdynload.h>
#include <R_ext/Visibility.h>

/* FIXME:
Check these declarations against the C/Fortran source code.
@@ -17,6 +18,7 @@ Check these declarations against the C/Fortran source code.
/* .Call calls */
extern SEXP XGBoosterBoostOneIter_R(SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterCreate_R(SEXP);
extern SEXP XGBoosterCreateInEmptyObj_R(SEXP, SEXP);
extern SEXP XGBoosterDumpModel_R(SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterEvalOneIter_R(SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterGetAttrNames_R(SEXP);
@@ -29,6 +31,7 @@ extern SEXP XGBoosterSerializeToBuffer_R(SEXP handle);
extern SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw);
extern SEXP XGBoosterModelToRaw_R(SEXP);
extern SEXP XGBoosterPredict_R(SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterPredictFromDMatrix_R(SEXP, SEXP, SEXP);
extern SEXP XGBoosterSaveModel_R(SEXP, SEXP);
extern SEXP XGBoosterSetAttr_R(SEXP, SEXP, SEXP);
extern SEXP XGBoosterSetParam_R(SEXP, SEXP, SEXP);
@@ -36,17 +39,21 @@ extern SEXP XGBoosterUpdateOneIter_R(SEXP, SEXP, SEXP);
extern SEXP XGCheckNullPtr_R(SEXP);
extern SEXP XGDMatrixCreateFromCSC_R(SEXP, SEXP, SEXP, SEXP);
extern SEXP XGDMatrixCreateFromFile_R(SEXP, SEXP);
extern SEXP XGDMatrixCreateFromMat_R(SEXP, SEXP);
extern SEXP XGDMatrixCreateFromMat_R(SEXP, SEXP, SEXP);
extern SEXP XGDMatrixGetInfo_R(SEXP, SEXP);
extern SEXP XGDMatrixNumCol_R(SEXP);
extern SEXP XGDMatrixNumRow_R(SEXP);
extern SEXP XGDMatrixSaveBinary_R(SEXP, SEXP, SEXP);
extern SEXP XGDMatrixSetInfo_R(SEXP, SEXP, SEXP);
extern SEXP XGDMatrixSliceDMatrix_R(SEXP, SEXP);
extern SEXP XGBSetGlobalConfig_R(SEXP);
extern SEXP XGBGetGlobalConfig_R();
extern SEXP XGBoosterFeatureScore_R(SEXP, SEXP);

static const R_CallMethodDef CallEntries[] = {
  {"XGBoosterBoostOneIter_R", (DL_FUNC) &XGBoosterBoostOneIter_R, 4},
  {"XGBoosterCreate_R", (DL_FUNC) &XGBoosterCreate_R, 1},
  {"XGBoosterCreateInEmptyObj_R", (DL_FUNC) &XGBoosterCreateInEmptyObj_R, 2},
  {"XGBoosterDumpModel_R", (DL_FUNC) &XGBoosterDumpModel_R, 4},
  {"XGBoosterEvalOneIter_R", (DL_FUNC) &XGBoosterEvalOneIter_R, 4},
  {"XGBoosterGetAttrNames_R", (DL_FUNC) &XGBoosterGetAttrNames_R, 1},
@@ -59,6 +66,7 @@ static const R_CallMethodDef CallEntries[] = {
  {"XGBoosterUnserializeFromBuffer_R", (DL_FUNC) &XGBoosterUnserializeFromBuffer_R, 2},
  {"XGBoosterModelToRaw_R", (DL_FUNC) &XGBoosterModelToRaw_R, 1},
  {"XGBoosterPredict_R", (DL_FUNC) &XGBoosterPredict_R, 5},
  {"XGBoosterPredictFromDMatrix_R", (DL_FUNC) &XGBoosterPredictFromDMatrix_R, 3},
  {"XGBoosterSaveModel_R", (DL_FUNC) &XGBoosterSaveModel_R, 2},
  {"XGBoosterSetAttr_R", (DL_FUNC) &XGBoosterSetAttr_R, 3},
  {"XGBoosterSetParam_R", (DL_FUNC) &XGBoosterSetParam_R, 3},
@@ -66,20 +74,23 @@ static const R_CallMethodDef CallEntries[] = {
  {"XGCheckNullPtr_R", (DL_FUNC) &XGCheckNullPtr_R, 1},
  {"XGDMatrixCreateFromCSC_R", (DL_FUNC) &XGDMatrixCreateFromCSC_R, 4},
  {"XGDMatrixCreateFromFile_R", (DL_FUNC) &XGDMatrixCreateFromFile_R, 2},
  {"XGDMatrixCreateFromMat_R", (DL_FUNC) &XGDMatrixCreateFromMat_R, 2},
  {"XGDMatrixCreateFromMat_R", (DL_FUNC) &XGDMatrixCreateFromMat_R, 3},
  {"XGDMatrixGetInfo_R", (DL_FUNC) &XGDMatrixGetInfo_R, 2},
  {"XGDMatrixNumCol_R", (DL_FUNC) &XGDMatrixNumCol_R, 1},
  {"XGDMatrixNumRow_R", (DL_FUNC) &XGDMatrixNumRow_R, 1},
  {"XGDMatrixSaveBinary_R", (DL_FUNC) &XGDMatrixSaveBinary_R, 3},
  {"XGDMatrixSetInfo_R", (DL_FUNC) &XGDMatrixSetInfo_R, 3},
  {"XGDMatrixSliceDMatrix_R", (DL_FUNC) &XGDMatrixSliceDMatrix_R, 2},
  {"XGBSetGlobalConfig_R", (DL_FUNC) &XGBSetGlobalConfig_R, 1},
  {"XGBGetGlobalConfig_R", (DL_FUNC) &XGBGetGlobalConfig_R, 0},
  {"XGBoosterFeatureScore_R", (DL_FUNC) &XGBoosterFeatureScore_R, 2},
  {NULL, NULL, 0}
};

#if defined(_WIN32)
__declspec(dllexport)
#endif // defined(_WIN32)
void R_init_xgboost(DllInfo *dll) {
void attribute_visible R_init_xgboost(DllInfo *dll) {
  R_registerRoutines(dll, NULL, CallEntries, NULL, NULL);
  R_useDynamicSymbols(dll, FALSE);
}

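The CallEntries table above is what makes these C entry points reachable from R via .Call on the registered symbols; for instance (an internal, unexported call, shown only as a sketch):

json_str <- .Call(xgboost:::XGBGetGlobalConfig_R)   # JSON string of global settings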
3
R-package/src/xgboost-win.def
Normal file
@@ -0,0 +1,3 @@
LIBRARY xgboost.dll
EXPORTS
R_init_xgboost
@@ -1,6 +1,7 @@
// Copyright (c) 2014 by Contributors
#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <dmlc/common.h>
#include <xgboost/c_api.h>
#include <vector>
#include <string>
@@ -8,6 +9,8 @@
#include <cstring>
#include <cstdio>
#include <sstream>

#include "../../src/common/threading_utils.h"
#include "./xgboost_R.h"

/*!
@@ -37,11 +40,11 @@

using namespace dmlc;

SEXP XGCheckNullPtr_R(SEXP handle) {
XGB_DLL SEXP XGCheckNullPtr_R(SEXP handle) {
  return ScalarLogical(R_ExternalPtrAddr(handle) == NULL);
}

void _DMatrixFinalizer(SEXP ext) {
XGB_DLL void _DMatrixFinalizer(SEXP ext) {
  R_API_BEGIN();
  if (R_ExternalPtrAddr(ext) == NULL) return;
  CHECK_CALL(XGDMatrixFree(R_ExternalPtrAddr(ext)));
@@ -49,7 +52,22 @@ void _DMatrixFinalizer(SEXP ext) {
  R_API_END();
}

SEXP XGDMatrixCreateFromFile_R(SEXP fname, SEXP silent) {
XGB_DLL SEXP XGBSetGlobalConfig_R(SEXP json_str) {
  R_API_BEGIN();
  CHECK_CALL(XGBSetGlobalConfig(CHAR(asChar(json_str))));
  R_API_END();
  return R_NilValue;
}

XGB_DLL SEXP XGBGetGlobalConfig_R() {
  const char* json_str;
  R_API_BEGIN();
  CHECK_CALL(XGBGetGlobalConfig(&json_str));
  R_API_END();
  return mkString(json_str);
}

XGB_DLL SEXP XGDMatrixCreateFromFile_R(SEXP fname, SEXP silent) {
  SEXP ret;
  R_API_BEGIN();
  DMatrixHandle handle;
@@ -61,8 +79,7 @@ SEXP XGDMatrixCreateFromFile_R(SEXP fname, SEXP silent) {
  return ret;
}

SEXP XGDMatrixCreateFromMat_R(SEXP mat,
                              SEXP missing) {
XGB_DLL SEXP XGDMatrixCreateFromMat_R(SEXP mat, SEXP missing, SEXP n_threads) {
  SEXP ret;
  R_API_BEGIN();
  SEXP dim = getAttrib(mat, R_DimSymbol);
@@ -77,14 +94,21 @@ SEXP XGDMatrixCreateFromMat_R(SEXP mat,
    din = REAL(mat);
  }
  std::vector<float> data(nrow * ncol);
#pragma omp parallel for schedule(static)
  dmlc::OMPException exc;
  int32_t threads = xgboost::common::OmpGetNumThreads(asInteger(n_threads));

#pragma omp parallel for schedule(static) num_threads(threads)
  for (omp_ulong i = 0; i < nrow; ++i) {
    exc.Run([&]() {
      for (size_t j = 0; j < ncol; ++j) {
        data[i * ncol +j] = is_int ? static_cast<float>(iin[i + nrow * j]) : din[i + nrow * j];
      }
    });
  }
  exc.Rethrow();
  DMatrixHandle handle;
  CHECK_CALL(XGDMatrixCreateFromMat(BeginPtr(data), nrow, ncol, asReal(missing), &handle));
  CHECK_CALL(XGDMatrixCreateFromMat_omp(BeginPtr(data), nrow, ncol,
                                        asReal(missing), &handle, threads));
  ret = PROTECT(R_MakeExternalPtr(handle, R_NilValue, R_NilValue));
  R_RegisterCFinalizerEx(ret, _DMatrixFinalizer, TRUE);
  R_API_END();
@@ -92,9 +116,7 @@ SEXP XGDMatrixCreateFromMat_R(SEXP mat,
  return ret;
}

SEXP XGDMatrixCreateFromCSC_R(SEXP indptr,
                              SEXP indices,
                              SEXP data,
XGB_DLL SEXP XGDMatrixCreateFromCSC_R(SEXP indptr, SEXP indices, SEXP data,
                                      SEXP num_row) {
  SEXP ret;
  R_API_BEGIN();
@@ -111,11 +133,15 @@ SEXP XGDMatrixCreateFromCSC_R(SEXP indptr,
  for (size_t i = 0; i < nindptr; ++i) {
    col_ptr_[i] = static_cast<size_t>(p_indptr[i]);
  }
  dmlc::OMPException exc;
#pragma omp parallel for schedule(static)
  for (int64_t i = 0; i < static_cast<int64_t>(ndata); ++i) {
    exc.Run([&]() {
      indices_[i] = static_cast<unsigned>(p_indices[i]);
      data_[i] = static_cast<float>(p_data[i]);
    });
  }
  exc.Rethrow();
  DMatrixHandle handle;
  CHECK_CALL(XGDMatrixCreateFromCSCEx(BeginPtr(col_ptr_), BeginPtr(indices_),
                                      BeginPtr(data_), nindptr, ndata,
@@ -127,7 +153,7 @@ SEXP XGDMatrixCreateFromCSC_R(SEXP indptr,
  return ret;
}

SEXP XGDMatrixSliceDMatrix_R(SEXP handle, SEXP idxset) {
XGB_DLL SEXP XGDMatrixSliceDMatrix_R(SEXP handle, SEXP idxset) {
  SEXP ret;
  R_API_BEGIN();
  int len = length(idxset);
@@ -147,7 +173,7 @@ SEXP XGDMatrixSliceDMatrix_R(SEXP handle, SEXP idxset) {
  return ret;
}

SEXP XGDMatrixSaveBinary_R(SEXP handle, SEXP fname, SEXP silent) {
XGB_DLL SEXP XGDMatrixSaveBinary_R(SEXP handle, SEXP fname, SEXP silent) {
  R_API_BEGIN();
  CHECK_CALL(XGDMatrixSaveBinary(R_ExternalPtrAddr(handle),
                                 CHAR(asChar(fname)),
@@ -156,16 +182,20 @@ SEXP XGDMatrixSaveBinary_R(SEXP handle, SEXP fname, SEXP silent) {
  return R_NilValue;
}

SEXP XGDMatrixSetInfo_R(SEXP handle, SEXP field, SEXP array) {
XGB_DLL SEXP XGDMatrixSetInfo_R(SEXP handle, SEXP field, SEXP array) {
  R_API_BEGIN();
  int len = length(array);
  const char *name = CHAR(asChar(field));
  dmlc::OMPException exc;
  if (!strcmp("group", name)) {
    std::vector<unsigned> vec(len);
#pragma omp parallel for schedule(static)
    for (int i = 0; i < len; ++i) {
      exc.Run([&]() {
        vec[i] = static_cast<unsigned>(INTEGER(array)[i]);
      });
    }
    exc.Rethrow();
    CHECK_CALL(XGDMatrixSetUIntInfo(R_ExternalPtrAddr(handle),
                                    CHAR(asChar(field)),
                                    BeginPtr(vec), len));
@@ -173,8 +203,11 @@ SEXP XGDMatrixSetInfo_R(SEXP handle, SEXP field, SEXP array) {
    std::vector<float> vec(len);
#pragma omp parallel for schedule(static)
    for (int i = 0; i < len; ++i) {
      exc.Run([&]() {
        vec[i] = REAL(array)[i];
      });
    }
    exc.Rethrow();
    CHECK_CALL(XGDMatrixSetFloatInfo(R_ExternalPtrAddr(handle),
                                     CHAR(asChar(field)),
                                     BeginPtr(vec), len));
@@ -183,7 +216,7 @@ SEXP XGDMatrixSetInfo_R(SEXP handle, SEXP field, SEXP array) {
  return R_NilValue;
}

SEXP XGDMatrixGetInfo_R(SEXP handle, SEXP field) {
XGB_DLL SEXP XGDMatrixGetInfo_R(SEXP handle, SEXP field) {
  SEXP ret;
  R_API_BEGIN();
  bst_ulong olen;
@@ -201,7 +234,7 @@ SEXP XGDMatrixGetInfo_R(SEXP handle, SEXP field) {
  return ret;
}

SEXP XGDMatrixNumRow_R(SEXP handle) {
XGB_DLL SEXP XGDMatrixNumRow_R(SEXP handle) {
  bst_ulong nrow;
  R_API_BEGIN();
  CHECK_CALL(XGDMatrixNumRow(R_ExternalPtrAddr(handle), &nrow));
@@ -209,7 +242,7 @@ SEXP XGDMatrixNumRow_R(SEXP handle) {
  return ScalarInteger(static_cast<int>(nrow));
}

SEXP XGDMatrixNumCol_R(SEXP handle) {
XGB_DLL SEXP XGDMatrixNumCol_R(SEXP handle) {
  bst_ulong ncol;
  R_API_BEGIN();
  CHECK_CALL(XGDMatrixNumCol(R_ExternalPtrAddr(handle), &ncol));
@@ -224,7 +257,7 @@ void _BoosterFinalizer(SEXP ext) {
  R_ClearExternalPtr(ext);
}

SEXP XGBoosterCreate_R(SEXP dmats) {
XGB_DLL SEXP XGBoosterCreate_R(SEXP dmats) {
  SEXP ret;
  R_API_BEGIN();
  int len = length(dmats);
@@ -241,7 +274,22 @@ SEXP XGBoosterCreate_R(SEXP dmats) {
  return ret;
}

SEXP XGBoosterSetParam_R(SEXP handle, SEXP name, SEXP val) {
XGB_DLL SEXP XGBoosterCreateInEmptyObj_R(SEXP dmats, SEXP R_handle) {
  R_API_BEGIN();
  int len = length(dmats);
  std::vector<void*> dvec;
  for (int i = 0; i < len; ++i) {
    dvec.push_back(R_ExternalPtrAddr(VECTOR_ELT(dmats, i)));
  }
  BoosterHandle handle;
  CHECK_CALL(XGBoosterCreate(BeginPtr(dvec), dvec.size(), &handle));
  R_SetExternalPtrAddr(R_handle, handle);
  R_RegisterCFinalizerEx(R_handle, _BoosterFinalizer, TRUE);
  R_API_END();
  return R_NilValue;
}

XGB_DLL SEXP XGBoosterSetParam_R(SEXP handle, SEXP name, SEXP val) {
  R_API_BEGIN();
  CHECK_CALL(XGBoosterSetParam(R_ExternalPtrAddr(handle),
                               CHAR(asChar(name)),
@@ -250,7 +298,7 @@ SEXP XGBoosterSetParam_R(SEXP handle, SEXP name, SEXP val) {
  return R_NilValue;
}

SEXP XGBoosterUpdateOneIter_R(SEXP handle, SEXP iter, SEXP dtrain) {
XGB_DLL SEXP XGBoosterUpdateOneIter_R(SEXP handle, SEXP iter, SEXP dtrain) {
  R_API_BEGIN();
  CHECK_CALL(XGBoosterUpdateOneIter(R_ExternalPtrAddr(handle),
                                    asInteger(iter),
@@ -259,17 +307,21 @@ SEXP XGBoosterUpdateOneIter_R(SEXP handle, SEXP iter, SEXP dtrain) {
  return R_NilValue;
}

SEXP XGBoosterBoostOneIter_R(SEXP handle, SEXP dtrain, SEXP grad, SEXP hess) {
XGB_DLL SEXP XGBoosterBoostOneIter_R(SEXP handle, SEXP dtrain, SEXP grad, SEXP hess) {
  R_API_BEGIN();
  CHECK_EQ(length(grad), length(hess))
      << "gradient and hess must have same length";
  int len = length(grad);
  std::vector<float> tgrad(len), thess(len);
  dmlc::OMPException exc;
#pragma omp parallel for schedule(static)
  for (int j = 0; j < len; ++j) {
    exc.Run([&]() {
      tgrad[j] = REAL(grad)[j];
      thess[j] = REAL(hess)[j];
    });
  }
  exc.Rethrow();
  CHECK_CALL(XGBoosterBoostOneIter(R_ExternalPtrAddr(handle),
                                   R_ExternalPtrAddr(dtrain),
                                   BeginPtr(tgrad), BeginPtr(thess),
@@ -278,7 +330,7 @@ SEXP XGBoosterBoostOneIter_R(SEXP handle, SEXP dtrain, SEXP grad, SEXP hess) {
  return R_NilValue;
}

SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evnames) {
XGB_DLL SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evnames) {
  const char *ret;
  R_API_BEGIN();
  CHECK_EQ(length(dmats), length(evnames))
@@ -303,7 +355,7 @@ SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evnames) {
  return mkString(ret);
}

SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask,
XGB_DLL SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask,
                        SEXP ntree_limit, SEXP training) {
  SEXP ret;
  R_API_BEGIN();
@@ -324,21 +376,60 @@ SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask,
  return ret;
}

SEXP XGBoosterLoadModel_R(SEXP handle, SEXP fname) {
XGB_DLL SEXP XGBoosterPredictFromDMatrix_R(SEXP handle, SEXP dmat, SEXP json_config) {
  SEXP r_out_shape;
  SEXP r_out_result;
  SEXP r_out;

  R_API_BEGIN();
  char const *c_json_config = CHAR(asChar(json_config));

  bst_ulong out_dim;
  bst_ulong const *out_shape;
  float const *out_result;
  CHECK_CALL(XGBoosterPredictFromDMatrix(R_ExternalPtrAddr(handle),
                                         R_ExternalPtrAddr(dmat), c_json_config,
                                         &out_shape, &out_dim, &out_result));

  r_out_shape = PROTECT(allocVector(INTSXP, out_dim));
  size_t len = 1;
  for (size_t i = 0; i < out_dim; ++i) {
    INTEGER(r_out_shape)[i] = out_shape[i];
    len *= out_shape[i];
  }
  r_out_result = PROTECT(allocVector(REALSXP, len));

#pragma omp parallel for
  for (omp_ulong i = 0; i < len; ++i) {
    REAL(r_out_result)[i] = out_result[i];
  }

  r_out = PROTECT(allocVector(VECSXP, 2));

  SET_VECTOR_ELT(r_out, 0, r_out_shape);
  SET_VECTOR_ELT(r_out, 1, r_out_result);

  R_API_END();
  UNPROTECT(3);

  return r_out;
}
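On the R side, the pair returned by this function is consumed as a shape vector plus a flat result vector; a sketch only, since this is an internal entry point (the JSON keys shown follow XGBoosterPredictFromDMatrix in c_api.h and are illustrative):

config <- '{"type": 0, "training": false, "strict_shape": false}'
out <- .Call(xgboost:::XGBoosterPredictFromDMatrix_R, bst$handle, dtest, config)
shape <- out[[1]]   # dimension vector written by the C code above
preds <- out[[2]]   # flat predictions; length equals prod(shape)
stopifnot(length(preds) == prod(shape))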

XGB_DLL SEXP XGBoosterLoadModel_R(SEXP handle, SEXP fname) {
  R_API_BEGIN();
  CHECK_CALL(XGBoosterLoadModel(R_ExternalPtrAddr(handle), CHAR(asChar(fname))));
  R_API_END();
  return R_NilValue;
}

SEXP XGBoosterSaveModel_R(SEXP handle, SEXP fname) {
XGB_DLL SEXP XGBoosterSaveModel_R(SEXP handle, SEXP fname) {
  R_API_BEGIN();
  CHECK_CALL(XGBoosterSaveModel(R_ExternalPtrAddr(handle), CHAR(asChar(fname))));
  R_API_END();
  return R_NilValue;
}

SEXP XGBoosterModelToRaw_R(SEXP handle) {
XGB_DLL SEXP XGBoosterModelToRaw_R(SEXP handle) {
  SEXP ret;
  R_API_BEGIN();
  bst_ulong olen;
@@ -353,7 +444,7 @@ SEXP XGBoosterModelToRaw_R(SEXP handle) {
  return ret;
}

SEXP XGBoosterLoadModelFromRaw_R(SEXP handle, SEXP raw) {
XGB_DLL SEXP XGBoosterLoadModelFromRaw_R(SEXP handle, SEXP raw) {
  R_API_BEGIN();
  CHECK_CALL(XGBoosterLoadModelFromBuffer(R_ExternalPtrAddr(handle),
                                          RAW(raw),
@@ -362,7 +453,7 @@ SEXP XGBoosterLoadModelFromRaw_R(SEXP handle, SEXP raw) {
  return R_NilValue;
}

SEXP XGBoosterSaveJsonConfig_R(SEXP handle) {
XGB_DLL SEXP XGBoosterSaveJsonConfig_R(SEXP handle) {
  const char* ret;
  R_API_BEGIN();
  bst_ulong len {0};
@@ -373,14 +464,14 @@ SEXP XGBoosterSaveJsonConfig_R(SEXP handle) {
  return mkString(ret);
}

SEXP XGBoosterLoadJsonConfig_R(SEXP handle, SEXP value) {
XGB_DLL SEXP XGBoosterLoadJsonConfig_R(SEXP handle, SEXP value) {
  R_API_BEGIN();
  XGBoosterLoadJsonConfig(R_ExternalPtrAddr(handle), CHAR(asChar(value)));
  CHECK_CALL(XGBoosterLoadJsonConfig(R_ExternalPtrAddr(handle), CHAR(asChar(value))));
  R_API_END();
  return R_NilValue;
}

SEXP XGBoosterSerializeToBuffer_R(SEXP handle) {
XGB_DLL SEXP XGBoosterSerializeToBuffer_R(SEXP handle) {
  SEXP ret;
  R_API_BEGIN();
  bst_ulong out_len;
@@ -395,16 +486,16 @@ SEXP XGBoosterSerializeToBuffer_R(SEXP handle) {
  return ret;
}

SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw) {
XGB_DLL SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw) {
  R_API_BEGIN();
  XGBoosterUnserializeFromBuffer(R_ExternalPtrAddr(handle),
  CHECK_CALL(XGBoosterUnserializeFromBuffer(R_ExternalPtrAddr(handle),
                                            RAW(raw),
  length(raw));
                                            length(raw)));
  R_API_END();
  return R_NilValue;
}

SEXP XGBoosterDumpModel_R(SEXP handle, SEXP fmap, SEXP with_stats, SEXP dump_format) {
XGB_DLL SEXP XGBoosterDumpModel_R(SEXP handle, SEXP fmap, SEXP with_stats, SEXP dump_format) {
  SEXP out;
  R_API_BEGIN();
  bst_ulong olen;
@@ -441,7 +532,7 @@ SEXP XGBoosterDumpModel_R(SEXP handle, SEXP fmap, SEXP with_stats, SEXP dump_for
  return out;
}

SEXP XGBoosterGetAttr_R(SEXP handle, SEXP name) {
XGB_DLL SEXP XGBoosterGetAttr_R(SEXP handle, SEXP name) {
  SEXP out;
  R_API_BEGIN();
  int success;
@@ -461,7 +552,7 @@ SEXP XGBoosterGetAttr_R(SEXP handle, SEXP name) {
  return out;
}

SEXP XGBoosterSetAttr_R(SEXP handle, SEXP name, SEXP val) {
XGB_DLL SEXP XGBoosterSetAttr_R(SEXP handle, SEXP name, SEXP val) {
  R_API_BEGIN();
  const char *v = isNull(val) ? nullptr : CHAR(asChar(val));
  CHECK_CALL(XGBoosterSetAttr(R_ExternalPtrAddr(handle),
@@ -470,7 +561,7 @@ SEXP XGBoosterSetAttr_R(SEXP handle, SEXP name, SEXP val) {
  return R_NilValue;
}

SEXP XGBoosterGetAttrNames_R(SEXP handle) {
XGB_DLL SEXP XGBoosterGetAttrNames_R(SEXP handle) {
  SEXP out;
  R_API_BEGIN();
  bst_ulong len;
@@ -489,3 +580,51 @@ SEXP XGBoosterGetAttrNames_R(SEXP handle) {
  UNPROTECT(1);
  return out;
}

XGB_DLL SEXP XGBoosterFeatureScore_R(SEXP handle, SEXP json_config) {
  SEXP out_features_sexp;
  SEXP out_scores_sexp;
  SEXP out_shape_sexp;
  SEXP r_out;

  R_API_BEGIN();
  char const *c_json_config = CHAR(asChar(json_config));
  bst_ulong out_n_features;
  char const **out_features;

  bst_ulong out_dim;
  bst_ulong const *out_shape;
  float const *out_scores;

  CHECK_CALL(XGBoosterFeatureScore(R_ExternalPtrAddr(handle), c_json_config,
                                   &out_n_features, &out_features,
                                   &out_dim, &out_shape, &out_scores));

  out_shape_sexp = PROTECT(allocVector(INTSXP, out_dim));
  size_t len = 1;
  for (size_t i = 0; i < out_dim; ++i) {
    INTEGER(out_shape_sexp)[i] = out_shape[i];
    len *= out_shape[i];
  }

  out_scores_sexp = PROTECT(allocVector(REALSXP, len));
#pragma omp parallel for
  for (omp_ulong i = 0; i < len; ++i) {
    REAL(out_scores_sexp)[i] = out_scores[i];
  }

  out_features_sexp = PROTECT(allocVector(STRSXP, out_n_features));
  for (size_t i = 0; i < out_n_features; ++i) {
    SET_STRING_ELT(out_features_sexp, i, mkChar(out_features[i]));
  }

  r_out = PROTECT(allocVector(VECSXP, 3));
  SET_VECTOR_ELT(r_out, 0, out_features_sexp);
  SET_VECTOR_ELT(r_out, 1, out_shape_sexp);
  SET_VECTOR_ELT(r_out, 2, out_scores_sexp);

  R_API_END();
  UNPROTECT(4);

  return r_out;
}

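The three-element list assembled above unpacks naturally on the R side; again only a sketch of an internal call (the importance_type key mirrors XGBoosterFeatureScore in c_api.h):

res <- .Call(xgboost:::XGBoosterFeatureScore_R, bst$handle,
             '{"importance_type": "gain"}')
features <- res[[1]]; shape <- res[[2]]; scores <- res[[3]]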
@@ -21,6 +21,19 @@
 */
XGB_DLL SEXP XGCheckNullPtr_R(SEXP handle);

/*!
 * \brief Set global configuration
 * \param json_str a JSON string representing the list of key-value pairs
 * \return R_NilValue
 */
XGB_DLL SEXP XGBSetGlobalConfig_R(SEXP json_str);

/*!
 * \brief Get global configuration
 * \return JSON string
 */
XGB_DLL SEXP XGBGetGlobalConfig_R();

/*!
 * \brief load a data matrix
 * \param fname name of the content
@@ -34,10 +47,12 @@ XGB_DLL SEXP XGDMatrixCreateFromFile_R(SEXP fname, SEXP silent);
 * This assumes the matrix is stored in column major format
 * \param data R Matrix object
 * \param missing which value to represent missing value
 * \param n_threads Number of threads used to construct DMatrix from dense matrix.
 * \return created dmatrix
 */
XGB_DLL SEXP XGDMatrixCreateFromMat_R(SEXP mat,
                                      SEXP missing);
                                      SEXP missing,
                                      SEXP n_threads);
/*!
 * \brief create a matrix content from CSC format
 * \param indptr pointer to column headers
@@ -103,6 +118,14 @@ XGB_DLL SEXP XGDMatrixNumCol_R(SEXP handle);
 */
XGB_DLL SEXP XGBoosterCreate_R(SEXP dmats);


/*!
 * \brief create xgboost learner, saving the pointer into an existing R object
 * \param dmats a list of dmatrix handles that will be cached
 * \param R_handle a clean R external pointer (not holding any object)
 */
XGB_DLL SEXP XGBoosterCreateInEmptyObj_R(SEXP dmats, SEXP R_handle);

/*!
 * \brief set parameters
 * \param handle handle
@@ -143,7 +166,7 @@ XGB_DLL SEXP XGBoosterBoostOneIter_R(SEXP handle, SEXP dtrain, SEXP grad, SEXP h
XGB_DLL SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evnames);

/*!
 * \brief make prediction based on dmat
 * \brief (Deprecated) make prediction based on dmat
 * \param handle handle
 * \param dmat data matrix
 * \param option_mask output_margin:1 predict_leaf:2
@@ -152,6 +175,16 @@ XGB_DLL SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evn
 */
XGB_DLL SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask,
                                SEXP ntree_limit, SEXP training);

/*!
 * \brief Run prediction on DMatrix, replacing `XGBoosterPredict_R`
 * \param handle handle
 * \param dmat data matrix
 * \param json_config See `XGBoosterPredictFromDMatrix` in xgboost c_api.h
 *
 * \return A list containing 2 vectors, first one for shape while second one for prediction result.
 */
XGB_DLL SEXP XGBoosterPredictFromDMatrix_R(SEXP handle, SEXP dmat, SEXP json_config);
/*!
 * \brief load model from existing file
 * \param handle handle
@@ -244,4 +277,12 @@ XGB_DLL SEXP XGBoosterSetAttr_R(SEXP handle, SEXP name, SEXP val);
 */
XGB_DLL SEXP XGBoosterGetAttrNames_R(SEXP handle);

/*!
 * \brief Get feature scores from the model.
 * \param json_config See `XGBoosterFeatureScore` in xgboost c_api.h
 * \return A vector with the first element as feature names, second element as shape of
 *         feature scores and third element as feature scores.
 */
XGB_DLL SEXP XGBoosterFeatureScore_R(SEXP handle, SEXP json_config);

#endif  // XGBOOST_WRAPPER_R_H_  // NOLINT(*)

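The new n_threads argument threads through from the R-level DMatrix constructor; a sketch (the R-level argument name nthread is an assumption here, not confirmed by this diff):

m <- matrix(rnorm(200), nrow = 50)
dtrain <- xgb.DMatrix(m, label = rnorm(50), nthread = 2)  # nthread name is assumed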
@@ -13,27 +13,10 @@ void CustomLogMessage::Log(const std::string& msg) {
}
}  // namespace dmlc

// implements rabit error handling.
extern "C" {
void XGBoostAssert_R(int exp, const char *fmt, ...);
void XGBoostCheck_R(int exp, const char *fmt, ...);
}

namespace rabit {
namespace utils {
extern "C" {
void (*Printf)(const char *fmt, ...) = Rprintf;
void (*Assert)(int exp, const char *fmt, ...) = XGBoostAssert_R;
void (*Check)(int exp, const char *fmt, ...) = XGBoostCheck_R;
void (*Error)(const char *fmt, ...) = error;
}
}
}

namespace xgboost {
ConsoleLogger::~ConsoleLogger() {
  if (cur_verbosity_ == LogVerbosity::kIgnore ||
      cur_verbosity_ <= global_verbosity_) {
      cur_verbosity_ <= GlobalVerbosity()) {
    dmlc::CustomLogMessage::Log(log_stream_.str());
  }
}

105
R-package/tests/helper_scripts/generate_models.R
Normal file
@@ -0,0 +1,105 @@
# Script to generate reference models. The reference models are used to test backward compatibility
# of saved model files from XGBoost version 0.90 and 1.0.x.
library(xgboost)
library(Matrix)

set.seed(0)
metadata <- list(
  kRounds = 2,
  kRows = 1000,
  kCols = 4,
  kForests = 2,
  kMaxDepth = 2,
  kClasses = 3
)
X <- Matrix(data = rnorm(metadata$kRows * metadata$kCols), nrow = metadata$kRows,
            ncol = metadata$kCols, sparse = TRUE)
w <- runif(metadata$kRows)

version <- packageVersion('xgboost')
target_dir <- 'models'

save_booster <- function (booster, model_name) {
  booster_bin <- function (model_name) {
    return (file.path(target_dir, paste('xgboost-', version, '.', model_name, '.bin', sep = '')))
  }
  booster_json <- function (model_name) {
    return (file.path(target_dir, paste('xgboost-', version, '.', model_name, '.json', sep = '')))
  }
  booster_rds <- function (model_name) {
    return (file.path(target_dir, paste('xgboost-', version, '.', model_name, '.rds', sep = '')))
  }
  xgb.save(booster, booster_bin(model_name))
  saveRDS(booster, booster_rds(model_name))
  if (version >= '1.0.0') {
    xgb.save(booster, booster_json(model_name))
  }
}

generate_regression_model <- function () {
  print('Regression')
  y <- rnorm(metadata$kRows)

  data <- xgb.DMatrix(X, label = y)
  params <- list(tree_method = 'hist', num_parallel_tree = metadata$kForests,
                 max_depth = metadata$kMaxDepth)
  booster <- xgb.train(params, data, nrounds = metadata$kRounds)
  save_booster(booster, 'reg')
}

generate_logistic_model <- function () {
  print('Binary classification with logistic loss')
  y <- sample(0:1, size = metadata$kRows, replace = TRUE)
  stopifnot(max(y) == 1, min(y) == 0)

  objective <- c('binary:logistic', 'binary:logitraw')
  name <- c('logit', 'logitraw')

  for (i in seq_len(length(objective))) {
    data <- xgb.DMatrix(X, label = y, weight = w)
    params <- list(tree_method = 'hist', num_parallel_tree = metadata$kForests,
                   max_depth = metadata$kMaxDepth, objective = objective[i])
    booster <- xgb.train(params, data, nrounds = metadata$kRounds)
    save_booster(booster, name[i])
  }
}

generate_classification_model <- function () {
  print('Multi-class classification')
  y <- sample(0:(metadata$kClasses - 1), size = metadata$kRows, replace = TRUE)
  stopifnot(max(y) == metadata$kClasses - 1, min(y) == 0)

  data <- xgb.DMatrix(X, label = y, weight = w)
  params <- list(num_class = metadata$kClasses, tree_method = 'hist',
                 num_parallel_tree = metadata$kForests, max_depth = metadata$kMaxDepth,
                 objective = 'multi:softmax')
  booster <- xgb.train(params, data, nrounds = metadata$kRounds)
  save_booster(booster, 'cls')
}

generate_ranking_model <- function () {
  print('Learning to rank')
  y <- sample(0:4, size = metadata$kRows, replace = TRUE)
  stopifnot(max(y) == 4, min(y) == 0)
  kGroups <- 20
  w <- runif(kGroups)
  g <- rep(50, times = kGroups)

  data <- xgb.DMatrix(X, label = y, group = g)
  # setinfo(data, 'weight', w)
  # ^^^ does not work in version <= 1.1.0; see https://github.com/dmlc/xgboost/issues/5942
  # So call low-level function XGDMatrixSetInfo_R directly. Since this function is not an exported
  # symbol, use the triple-colon operator.
  .Call(xgboost:::XGDMatrixSetInfo_R, data, 'weight', as.numeric(w))
  params <- list(objective = 'rank:ndcg', num_parallel_tree = metadata$kForests,
                 tree_method = 'hist', max_depth = metadata$kMaxDepth)
  booster <- xgb.train(params, data, nrounds = metadata$kRounds)
  save_booster(booster, 'ltr')
}

dir.create(target_dir)

invisible(generate_regression_model())
invisible(generate_logistic_model())
invisible(generate_classification_model())
invisible(generate_ranking_model())
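The artifacts written by this script feed the backward-compatibility tests, which re-load them along these lines (file name pattern illustrative, following save_booster above):

bst_old <- xgb.load(file.path('models', paste0('xgboost-', version, '.reg.bin')))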
71
R-package/tests/helper_scripts/run_lint.R
Normal file
@@ -0,0 +1,71 @@
library(lintr)
library(crayon)

my_linters <- list(
  absolute_path_linter = lintr::absolute_path_linter,
  assignment_linter = lintr::assignment_linter,
  closed_curly_linter = lintr::closed_curly_linter,
  commas_linter = lintr::commas_linter,
  equals_na = lintr::equals_na_linter,
  infix_spaces_linter = lintr::infix_spaces_linter,
  line_length_linter = lintr::line_length_linter,
  no_tab_linter = lintr::no_tab_linter,
  object_usage_linter = lintr::object_usage_linter,
  object_length_linter = lintr::object_length_linter,
  open_curly_linter = lintr::open_curly_linter,
  semicolon = lintr::semicolon_terminator_linter,
  seq = lintr::seq_linter,
  spaces_inside_linter = lintr::spaces_inside_linter,
  spaces_left_parentheses_linter = lintr::spaces_left_parentheses_linter,
  trailing_blank_lines_linter = lintr::trailing_blank_lines_linter,
  trailing_whitespace_linter = lintr::trailing_whitespace_linter,
  true_false = lintr::T_and_F_symbol_linter,
  unneeded_concatenation = lintr::unneeded_concatenation_linter
)

results <- lapply(
  list.files(path = '.', pattern = '\\.[Rr]$', recursive = TRUE),
  function (r_file) {
    cat(sprintf("Processing %s ...\n", r_file))
    list(r_file = r_file,
         output = lintr::lint(filename = r_file, linters = my_linters))
  })
num_issue <- Reduce(sum, lapply(results, function (e) length(e$output)))

lint2str <- function(lint_entry) {
  color <- function(type) {
    switch(type,
           "warning" = crayon::magenta,
           "error" = crayon::red,
           "style" = crayon::blue,
           crayon::bold
    )
  }

  paste0(
    lapply(lint_entry$output,
           function (lint_line) {
             paste0(
               crayon::bold(lint_entry$r_file, ":",
                            as.character(lint_line$line_number), ":",
                            as.character(lint_line$column_number), ": ", sep = ""),
               color(lint_line$type)(lint_line$type, ": ", sep = ""),
               crayon::bold(lint_line$message), "\n",
               lint_line$line, "\n",
               lintr:::highlight_string(lint_line$message, lint_line$column_number, lint_line$ranges),
               "\n",
               collapse = "")
           }),
    collapse = "")
}

if (num_issue > 0) {
  cat(sprintf('R linters found %d issues:\n', num_issue))
  for (entry in results) {
    if (length(entry$output)) {
      cat(paste0('**** ', crayon::bold(entry$r_file), '\n'))
      cat(paste0(lint2str(entry), collapse = ''))
    }
  }
  quit(save = 'no', status = 1)  # Signal error to parent shell
}
@@ -1,4 +1,4 @@
library(testthat)
library(xgboost)

test_check("xgboost")
test_check("xgboost", reporter = ProgressReporter)

@@ -9,15 +9,16 @@ test <- agaricus.test
|
||||
set.seed(1994)
|
||||
|
||||
# disable some tests for Win32
|
||||
windows_flag = .Platform$OS.type == "windows" &&
|
||||
windows_flag <- .Platform$OS.type == "windows" &&
|
||||
.Machine$sizeof.pointer != 8
|
||||
solaris_flag = (Sys.info()['sysname'] == "SunOS")
|
||||
solaris_flag <- (Sys.info()['sysname'] == "SunOS")
|
||||
|
||||
test_that("train and predict binary classification", {
|
||||
nrounds = 2
|
||||
nrounds <- 2
|
||||
expect_output(
|
||||
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
|
||||
eta = 1, nthread = 2, nrounds = nrounds, objective = "binary:logistic")
|
||||
eta = 1, nthread = 2, nrounds = nrounds, objective = "binary:logistic",
|
||||
eval_metric = "error")
|
||||
, "train-error")
|
||||
expect_equal(class(bst), "xgb.Booster")
|
||||
expect_equal(bst$niter, nrounds)
|
||||
@@ -33,11 +34,15 @@ test_that("train and predict binary classification", {
|
||||
err_pred1 <- sum((pred1 > 0.5) != train$label) / length(train$label)
|
||||
err_log <- bst$evaluation_log[1, train_error]
|
||||
expect_lt(abs(err_pred1 - err_log), 10e-6)
|
||||
|
||||
pred2 <- predict(bst, train$data, iterationrange = c(1, 2))
|
||||
expect_length(pred1, 6513)
|
||||
expect_equal(pred1, pred2)
|
||||
})
|
||||
|
||||
test_that("parameter validation works", {
|
||||
p <- list(foo = "bar")
|
||||
nrounds = 1
|
||||
nrounds <- 1
|
||||
set.seed(1994)
|
||||
|
||||
d <- cbind(
|
||||
@@ -65,12 +70,12 @@ test_that("parameter validation works", {
|
||||
xgb.train(params = params, data = dtrain, nrounds = nrounds))
|
||||
print(output)
|
||||
}
|
||||
expect_output(incorrect(), "bar, foo")
|
||||
expect_output(incorrect(), '\\\\"bar\\\\", \\\\"foo\\\\"')
|
||||
})
|
||||
|
||||
|
||||
test_that("dart prediction works", {
|
||||
nrounds = 32
|
||||
nrounds <- 32
|
||||
set.seed(1994)
|
||||
|
||||
d <- cbind(
|
||||
@@ -122,7 +127,7 @@ test_that("train and predict softprob", {
|
||||
expect_output(
|
||||
bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
|
||||
max_depth = 3, eta = 0.5, nthread = 2, nrounds = 5,
|
||||
objective = "multi:softprob", num_class=3)
|
||||
objective = "multi:softprob", num_class = 3, eval_metric = "merror")
|
||||
, "train-merror")
|
||||
expect_false(is.null(bst$evaluation_log))
|
||||
expect_lt(bst$evaluation_log[, min(train_merror)], 0.025)
|
||||
@@ -142,6 +147,24 @@ test_that("train and predict softprob", {
|
||||
pred_labels <- max.col(mpred) - 1
|
||||
err <- sum(pred_labels != lb) / length(lb)
|
||||
expect_equal(bst$evaluation_log[1, train_merror], err, tolerance = 5e-6)
|
||||
|
||||
mpred1 <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE, iterationrange = c(1, 2))
|
||||
expect_equal(mpred, mpred1)
|
||||
|
||||
d <- cbind(
|
||||
x1 = rnorm(100),
|
||||
x2 = rnorm(100),
|
||||
x3 = rnorm(100)
|
||||
)
|
||||
y <- sample.int(10, 100, replace = TRUE) - 1
|
||||
dtrain <- xgb.DMatrix(data = d, info = list(label = y))
|
||||
booster <- xgb.train(
|
||||
params = list(tree_method = "hist"), data = dtrain, nrounds = 4, num_class = 10,
|
||||
objective = "multi:softprob"
|
||||
)
|
||||
predt <- predict(booster, as.matrix(d), reshape = TRUE, strict_shape = FALSE)
|
||||
expect_equal(ncol(predt), 10)
|
||||
expect_equal(rowSums(predt), rep(1, 100), tolerance = 1e-7)
|
||||
})
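The block just added to this test relies on a property of `multi:softprob` output: with `reshape = TRUE`, `predict()` returns an `n x num_class` probability matrix whose rows sum to 1. A minimal sketch of that invariant, reusing the `booster` and matrix `d` built in the added lines above (a sketch, not part of the diff):

```R
# 'booster' and 'd' come from the added test block above
# (a 10-class multi:softprob model on random data).
predt <- predict(booster, as.matrix(d), reshape = TRUE)
stopifnot(ncol(predt) == 10)                    # one column per class
stopifnot(all(abs(rowSums(predt) - 1) < 1e-6))  # softmax rows sum to 1
```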
 
 test_that("train and predict softmax", {

@@ -150,7 +173,7 @@ test_that("train and predict softmax", {
   expect_output(
     bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
                    max_depth = 3, eta = 0.5, nthread = 2, nrounds = 5,
-                   objective = "multi:softmax", num_class=3)
+                   objective = "multi:softmax", num_class = 3, eval_metric = "merror")
   , "train-merror")
   expect_false(is.null(bst$evaluation_log))
   expect_lt(bst$evaluation_log[, min(train_merror)], 0.025)

@@ -167,7 +190,7 @@ test_that("train and predict RF", {
   lb <- train$label
   # single iteration
   bst <- xgboost(data = train$data, label = lb, max_depth = 5,
-                 nthread = 2, nrounds = 1, objective = "binary:logistic",
+                 nthread = 2, nrounds = 1, objective = "binary:logistic", eval_metric = "error",
                  num_parallel_tree = 20, subsample = 0.6, colsample_bytree = 0.1)
   expect_equal(bst$niter, 1)
   expect_equal(xgb.ntree(bst), 20)

@@ -181,10 +204,8 @@ test_that("train and predict RF", {
   pred_err_20 <- sum((pred > 0.5) != lb) / length(lb)
   expect_equal(pred_err_20, pred_err)
 
-  #pred <- predict(bst, train$data, ntreelimit = 1)
-  #pred_err_1 <- sum((pred > 0.5) != lb)/length(lb)
-  #expect_lt(pred_err, pred_err_1)
-  #expect_lt(pred_err, 0.08)
+  pred1 <- predict(bst, train$data, iterationrange = c(1, 2))
+  expect_equal(pred, pred1)
 })
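The commented-out `ntreelimit` lines are dropped in favor of `iterationrange`, which selects boosting rounds by a 1-based, right-open range, so `c(1, 2)` predicts with only the first round. A hedged sketch, assuming the trained booster `bst` and `train` data from the test above:

```R
# Predict using only the first boosting round; same intent as the removed
# ntreelimit = 1 call, expressed with the newer iterationrange argument.
pred_first <- predict(bst, train$data, iterationrange = c(1, 2))
```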
 
 test_that("train and predict RF with softprob", {

@@ -193,7 +214,8 @@ test_that("train and predict RF with softprob", {
   set.seed(11)
   bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
                  max_depth = 3, eta = 0.9, nthread = 2, nrounds = nrounds,
-                 objective = "multi:softprob", num_class=3, verbose = 0,
+                 objective = "multi:softprob", eval_metric = "merror",
+                 num_class = 3, verbose = 0,
                  num_parallel_tree = 4, subsample = 0.5, colsample_bytree = 0.5)
   expect_equal(bst$niter, 15)
   expect_equal(xgb.ntree(bst), 15 * 3 * 4)

@@ -223,7 +245,7 @@ test_that("use of multiple eval metrics works", {
 
 test_that("training continuation works", {
   dtrain <- xgb.DMatrix(train$data, label = train$label)
-  watchlist = list(train=dtrain)
+  watchlist <- list(train = dtrain)
   param <- list(objective = "binary:logistic", max_depth = 2, eta = 1, nthread = 2)
 
   # for the reference, use 4 iterations at once:

@@ -245,17 +267,18 @@ test_that("training continuation works", {
   expect_equal(bst$raw, bst2$raw)
   expect_equal(dim(bst2$evaluation_log), c(2, 2))
   # test continuing from a model in file
-  xgb.save(bst1, "xgboost.model")
-  bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, xgb_model = "xgboost.model")
+  xgb.save(bst1, "xgboost.json")
+  bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, xgb_model = "xgboost.json")
   if (!windows_flag && !solaris_flag)
     expect_equal(bst$raw, bst2$raw)
   expect_equal(dim(bst2$evaluation_log), c(2, 2))
+  file.remove("xgboost.json")
 })
 
 test_that("model serialization works", {
   out_path <- "model_serialization"
   dtrain <- xgb.DMatrix(train$data, label = train$label)
-  watchlist = list(train=dtrain)
+  watchlist <- list(train = dtrain)
   param <- list(objective = "binary:logistic")
   booster <- xgb.train(param, dtrain, nrounds = 4, watchlist)
   raw <- xgb.serialize(booster)

@@ -273,7 +296,7 @@ test_that("xgb.cv works", {
   expect_output(
     cv <- xgb.cv(data = train$data, label = train$label, max_depth = 2, nfold = 5,
                  eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic",
-                 verbose=TRUE)
+                 eval_metric = "error", verbose = TRUE)
   , "train-error:")
   expect_is(cv, 'xgb.cv.synchronous')
   expect_false(is.null(cv$evaluation_log))

@@ -298,7 +321,7 @@ test_that("xgb.cv works with stratified folds", {
                eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic",
                verbose = TRUE, stratified = TRUE)
   # Stratified folds should result in a different evaluation logs
-  expect_true(all(cv$evaluation_log[, test_error_mean] != cv2$evaluation_log[, test_error_mean]))
+  expect_true(all(cv$evaluation_log[, test_logloss_mean] != cv2$evaluation_log[, test_logloss_mean]))
 })
 
 test_that("train and predict with non-strict classes", {

@@ -328,7 +351,7 @@ test_that("train and predict with non-strict classes", {
   expect_error(pr <- predict(bst, train_dense), regexp = NA)
   expect_equal(pr0, pr)
 
-  # when someone inhertis from xgb.Booster, it should still be possible to use it as xgb.Booster
+  # when someone inherits from xgb.Booster, it should still be possible to use it as xgb.Booster
   class(bst) <- c('super.Booster', 'xgb.Booster')
   expect_error(pr <- predict(bst, train_dense), regexp = NA)
   expect_equal(pr0, pr)

@@ -338,12 +361,12 @@ test_that("max_delta_step works", {
   dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
   watchlist <- list(train = dtrain)
   param <- list(objective = "binary:logistic", eval_metric = "logloss", max_depth = 2, nthread = 2, eta = 0.5)
-  nrounds = 5
+  nrounds <- 5
   # model with no restriction on max_delta_step
   bst1 <- xgb.train(param, dtrain, nrounds, watchlist, verbose = 1)
   # model with restricted max_delta_step
   bst2 <- xgb.train(param, dtrain, nrounds, watchlist, verbose = 1, max_delta_step = 1)
-  # the no-restriction model is expected to have consistently lower loss during the initial interations
+  # the no-restriction model is expected to have consistently lower loss during the initial iterations
   expect_true(all(bst1$evaluation_log$train_logloss < bst2$evaluation_log$train_logloss))
   expect_lt(mean(bst1$evaluation_log$train_logloss) / mean(bst2$evaluation_log$train_logloss), 0.8)
 })

@@ -382,3 +405,57 @@ test_that("Configuration works", {
   reloaded_config <- xgb.config(bst)
   expect_equal(config, reloaded_config);
 })
+
+test_that("strict_shape works", {
+  n_rounds <- 2
+
+  test_strict_shape <- function(bst, X, n_groups) {
+    predt <- predict(bst, X, strict_shape = TRUE)
+    margin <- predict(bst, X, outputmargin = TRUE, strict_shape = TRUE)
+    contri <- predict(bst, X, predcontrib = TRUE, strict_shape = TRUE)
+    interact <- predict(bst, X, predinteraction = TRUE, strict_shape = TRUE)
+    leaf <- predict(bst, X, predleaf = TRUE, strict_shape = TRUE)
+
+    n_rows <- nrow(X)
+    n_cols <- ncol(X)
+
+    expect_equal(dim(predt), c(n_groups, n_rows))
+    expect_equal(dim(margin), c(n_groups, n_rows))
+    expect_equal(dim(contri), c(n_cols + 1, n_groups, n_rows))
+    expect_equal(dim(interact), c(n_cols + 1, n_cols + 1, n_groups, n_rows))
+    expect_equal(dim(leaf), c(1, n_groups, n_rounds, n_rows))
+
+    if (n_groups != 1) {
+      for (g in seq_len(n_groups)) {
+        expect_lt(max(abs(colSums(contri[, g, ]) - margin[g, ])), 1e-5)
+      }
+    }
+  }
+
+  test_iris <- function() {
+    y <- as.numeric(iris$Species) - 1
+    X <- as.matrix(iris[, -5])
+
+    bst <- xgboost(data = X, label = y,
+                   max_depth = 2, nrounds = n_rounds,
+                   objective = "multi:softprob", num_class = 3, eval_metric = "merror")
+
+    test_strict_shape(bst, X, 3)
+  }
+
+
+  test_agaricus <- function() {
+    data(agaricus.train, package = 'xgboost')
+    X <- agaricus.train$data
+    y <- agaricus.train$label
+
+    bst <- xgboost(data = X, label = y, max_depth = 2,
+                   nrounds = n_rounds, objective = "binary:logistic",
+                   eval_metric = 'error', eval_metric = 'auc', eval_metric = "logloss")
+
+    test_strict_shape(bst, X, 1)
+  }
+
+  test_iris()
+  test_agaricus()
+})
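The new `strict_shape` test pins down the documented output layout: with `strict_shape = TRUE`, `predict()` always returns an array whose dimensions do not collapse, for example `(n_groups, n_rows)` for plain predictions. A minimal sketch on the agaricus demo data bundled with the package:

```R
library(xgboost)
data(agaricus.train, package = "xgboost")
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
               nrounds = 2, objective = "binary:logistic", verbose = 0)
p <- predict(bst, agaricus.train$data, strict_shape = TRUE)
dim(p)  # c(1, 6513): one group for a binary model, one entry per row
```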
 
@@ -2,6 +2,7 @@
 require(xgboost)
 require(data.table)
+require(titanic)
 
 context("callbacks")
 

@@ -21,12 +22,13 @@ ltrain <- add.noise(train$label, 0.2)
 ltest <- add.noise(test$label, 0.2)
 dtrain <- xgb.DMatrix(train$data, label = ltrain)
 dtest <- xgb.DMatrix(test$data, label = ltest)
-watchlist = list(train=dtrain, test=dtest)
+watchlist <- list(train = dtrain, test = dtest)
 
 
 err <- function(label, pr) sum((pr > 0.5) != label) / length(label)
 
-param <- list(objective = "binary:logistic", max_depth = 2, nthread = 2)
+param <- list(objective = "binary:logistic", eval_metric = "error",
+              max_depth = 2, nthread = 2)
 
 
 test_that("cb.print.evaluation works as expected", {

@@ -105,7 +107,8 @@ test_that("cb.evaluation.log works as expected", {
 })
 
 
-param <- list(objective = "binary:logistic", max_depth = 4, nthread = 2)
+param <- list(objective = "binary:logistic", eval_metric = "error",
+              max_depth = 4, nthread = 2)
 
 test_that("can store evaluation_log without printing", {
   expect_silent(

@@ -173,16 +176,16 @@ test_that("cb.reset.parameters works as expected", {
 })
 
 test_that("cb.save.model works as expected", {
-  files <- c('xgboost_01.model', 'xgboost_02.model', 'xgboost.model')
+  files <- c('xgboost_01.json', 'xgboost_02.json', 'xgboost.json')
   for (f in files) if (file.exists(f)) file.remove(f)
 
   bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 1, verbose = 0,
-                   save_period = 1, save_name = "xgboost_%02d.model")
-  expect_true(file.exists('xgboost_01.model'))
-  expect_true(file.exists('xgboost_02.model'))
-  b1 <- xgb.load('xgboost_01.model')
+                   save_period = 1, save_name = "xgboost_%02d.json")
+  expect_true(file.exists('xgboost_01.json'))
+  expect_true(file.exists('xgboost_02.json'))
+  b1 <- xgb.load('xgboost_01.json')
   expect_equal(xgb.ntree(b1), 1)
-  b2 <- xgb.load('xgboost_02.model')
+  b2 <- xgb.load('xgboost_02.json')
   expect_equal(xgb.ntree(b2), 2)
 
   xgb.config(b2) <- xgb.config(bst)

@@ -191,9 +194,9 @@ test_that("cb.save.model works as expected", {
 
   # save_period = 0 saves the last iteration's model
   bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 1, verbose = 0,
-                   save_period = 0)
-  expect_true(file.exists('xgboost.model'))
-  b2 <- xgb.load('xgboost.model')
+                   save_period = 0, save_name = 'xgboost.json')
+  expect_true(file.exists('xgboost.json'))
+  b2 <- xgb.load('xgboost.json')
   xgb.config(b2) <- xgb.config(bst)
   expect_equal(bst$raw, b2$raw)
 

@@ -236,7 +239,7 @@ test_that("early stopping xgb.train works", {
 test_that("early stopping using a specific metric works", {
   set.seed(11)
   expect_output(
-    bst <- xgb.train(param, dtrain, nrounds = 20, watchlist, eta = 0.6,
+    bst <- xgb.train(param[-2], dtrain, nrounds = 20, watchlist, eta = 0.6,
                      eval_metric = "logloss", eval_metric = "auc",
                      callbacks = list(cb.early.stop(stopping_rounds = 3, maximize = FALSE,
                                                     metric_name = 'test_logloss')))

@@ -252,6 +255,26 @@ test_that("early stopping using a specific metric works", {
   expect_equal(logloss_log, logloss_pred, tolerance = 1e-5)
 })
 
+test_that("early stopping works with titanic", {
+  # This test was inspired by https://github.com/dmlc/xgboost/issues/5935
+  # It catches possible issues on noLD R
+  titanic <- titanic::titanic_train
+  titanic$Pclass <- as.factor(titanic$Pclass)
+  dtx <- model.matrix(~ 0 + ., data = titanic[, c("Pclass", "Sex")])
+  dty <- titanic$Survived
+
+  xgboost::xgboost(
+    data = dtx,
+    label = dty,
+    objective = "binary:logistic",
+    eval_metric = "auc",
+    nrounds = 100,
+    early_stopping_rounds = 3
+  )
+
+  expect_true(TRUE) # should not crash
+})
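The titanic test exercises `early_stopping_rounds`, which halts training once the last evaluation metric has not improved for the given number of rounds. A sketch of the same mechanism on the agaricus demo data (a minimal sketch, not the test itself):

```R
library(xgboost)
data(agaricus.train, package = "xgboost")
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
               objective = "binary:logistic", eval_metric = "auc",
               nrounds = 100, early_stopping_rounds = 3, verbose = 0)
bst$best_iteration  # the round with the best AUC seen before stopping
```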
+
 test_that("early stopping xgb.cv works", {
   set.seed(11)
   expect_output(

@@ -267,7 +290,7 @@ test_that("early stopping xgb.cv works", {
 
 test_that("prediction in xgb.cv works", {
   set.seed(11)
-  nrounds = 4
+  nrounds <- 4
   cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.5, nrounds = nrounds, prediction = TRUE, verbose = 0)
   expect_false(is.null(cv$evaluation_log))
   expect_false(is.null(cv$pred))
 

R-package/tests/testthat/test_config.R (new file, 21 lines)
@@ -0,0 +1,21 @@
+context('Test global configuration')
+
+test_that('Global configuration works with verbosity', {
+  old_verbosity <- xgb.get.config()$verbosity
+  for (v in c(0, 1, 2, 3)) {
+    xgb.set.config(verbosity = v)
+    expect_equal(xgb.get.config()$verbosity, v)
+  }
+  xgb.set.config(verbosity = old_verbosity)
+  expect_equal(xgb.get.config()$verbosity, old_verbosity)
+})
+
+test_that('Global configuration works with use_rmm flag', {
+  old_use_rmm_flag <- xgb.get.config()$use_rmm
+  for (v in c(TRUE, FALSE)) {
+    xgb.set.config(use_rmm = v)
+    expect_equal(xgb.get.config()$use_rmm, v)
+  }
+  xgb.set.config(use_rmm = old_use_rmm_flag)
+  expect_equal(xgb.get.config()$use_rmm, old_use_rmm_flag)
+})
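The new `test_config.R` covers the global configuration API: `xgb.set.config()` mutates process-wide settings and `xgb.get.config()` reads them back. A usage sketch following the same save-and-restore pattern as the tests:

```R
library(xgboost)
old <- xgb.get.config()$verbosity  # remember the current setting
xgb.set.config(verbosity = 0)      # silence library-wide messages
stopifnot(xgb.get.config()$verbosity == 0)
xgb.set.config(verbosity = old)    # restore the previous value
```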
@@ -20,7 +20,7 @@ logregobj <- function(preds, dtrain) {
 
 evalerror <- function(preds, dtrain) {
   labels <- getinfo(dtrain, "label")
-  err <- as.numeric(sum(labels != (preds > 0))) / length(labels)
+  err <- as.numeric(sum(labels != (preds > 0.5))) / length(labels)
   return(list(metric = "error", value = err))
 }
 

@@ -43,6 +43,13 @@ test_that("custom objective in CV works", {
   expect_lt(cv$evaluation_log[num_round, test_error_mean], 0.03)
 })
 
+test_that("custom objective with early stop works", {
+  bst <- xgb.train(param, dtrain, 10, watchlist)
+  expect_equal(class(bst), "xgb.Booster")
+  train_log <- bst$evaluation_log$train_error
+  expect_true(all(diff(train_log) <= 0))
+})
+
 test_that("custom objective using DMatrix attr works", {
 
   attr(dtrain, 'label') <- getinfo(dtrain, 'label')

@@ -54,14 +61,14 @@ test_that("custom objective using DMatrix attr works", {
     hess <- preds * (1 - preds)
     return(list(grad = grad, hess = hess))
   }
-  param$objective = logregobjattr
+  param$objective <- logregobjattr
   bst <- xgb.train(param, dtrain, num_round, watchlist)
   expect_equal(class(bst), "xgb.Booster")
 })
 
 test_that("custom objective with multi-class works", {
-  data = as.matrix(iris[, -5])
-  label = as.numeric(iris$Species) - 1
+  data <- as.matrix(iris[, -5])
+  label <- as.numeric(iris$Species) - 1
   dtrain <- xgb.DMatrix(data = data, label = label)
   nclasses <- 3
 

@@ -72,6 +79,10 @@ test_that("custom objective with multi-class works", {
     hess <- rnorm(dim(as.matrix(preds))[1])
     return (list(grad = grad, hess = hess))
   }
-  param$objective = fake_softprob
+  fake_merror <- function(preds, dtrain) {
+    expect_equal(dim(data)[1] * nclasses, dim(as.matrix(preds))[1])
+  }
+  param$objective <- fake_softprob
+  param$eval_metric <- fake_merror
   bst <- xgb.train(param, dtrain, 1, num_class = nclasses)
 })

@@ -64,8 +64,8 @@ test_that("xgb.DMatrix: getinfo & setinfo", {
   expect_true(setinfo(dtest, 'group', c(50, 50)))
   expect_error(setinfo(dtest, 'group', test_label))
 
-  # providing character values will give a warning
-  expect_warning(setinfo(dtest, 'weight', rep('a', nrow(test_data))))
+  # providing character values will give an error
+  expect_error(setinfo(dtest, 'weight', rep('a', nrow(test_data))))
 
   # any other label should error
   expect_error(setinfo(dtest, 'asdf', test_label))

@@ -99,7 +99,7 @@ test_that("xgb.DMatrix: colnames", {
   dtest <- xgb.DMatrix(test_data, label = test_label)
   expect_equal(colnames(dtest), colnames(test_data))
   expect_error(colnames(dtest) <- 'asdf')
-  new_names <- make.names(1:ncol(test_data))
+  new_names <- make.names(seq_len(ncol(test_data)))
   expect_silent(colnames(dtest) <- new_names)
   expect_equal(colnames(dtest), new_names)
   expect_silent(colnames(dtest) <- NULL)
 

@@ -12,4 +12,5 @@ test_that("train and prediction when gctorture is on", {
                  eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
   pred <- predict(bst, test$data)
   gctorture(FALSE)
+  expect_length(pred, length(test$label))
 })
 

@@ -8,7 +8,7 @@ test_that("gblinear works", {
   dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
   dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
 
-  param <- list(objective = "binary:logistic", booster = "gblinear",
+  param <- list(objective = "binary:logistic", eval_metric = "error", booster = "gblinear",
                 nthread = 2, eta = 0.8, alpha = 0.0001, lambda = 0.0001)
   watchlist <- list(eval = dtest, train = dtrain)
 

@@ -16,7 +16,7 @@ test_that("gblinear works", {
   ERR_UL <- 0.005 # upper limit for the test set error
   VERB <- 0 # chatterbox switch
 
-  param$updater = 'shotgun'
+  param$updater <- 'shotgun'
   bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'shuffle')
   ypred <- predict(bst, dtest)
   expect_equal(length(getinfo(dtest, 'label')), 1611)

@@ -29,7 +29,7 @@ test_that("gblinear works", {
   expect_equal(dim(h), c(n, ncol(dtrain) + 1))
   expect_is(h, "matrix")
 
-  param$updater = 'coord_descent'
+  param$updater <- 'coord_descent'
   bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'cyclic')
   expect_lt(bst$evaluation_log$eval_error[n], ERR_UL)
 

@@ -1,3 +1,4 @@
+library(testthat)
 context('Test helper functions')
 
 require(xgboost)

@@ -5,18 +6,18 @@ require(data.table)
 require(Matrix)
 require(vcd, quietly = TRUE)
 
-float_tolerance = 5e-6
+float_tolerance <- 5e-6
 
 # disable some tests for 32-bit environment
-flag_32bit = .Machine$sizeof.pointer != 8
+flag_32bit <- .Machine$sizeof.pointer != 8
 
 set.seed(1982)
 data(Arthritis)
-df <- data.table(Arthritis, keep.rownames = F)
+df <- data.table(Arthritis, keep.rownames = FALSE)
 df[, AgeDiscret := as.factor(round(Age / 10, 0))]
 df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
 df[, ID := NULL]
-sparse_matrix <- sparse.model.matrix(Improved~.-1, data = df)
+sparse_matrix <- sparse.model.matrix(Improved~.-1, data = df) # nolint
 label <- df[, ifelse(Improved == "Marked", 1, 0)]
 
 # binary

@@ -46,8 +47,8 @@ mbst.GLM <- xgboost(data = as.matrix(iris[, -5]), label = mlabel, verbose = 0,
 test_that("xgb.dump works", {
   if (!flag_32bit)
     expect_length(xgb.dump(bst.Tree), 200)
-  dump_file = file.path(tempdir(), 'xgb.model.dump')
-  expect_true(xgb.dump(bst.Tree, dump_file, with_stats = T))
+  dump_file <- file.path(tempdir(), 'xgb.model.dump')
+  expect_true(xgb.dump(bst.Tree, dump_file, with_stats = TRUE))
   expect_true(file.exists(dump_file))
   expect_gt(file.size(dump_file), 8000)
 

@@ -110,7 +111,7 @@ test_that("predict feature contributions works", {
   pred <- predict(bst.GLM, sparse_matrix, outputmargin = TRUE)
   expect_lt(max(abs(rowSums(pred_contr) - pred)), 1e-5)
   # manual calculation of linear terms
-  coefs <- xgb.dump(bst.GLM)[-c(1,2,4)] %>% as.numeric
+  coefs <- as.numeric(xgb.dump(bst.GLM)[-c(1, 2, 4)])
   coefs <- c(coefs[-1], coefs[1]) # intercept must be the last
   pred_contr_manual <- sweep(cbind(sparse_matrix, 1), 2, coefs, FUN = "*")
   expect_equal(as.numeric(pred_contr), as.numeric(pred_contr_manual),

@@ -130,7 +131,11 @@ test_that("predict feature contributions works", {
   pred <- predict(mbst.GLM, as.matrix(iris[, -5]), outputmargin = TRUE, reshape = TRUE)
   pred_contr <- predict(mbst.GLM, as.matrix(iris[, -5]), predcontrib = TRUE)
   expect_length(pred_contr, 3)
-  coefs_all <- xgb.dump(mbst.GLM)[-c(1,2,6)] %>% as.numeric %>% matrix(ncol = 3, byrow = TRUE)
+  coefs_all <- matrix(
+    data = as.numeric(xgb.dump(mbst.GLM)[-c(1, 2, 6)]),
+    ncol = 3,
+    byrow = TRUE
+  )
   for (g in seq_along(pred_contr)) {
     expect_equal(colnames(pred_contr[[g]]), c(colnames(iris[, -5]), "BIAS"))
     expect_lt(max(abs(rowSums(pred_contr[[g]]) - pred[, g])), float_tolerance)

@@ -160,7 +165,7 @@ test_that("SHAPs sum to predictions, with or without DART", {
                      objective = "reg:squarederror",
                      eval_metric = "rmse"),
                 if (booster == "dart")
-                  list(rate_drop = .01, one_drop = T)),
+                  list(rate_drop = .01, one_drop = TRUE)),
               data = d,
               label = y,
               nrounds = nrounds)

@@ -168,13 +173,13 @@ test_that("SHAPs sum to predictions, with or without DART", {
     pr <- function(...)
       predict(fit, newdata = d, ...)
     pred <- pr()
-    shap <- pr(predcontrib = T)
-    shapi <- pr(predinteraction = T)
-    tol = 1e-5
+    shap <- pr(predcontrib = TRUE)
+    shapi <- pr(predinteraction = TRUE)
+    tol <- 1e-5
 
     expect_equal(rowSums(shap), pred, tol = tol)
     expect_equal(apply(shapi, 1, sum), pred, tol = tol)
-    for (i in 1 : nrow(d))
+    for (i in seq_len(nrow(d)))
       for (f in list(rowSums, colSums))
         expect_equal(f(shapi[i, , ]), shap[i, ], tol = tol)
   }

@@ -223,7 +228,7 @@ if (grepl('Windows', Sys.info()[['sysname']]) ||
   X <- 10^runif(100, -20, 20)
   if (capabilities('long.double')) {
     X2X <- as.numeric(format(X, digits = 17))
-    expect_identical(X, X2X)
+    expect_equal(X, X2X, tolerance = float_tolerance)
   }
   # retrieved attributes to be the same as written
   for (x in X) {

@@ -238,12 +243,13 @@
 test_that("xgb.Booster serializing as R object works", {
   saveRDS(bst.Tree, 'xgb.model.rds')
   bst <- readRDS('xgb.model.rds')
-  if (file.exists('xgb.model.rds')) file.remove('xgb.model.rds')
   dtrain <- xgb.DMatrix(sparse_matrix, label = label)
   expect_equal(predict(bst.Tree, dtrain), predict(bst, dtrain), tolerance = float_tolerance)
   expect_equal(xgb.dump(bst.Tree), xgb.dump(bst))
   xgb.save(bst, 'xgb.model')
   if (file.exists('xgb.model')) file.remove('xgb.model')
+  bst <- readRDS('xgb.model.rds')
+  if (file.exists('xgb.model.rds')) file.remove('xgb.model.rds')
   nil_ptr <- new("externalptr")
   class(nil_ptr) <- "xgb.Booster.handle"
   expect_true(identical(bst$handle, nil_ptr))

@@ -305,7 +311,35 @@ test_that("xgb.importance works with and without feature names", {
   # for multiclass
   imp.Tree <- xgb.importance(model = mbst.Tree)
   expect_equal(dim(imp.Tree), c(4, 4))
   xgb.importance(model = mbst.Tree, trees = seq(from=0, by=nclass, length.out=nrounds))
+
+  trees <- seq(from = 0, by = 2, length.out = 2)
+  importance <- xgb.importance(feature_names = feature.names, model = bst.Tree, trees = trees)
+
+  importance_from_dump <- function() {
+    model_text_dump <- xgb.dump(model = bst.Tree, with_stats = TRUE, trees = trees)
+    imp <- xgb.model.dt.tree(
+      feature_names = feature.names,
+      text = model_text_dump,
+      trees = trees
+    )[
+      Feature != "Leaf", .(
+        Gain = sum(Quality),
+        Cover = sum(Cover),
+        Frequency = .N
+      ),
+      by = Feature
+    ][
+      , `:=`(
+        Gain = Gain / sum(Gain),
+        Cover = Cover / sum(Cover),
+        Frequency = Frequency / sum(Frequency)
+      )
+    ][
+      order(Gain, decreasing = TRUE)
+    ]
+    imp
+  }
+  expect_equal(importance_from_dump(), importance, tolerance = 1e-6)
 })
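The `importance_from_dump()` helper added above recomputes importance from a text dump and relies on `xgb.importance()` reporting Gain, Cover and Frequency as normalized shares that each sum to 1, exactly the normalization the helper applies. A hedged sketch of that invariant, assuming the `bst.Tree` model from this file and an attached `data.table`:

```R
library(data.table)
imp <- xgb.importance(model = bst.Tree)
# Each importance column is a share of the total, so each sums to ~1.
colSums(imp[, .(Gain, Cover, Frequency)])
```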
 
 test_that("xgb.importance works with GLM model", {

@@ -335,8 +369,8 @@ test_that("xgb.model.dt.tree and xgb.importance work with a single split model",
 })
 
 test_that("xgb.plot.tree works with and without feature names", {
-  xgb.plot.tree(feature_names = feature.names, model = bst.Tree)
-  xgb.plot.tree(model = bst.Tree)
+  expect_silent(xgb.plot.tree(feature_names = feature.names, model = bst.Tree))
+  expect_silent(xgb.plot.tree(model = bst.Tree))
 })
 
 test_that("xgb.plot.multi.trees works with and without feature names", {

@@ -351,11 +385,47 @@ test_that("xgb.plot.deepness works", {
   xgb.ggplot.deepness(model = bst.Tree)
 })
 
+test_that("xgb.shap.data works when top_n is provided", {
+  data_list <- xgb.shap.data(data = sparse_matrix, model = bst.Tree, top_n = 2)
+  expect_equal(names(data_list), c("data", "shap_contrib"))
+  expect_equal(NCOL(data_list$data), 2)
+  expect_equal(NCOL(data_list$shap_contrib), 2)
+  expect_equal(NROW(data_list$data), NROW(data_list$shap_contrib))
+  expect_gt(length(colnames(data_list$data)), 0)
+  expect_gt(length(colnames(data_list$shap_contrib)), 0)
+
+  # for multiclass without target class provided
+  data_list <- xgb.shap.data(data = as.matrix(iris[, -5]), model = mbst.Tree, top_n = 2)
+  expect_equal(dim(data_list$shap_contrib), c(nrow(iris), 2))
+  # for multiclass with target class provided
+  data_list <- xgb.shap.data(data = as.matrix(iris[, -5]), model = mbst.Tree, top_n = 2, target_class = 0)
+  expect_equal(dim(data_list$shap_contrib), c(nrow(iris), 2))
+})
+
+test_that("xgb.shap.data works with subsampling", {
+  data_list <- xgb.shap.data(data = sparse_matrix, model = bst.Tree, top_n = 2, subsample = 0.8)
+  expect_equal(NROW(data_list$data), as.integer(0.8 * nrow(sparse_matrix)))
+  expect_equal(NROW(data_list$data), NROW(data_list$shap_contrib))
+})
+
+test_that("prepare.ggplot.shap.data works", {
+  data_list <- xgb.shap.data(data = sparse_matrix, model = bst.Tree, top_n = 2)
+  plot_data <- prepare.ggplot.shap.data(data_list, normalize = TRUE)
+  expect_s3_class(plot_data, "data.frame")
+  expect_equal(names(plot_data), c("id", "feature", "feature_value", "shap_value"))
+  expect_s3_class(plot_data$feature, "factor")
+  # Each observation should have 1 row for each feature
+  expect_equal(nrow(plot_data), nrow(sparse_matrix) * 2)
+})
+
 test_that("xgb.plot.shap works", {
   sh <- xgb.plot.shap(data = sparse_matrix, model = bst.Tree, top_n = 2, col = 4)
   expect_equal(names(sh), c("data", "shap_contrib"))
   expect_equal(NCOL(sh$data), 2)
   expect_equal(NCOL(sh$shap_contrib), 2)
 })
 
+test_that("xgb.plot.shap.summary works", {
+  expect_silent(xgb.plot.shap.summary(data = sparse_matrix, model = bst.Tree, top_n = 2))
+  expect_silent(xgb.ggplot.shap.summary(data = sparse_matrix, model = bst.Tree, top_n = 2))
+})
+
 test_that("check.deprecation works", {

@@ -374,3 +444,26 @@ test_that("check.deprecation works", {
   , "\'dumm\' was partially matched to \'dummy\'")
   expect_equal(res, list(a = 1, DUMMY = 22))
 })
+
+test_that('convert.labels works', {
+  y <- c(0, 1, 0, 0, 1)
+  for (objective in c('binary:logistic', 'binary:logitraw', 'binary:hinge')) {
+    res <- xgboost:::convert.labels(y, objective_name = objective)
+    expect_s3_class(res, 'factor')
+    expect_equal(res, factor(res))
+  }
+  y <- c(0, 1, 3, 2, 1, 4)
+  for (objective in c('multi:softmax', 'multi:softprob', 'rank:pairwise', 'rank:ndcg',
+                      'rank:map')) {
+    res <- xgboost:::convert.labels(y, objective_name = objective)
+    expect_s3_class(res, 'factor')
+    expect_equal(res, factor(res))
+  }
+  y <- c(1.2, 3.0, -1.0, 10.0)
+  for (objective in c('reg:squarederror', 'reg:squaredlogerror', 'reg:logistic',
+                      'reg:pseudohubererror', 'count:poisson', 'survival:cox', 'survival:aft',
+                      'reg:gamma', 'reg:tweedie')) {
+    res <- xgboost:::convert.labels(y, objective_name = objective)
+    expect_equal(class(res), 'numeric')
+  }
+})

@@ -1,7 +1,6 @@
 context('Test prediction of feature interactions')
 
 require(xgboost)
-require(magrittr)
 
 set.seed(123)
 

@@ -26,13 +25,13 @@ test_that("predict feature interactions works", {
   param <- list(eta = 0.1, max_depth = 4, base_score = mean(y), lambda = 0, nthread = 2)
   b <- xgb.train(param, dm, 100)
 
-  pred = predict(b, dm, outputmargin=TRUE)
+  pred <- predict(b, dm, outputmargin = TRUE)
 
   # SHAP contributions:
   cont <- predict(b, dm, predcontrib = TRUE)
   expect_equal(dim(cont), c(N, P + 1))
   # make sure for each row they add up to marginal predictions
-  max(abs(rowSums(cont) - pred)) %>% expect_lt(0.001)
+  expect_lt(max(abs(rowSums(cont) - pred)), 0.001)
   # Hand-construct the 'ground truth' feature contributions:
   gt_cont <- cbind(
     2. * X[, 1],

@@ -52,30 +51,33 @@ test_that("predict feature interactions works", {
   expect_equal(dimnames(intr), list(NULL, cn, cn))
 
   # check the symmetry
-  max(abs(aperm(intr, c(1,3,2)) - intr)) %>% expect_lt(0.00001)
+  expect_lt(max(abs(aperm(intr, c(1, 3, 2)) - intr)), 0.00001)
 
   # sums WRT columns must be close to feature contributions
-  max(abs(apply(intr, c(1,2), sum) - cont)) %>% expect_lt(0.00001)
+  expect_lt(max(abs(apply(intr, c(1, 2), sum) - cont)), 0.00001)
 
   # diagonal terms for features 3,4,5 must be close to zero
-  Reduce(max, sapply(3:P, function(i) max(abs(intr[, i, i])))) %>% expect_lt(0.05)
+  expect_lt(Reduce(max, sapply(3:P, function(i) max(abs(intr[, i, i])))), 0.05)
 
   # BIAS must have no interactions
-  max(abs(intr[, 1:P, P+1])) %>% expect_lt(0.00001)
+  expect_lt(max(abs(intr[, 1:P, P + 1])), 0.00001)
 
   # interactions other than 2 x 3 must be close to zero
   intr23 <- intr
   intr23[, 2, 3] <- 0
-  Reduce(max, sapply(1:P, function(i) max(abs(intr23[, i, (i+1):(P+1)])))) %>% expect_lt(0.05)
+  expect_lt(
+    Reduce(max, sapply(1:P, function(i) max(abs(intr23[, i, (i + 1):(P + 1)])))),
+    0.05
+  )
 
   # Construct the 'ground truth' contributions of interactions directly from the linear terms:
   gt_intr <- array(0, c(N, P + 1, P + 1))
   gt_intr[, 2, 3] <- 1. * X[, 2] * X[, 3] # attribute a HALF of the interaction term to each symmetric element
   gt_intr[, 3, 2] <- gt_intr[, 2, 3]
   # merge-in the diagonal based on 'ground truth' feature contributions
-  intr_diag = gt_cont - apply(gt_intr, c(1,2), sum)
+  intr_diag <- gt_cont - apply(gt_intr, c(1, 2), sum)
   for (j in seq_len(P)) {
-    gt_intr[,j,j] = intr_diag[,j]
+    gt_intr[, j, j] <- intr_diag[, j]
   }
   # These should be relatively close:
   expect_lt(max(abs(intr - gt_intr)), 0.1)
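The checks above are all instances of SHAP additivity: per-row feature contributions (plus the BIAS column) must reproduce the margin prediction, and interaction contributions must in turn sum to the per-feature contributions. A minimal sketch of the first identity, reusing `b` and `dm` from this test:

```R
cont <- predict(b, dm, predcontrib = TRUE)   # N x (P + 1); last column is BIAS
pred <- predict(b, dm, outputmargin = TRUE)  # raw margin predictions
stopifnot(max(abs(rowSums(cont) - pred)) < 1e-3)  # contributions sum to margins
```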
@@ -107,7 +109,7 @@ test_that("SHAP contribution values are not NAN", {
 
   shaps <- as.data.frame(predict(fit,
                                  newdata = as.matrix(subset(d, fold == 1)[, ivs]),
-                                 predcontrib = T))
+                                 predcontrib = TRUE))
   result <- cbind(shaps, sum = rowSums(shaps), pred = predict(fit,
                                  newdata = as.matrix(subset(d, fold == 1)[, ivs])))
 

@@ -119,23 +121,39 @@ test_that("multiclass feature interactions work", {
   dm <- xgb.DMatrix(as.matrix(iris[, -5]), label = as.numeric(iris$Species) - 1)
   param <- list(eta = 0.1, max_depth = 4, objective = 'multi:softprob', num_class = 3)
   b <- xgb.train(param, dm, 40)
-  pred = predict(b, dm, outputmargin=TRUE) %>% array(c(3, 150)) %>% t
+  pred <- t(
+    array(
+      data = predict(b, dm, outputmargin = TRUE),
+      dim = c(3, 150)
+    )
+  )
 
   # SHAP contributions:
   cont <- predict(b, dm, predcontrib = TRUE)
   expect_length(cont, 3)
   # rewrap them as a 3d array
-  cont <- unlist(cont) %>% array(c(150, 5, 3))
+  cont <- array(
+    data = unlist(cont),
+    dim = c(150, 5, 3)
+  )
 
   # make sure for each row they add up to marginal predictions
-  max(abs(apply(cont, c(1,3), sum) - pred)) %>% expect_lt(0.001)
+  expect_lt(max(abs(apply(cont, c(1, 3), sum) - pred)), 0.001)
 
   # SHAP interaction contributions:
   intr <- predict(b, dm, predinteraction = TRUE)
   expect_length(intr, 3)
   # rewrap them as a 4d array
-  intr <- unlist(intr) %>% array(c(150, 5, 5, 3)) %>% aperm(c(4, 1, 2, 3)) # [grp, row, col, col]
+  intr <- aperm(
+    a = array(
+      data = unlist(intr),
+      dim = c(150, 5, 5, 3)
+    ),
+    perm = c(4, 1, 2, 3) # [grp, row, col, col]
+  )
 
   # check the symmetry
-  max(abs(aperm(intr, c(1,2,4,3)) - intr)) %>% expect_lt(0.00001)
+  expect_lt(max(abs(aperm(intr, c(1, 2, 4, 3)) - intr)), 0.00001)
   # sums WRT columns must be close to feature contributions
-  max(abs(apply(intr, c(1,2,3), sum) - aperm(cont, c(3,1,2)))) %>% expect_lt(0.00001)
+  expect_lt(max(abs(apply(intr, c(1, 2, 3), sum) - aperm(cont, c(3, 1, 2)))), 0.00001)
 })
Some files were not shown because too many files have changed in this diff.