Compare commits
428 Commits: v0.7...release_0.
[Commit table (Author | SHA1 | Date) not fully captured: only abbreviated SHA1 hashes survived extraction, from bf32413682 through 526801cdb3; author names, dates, and commit messages are missing.]
21  .clang-tidy  (Normal file)
@@ -0,0 +1,21 @@
Checks: 'modernize-*,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
CheckOptions:
  - { key: readability-identifier-naming.ClassCase, value: CamelCase }
  - { key: readability-identifier-naming.StructCase, value: CamelCase }
  - { key: readability-identifier-naming.TypeAliasCase, value: CamelCase }
  - { key: readability-identifier-naming.TypedefCase, value: CamelCase }
  - { key: readability-identifier-naming.TypeTemplateParameterCase, value: CamelCase }
  - { key: readability-identifier-naming.MemberCase, value: lower_case }
  - { key: readability-identifier-naming.PrivateMemberSuffix, value: '_' }
  - { key: readability-identifier-naming.ProtectedMemberSuffix, value: '_' }
  - { key: readability-identifier-naming.EnumCase, value: CamelCase }
  - { key: readability-identifier-naming.EnumConstant, value: CamelCase }
  - { key: readability-identifier-naming.EnumConstantPrefix, value: k }
  - { key: readability-identifier-naming.GlobalConstantCase, value: CamelCase }
  - { key: readability-identifier-naming.GlobalConstantPrefix, value: k }
  - { key: readability-identifier-naming.StaticConstantCase, value: CamelCase }
  - { key: readability-identifier-naming.StaticConstantPrefix, value: k }
  - { key: readability-identifier-naming.ConstexprVariableCase, value: CamelCase }
  - { key: readability-identifier-naming.ConstexprVariablePrefix, value: k }
  - { key: readability-identifier-naming.FunctionCase, value: CamelCase }
  - { key: readability-identifier-naming.NamespaceCase, value: lower_case }
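The naming rules above pin down one convention per identifier kind. As a quick illustration (hypothetical names, not taken from the xgboost source), a C++ fragment that satisfies every readability-identifier-naming option in this file:

```cpp
namespace tree_util {            // NamespaceCase: lower_case

constexpr int kMaxDepth = 8;     // ConstexprVariableCase: CamelCase with 'k' prefix

enum class SplitKind {           // EnumCase: CamelCase
  kDefault,                      // EnumConstant: CamelCase with 'k' prefix
  kSparse,
};

class TreeNode {                 // ClassCase: CamelCase
 public:
  int Depth() const { return depth_; }  // FunctionCase: CamelCase

 private:
  int depth_ = 0;                // MemberCase: lower_case, PrivateMemberSuffix: '_'
};

}  // namespace tree_util
```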
11  .editorconfig  (Normal file)
@@ -0,0 +1,11 @@
root = true

[*]
charset=utf-8
indent_style = space
indent_size = 2
insert_final_newline = true

[*.py]
indent_style = space
indent_size = 4
7  .github/ISSUE_TEMPLATE.md  (vendored, Normal file)
@@ -0,0 +1,7 @@
Thanks for participating in the XGBoost community! We use https://discuss.xgboost.ai for general usage questions and discussions. The issue tracker is reserved for actionable items such as feature proposal discussions, roadmaps, and bug tracking. You are always welcome to post on the forum first :)

Issues that are inactive for a period of time may get closed. We adopt this policy so that we won't lose track of actionable issues that may fall to the bottom of the pile. Feel free to open a new issue if you feel there is an additional problem that needs attention when an old one gets closed.

For bug reports, to help the developers act on the issue, please include a description of your environment, preferably with a minimal script to reproduce the problem.

For feature proposals, list clear, small actionable items so we can track the progress of the change.
32  .github/lock.yml  (vendored, Normal file)
@@ -0,0 +1,32 @@
# Configuration for lock-threads - https://github.com/dessant/lock-threads

# Number of days of inactivity before a closed issue or pull request is locked
daysUntilLock: 90

# Issues and pull requests with these labels will not be locked. Set to `[]` to disable
exemptLabels:
  - feature-request

# Label to add before locking, such as `outdated`. Set to `false` to disable
lockLabel: false

# Comment to post before locking. Set to `false` to disable
lockComment: false

# Assign `resolved` as the reason for locking. Set to `false` to disable
setLockReason: true

# Limit to only `issues` or `pulls`
# only: issues

# Optionally, specify configuration settings just for `issues` or `pulls`
# issues:
#   exemptLabels:
#   - help-wanted
#   lockLabel: outdated

# pulls:
#   daysUntilLock: 30

# Repository to extend settings from
# _extends: repo
8  .gitignore  (vendored)
@@ -15,7 +15,6 @@
*.Rcheck
*.rds
*.tar.gz
#*txt*
*conf
*buffer
*model
@@ -47,13 +46,12 @@ Debug
*.cpage.col
*.cpage
*.Rproj
./xgboost
./xgboost.mpi
./xgboost.mock
#.Rbuildignore
R-package.Rproj
*.cache*
#java
# java
java/xgboost4j/target
java/xgboost4j/tmp
java/xgboost4j-demo/target
@@ -68,10 +66,9 @@ nb-configuration*
.settings/
build
config.mk
xgboost
/xgboost
*.data
build_plugin
dmlc-core
.idea
recommonmark/
tags
@@ -94,3 +91,4 @@ lib/
metastore_db

plugin/updater_gpu/test/cpp/data
/include/xgboost/build_config.h
3  .gitmodules  (vendored)
@@ -4,9 +4,6 @@
[submodule "rabit"]
  path = rabit
  url = https://github.com/dmlc/rabit
[submodule "nccl"]
  path = nccl
  url = https://github.com/dmlc/nccl
[submodule "cub"]
  path = cub
  url = https://github.com/NVlabs/cub
23  .travis.yml
@@ -6,9 +6,7 @@ os:
  - linux
  - osx

osx_image: xcode8

group: deprecated-2017Q4
osx_image: xcode9.3

# Use Build Matrix to do lint and build separately
env:
@@ -26,6 +24,10 @@ env:
  - TASK=cmake_test
  # c++ test
  - TASK=cpp_test
  # distributed test
  - TASK=distributed_test
  # address sanitizer test
  - TASK=sanitizer_test

matrix:
  exclude:
@@ -39,15 +41,21 @@ matrix:
    env: TASK=python_lightweight_test
  - os: osx
    env: TASK=cpp_test
  - os: osx
    env: TASK=distributed_test
  - os: osx
    env: TASK=sanitizer_test

# dependent apt packages
addons:
  apt:
    sources:
      - llvm-toolchain-trusty-5.0
      - ubuntu-toolchain-r-test
      - george-edison55-precise-backports
    packages:
      - cmake
      - clang
      - clang-tidy-5.0
      - cmake-data
      - doxygen
      - wget
@@ -56,6 +64,13 @@ addons:
      - graphviz
      - gcc-4.8
      - g++-4.8
      - gcc-7
      - g++-7
  homebrew:
    packages:
      - gcc@7
      - graphviz
    update: true

before_install:
  - source dmlc-core/scripts/travis/travis_setup_env.sh
210  CMakeLists.txt
@@ -8,19 +8,31 @@ set_default_configuration_release()
msvc_use_static_runtime()

# Options
option(USE_CUDA "Build with GPU acceleration")
option(USE_AVX "Build with AVX instructions. May not produce identical results due to approximate math." OFF)
option(USE_NCCL "Build using NCCL for multi-GPU. Also requires USE_CUDA")
## GPUs
option(USE_CUDA "Build with GPU acceleration" OFF)
option(USE_NCCL "Build with multiple GPUs support" OFF)
set(GPU_COMPUTE_VER "" CACHE STRING
  "Space separated list of compute versions to be built against, e.g. '35 61'")

## Bindings
option(JVM_BINDINGS "Build JVM bindings" OFF)
option(GOOGLE_TEST "Build google tests" OFF)
option(R_LIB "Build shared library for R package" OFF)
set(GPU_COMPUTE_VER 35;50;52;60;61 CACHE STRING
  "Space separated list of compute versions to be built against")

## Devs
option(USE_SANITIZER "Use santizer flags" OFF)
option(SANITIZER_PATH "Path to sanitizes.")
set(ENABLED_SANITIZERS "address" "leak" CACHE STRING
  "Semicolon separated list of sanitizer names. E.g 'address;leak'. Supported sanitizers are
  address, leak and thread.")
option(GOOGLE_TEST "Build google tests" OFF)

# Plugins
option(PLUGIN_LZ4 "Build lz4 plugin" OFF)
option(PLUGIN_DENSE_PARSER "Build dense parser plugin" OFF)

# Deprecation warning
if(PLUGIN_UPDATER_GPU)
set(USE_CUDA ON)
message(WARNING "The option 'PLUGIN_UPDATER_GPU' is deprecated. Set 'USE_CUDA' instead.")
if(USE_AVX)
message(WARNING "The option 'USE_AVX' is deprecated as experimental AVX features have been removed from xgboost.")
endif()

# Compiler flags
@@ -39,17 +51,42 @@ else()
# Performance
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -funroll-loops")
endif()

# AVX
if(USE_AVX)
if(MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX")
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx")
endif()
add_definitions(-DXGBOOST_USE_AVX)
if(WIN32 AND MINGW)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -static-libstdc++")
endif()

# Check existence of software pre-fetching
include(CheckCXXSourceCompiles)
check_cxx_source_compiles("
#include <xmmintrin.h>
int main() {
  char data = 0;
  const char* address = &data;
  _mm_prefetch(address, _MM_HINT_NTA);
  return 0;
}
" XGBOOST_MM_PREFETCH_PRESENT)
check_cxx_source_compiles("
int main() {
  char data = 0;
  const char* address = &data;
  __builtin_prefetch(address, 0, 0);
  return 0;
}
" XGBOOST_BUILTIN_PREFETCH_PRESENT)
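The two check_cxx_source_compiles() probes above define XGBOOST_MM_PREFETCH_PRESENT or XGBOOST_BUILTIN_PREFETCH_PRESENT when the corresponding prefetch mechanism compiles. A minimal C++ sketch of how such feature macros are typically consumed (the wrapper name below is hypothetical, not taken from the xgboost source):

```cpp
#if defined(XGBOOST_MM_PREFETCH_PRESENT)
#include <xmmintrin.h>
#endif

// Hypothetical helper: use whichever prefetch mechanism the CMake probes found,
// and fall back to a no-op when neither is available.
inline void PrefetchHint(const char* address) {
#if defined(XGBOOST_MM_PREFETCH_PRESENT)
  _mm_prefetch(address, _MM_HINT_NTA);    // SSE intrinsic path
#elif defined(XGBOOST_BUILTIN_PREFETCH_PRESENT)
  __builtin_prefetch(address, 0, 0);      // GCC/Clang builtin path
#else
  (void)address;                          // portable no-op fallback
#endif
}
```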

# Sanitizer
if(USE_SANITIZER)
include(cmake/Sanitizer.cmake)
enable_sanitizers("${ENABLED_SANITIZERS}")
endif(USE_SANITIZER)

# dmlc-core
add_subdirectory(dmlc-core)
set(LINK_LIBRARIES dmlc rabit)

# enable custom logging
add_definitions(-DDMLC_LOG_CUSTOMIZE=1)

# compiled code customizations for R package
if(R_LIB)
@@ -64,13 +101,20 @@ if(R_LIB)
)
endif()

# Gather source files
include_directories (
  ${PROJECT_SOURCE_DIR}/include
  ${PROJECT_SOURCE_DIR}/dmlc-core/include
  ${PROJECT_SOURCE_DIR}/rabit/include
)

file(GLOB_RECURSE SOURCES
# Generate configurable header
set(CMAKE_LOCAL "${PROJECT_SOURCE_DIR}/cmake")
set(INCLUDE_ROOT "${PROJECT_SOURCE_DIR}/include")
message(STATUS "${CMAKE_LOCAL}/build_config.h.in -> ${INCLUDE_ROOT}/xgboost/build_config.h")
configure_file("${CMAKE_LOCAL}/build_config.h.in" "${INCLUDE_ROOT}/xgboost/build_config.h")

file(GLOB_RECURSE SOURCES
  src/*.cc
  src/*.h
  include/*.h
@@ -84,8 +128,17 @@ file(GLOB_RECURSE CUDA_SOURCES
  src/*.cuh
)

# Add plugins to source files
if(PLUGIN_LZ4)
list(APPEND SOURCES plugin/lz4/sparse_page_lz4_format.cc)
link_libraries(lz4)
endif()
if(PLUGIN_DENSE_PARSER)
list(APPEND SOURCES plugin/dense_parser/dense_libsvm.cc)
endif()

# rabit
# TODO: Create rabit cmakelists.txt
# TODO: Use CMakeLists.txt from rabit.
set(RABIT_SOURCES
  rabit/src/allreduce_base.cc
  rabit/src/allreduce_robust.cc
@@ -96,6 +149,7 @@ set(RABIT_EMPTY_SOURCES
  rabit/src/engine_empty.cc
  rabit/src/c_api.cc
)

if(MINGW OR R_LIB)
# build a dummy rabit library
add_library(rabit STATIC ${RABIT_EMPTY_SOURCES})
@@ -103,47 +157,73 @@ else()
add_library(rabit STATIC ${RABIT_SOURCES})
endif()

if (GENERATE_COMPILATION_DATABASE)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
endif (GENERATE_COMPILATION_DATABASE)

# dmlc-core
add_subdirectory(dmlc-core)
set(LINK_LIBRARIES dmlccore rabit)


if(USE_CUDA)
if(USE_CUDA AND (NOT GENERATE_COMPILATION_DATABASE))
find_package(CUDA 8.0 REQUIRED)
cmake_minimum_required(VERSION 3.5)

add_definitions(-DXGBOOST_USE_CUDA)

include_directories(cub)

if(USE_NCCL)
include_directories(nccl/src)
find_package(Nccl REQUIRED)
cuda_include_directories(${NCCL_INCLUDE_DIR})
add_definitions(-DXGBOOST_USE_NCCL)
endif()

if((CUDA_VERSION_MAJOR EQUAL 9) OR (CUDA_VERSION_MAJOR GREATER 9))
message("CUDA 9.0 detected, adding Volta compute capability (7.0).")
set(GPU_COMPUTE_VER "${GPU_COMPUTE_VER};70")
endif()

set(GENCODE_FLAGS "")
format_gencode_flags("${GPU_COMPUTE_VER}" GENCODE_FLAGS)
message("cuda architecture flags: ${GENCODE_FLAGS}")

set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS};--expt-extended-lambda;--expt-relaxed-constexpr;${GENCODE_FLAGS};-lineinfo;")
if(NOT MSVC)
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS};-Xcompiler -fPIC; -std=c++11")
endif()

if(USE_NCCL)
add_subdirectory(nccl)
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS};-Xcompiler -fPIC; -Xcompiler -Werror; -std=c++11")
endif()

cuda_add_library(gpuxgboost ${CUDA_SOURCES} STATIC)

if(USE_NCCL)
target_link_libraries(gpuxgboost nccl)
link_directories(${NCCL_LIBRARY})
target_link_libraries(gpuxgboost ${NCCL_LIB_NAME})
endif()
list(APPEND LINK_LIBRARIES gpuxgboost)
list(APPEND LINK_LIBRARIES gpuxgboost)

elseif (USE_CUDA AND GENERATE_COMPILATION_DATABASE)
# Enable CUDA language to generate a compilation database.
cmake_minimum_required(VERSION 3.8)

find_package(CUDA 8.0 REQUIRED)
enable_language(CUDA)
set(CMAKE_CUDA_COMPILER clang++)
set(CUDA_SEPARABLE_COMPILATION ON)
if (NOT CLANG_CUDA_GENCODE)
set(CLANG_CUDA_GENCODE "--cuda-gpu-arch=sm_35")
endif (NOT CLANG_CUDA_GENCODE)
set(CMAKE_CUDA_FLAGS " -Wno-deprecated ${CLANG_CUDA_GENCODE} -fPIC ${GENCODE} -std=c++11 -x cuda")
message(STATUS "CMAKE_CUDA_FLAGS: ${CMAKE_CUDA_FLAGS}")

add_library(gpuxgboost STATIC ${CUDA_SOURCES})

if(USE_NCCL)
find_package(Nccl REQUIRED)
target_include_directories(gpuxgboost PUBLIC ${NCCL_INCLUDE_DIR})
target_compile_definitions(gpuxgboost PUBLIC -DXGBOOST_USE_NCCL)
target_link_libraries(gpuxgboost PUBLIC ${NCCL_LIB_NAME})
endif()

target_compile_definitions(gpuxgboost PUBLIC -DXGBOOST_USE_CUDA)
# A hack for CMake to make arguments valid for clang++
string(REPLACE "-x cu" "-x cuda" CMAKE_CUDA_COMPILE_PTX_COMPILATION
  ${CMAKE_CUDA_COMPILE_PTX_COMPILATION})
string(REPLACE "-x cu" "-x cuda" CMAKE_CUDA_COMPILE_WHOLE_COMPILATION
  ${CMAKE_CUDA_COMPILE_WHOLE_COMPILATION})
string(REPLACE "-x cu" "-x cuda" CMAKE_CUDA_COMPILE_SEPARABLE_COMPILATION
  ${CMAKE_CUDA_COMPILE_SEPARABLE_COMPILATION})
target_include_directories(gpuxgboost PUBLIC cub)
endif()
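Both CUDA branches above end up defining XGBOOST_USE_CUDA (and XGBOOST_USE_NCCL for multi-GPU builds) for the compiled sources. As a hedged sketch, with hypothetical function names rather than actual xgboost internals, of how host code commonly gates a GPU path on such a build-time definition:

```cpp
#include <cstdio>

#if defined(XGBOOST_USE_CUDA)
void UpdateOnGpu();  // hypothetical: would live in a .cu file linked in via gpuxgboost
#endif

void Update() {
#if defined(XGBOOST_USE_CUDA)
  UpdateOnGpu();                              // GPU path, only built when USE_CUDA=ON
#else
  std::printf("falling back to CPU path\n");  // CPU-only build
#endif
}
```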
@@ -159,7 +239,6 @@ endif()

add_library(objxgboost OBJECT ${SOURCES})

# building shared library for R package
if(R_LIB)
find_package(LibR REQUIRED)
@@ -167,22 +246,25 @@ if(R_LIB)
list(APPEND LINK_LIBRARIES "${LIBR_CORE_LIBRARY}")
MESSAGE(STATUS "LIBR_CORE_LIBRARY " ${LIBR_CORE_LIBRARY})

include_directories(
# Shared library target for the R package
add_library(xgboost SHARED $<TARGET_OBJECTS:objxgboost>)
include_directories(xgboost
  "${LIBR_INCLUDE_DIRS}"
  "${PROJECT_SOURCE_DIR}"
)

# Shared library target for the R package
add_library(xgboost SHARED $<TARGET_OBJECTS:objxgboost>)
target_link_libraries(xgboost ${LINK_LIBRARIES})
# R uses no lib prefix in shared library names of its packages
set_target_properties(xgboost PROPERTIES PREFIX "")
if(APPLE)
set_target_properties(xgboost PROPERTIES SUFFIX ".so")
endif()

setup_rpackage_install_target(xgboost ${CMAKE_CURRENT_BINARY_DIR})
# use a dummy location for any other remaining installs
set(CMAKE_INSTALL_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/dummy_inst")

# main targets: shared library & exe
# main targets: shared library & exe
else()
# Executable
add_executable(runxgboost $<TARGET_OBJECTS:objxgboost> src/cli_main.cc)
@@ -205,41 +287,53 @@ else()
add_dependencies(xgboost runxgboost)
endif()

# JVM
if(JVM_BINDINGS)
find_package(JNI QUIET REQUIRED)

include_directories(${JNI_INCLUDE_DIRS} jvm-packages/xgboost4j/src/native)

add_library(xgboost4j SHARED
  $<TARGET_OBJECTS:objxgboost>
  jvm-packages/xgboost4j/src/native/xgboost4j.cpp)
set_output_directory(xgboost4j ${PROJECT_SOURCE_DIR}/lib)
  $<TARGET_OBJECTS:objxgboost>
  jvm-packages/xgboost4j/src/native/xgboost4j.cpp)
target_include_directories(xgboost4j
  PRIVATE ${JNI_INCLUDE_DIRS}
  PRIVATE jvm-packages/xgboost4j/src/native)
target_link_libraries(xgboost4j
  ${LINK_LIBRARIES}
  ${JAVA_JVM_LIBRARY})
  ${LINK_LIBRARIES}
  ${JAVA_JVM_LIBRARY})
set_output_directory(xgboost4j ${PROJECT_SOURCE_DIR}/lib)
endif()

# Test
if(GOOGLE_TEST)
find_package(GTest REQUIRED)
enable_testing()
find_package(GTest REQUIRED)

file(GLOB_RECURSE TEST_SOURCES "tests/cpp/*.cc")
auto_source_group("${TEST_SOURCES}")
include_directories(${GTEST_INCLUDE_DIR})

if(USE_CUDA)
if(USE_CUDA AND (NOT GENERATE_COMPILATION_DATABASE))
file(GLOB_RECURSE CUDA_TEST_SOURCES "tests/cpp/*.cu")
cuda_include_directories(${GTEST_INCLUDE_DIRS})
cuda_compile(CUDA_TEST_OBJS ${CUDA_TEST_SOURCES})
elseif (USE_CUDA AND GENERATE_COMPILATION_DATABASE)
file(GLOB_RECURSE CUDA_TEST_SOURCES "tests/cpp/*.cu")
else()
set(CUDA_TEST_OBJS "")
endif()

add_executable(testxgboost ${TEST_SOURCES} ${CUDA_TEST_OBJS} $<TARGET_OBJECTS:objxgboost>)
if (USE_CUDA AND GENERATE_COMPILATION_DATABASE)
add_executable(testxgboost ${TEST_SOURCES} ${CUDA_TEST_SOURCES}
  $<TARGET_OBJECTS:objxgboost>)
target_include_directories(testxgboost PRIVATE cub)
else ()
add_executable(testxgboost ${TEST_SOURCES} ${CUDA_TEST_OBJS}
  $<TARGET_OBJECTS:objxgboost>)
endif ()

set_output_directory(testxgboost ${PROJECT_SOURCE_DIR})
target_include_directories(testxgboost
  PRIVATE ${GTEST_INCLUDE_DIRS})
target_link_libraries(testxgboost ${GTEST_LIBRARIES} ${LINK_LIBRARIES})

add_test(TestXGBoost testxgboost)
@@ -6,21 +6,30 @@ Committers
----------
Committers are people who have made substantial contribution to the project and granted write access to the project.
* [Tianqi Chen](https://github.com/tqchen), University of Washington
  - Tianqi is a PhD working on large-scale machine learning, he is the creator of the project.
* [Tong He](https://github.com/hetong007), Simon Fraser University
  - Tong is a master student working on data mining, he is the maintainer of xgboost R package.
  - Tianqi is a Ph.D. student working on large-scale machine learning. He is the creator of the project.
* [Tong He](https://github.com/hetong007), Amazon AI
  - Tong is an applied scientist in Amazon AI. He is the maintainer of XGBoost R package.
* [Vadim Khotilovich](https://github.com/khotilov)
  - Vadim contributes many improvements in R and core packages.
* [Bing Xu](https://github.com/antinucleon)
  - Bing is the original creator of xgboost python package and currently the maintainer of [XGBoost.jl](https://github.com/antinucleon/XGBoost.jl).
  - Bing is the original creator of XGBoost Python package and currently the maintainer of [XGBoost.jl](https://github.com/antinucleon/XGBoost.jl).
* [Michael Benesty](https://github.com/pommedeterresautee)
  - Micheal is a lawyer, data scientist in France, he is the creator of xgboost interactive analysis module in R.
* [Yuan Tang](https://github.com/terrytangyuan)
  - Yuan is a data scientist in Chicago, US. He contributed mostly in R and Python packages.
* [Nan Zhu](https://github.com/CodingCat)
  - Nan is a software engineer in Microsoft. He contributed mostly in JVM packages.
* [Sergei Lebedev](https://github.com/superbobry)
  - Serget is a software engineer in Criteo. He contributed mostly in JVM packages.
  - Michael is a lawyer and data scientist in France. He is the creator of XGBoost interactive analysis module in R.
* [Yuan Tang](https://github.com/terrytangyuan), Ant Financial
  - Yuan is a software engineer in Ant Financial. He contributed mostly in R and Python packages.
* [Nan Zhu](https://github.com/CodingCat), Uber
  - Nan is a software engineer in Uber. He contributed mostly in JVM packages.
* [Sergei Lebedev](https://github.com/superbobry), Criteo
  - Sergei is a software engineer in Criteo. He contributed mostly in JVM packages.
* [Hongliang Liu](https://github.com/phunterlau)
* [Scott Lundberg](http://scottlundberg.com/), University of Washington
  - Scott is a Ph.D. student at University of Washington. He is the creator of SHAP, a unified approach to explain the output of machine learning models such as decision tree ensembles. He also helps maintain the XGBoost Julia package.
* [Rory Mitchell](https://github.com/RAMitchell), University of Waikato
  - Rory is a Ph.D. student at University of Waikato. He is the original creator of the GPU training algorithms. He improved the CMake build system and continuous integration.
* [Hyunsu Cho](http://hyunsu-cho.io/), Amazon AI
  - Hyunsu is an applied scientist in Amazon AI. He is the maintainer of the XGBoost Python package. He also manages the Jenkins continuous integration system (https://xgboost-ci.net/). He is the initial author of the CPU 'hist' updater.
* [Jiaming](https://github.com/trivialfis)
  - Jiaming contributed to the GPU algorithms. He has also introduced new abstractions to improve the quality of the C++ codebase.

Become a Committer
------------------
@@ -36,27 +45,25 @@ List of Contributors
* [Full List of Contributors](https://github.com/dmlc/xgboost/graphs/contributors)
  - To contributors: please add your name to the list when you submit a patch to the project:)
* [Kailong Chen](https://github.com/kalenhaha)
  - Kailong is an early contributor of xgboost, he is creator of ranking objectives in xgboost.
  - Kailong is an early contributor of XGBoost, he is creator of ranking objectives in XGBoost.
* [Skipper Seabold](https://github.com/jseabold)
  - Skipper is the major contributor to the scikit-learn module of xgboost.
  - Skipper is the major contributor to the scikit-learn module of XGBoost.
* [Zygmunt Zając](https://github.com/zygmuntz)
  - Zygmunt is the master behind the early stopping feature frequently used by kagglers.
* [Ajinkya Kale](https://github.com/ajkl)
* [Boliang Chen](https://github.com/cblsjtu)
* [Yangqing Men](https://github.com/yanqingmen)
  - Yangqing is the creator of xgboost java package.
  - Yangqing is the creator of XGBoost java package.
* [Engpeng Yao](https://github.com/yepyao)
* [Giulio](https://github.com/giuliohome)
  - Giulio is the creator of windows project of xgboost
  - Giulio is the creator of Windows project of XGBoost
* [Jamie Hall](https://github.com/nerdcha)
  - Jamie is the initial creator of xgboost sklearn module.
  - Jamie is the initial creator of XGBoost scikit-learn module.
* [Yen-Ying Lee](https://github.com/white1033)
* [Masaaki Horikoshi](https://github.com/sinhrks)
  - Masaaki is the initial creator of xgboost python plotting module.
* [Hongliang Liu](https://github.com/phunterlau)
  - Hongliang is the maintainer of xgboost python PyPI package for pip installation.
  - Masaaki is the initial creator of XGBoost Python plotting module.
* [daiyl0320](https://github.com/daiyl0320)
  - daiyl0320 contributed patch to xgboost distributed version more robust, and scales stably on TB scale datasets.
  - daiyl0320 contributed patch to XGBoost distributed version more robust, and scales stably on TB scale datasets.
* [Huayi Zhang](https://github.com/irachex)
* [Johan Manders](https://github.com/johanmanders)
* [yoori](https://github.com/yoori)
@@ -67,8 +74,17 @@ List of Contributors
* [Alex Bain](https://github.com/convexquad)
* [Baltazar Bieniek](https://github.com/bbieniek)
* [Adam Pocock](https://github.com/Craigacp)
* [Rory Mitchell](https://github.com/RAMitchell)
  - Rory is the author of the GPU plugin and also contributed the cmake build system and windows continuous integration
* [Gideon Whitehead](https://github.com/gaw89)
* [Yi-Lin Juang](https://github.com/frankyjuang)
* [Andrew Hannigan](https://github.com/andrewhannigan)
* [Andy Adinets](https://github.com/canonizer)
* [Henry Gouk](https://github.com/henrygouk)
* [Pierre de Sahb](https://github.com/pdesahb)
* [liuliang01](https://github.com/liuliang01)
  - liuliang01 added support for the qid column for LibSVM input format. This makes ranking task easier in distributed setting.
* [Andrew Thia](https://github.com/BlueTea88)
  - Andrew Thia implemented feature interaction constraints
* [Wei Tian](https://github.com/weitian)
* [Chen Qin](https://github.com/chenqin)
* [Sam Wilkinson](https://samwilkinson.io)
* [Matthew Jones](https://github.com/mt-jones)
@@ -1,44 +0,0 @@
For bugs or installation issues, please provide the following information.
The more information you provide, the more easily we will be able to offer
help and advice.

## Environment info
Operating System:

Compiler:

Package used (python/R/jvm/C++):

`xgboost` version used:

If installing from source, please provide

1. The commit hash (`git rev-parse HEAD`)
2. Logs will be helpful (If logs are large, please upload as attachment).

If you are using jvm package, please

1. add [jvm-packages] in the title to make it quickly be identified
2. the gcc version and distribution

If you are using python package, please provide

1. The python version and distribution
2. The command to install `xgboost` if you are not installing from source

If you are using R package, please provide

1. The R `sessionInfo()`
2. The command to install `xgboost` if you are not installing from source

## Steps to reproduce

1.
2.
3.

## What have you tried?

1.
2.
3.
166  Jenkinsfile  (vendored)
@@ -3,13 +3,21 @@
// Jenkins pipeline
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/

import groovy.transform.Field

/* Unrestricted tasks: tasks that do NOT generate artifacts */

// Command to run command inside a docker container
dockerRun = 'tests/ci_build/ci_build.sh'
def dockerRun = 'tests/ci_build/ci_build.sh'
// Utility functions
@Field
def utils

def buildMatrix = [
    [ "enabled": true, "os" : "linux", "withGpu": true, "withOmp": true, "pythonVersion": "2.7" ],
    [ "enabled": true, "os" : "linux", "withGpu": false, "withOmp": true, "pythonVersion": "2.7" ],
    [ "enabled": false, "os" : "osx", "withGpu": false, "withOmp": false, "pythonVersion": "2.7" ],
    [ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "9.2", "multiGpu": true],
    [ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "9.2" ],
    [ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
    [ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": false, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
]

pipeline {
@@ -26,126 +34,94 @@ pipeline {

// Build stages
stages {
stage('Get sources') {
agent any
stage('Jenkins: Get sources') {
agent {
label 'unrestricted'
}
steps {
checkoutSrcs()
script {
utils = load('tests/ci_build/jenkins_tools.Groovy')
utils.checkoutSrcs()
}
stash name: 'srcs', excludes: '.git/'
milestone label: 'Sources ready', ordinal: 1
}
}
stage('Build & Test') {
stage('Jenkins: Build & Test') {
steps {
script {
parallel (buildMatrix.findAll{it['enabled']}.collectEntries{ c ->
def buildName = getBuildName(c)
buildFactory(buildName, c)
})
def buildName = utils.getBuildName(c)
utils.buildFactory(buildName, c, false, this.&buildPlatformCmake)
} + [ "clang-tidy" : { buildClangTidyJob() } ])
}
}
}
}
}

// initialize source codes
def checkoutSrcs() {
retry(5) {
try {
timeout(time: 2, unit: 'MINUTES') {
checkout scm
sh 'git submodule update --init'
}
} catch (exc) {
deleteDir()
error "Failed to fetch source codes"
}
}
}

/**
 * Creates cmake and make builds
 */
def buildFactory(buildName, conf) {
def os = conf["os"]
def nodeReq = conf["withGpu"] ? "${os} && gpu" : "${os}"
def dockerTarget = conf["withGpu"] ? "gpu" : "cpu"
[ ("cmake_${buildName}") : { buildPlatformCmake("cmake_${buildName}", conf, nodeReq, dockerTarget) },
("make_${buildName}") : { buildPlatformMake("make_${buildName}", conf, nodeReq, dockerTarget) }
]
}

/**
 * Build platform and test it via cmake.
 */
def buildPlatformCmake(buildName, conf, nodeReq, dockerTarget) {
def opts = cmakeOptions(conf)
def opts = utils.cmakeOptions(conf)
// Destination dir for artifacts
def distDir = "dist/${buildName}"
def dockerArgs = ""
if (conf["withGpu"]) {
dockerArgs = "--build-arg CUDA_VERSION=" + conf["cudaVersion"]
}
def test_suite = conf["withGpu"] ? (conf["multiGpu"] ? "mgpu" : "gpu") : "cpu"
// Build node - this is returned result
node(nodeReq) {
unstash name: 'srcs'
echo """
|===== XGBoost CMake build =====
| dockerTarget: ${dockerTarget}
| cmakeOpts : ${opts}
|=========================
""".stripMargin('|')
// Invoke command inside docker
sh """
${dockerRun} ${dockerTarget} tests/ci_build/build_via_cmake.sh ${opts}
${dockerRun} ${dockerTarget} tests/ci_build/test_${dockerTarget}.sh
${dockerRun} ${dockerTarget} bash -c "cd python-package; python setup.py bdist_wheel"
rm -rf "${distDir}"; mkdir -p "${distDir}/py"
cp xgboost "${distDir}"
cp -r lib "${distDir}"
cp -r python-package/dist "${distDir}/py"
"""
archiveArtifacts artifacts: "${distDir}/**/*.*", allowEmptyArchive: true
retry(1) {
node(nodeReq) {
unstash name: 'srcs'
echo """
|===== XGBoost CMake build =====
| dockerTarget: ${dockerTarget}
| cmakeOpts : ${opts}
|=========================
""".stripMargin('|')
// Invoke command inside docker
sh """
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/build_via_cmake.sh ${opts}
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/test_${test_suite}.sh
"""
if (!conf["multiGpu"]) {
sh """
${dockerRun} ${dockerTarget} ${dockerArgs} bash -c "cd python-package; rm -f dist/*; python setup.py bdist_wheel --universal"
rm -rf "${distDir}"; mkdir -p "${distDir}/py"
cp xgboost "${distDir}"
cp -r python-package/dist "${distDir}/py"
# Test the wheel for compatibility on a barebones CPU container
${dockerRun} release ${dockerArgs} bash -c " \
pip install --user python-package/dist/xgboost-*-none-any.whl && \
pytest -v --fulltrace -s tests/python"
# Test the wheel for compatibility on CUDA 10.0 container
${dockerRun} gpu --build-arg CUDA_VERSION=10.0 bash -c " \
pip install --user python-package/dist/xgboost-*-none-any.whl && \
pytest -v -s --fulltrace -m '(not mgpu) and (not slow)' tests/python-gpu"
"""
}
}
}
}

/**
 * Build platform via make
 * Run a clang-tidy job on a GPU machine
 */
def buildPlatformMake(buildName, conf, nodeReq, dockerTarget) {
def opts = makeOptions(conf)
// Destination dir for artifacts
def distDir = "dist/${buildName}"
// Build node
def buildClangTidyJob() {
def nodeReq = "linux && gpu && unrestricted"
node(nodeReq) {
unstash name: 'srcs'
echo """
|===== XGBoost Make build =====
| dockerTarget: ${dockerTarget}
| makeOpts : ${opts}
|=========================
""".stripMargin('|')
echo "Running clang-tidy job..."
// Invoke command inside docker
// Install Google Test and Python yaml
dockerTarget = "clang_tidy"
dockerArgs = "--build-arg CUDA_VERSION=9.2"
sh """
${dockerRun} ${dockerTarget} tests/ci_build/build_via_make.sh ${opts}
${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/clang_tidy.sh
"""
}
}

def makeOptions(conf) {
return ([
conf["withGpu"] ? 'PLUGIN_UPDATER_GPU=ON' : 'PLUGIN_UPDATER_GPU=OFF',
conf["withOmp"] ? 'USE_OPENMP=1' : 'USE_OPENMP=0']
).join(" ")
}

def cmakeOptions(conf) {
return ([
conf["withGpu"] ? '-DPLUGIN_UPDATER_GPU:BOOL=ON' : '',
conf["withOmp"] ? '-DOPEN_MP:BOOL=ON' : '']
).join(" ")
}

def getBuildName(conf) {
def gpuLabel = conf['withGpu'] ? "_gpu" : "_cpu"
def ompLabel = conf['withOmp'] ? "_omp" : ""
def pyLabel = "_py${conf['pythonVersion']}"
return "${conf['os']}${gpuLabel}${ompLabel}${pyLabel}"
}
}
}
123  Jenkinsfile-restricted  (Normal file)
@@ -0,0 +1,123 @@
#!/usr/bin/groovy
// -*- mode: groovy -*-
// Jenkins pipeline
// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/

import groovy.transform.Field

/* Restricted tasks: tasks generating artifacts, such as binary wheels and
   documentation */

// Command to run command inside a docker container
def dockerRun = 'tests/ci_build/ci_build.sh'
// Utility functions
@Field
def utils
@Field
def commit_id
@Field
def branch_name

def buildMatrix = [
    [ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "9.2" ],
    [ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": true, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
    [ "enabled": true, "os" : "linux", "withGpu": true, "withNccl": false, "withOmp": true, "pythonVersion": "2.7", "cudaVersion": "8.0" ],
]

pipeline {
  // Each stage specify its own agent
  agent none

  // Setup common job properties
  options {
    ansiColor('xterm')
    timestamps()
    timeout(time: 120, unit: 'MINUTES')
    buildDiscarder(logRotator(numToKeepStr: '10'))
  }

  // Build stages
  stages {
    stage('Jenkins: Get sources') {
      agent {
        label 'restricted'
      }
      steps {
        script {
          utils = load('tests/ci_build/jenkins_tools.Groovy')
          utils.checkoutSrcs()
          commit_id = "${GIT_COMMIT}"
          branch_name = "${GIT_LOCAL_BRANCH}"
        }
        stash name: 'srcs', excludes: '.git/'
        milestone label: 'Sources ready', ordinal: 1
      }
    }
    stage('Jenkins: Build doc') {
      steps {
        script {
          retry(1) {
            node('linux && cpu && restricted') {
              unstash name: 'srcs'
              echo 'Building doc...'
              dir ('jvm-packages') {
                sh "bash ./build_doc.sh ${commit_id}"
                archiveArtifacts artifacts: "${commit_id}.tar.bz2", allowEmptyArchive: true
                echo 'Deploying doc...'
                withAWS(credentials:'xgboost-doc-bucket') {
                  s3Upload file: "${commit_id}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "${branch_name}.tar.bz2"
                }
              }
            }
          }
        }
      }
    }

    stage('Jenkins: Build artifacts') {
      steps {
        script {
          parallel (buildMatrix.findAll{it['enabled']}.collectEntries{ c ->
            def buildName = utils.getBuildName(c)
            utils.buildFactory(buildName, c, true, this.&buildPlatformCmake)
          })
        }
      }
    }
  }
}

/**
 * Build platform and test it via cmake.
 */
def buildPlatformCmake(buildName, conf, nodeReq, dockerTarget) {
  def opts = utils.cmakeOptions(conf)
  // Destination dir for artifacts
  def distDir = "dist/${buildName}"
  def dockerArgs = ""
  if(conf["withGpu"]){
    dockerArgs = "--build-arg CUDA_VERSION=" + conf["cudaVersion"]
  }
  // Build node - this is returned result
  retry(1) {
    node(nodeReq) {
      unstash name: 'srcs'
      echo """
      |===== XGBoost CMake build =====
      | dockerTarget: ${dockerTarget}
      | cmakeOpts : ${opts}
      |=========================
      """.stripMargin('|')
      // Invoke command inside docker
      sh """
      ${dockerRun} ${dockerTarget} ${dockerArgs} tests/ci_build/build_via_cmake.sh ${opts}
      ${dockerRun} ${dockerTarget} ${dockerArgs} bash -c "cd python-package; rm -f dist/*; python setup.py bdist_wheel --universal"
      rm -rf "${distDir}"; mkdir -p "${distDir}/py"
      cp xgboost "${distDir}"
      cp -r lib "${distDir}"
      cp -r python-package/dist "${distDir}/py"
      """
      archiveArtifacts artifacts: "${distDir}/**/*.*", allowEmptyArchive: true
    }
  }
}
208  LICENSE
@@ -1,13 +1,201 @@
Copyright (c) 2016 by Contributors
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

http://www.apache.org/licenses/LICENSE-2.0
1. Definitions.

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright (c) 2018 by Contributors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
29
Makefile
@@ -68,7 +68,7 @@ endif
endif

export LDFLAGS= -pthread -lm $(ADD_LDFLAGS) $(DMLC_LDFLAGS) $(PLUGIN_LDFLAGS)
export CFLAGS= -std=c++11 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS) $(PLUGIN_CFLAGS)
export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++11 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS) $(PLUGIN_CFLAGS)
CFLAGS += -I$(DMLC_CORE)/include -I$(RABIT)/include -I$(GTEST_PATH)/include
#java include path
export JAVAINCFLAGS = -I${JAVA_HOME}/include -I./java
@@ -198,7 +198,11 @@ endif
clean:
	$(RM) -rf build build_plugin lib bin *~ */*~ */*/*~ */*/*/*~ */*.o */*/*.o */*/*/*.o #xgboost
	$(RM) -rf build_tests *.gcov tests/cpp/xgboost_test
	cd R-package/src; $(RM) -rf rabit src include dmlc-core amalgamation *.so *.dll; cd $(ROOTDIR)
	if [ -d "R-package/src" ]; then \
		cd R-package/src; \
		$(RM) -rf rabit src include dmlc-core amalgamation *.so *.dll; \
		cd $(ROOTDIR); \
	fi

clean_all: clean
	cd $(DMLC_CORE); "$(MAKE)" clean; cd $(ROOTDIR)
@@ -212,16 +216,28 @@ pypack: ${XGBOOST_DYLIB}
	cp ${XGBOOST_DYLIB} python-package/xgboost
	cd python-package; tar cf xgboost.tar xgboost; cd ..

# create pip installation pack for PyPI
# create pip source dist (sdist) pack for PyPI
pippack: clean_all
	rm -rf xgboost-python
# remove symlinked directories in python-package/xgboost
	rm -rf python-package/xgboost/lib
	rm -rf python-package/xgboost/dmlc-core
	rm -rf python-package/xgboost/include
	rm -rf python-package/xgboost/make
	rm -rf python-package/xgboost/rabit
	rm -rf python-package/xgboost/src
	cp -r python-package xgboost-python
	cp -r Makefile xgboost-python/xgboost/
	cp -r make xgboost-python/xgboost/
	cp -r src xgboost-python/xgboost/
	cp -r tests xgboost-python/xgboost/
	cp -r include xgboost-python/xgboost/
	cp -r dmlc-core xgboost-python/xgboost/
	cp -r rabit xgboost-python/xgboost/
# Use setup_pip.py instead of setup.py
	mv xgboost-python/setup_pip.py xgboost-python/setup.py
# Build sdist tarball
	cd xgboost-python; python setup.py sdist; mv dist/*.tar.gz ..; cd ..

# Script to make a clean installable R package.
Rpack: clean_all
@@ -244,14 +260,17 @@ Rpack: clean_all
	cp ./LICENSE xgboost
	cat R-package/src/Makevars.in|sed '2s/.*/PKGROOT=./' | sed '3s/.*/ENABLE_STD_THREAD=0/' > xgboost/src/Makevars.in
	cp xgboost/src/Makevars.in xgboost/src/Makevars.win
	sed -i -e 's/@OPENMP_CXXFLAGS@/$$\(SHLIB_OPENMP_CFLAGS\)/g' xgboost/src/Makevars.win
	sed -i -e 's/@OPENMP_CXXFLAGS@/$$\(SHLIB_OPENMP_CXXFLAGS\)/g' xgboost/src/Makevars.win
	sed -i -e 's/-pthread/$$\(SHLIB_PTHREAD_FLAGS\)/g' xgboost/src/Makevars.win
	bash R-package/remove_warning_suppression_pragma.sh
	rm xgboost/remove_warning_suppression_pragma.sh

Rbuild: Rpack
	R CMD build --no-build-vignettes xgboost
	rm -rf xgboost

Rcheck: Rbuild
	R CMD check xgboost*.tar.gz
	R CMD check xgboost*.tar.gz

-include build/*.d
-include build/*/*.d

440
NEWS.md
@@ -3,6 +3,442 @@ XGBoost Change Log

This file records the changes in xgboost library in reverse chronological order.

## v0.82 (2019.03.03)
This release is packed with many new features and bug fixes.

### Roadmap: better performance scaling for multi-core CPUs (#3957)
* Poor performance scaling of the `hist` algorithm for multi-core CPUs has been under investigation (#3810). #3957 marks an important step toward better performance scaling, by using software pre-fetching and replacing STL vectors with C-style arrays. Special thanks to @Laurae2 and @SmirnovEgorRu.
* See #3810 for the latest progress on this roadmap.

### New feature: Distributed Fast Histogram Algorithm (`hist`) (#4011, #4102, #4140, #4128)
* It is now possible to run the `hist` algorithm in a distributed setting. Special thanks to @CodingCat. The benefits include:
  1. Faster local computation via feature binning
  2. Support for monotonic constraints and feature interaction constraints
  3. Simpler codebase than `approx`, allowing for future improvement
* Depth-wise tree growing is now performed in a separate code path, so that cross-node synchronization is performed only once per level.

### New feature: Multi-Node, Multi-GPU training (#4095)
* Distributed training is now able to utilize clusters equipped with NVIDIA GPUs. In particular, the rabit AllReduce layer will communicate GPU device information. Special thanks to @mt-jones, @RAMitchell, @rongou, @trivialfis, @canonizer, and @jeffdk.
* Resource management systems will be able to assign a rank for each GPU in the cluster.
* In Dask, users will be able to construct a collection of XGBoost processes over an inhomogeneous device cluster (i.e. workers with different numbers and/or kinds of GPUs).

### New feature: Multiple validation datasets in XGBoost4J-Spark (#3904, #3910)
* You can now track the performance of the model during training with multiple evaluation datasets. By specifying `eval_sets` or calling `setEvalSets` on an `XGBoostClassifier` or `XGBoostRegressor`, you can pass in multiple evaluation datasets typed as a `Map` from `String` to `DataFrame`. Special thanks to @CodingCat.
* See the usage of multiple validation datasets [here](https://github.com/dmlc/xgboost/blob/0c1d5f1120c0a159f2567b267f0ec4ffadee00d0/jvm-packages/xgboost4j-example/src/main/scala/ml/dmlc/xgboost4j/scala/example/spark/SparkTraining.scala#L66-L78)

### New feature: Additional metric functions for GPUs (#3952)
* Element-wise metrics have been ported to GPU: `rmse`, `mae`, `logloss`, `poisson-nloglik`, `gamma-deviance`, `gamma-nloglik`, `error`, `tweedie-nloglik`. Special thanks to @trivialfis and @RAMitchell.
* With supported metrics, XGBoost will select the correct devices based on your system and the `n_gpus` parameter.

### New feature: Column sampling at individual nodes (splits) (#3971)
* Columns (features) can now be sampled at individual tree nodes, in addition to per-tree and per-level sampling. To enable per-node sampling, set the `colsample_bynode` parameter, which represents the fraction of columns sampled at each node. This parameter is set to 1.0 by default (i.e. no sampling per node). Special thanks to @canonizer.
* The `colsample_bynode` parameter works cumulatively with other `colsample_by*` parameters: for example, `{'colsample_bynode':0.5, 'colsample_bytree':0.5}` with 100 columns will give 25 features to choose from at each split. A short R sketch follows below.
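
As a rough illustration of this cumulative behaviour, here is a minimal R sketch; only the `colsample_by*` parameter names come from the release itself, while the toy data and values are invented for the example:

```r
library(xgboost)

# Toy data: 100 columns, so colsample_bytree = 0.5 keeps ~50 columns per tree,
# and colsample_bynode = 0.5 then samples ~25 of those at every split.
set.seed(42)
x <- matrix(rnorm(500 * 100), ncol = 100)
y <- rbinom(500, 1, 0.5)
dtrain <- xgb.DMatrix(x, label = y)

params <- list(objective = "binary:logistic",
               colsample_bytree = 0.5,
               colsample_bynode = 0.5)
bst <- xgb.train(params, dtrain, nrounds = 10)
```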

### Major API change: consistent logging level via `verbosity` (#3982, #4002, #4138)
* XGBoost now allows fine-grained control over logging. You can set `verbosity` to 0 (silent), 1 (warning), 2 (info), and 3 (debug). This is useful for controlling the amount of logging output. Special thanks to @trivialfis.
* Parameters `silent` and `debug_verbose` are now deprecated.
* Note: XGBoost sometimes tries to change configurations based on heuristics, which is displayed as a warning message. If you see unexpected behaviour, try increasing the value of `verbosity`. A minimal R example follows below.
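
For instance, in the R package the new parameter can be passed like any other training parameter (a minimal sketch using the `agaricus.train` demo data bundled with the package):

```r
library(xgboost)

data(agaricus.train, package = "xgboost")
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)

# verbosity: 0 (silent), 1 (warning), 2 (info), 3 (debug);
# it replaces the deprecated `silent` and `debug_verbose` parameters.
params <- list(objective = "binary:logistic", verbosity = 2)
bst <- xgb.train(params, dtrain, nrounds = 5)
```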

### Major bug fix: external memory (#4040, #4193)
* Clarify object ownership in the multi-threaded prefetcher, to avoid memory errors.
* Correctly merge two column batches (which use the [CSC layout](https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_column_(CSC_or_CCS))).
* Add unit tests for external memory.
* Special thanks to @trivialfis and @hcho3.

### Major bug fix: early stopping fixed in XGBoost4J and XGBoost4J-Spark (#3928, #4176)
* Early stopping in XGBoost4J and XGBoost4J-Spark is now consistent with its counterpart in the Python package. Training stops if the current iteration is `earlyStoppingSteps` away from the best iteration. If there are multiple evaluation sets, only the last one is used to determine early stopping.
* See the updated documentation [here](https://xgboost.readthedocs.io/en/release_0.82/jvm/xgboost4j_spark_tutorial.html#early-stopping)
* Special thanks to @CodingCat, @yanboliang, and @mingyang.

### Major bug fix: infrequent features should not crash distributed training (#4045)
* For infrequently occurring features, some partitions may not get any instance. This scenario used to crash distributed training due to malformed ranges. The problem has now been fixed.
* In practice, one-hot-encoded categorical variables tend to produce rare features, particularly when the cardinality is high.
* Special thanks to @CodingCat.

### Performance improvements
* Faster, more space-efficient radix sorting in `gpu_hist` (#3895)
* Subtraction trick in histogram calculation in `gpu_hist` (#3945)
* More performant re-partition in XGBoost4J-Spark (#4049)

### Bug-fixes
* Fix semantics of `gpu_id` when running multiple XGBoost processes on a multi-GPU machine (#3851)
* Fix page storage path for external memory on Windows (#3869)
* Fix configuration setup so that DART utilizes GPU (#4024)
* Eliminate NAN values from SHAP prediction (#3943)
* Prevent empty quantile sketches in `hist` (#4155)
* Enable running objectives with 0 GPU (#3878)
* Parameters are no longer dependent on system locale (#3891, #3907)
* Use consistent data type in the GPU coordinate descent code (#3917)
* Remove undefined behavior in the CLI config parser on the ARM platform (#3976)
* Initialize counters in GPU AllReduce (#3987)
* Prevent deadlocks in GPU AllReduce (#4113)
* Load correct values from sliced NumPy arrays (#4147, #4165)
* Fix incorrect GPU device selection (#4161)
* Make feature binning logic in `hist` aware of query groups when running a ranking task (#4115). For ranking tasks, query groups are weighted, not individual instances.
* Generate correct C++ exception type for `LOG(FATAL)` macro (#4159)
* Python package
  - Python package should run on systems without the `PATH` environment variable (#3845)
  - Fix `coef_` and `intercept_` signature to be compatible with `sklearn.RFECV` (#3873)
  - Use UTF-8 encoding in Python package README, to support non-English locales (#3867)
  - Add AUC-PR to list of metrics to maximize for early stopping (#3936)
  - Allow loading pickles without `self.booster` attribute, for backward compatibility (#3938, #3944)
  - White-list DART for feature importances (#4073)
  - Update usage of [h2oai/datatable](https://github.com/h2oai/datatable) (#4123)
* XGBoost4J-Spark
  - Address scalability issue in prediction (#4033)
  - Enforce the use of per-group weights for ranking tasks (#4118)
  - Fix vector size of `rawPredictionCol` in `XGBoostClassificationModel` (#3932)
  - More robust error handling in the Spark tracker (#4046, #4108)
  - Fix return type of `setEvalSets` (#4105)
  - Return correct value of `getMaxLeaves` (#4114)

### API changes
* Add experimental parameter `single_precision_histogram` to use single-precision histograms for the `gpu_hist` algorithm (#3965)
* Python package
  - Add option to select type of feature importances in the scikit-learn interface (#3876)
  - Add `trees_to_df()` method to dump decision trees as Pandas data frame (#4153)
  - Add options to control node shapes in the GraphViz plotting function (#3859)
  - Add `xgb_model` option to `XGBClassifier`, to load a previously saved model (#4092)
  - Passing lists into `DMatrix` is now deprecated (#3970)
* XGBoost4J
  - Support multiple feature importance features (#3801)

### Maintenance: Refactor C++ code for legibility and maintainability
* Refactor `hist` algorithm code and add unit tests (#3836)
* Minor refactoring of split evaluator in `gpu_hist` (#3889)
* Remove unused leaf vector field in the tree model (#3989)
* Simplify the tree representation by combining `TreeModel` and `RegTree` classes (#3995)
* Simplify and harden tree expansion code (#4008, #4015)
* De-duplicate parameter classes in the linear model algorithms (#4013)
* Robust handling of ranges with C++20 span in `gpu_exact` and `gpu_coord_descent` (#4020, #4029)
* Simplify tree training code (#3825). Also use the Span class for robust handling of ranges.

### Maintenance: testing, continuous integration, build system
* Disallow `std::regex` since it's not supported by GCC 4.8.x (#3870)
* Add multi-GPU tests for the coordinate descent algorithm for linear models (#3893, #3974)
* Enforce naming style in Python lint (#3896)
* Refactor Python tests (#3897, #3901): use pytest exclusively, display full trace upon failure
* Address `DeprecationWarning` when using Python collections (#3909)
* Use correct group for the Maven site plugin (#3937)
* Jenkins CI is now using on-demand EC2 instances exclusively, due to the unreliability of Spot instances (#3948)
* Better GPU performance logging (#3945)
* Fix GPU tests on machines with only 1 GPU (#4053)
* Eliminate CRAN check warnings and notes (#3988)
* Add unit tests for tree serialization (#3989)
* Add unit tests for tree fitting functions in `hist` (#4155)
* Add a unit test for the `gpu_exact` algorithm (#4020)
* Correct JVM CMake GPU flag (#4071)
* Fix failing Travis CI on Mac (#4086)
* Speed up Jenkins by not compiling CMake (#4099)
* Analyze C++ and CUDA code using clang-tidy, as part of the Jenkins CI pipeline (#4034)
* Fix broken R test: install Homebrew GCC (#4142)
* Check for empty datasets in GPU unit tests (#4151)
* Fix Windows compilation (#4139)
* Comply with the latest convention of cpplint (#4157)
* Fix a unit test in `gpu_hist` (#4158)
* Speed up data generation in Python tests (#4164)

### Usability Improvements
* Add link to [InfoWorld 2019 Technology of the Year Award](https://www.infoworld.com/article/3336072/application-development/infoworlds-2019-technology-of-the-year-award-winners.html) (#4116)
* Remove outdated AWS YARN tutorial (#3885)
* Document current limitation in number of features (#3886)
* Remove unnecessary warning when `gblinear` is selected (#3888)
* Document limitation of CSV parser: header not supported (#3934)
* Log training parameters in XGBoost4J-Spark (#4091)
* Clarify early stopping behavior in the scikit-learn interface (#3967)
* Clarify behavior of the `max_depth` parameter (#4078)
* Revise Python docstrings for the ranking task (#4121). In particular, weights must be per-group in the learning-to-rank setting.
* Document parameter `num_parallel_tree` (#4022)
* Add Jenkins status badge (#4090)
* Warn users against using internal functions of the `Booster` object (#4066)
* Reformat `benchmark_tree.py` to comply with Python style convention (#4126)
* Clarify a comment in `objectiveTrait` (#4174)
* Fix typos and broken links in documentation (#3890, #3872, #3902, #3919, #3975, #4027, #4156, #4167)

### Acknowledgement
**Contributors** (in no particular order): Jiaming Yuan (@trivialfis), Hyunsu Cho (@hcho3), Nan Zhu (@CodingCat), Rory Mitchell (@RAMitchell), Yanbo Liang (@yanboliang), Andy Adinets (@canonizer), Tong He (@hetong007), Yuan Tang (@terrytangyuan)

**First-time Contributors** (in no particular order): Jelle Zijlstra (@JelleZijlstra), Jiacheng Xu (@jiachengxu), @ajing, Kashif Rasul (@kashif), @theycallhimavi, Joey Gao (@pjgao), Prabakaran Kumaresshan (@nixphix), Huafeng Wang (@huafengw), @lyxthe, Sam Wilkinson (@scwilkinson), Tatsuhito Kato (@stabacov), Shayak Banerjee (@shayakbanerjee), Kodi Arfer (@Kodiologist), @KyleLi1985, Egor Smirnov (@SmirnovEgorRu), @tmitanitky, Pasha Stetsenko (@st-pasha), Kenichi Nagahara (@keni-chi), Abhai Kollara Dilip (@abhaikollara), Patrick Ford (@pford221), @hshujuan, Matthew Jones (@mt-jones), Thejaswi Rao (@teju85), Adam November (@anovember)

**First-time Reviewers** (in no particular order): Mingyang Hu (@mingyang), Theodore Vasiloudis (@thvasilo), Jakub Troszok (@troszok), Rong Ou (@rongou), @Denisevi4, Matthew Jones (@mt-jones), Jeff Kaplan (@jeffdk)

## v0.81 (2018.11.04)
### New feature: feature interaction constraints
* Users are now able to control which features (independent variables) are allowed to interact by specifying feature interaction constraints (#3466).
* A [tutorial](https://xgboost.readthedocs.io/en/release_0.81/tutorials/feature_interaction_constraint.html) is available, as well as [R](https://github.com/dmlc/xgboost/blob/9254c58e4dfff6a59dc0829a2ceb02e45ed17cd0/R-package/demo/interaction_constraints.R) and [Python](https://github.com/dmlc/xgboost/blob/9254c58e4dfff6a59dc0829a2ceb02e45ed17cd0/tests/python/test_interaction_constraints.py) examples. A minimal R sketch is also shown below.
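
A minimal R sketch of the new parameter. The data and constraint groups are invented for illustration; `interaction_constraints` takes zero-based column indices, and the list form relies on the R-side parameter parser shown further down in this diff (a JSON-style string such as `'[[0,1],[2,3]]'` works as well):

```r
library(xgboost)

set.seed(1)
x <- matrix(rnorm(200 * 4), ncol = 4)
y <- rbinom(200, 1, 0.5)
dtrain <- xgb.DMatrix(x, label = y)

# Columns 0 and 1 may interact with each other, and so may columns 2 and 3,
# but no branch of a tree may mix features across the two groups.
params <- list(objective = "binary:logistic",
               tree_method = "hist",
               interaction_constraints = list(c(0, 1), c(2, 3)))
bst <- xgb.train(params, dtrain, nrounds = 10)
```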

### New feature: learning to rank using scikit-learn interface
* The learning-to-rank task is now available for the scikit-learn interface of the Python package (#3560, #3848). It is now possible to integrate the XGBoost ranking model into the scikit-learn learning pipeline.
* An example of using the `XGBRanker` class is found at [demo/rank/rank_sklearn.py](https://github.com/dmlc/xgboost/blob/24a268a2e3cb17302db3d72da8f04016b7d352d9/demo/rank/rank_sklearn.py).

### New feature: R interface for SHAP interactions
* SHAP (SHapley Additive exPlanations) is a unified approach to explain the output of any machine learning model. Previously, this feature was only available from the Python package; now it is available from the R package as well (#3636). A minimal R sketch follows below.
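
A minimal R sketch of the new interface, assuming the `predinteraction` argument of `predict()` introduced by #3636 and using the bundled demo data:

```r
library(xgboost)

data(agaricus.train, package = "xgboost")
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
               nrounds = 5, objective = "binary:logistic")

# For n rows and m features this returns an n x (m+1) x (m+1) array of
# SHAP interaction values (the extra slot holds the bias terms).
shap_int <- predict(bst, agaricus.train$data, predinteraction = TRUE)
dim(shap_int)
```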

### New feature: GPU predictor now uses multiple GPUs to predict
* The GPU predictor is now able to utilize multiple GPUs at once to accelerate prediction (#3738)

### New feature: Scale distributed XGBoost to large-scale clusters
* Fix OS file descriptor limit assertion error on large clusters (#3835, dmlc/rabit#73) by replacing `select()` based AllReduce/Broadcast with a `poll()` based implementation.
* Mitigate tracker "thundering herd" issue on large clusters. Add exponential backoff retry when workers connect to the tracker.
* With this change, we were able to scale to 1.5k executors on a 12 billion row dataset after some tweaks here and there.

### New feature: Additional objective functions for GPUs
* New objective functions ported to GPU: `hinge`, `multi:softmax`, `multi:softprob`, `count:poisson`, `reg:gamma`, `reg:tweedie`.
* With supported objectives, XGBoost will select the correct devices based on your system and the `n_gpus` parameter.

### Major bug fix: learning to rank with XGBoost4J-Spark
* Previously, `repartitionForData` would shuffle data and lose the ordering necessary for the ranking task.
* To fix this issue, data points within each RDD partition are now explicitly grouped by their group (query session) IDs (#3654). Empty RDD partitions are also handled carefully (#3750).

### Major bug fix: early stopping fixed in XGBoost4J-Spark
* The earlier implementation of early stopping had incorrect semantics and didn't let users specify the direction of optimization (maximize / minimize).
* A parameter `maximize_evaluation_metrics` is now defined to tell whether a metric should be maximized or minimized as part of the early stopping criteria (#3808). Early stopping now has correct semantics.

### API changes
* Column sampling by level (`colsample_bylevel`) is now functional for the `hist` algorithm (#3635, #3862)
* GPU tag `gpu:` for regression objectives is now deprecated. XGBoost will select the correct devices automatically (#3643)
* Add `disable_default_eval_metric` parameter to disable the default metric (#3606)
* Experimental AVX support for gradient computation is removed (#3752)
* XGBoost4J-Spark
  - Add `rank:ndcg` and `rank:map` to supported objectives (#3697)
* Python package
  - Add `callbacks` argument to `fit()` function of scikit-learn API (#3682)
  - Add `XGBRanker` to scikit-learn interface (#3560, #3848)
  - Add `validate_features` argument to `predict()` function of scikit-learn API (#3653)
  - Allow scikit-learn grid search over parameters specified as keyword arguments (#3791)
  - Add `coef_` and `intercept_` as properties of scikit-learn wrapper (#3855). Some scikit-learn functions expect these properties.

### Performance improvements
* Address very high GPU memory usage for large data (#3635)
* Fix performance regression within `EvaluateSplits()` of the `gpu_hist` algorithm (#3680)

### Bug-fixes
* Fix a problem in GPU quantile sketch with tiny instance weights (#3628)
* Fix copy constructor for `HostDeviceVectorImpl` to prevent dangling pointers (#3657)
* Fix a bug in partitioned file loading (#3673)
* Fix an uninitialized pointer in `gpu_hist` (#3703)
* Re-share data among GPUs when the number of GPUs is changed (#3721)
* Add back `max_delta_step` to split evaluation (#3668)
* Do not round up integer thresholds for integer features in JSON dump (#3717)
* Use `dmlc::TemporaryDirectory` to handle temporaries in a cross-platform way (#3783)
* Fix accuracy problem with `gpu_hist` when `min_child_weight` and `lambda` are set to 0 (#3793)
* Make sure that the `tree_method` parameter is recognized and not silently ignored (#3849)
* XGBoost4J-Spark
  - Make sure `thresholds` are considered when executing the `predict()` method (#3577)
  - Avoid losing precision when computing probabilities by converting to `Double` early (#3576)
  - `getTreeLimit()` should return `Int` (#3602)
  - Fix checkpoint serialization on HDFS (#3614)
  - Throw `ControlThrowable` instead of `InterruptedException` so that it is properly re-thrown (#3632)
  - Remove extraneous output to stdout (#3665)
  - Allow specification of task type for custom objectives and evaluations (#3646)
  - Fix distributed updater check (#3739)
  - Fix issue when the Spark job execution thread cannot return before we execute `first()` (#3758)
* Python package
  - Fix accessing `DMatrix.handle` before it is set (#3599)
  - `XGBClassifier.predict()` should return margin scores when `output_margin` is set to true (#3651)
  - Early stopping callback should maximize metrics of the form `NDCG@n-` (#3685)
  - Preserve feature names when slicing `DMatrix` (#3766)
* R package
  - Replace `nround` with `nrounds` to match the actual parameter (#3592)
  - Amend `xgb.createFolds` to handle classes of a single element (#3630)
  - Fix buggy random generator and make `colsample_bytree` functional (#3781)

### Maintenance: testing, continuous integration, build system
* Add sanitizer tests to Travis CI (#3557)
* Add NumPy, Matplotlib, Graphviz as requirements for doc build (#3669)
* Comply with CRAN submission policy (#3660, #3728)
* Remove copy-paste error in JVM test suite (#3692)
* Disable flaky tests in `R-package/tests/testthat/test_update.R` (#3723)
* Make Python tests compatible with scikit-learn 0.20 release (#3731)
* Separate out restricted and unrestricted tasks, so that pull requests don't build downloadable artifacts (#3736)
* Add multi-GPU unit test environment (#3741)
* Allow plug-ins to be built by CMake (#3752)
* Test wheel compatibility on CPU containers for pull requests (#3762)
* Fix broken doc build due to Matplotlib 3.0 release (#3764)
* Produce `xgboost.so` for XGBoost-R on Mac OSX, so that `make install` works (#3767)
* Retry Jenkins CI tests up to 3 times to improve reliability (#3769, #3775, #3776, #3777)
* Add basic unit tests for the `gpu_hist` algorithm (#3785)
* Fix Python environment for distributed unit tests (#3806)
* Test wheels on CUDA 10.0 container for compatibility (#3838)
* Fix JVM doc build (#3853)

### Maintenance: Refactor C++ code for legibility and maintainability
* Merge generic device helper functions into `GPUSet` class (#3626)
* Re-factor column sampling logic into `ColumnSampler` class (#3635, #3637)
* Replace `std::vector` with `HostDeviceVector` in `MetaInfo` and `SparsePage` (#3446)
* Simplify `DMatrix` class (#3395)
* De-duplicate CPU/GPU code using `Transform` class (#3643, #3751)
* Remove obsoleted `QuantileHistMaker` class (#3761)
* Remove obsoleted `NoConstraint` class (#3792)

### Other Features
* C++20-compliant Span class for safe pointer indexing (#3548, #3588)
* Add helper functions to manipulate multiple GPU devices (#3693)
* XGBoost4J-Spark
  - Allow specifying the host IP from the `xgboost-tracker.properties` file (#3833). This comes in handy when the `hosts` file doesn't correctly define localhost.

### Usability Improvements
* Add reference to GitHub repository in `pom.xml` of JVM packages (#3589)
* Add R demo of multi-class classification (#3695)
* Document JSON dump functionality (#3600, #3603)
* Document CUDA requirement and lack of external memory for GPU algorithms (#3624)
* Document LambdaMART objectives, both pairwise and listwise (#3672)
* Document `aucpr` evaluation metric (#3687)
* Document gblinear parameters: `feature_selector` and `top_k` (#3780)
* Add instructions for using MinGW-built XGBoost with Python (#3774)
* Remove nonexistent parameter `use_buffer` from documentation (#3610)
* Update Python API doc to include all classes and members (#3619, #3682)
* Fix typos and broken links in documentation (#3618, #3640, #3676, #3713, #3759, #3784, #3843, #3852)
* Binary classification demo should produce LIBSVM with 0-based indexing (#3652)
* Process data once for Python and CLI examples of learning to rank (#3666)
* Include full text of Apache 2.0 license in the repository (#3698)
* Save predictor parameters in model file (#3856)
* JVM packages
  - Let users specify feature names when calling `getModelDump` and `getFeatureScore` (#3733)
  - Warn the user about the lack of over-the-wire encryption (#3667)
  - Fix errors in examples (#3719)
  - Document choice of trackers (#3831)
  - Document that vanilla Apache Spark is required (#3854)
* Python package
  - Document that custom objective names can't contain a colon (:) (#3601)
  - Show a better error message for failed library loading (#3690)
  - Document that feature importance is unavailable for non-tree learners (#3765)
  - Document behavior of `get_fscore()` for zero-importance features (#3763)
  - Recommend pickling as the way to save `XGBClassifier` / `XGBRegressor` / `XGBRanker` (#3829)
* R package
  - Enlarge variable importance plot to make it more visible (#3820)

### BREAKING CHANGES
* External memory page files have changed, breaking backwards compatibility for temporary storage used during external memory training. This only affects external memory users upgrading their xgboost version - we recommend clearing all `*.page` files before resuming training. Model serialization is unaffected.

### Known issues
* Quantile sketcher fails to produce any quantile for some edge cases (#2943)
* The `hist` algorithm leaks memory when used with learning rate decay callback (#3579)
* Using a custom evaluation function together with early stopping causes assertion failure in XGBoost4J-Spark (#3595)
* Early stopping doesn't work with the `gblinear` learner (#3789)
* Label and weight vectors are not reshared upon the change in number of GPUs (#3794). To get around this issue, delete the `DMatrix` object and re-load.
* The `DMatrix` Python objects are initialized with incorrect values when given array slices (#3841)
* The `gpu_id` parameter is broken and not yet properly supported (#3850)

### Acknowledgement
**Contributors** (in no particular order): Hyunsu Cho (@hcho3), Jiaming Yuan (@trivialfis), Nan Zhu (@CodingCat), Rory Mitchell (@RAMitchell), Andy Adinets (@canonizer), Vadim Khotilovich (@khotilov), Sergei Lebedev (@superbobry)

**First-time Contributors** (in no particular order): Matthew Tovbin (@tovbinm), Jakob Richter (@jakob-r), Grace Lam (@grace-lam), Grant W Schneider (@grantschneider), Andrew Thia (@BlueTea88), Sergei Chipiga (@schipiga), Joseph Bradley (@jkbradley), Chen Qin (@chenqin), Jerry Lin (@linjer), Dmitriy Rybalko (@rdtft), Michael Mui (@mmui), Takahiro Kojima (@515hikaru), Bruce Zhao (@BruceZhaoR), Wei Tian (@weitian), Saumya Bhatnagar (@Sam1301), Juzer Shakir (@JuzerShakir), Zhao Hang (@cleghom), Jonathan Friedman (@jontonsoup), Bruno Tremblay (@meztez), Boris Filippov (@frenzykryger), @Shiki-H, @mrgutkun, @gorogm, @htgeis, @jakehoare, @zengxy, @KOLANICH

**First-time Reviewers** (in no particular order): Nikita Titov (@StrikerRUS), Xiangrui Meng (@mengxr), Nirmal Borah (@Nirmal-Neel)


## v0.80 (2018.08.13)
* **JVM packages received a major upgrade**: To consolidate the APIs and improve the user experience, we refactored the design of XGBoost4J-Spark in a significant manner. (#3387)
  - Consolidated APIs: It is now much easier to integrate XGBoost models into a Spark ML pipeline. Users can control behaviors like output leaf prediction results by setting corresponding column names. Training is now more consistent with other Estimators in Spark MLlib: there is now a single method `fit()` to train decision trees.
  - Better user experience: we refactored the parameter-related modules in XGBoost4J-Spark to provide both camel-case (Spark ML style) and underscore (XGBoost style) parameters.
  - A brand-new tutorial is [available](https://xgboost.readthedocs.io/en/release_0.80/jvm/xgboost4j_spark_tutorial.html) for XGBoost4J-Spark.
  - Latest API documentation is now hosted at https://xgboost.readthedocs.io/.
* XGBoost documentation now keeps track of multiple versions:
  - Latest master: https://xgboost.readthedocs.io/en/latest
  - 0.80 stable: https://xgboost.readthedocs.io/en/release_0.80
  - 0.72 stable: https://xgboost.readthedocs.io/en/release_0.72
* Support for per-group weights in ranking objective (#3379)
* Fix inaccurate decimal parsing (#3546)
* New functionality
  - Query ID column support in LIBSVM data files (#2749). This is convenient for performing ranking tasks in a distributed setting.
  - Hinge loss for binary classification (`binary:hinge`) (#3477)
  - Ability to specify delimiter and instance weight column for CSV files (#3546)
  - Ability to use 1-based indexing instead of 0-based (#3546)
* GPU support
  - Quantile sketch, binning, and index compression are now performed on GPU, eliminating PCIe transfer for the 'gpu_hist' algorithm (#3319, #3393)
  - Upgrade to NCCL2 for multi-GPU training (#3404).
  - Use shared memory atomics for faster training (#3384).
  - Dynamically allocate GPU memory, to prevent large allocations for deep trees (#3519)
  - Fix memory copy bug for large files (#3472)
* Python package
  - Importing data from Python datatable (#3272)
  - Pre-built binary wheels available for 64-bit Linux and Windows (#3424, #3443)
  - Add new importance measures 'total_gain', 'total_cover' (#3498)
  - Sklearn API now supports saving and loading models (#3192)
  - Arbitrary cross validation fold indices (#3353)
  - `predict()` function in Sklearn API uses `best_ntree_limit` if available, to make early stopping easier to use (#3445)
  - Informational messages are now directed to Python's `print()` rather than standard output (#3438). This way, messages appear inside Jupyter notebooks.
* R package
  - Oracle Solaris support, per CRAN policy (#3372)
* JVM packages
  - Single-instance prediction (#3464)
  - Pre-built JARs are now available from Maven Central (#3401)
  - Add NULL pointer check (#3021)
  - Consider `spark.task.cpus` when controlling parallelism (#3530)
  - Handle missing values in prediction (#3529)
  - Eliminate outputs of `System.out` (#3572)
* Refactored C++ DMatrix class for simplicity and de-duplication (#3301)
* Refactored C++ histogram facilities (#3564)
* Refactored constraints / regularization mechanism for split finding (#3335, #3429). Users may specify an elastic net (L2 + L1 regularization) on leaf weights as well as monotonic constraints on test nodes. The refactor will be useful for a future addition of feature interaction constraints.
* Statically link `libstdc++` for MinGW32 (#3430)
* Enable loading from `group`, `base_margin` and `weight` (see [here](http://xgboost.readthedocs.io/en/latest/tutorials/input_format.html#auxiliary-files-for-additional-information)) for Python, R, and JVM packages (#3431). A small R sketch follows at the end of this list.
* Fix model saving for `count:poisson` so that `max_delta_step` doesn't get truncated (#3515)
* Fix loading of sparse CSC matrix (#3553)
* Fix incorrect handling of `base_score` parameter for Tweedie regression (#3295)
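
As a rough R sketch of the auxiliary-information loading mentioned in the list above (`setinfo` is the existing R accessor; the toy values are invented for the example):

```r
library(xgboost)

# Toy data: 8 rows, 3 features.
x <- matrix(runif(8 * 3), ncol = 3)
y <- sample(0:1, 8, replace = TRUE)
dtrain <- xgb.DMatrix(x, label = y)

# The same fields that the auxiliary text files (train.txt.group,
# train.txt.base_margin, train.txt.weight) carry can be attached directly:
setinfo(dtrain, "base_margin", rep(0.5, 8))  # per-row initial prediction margin
setinfo(dtrain, "weight", runif(8))          # per-row instance weights
setinfo(dtrain, "group", c(4, 4))            # two query groups of 4 rows each
```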

## v0.72.1 (2018.07.08)
This version is only applicable to the Python package. The content is identical to that of v0.72.

## v0.72 (2018.06.01)
* Starting with this release, we plan to make a new release every two months. See #3252 for more details.
* Fix a pathological behavior (near-zero second-order gradients) in the multiclass objective (#3304)
* Tree dumps now use high precision in storing floating-point values (#3298)
* Submodules `rabit` and `dmlc-core` have been brought up to date, bringing bug fixes (#3330, #3221).
* GPU support
  - Continuous integration tests for GPU code (#3294, #3309)
  - GPU accelerated coordinate descent algorithm (#3178)
  - Abstract 1D vector class now works with multiple GPUs (#3287)
  - Generate PTX code for the most recent architecture (#3316)
  - Fix a memory bug on NVIDIA K80 cards (#3293)
  - Address performance instability for single-GPU, multi-core machines (#3324)
* Python package
  - FreeBSD support (#3247)
  - Validation of feature names in `Booster.predict()` is now optional (#3323)
* Updated Sklearn API
  - Validation sets now support instance weights (#2354)
  - `XGBClassifier.predict_proba()` should not support the `output_margin` option (#3343). See BREAKING CHANGES below.
* R package:
  - Better handling of NULL in `print.xgb.Booster()` (#3338)
  - Comply with CRAN policy by removing compiler warning suppression (#3329)
  - Updated CRAN submission
* JVM packages
  - JVM packages will now use the same versioning scheme as other packages (#3253)
  - Update Spark to 2.3 (#3254)
  - Add scripts to cross-build and deploy artifacts (#3276, #3307)
  - Fix a compilation error for Scala 2.10 (#3332)
* BREAKING CHANGES
  - `XGBClassifier.predict_proba()` no longer accepts the parameter `output_margin`. The parameter makes no sense for `predict_proba()` because the method is meant to predict class probabilities, not raw margin scores.

## v0.71 (2018.04.11)
* This is a minor release, mainly motivated by issues concerning `pip install`, e.g. #2426, #3189, #3118, and #3194.
  With this release, users of Linux and MacOS will be able to run `pip install` for the most part.
* Refactored linear booster class (`gblinear`), so as to support multiple coordinate descent updaters (#3103, #3134). See BREAKING CHANGES below.
* Fix slow training for multiclass classification with a high number of classes (#3109)
* Fix a corner case in approximate quantile sketch (#3167). Applicable to the 'hist' and 'gpu_hist' algorithms.
* Fix memory leak in DMatrix (#3182)
* New functionality
  - Better linear booster class (#3103, #3134)
  - Pairwise SHAP interaction effects (#3043)
  - Cox loss (#3043)
  - AUC-PR metric for ranking task (#3172)
  - Monotonic constraints for the 'hist' algorithm (#3085)
* GPU support
  - Create an abstract 1D vector class that moves data seamlessly between main and GPU memory (#2935, #3116, #3068). This eliminates unnecessary PCIe data transfer during training time.
  - Fix minor bugs (#3051, #3217)
  - Fix compatibility error for CUDA 9.1 (#3218)
* Python package:
  - Correctly handle parameter `verbose_eval=0` (#3115)
* R package:
  - Eliminate segmentation fault on the 32-bit Windows platform (#2994)
* JVM packages
  - Fix a memory bug involving double-freeing Booster objects (#3005, #3011)
  - Handle empty partitions in predict (#3014)
  - Update docs and unify terminology (#3024)
  - Delete cache files after the job finishes (#3022)
  - Compatibility fixes for the latest Spark versions (#3062, #3093)
* BREAKING CHANGES: Updated linear modelling algorithms. In particular, L1/L2 regularisation penalties are now normalised to the number of training examples. This makes the implementation consistent with sklearn/glmnet. L2 regularisation has also been removed from the intercept. To produce linear models with the old regularisation behaviour, the alpha/lambda regularisation parameters can be manually scaled by dividing them by the number of training examples (see the sketch below).
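
A back-of-the-envelope R sketch of that migration note (assuming only what the note states, namely that the penalties used to be applied un-normalised):

```r
# Penalties tuned before v0.71, when alpha/lambda were not normalised:
old_alpha  <- 0.5
old_lambda <- 1.0
n_train    <- 10000  # number of training examples

# Since penalties are now divided by the number of training examples,
# scaling the old values down by n reproduces the old effective penalty:
params <- list(booster = "gblinear",
               alpha   = old_alpha / n_train,
               lambda  = old_lambda / n_train)
```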

## v0.7 (2017.12.30)
* **This version represents a major change from the last release (v0.6), which was released a year and a half ago.**
* Updated Sklearn API
@@ -62,7 +498,7 @@ This file records the changes in xgboost library in reverse chronological order.
  - Compatibility fix for Python 2.6
  - Call `print_evaluation` callback at the last iteration
  - Use appropriate integer types when calling native code, to prevent truncation and memory error
  - Fix shared library loading on Mac OS X
  - Fix shared library loading on Mac OS X
* R package:
  - New parameters:
    - `silent` in `xgb.DMatrix()`
@@ -103,7 +539,7 @@ This file records the changes in xgboost library in reverse chronological order.
  - Support instance weights
  - Use `SparkParallelismTracker` to prevent jobs from hanging forever
  - Expose train-time evaluation metrics via `XGBoostModel.summary`
  - Option to specify `host-ip` explicitly in the Rabit tracker
  - Option to specify `host-ip` explicitly in the Rabit tracker
* Documentation
  - Better math notation for gradient boosting
  - Updated build instructions for Mac OS X

@@ -1,12 +1,34 @@
Package: xgboost
Type: Package
Title: Extreme Gradient Boosting
Version: 0.6.4.8
Date: 2017-12-05
Author: Tianqi Chen <tianqi.tchen@gmail.com>, Tong He <hetong007@gmail.com>,
    Michael Benesty <michael@benesty.fr>, Vadim Khotilovich <khotilovich@gmail.com>,
    Yuan Tang <terrytangyuan@gmail.com>
Maintainer: Tong He <hetong007@gmail.com>
Version: 0.82.0.1
Date: 2019-03-11
Authors@R: c(
  person("Tianqi", "Chen", role = c("aut"),
         email = "tianqi.tchen@gmail.com"),
  person("Tong", "He", role = c("aut", "cre"),
         email = "hetong007@gmail.com"),
  person("Michael", "Benesty", role = c("aut"),
         email = "michael@benesty.fr"),
  person("Vadim", "Khotilovich", role = c("aut"),
         email = "khotilovich@gmail.com"),
  person("Yuan", "Tang", role = c("aut"),
         email = "terrytangyuan@gmail.com",
         comment = c(ORCID = "0000-0001-5243-233X")),
  person("Hyunsu", "Cho", role = c("aut"),
         email = "chohyu01@cs.washington.edu"),
  person("Kailong", "Chen", role = c("aut")),
  person("Rory", "Mitchell", role = c("aut")),
  person("Ignacio", "Cano", role = c("aut")),
  person("Tianyi", "Zhou", role = c("aut")),
  person("Mu", "Li", role = c("aut")),
  person("Junyuan", "Xie", role = c("aut")),
  person("Min", "Lin", role = c("aut")),
  person("Yifeng", "Geng", role = c("aut")),
  person("Yutian", "Li", role = c("aut")),
  person("XGBoost contributors", role = c("cph"),
         comment = "base XGBoost implementation")
  )
Description: Extreme Gradient Boosting, which is an efficient implementation
    of the gradient boosting framework from Chen & Guestrin (2016) <doi:10.1145/2939672.2939785>.
    This package is its R interface. The package includes efficient linear
@@ -19,6 +41,7 @@ Description: Extreme Gradient Boosting, which is an efficient implementation
License: Apache License (== 2.0) | file LICENSE
URL: https://github.com/dmlc/xgboost
BugReports: https://github.com/dmlc/xgboost/issues
NeedsCompilation: yes
VignetteBuilder: knitr
Suggests:
    knitr,
@@ -28,6 +51,7 @@ Suggests:
    Ckmeans.1d.dp (>= 3.3.1),
    vcd (>= 1.3),
    testthat,
    lintr,
    igraph (>= 1.0.1)
Depends:
    R (>= 3.3.0)
@@ -37,4 +61,5 @@ Imports:
    data.table (>= 1.9.6),
    magrittr (>= 1.5),
    stringi (>= 0.5.2)
RoxygenNote: 6.0.1
RoxygenNote: 6.1.0
SystemRequirements: GNU make, C++11

@@ -18,6 +18,7 @@ export("xgb.parameters<-")
export(cb.cv.predict)
export(cb.early.stop)
export(cb.evaluation.log)
export(cb.gblinear.history)
export(cb.print.evaluation)
export(cb.reset.parameters)
export(cb.save.model)
@@ -32,6 +33,7 @@ export(xgb.attributes)
export(xgb.create.features)
export(xgb.cv)
export(xgb.dump)
export(xgb.gblinear.history)
export(xgb.ggplot.deepness)
export(xgb.ggplot.importance)
export(xgb.importance)
@@ -49,10 +51,11 @@ export(xgboost)
import(methods)
importClassesFrom(Matrix,dgCMatrix)
importClassesFrom(Matrix,dgeMatrix)
importFrom(Matrix,cBind)
importFrom(Matrix,colSums)
importFrom(Matrix,sparse.model.matrix)
importFrom(Matrix,sparseMatrix)
importFrom(Matrix,sparseVector)
importFrom(Matrix,t)
importFrom(data.table,":=")
importFrom(data.table,as.data.table)
importFrom(data.table,data.table)

@@ -168,7 +168,7 @@ cb.evaluation.log <- function() {
|
||||
#' at the beginning of each iteration.
|
||||
#'
|
||||
#' Note that when training is resumed from some previous model, and a function is used to
|
||||
#' reset a parameter value, the \code{nround} argument in this function would be the
|
||||
#' reset a parameter value, the \code{nrounds} argument in this function would be the
|
||||
#' the number of boosting rounds in the current training.
|
||||
#'
|
||||
#' Callback function expects the following values to be set in its calling frame:
|
||||
@@ -524,6 +524,223 @@ cb.cv.predict <- function(save_models = FALSE) {
|
||||
}
|
||||
|
||||
|
||||
#' Callback closure for collecting the model coefficients history of a gblinear booster
|
||||
#' during its training.
|
||||
#'
|
||||
#' @param sparse when set to FALSE/TURE, a dense/sparse matrix is used to store the result.
|
||||
#' Sparse format is useful when one expects only a subset of coefficients to be non-zero,
|
||||
#' when using the "thrifty" feature selector with fairly small number of top features
|
||||
#' selected per iteration.
|
||||
#'
|
||||
#' @details
|
||||
#' To keep things fast and simple, gblinear booster does not internally store the history of linear
|
||||
#' model coefficients at each boosting iteration. This callback provides a workaround for storing
|
||||
#' the coefficients' path, by extracting them after each training iteration.
|
||||
#'
|
||||
#' Callback function expects the following values to be set in its calling frame:
|
||||
#' \code{bst} (or \code{bst_folds}).
|
||||
#'
|
||||
#' @return
|
||||
#' Results are stored in the \code{coefs} element of the closure.
|
||||
#' The \code{\link{xgb.gblinear.history}} convenience function provides an easy way to access it.
|
||||
#' With \code{xgb.train}, it is either a dense of a sparse matrix.
|
||||
#' While with \code{xgb.cv}, it is a list (an element per each fold) of such matrices.
|
||||
#'
|
||||
#' @seealso
|
||||
#' \code{\link{callbacks}}, \code{\link{xgb.gblinear.history}}.
|
||||
#'
|
||||
#' @examples
|
||||
#' #### Binary classification:
|
||||
#' #
|
||||
#' # In the iris dataset, it is hard to linearly separate Versicolor class from the rest
|
||||
#' # without considering the 2nd order interactions:
|
||||
#' require(magrittr)
|
||||
#' x <- model.matrix(Species ~ .^2, iris)[,-1]
|
||||
#' colnames(x)
|
||||
#' dtrain <- xgb.DMatrix(scale(x), label = 1*(iris$Species == "versicolor"))
|
||||
#' param <- list(booster = "gblinear", objective = "reg:logistic", eval_metric = "auc",
|
||||
#' lambda = 0.0003, alpha = 0.0003, nthread = 2)
|
||||
#' # For 'shotgun', which is a default linear updater, using high eta values may result in
|
||||
#' # unstable behaviour in some datasets. With this simple dataset, however, the high learning
|
||||
#' # rate does not break the convergence, but allows us to illustrate the typical pattern of
|
||||
#' # "stochastic explosion" behaviour of this lock-free algorithm at early boosting iterations.
|
||||
#' bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 200, eta = 1.,
|
||||
#' callbacks = list(cb.gblinear.history()))
|
||||
#' # Extract the coefficients' path and plot them vs boosting iteration number:
|
||||
#' coef_path <- xgb.gblinear.history(bst)
|
||||
#' matplot(coef_path, type = 'l')
|
||||
#'
|
||||
#' # With the deterministic coordinate descent updater, it is safer to use higher learning rates.
|
||||
#' # Will try the classical componentwise boosting which selects a single best feature per round:
|
||||
#' bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 200, eta = 0.8,
|
||||
#' updater = 'coord_descent', feature_selector = 'thrifty', top_k = 1,
|
||||
#' callbacks = list(cb.gblinear.history()))
|
||||
#' xgb.gblinear.history(bst) %>% matplot(type = 'l')
|
||||
#' # Componentwise boosting is known to have similar effect to Lasso regularization.
|
||||
#' # Try experimenting with various values of top_k, eta, nrounds,
|
||||
#' # as well as different feature_selectors.
|
||||
#'
|
||||
#' # For xgb.cv:
|
||||
#' bst <- xgb.cv(param, dtrain, nfold = 5, nrounds = 100, eta = 0.8,
|
||||
#' callbacks = list(cb.gblinear.history()))
|
||||
#' # coefficients in the CV fold #3
|
||||
#' xgb.gblinear.history(bst)[[3]] %>% matplot(type = 'l')
|
||||
#'
|
||||
#'
|
||||
#' #### Multiclass classification:
|
||||
#' #
|
||||
#' dtrain <- xgb.DMatrix(scale(x), label = as.numeric(iris$Species) - 1)
|
||||
#' param <- list(booster = "gblinear", objective = "multi:softprob", num_class = 3,
|
||||
#' lambda = 0.0003, alpha = 0.0003, nthread = 2)
|
||||
#' # For the default linear updater 'shotgun' it sometimes is helpful
|
||||
#' # to use smaller eta to reduce instability
|
||||
#' bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 70, eta = 0.5,
|
||||
#' callbacks = list(cb.gblinear.history()))
|
||||
#' # Will plot the coefficient paths separately for each class:
|
||||
#' xgb.gblinear.history(bst, class_index = 0) %>% matplot(type = 'l')
|
||||
#' xgb.gblinear.history(bst, class_index = 1) %>% matplot(type = 'l')
|
||||
#' xgb.gblinear.history(bst, class_index = 2) %>% matplot(type = 'l')
|
||||
#'
|
||||
#' # CV:
|
||||
#' bst <- xgb.cv(param, dtrain, nfold = 5, nrounds = 70, eta = 0.5,
|
||||
#' callbacks = list(cb.gblinear.history(FALSE)))
|
||||
#' # 1st forld of 1st class
|
||||
#' xgb.gblinear.history(bst, class_index = 0)[[1]] %>% matplot(type = 'l')
|
||||
#'
|
||||
#' @export
|
||||
cb.gblinear.history <- function(sparse=FALSE) {
|
||||
coefs <- NULL
|
||||
|
||||
init <- function(env) {
|
||||
if (!is.null(env$bst)) { # xgb.train:
|
||||
coef_path <- list()
|
||||
} else if (!is.null(env$bst_folds)) { # xgb.cv:
|
||||
coef_path <- rep(list(), length(env$bst_folds))
|
||||
} else stop("Parent frame has neither 'bst' nor 'bst_folds'")
|
||||
}
|
||||
|
||||
# convert from list to (sparse) matrix
|
||||
list2mat <- function(coef_list) {
|
||||
if (sparse) {
|
||||
coef_mat <- sparseMatrix(x = unlist(lapply(coef_list, slot, "x")),
|
||||
i = unlist(lapply(coef_list, slot, "i")),
|
||||
p = c(0, cumsum(sapply(coef_list, function(x) length(x@x)))),
|
||||
dims = c(length(coef_list[[1]]), length(coef_list)))
|
||||
return(t(coef_mat))
|
||||
} else {
|
||||
return(do.call(rbind, coef_list))
|
||||
}
|
||||
}
|
||||
|
||||
finalizer <- function(env) {
|
||||
if (length(coefs) == 0)
|
||||
return()
|
||||
if (!is.null(env$bst)) { # # xgb.train:
|
||||
coefs <<- list2mat(coefs)
|
||||
} else { # xgb.cv:
|
||||
# first lapply transposes the list
|
||||
coefs <<- lapply(seq_along(coefs[[1]]), function(i) lapply(coefs, "[[", i)) %>%
|
||||
lapply(function(x) list2mat(x))
|
||||
}
|
||||
}
|
||||
|
||||
extract.coef <- function(env) {
|
||||
if (!is.null(env$bst)) { # # xgb.train:
|
||||
cf <- as.numeric(grep('(booster|bias|weigh)', xgb.dump(env$bst), invert = TRUE, value = TRUE))
|
||||
if (sparse) cf <- as(cf, "sparseVector")
|
||||
} else { # xgb.cv:
|
||||
cf <- vector("list", length(env$bst_folds))
|
||||
for (i in seq_along(env$bst_folds)) {
|
||||
dmp <- xgb.dump(xgb.handleToBooster(env$bst_folds[[i]]$bst))
|
||||
cf[[i]] <- as.numeric(grep('(booster|bias|weigh)', dmp, invert = TRUE, value = TRUE))
|
||||
if (sparse) cf[[i]] <- as(cf[[i]], "sparseVector")
|
||||
}
|
||||
}
|
||||
cf
|
||||
}
|
||||
|
||||
callback <- function(env = parent.frame(), finalize = FALSE) {
|
||||
if (is.null(coefs)) init(env)
|
||||
if (finalize) return(finalizer(env))
|
||||
cf <- extract.coef(env)
|
||||
coefs <<- c(coefs, list(cf))
|
||||
}
|
||||
|
||||
attr(callback, 'call') <- match.call()
|
||||
attr(callback, 'name') <- 'cb.gblinear.history'
|
||||
callback
|
||||
}
|
||||
|
||||
#' Extract gblinear coefficients history.
|
||||
#'
|
||||
#' A helper function to extract the matrix of linear coefficients' history
|
||||
#' from a gblinear model created while using the \code{cb.gblinear.history()}
|
||||
#' callback.
|
||||
#'
|
||||
#' @param model either an \code{xgb.Booster} or a result of \code{xgb.cv()}, trained
|
||||
#' using the \code{cb.gblinear.history()} callback.
|
||||
#' @param class_index zero-based class index to extract the coefficients for only that
|
||||
#' specific class in a multinomial multiclass model. When it is NULL, all the
|
||||
#' coeffients are returned. Has no effect in non-multiclass models.
|
||||
#'
|
||||
#' @return
|
||||
#' For an \code{xgb.train} result, a matrix (either dense or sparse) with the columns
|
||||
#' corresponding to iteration's coefficients (in the order as \code{xgb.dump()} would
|
||||
#' return) and the rows corresponding to boosting iterations.
|
||||
#'
|
||||
#' For an \code{xgb.cv} result, a list of such matrices is returned with the elements
|
||||
#' corresponding to CV folds.
|
||||
#'
|
||||
#' @export
|
||||
xgb.gblinear.history <- function(model, class_index = NULL) {

  if (!(inherits(model, "xgb.Booster") ||
        inherits(model, "xgb.cv.synchronous")))
    stop("model must be an object of either xgb.Booster or xgb.cv.synchronous class")
  is_cv <- inherits(model, "xgb.cv.synchronous")

  if (is.null(model[["callbacks"]]) || is.null(model$callbacks[["cb.gblinear.history"]]))
    stop("model must be trained while using the cb.gblinear.history() callback")

  if (!is_cv) {
    # extract num_class & num_feat from the internal model
    dmp <- xgb.dump(model)
    if (length(dmp) < 2 || dmp[2] != "bias:")
      stop("It does not appear to be a gblinear model")
    dmp <- dmp[-c(1,2)]
    n <- which(dmp == 'weight:')
    if (length(n) != 1)
      stop("It does not appear to be a gblinear model")
    num_class <- n - 1
    num_feat <- (length(dmp) - 4) / num_class
  } else {
    # in case of CV, the object is expected to have this info
    if (model$params$booster != "gblinear")
      stop("It does not appear to be a gblinear model")
    num_class <- NVL(model$params$num_class, 1)
    num_feat <- model$nfeatures
    if (is.null(num_feat))
      stop("This xgb.cv result does not have nfeatures info")
  }

  if (!is.null(class_index) &&
      num_class > 1 &&
      (class_index[1] < 0 || class_index[1] >= num_class))
    stop("class_index has to be within [0,", num_class - 1, "]")

  coef_path <- environment(model$callbacks$cb.gblinear.history)[["coefs"]]
  if (!is.null(class_index) && num_class > 1) {
    coef_path <- if (is.list(coef_path)) {
      lapply(coef_path,
             function(x) x[, seq(1 + class_index, by=num_class, length.out=num_feat)])
    } else {
      coef_path <- coef_path[, seq(1 + class_index, by=num_class, length.out=num_feat)]
    }
  }
  coef_path
}
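A quick worked example of the column arithmetic used in the slicing above may help: gblinear dumps multiclass coefficients grouped by feature with the classes interleaved, as the stride of num_class in the code implies, so one class's columns are picked with that stride. The numbers below are illustrative only:

# Sketch (hypothetical sizes): columns belonging to class 0 of a
# 3-class, 4-feature model within the coefficients' matrix.
num_class <- 3; num_feat <- 4; class_index <- 0
seq(1 + class_index, by = num_class, length.out = num_feat)
#> [1]  1  4  7 10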

#
# Internal utility functions for callbacks ------------------------------------
#

@@ -74,6 +74,19 @@ check.booster.params <- function(params, ...) {
    params[['monotone_constraints']] = vec2str
  }

  # interaction constraints parser (convert from list of column indices to string)
  if (!is.null(params[['interaction_constraints']]) &&
      typeof(params[['interaction_constraints']]) != "character") {
    # check input class
    if (class(params[['interaction_constraints']]) != 'list') stop('interaction_constraints should be class list')
    if (!all(unique(sapply(params[['interaction_constraints']], class)) %in% c('numeric','integer'))) {
      stop('interaction_constraints should be a list of numeric/integer vectors')
    }

    # recast parameter as string
    interaction_constraints <- sapply(params[['interaction_constraints']], function(x) paste0('[', paste(x, collapse=','), ']'))
    params[['interaction_constraints']] <- paste0('[', paste(interaction_constraints, collapse=','), ']')
  }
  return(params)
}

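For illustration, here is what the recast above produces for a hypothetical two-group constraint list; the nested brackets form the string representation that gets passed down to the core library:

# Sketch only: `constraints` is a made-up input mirroring the recast above.
constraints <- list(c(0, 1), c(2, 3, 4))  # zero-based column indices
inner <- sapply(constraints, function(x) paste0('[', paste(x, collapse = ','), ']'))
paste0('[', paste(inner, collapse = ','), ']')
#> [1] "[[0,1],[2,3,4]]"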
@@ -262,7 +275,8 @@ xgb.createFolds <- function(y, k = 10)
        ## add enough random integers to get length(seqVector) == numInClass[i]
        if (numInClass[i] %% k > 0) seqVector <- c(seqVector, sample.int(k, numInClass[i] %% k))
        ## shuffle the integers for fold assignment and assign to this class's data
        foldVector[y == dimnames(numInClass)$y[i]] <- sample(seqVector)
        ## seqVector[sample.int(length(seqVector))] is used to handle length(seqVector) == 1
        foldVector[y == dimnames(numInClass)$y[i]] <- seqVector[sample.int(length(seqVector))]
      }
  } else {
    foldVector <- seq(along = y)

@@ -37,11 +37,14 @@ xgb.handleToBooster <- function(handle, raw = NULL) {

# Check whether xgb.Booster.handle is null
# internal utility function
is.null.handle <- function(handle) {
  if (is.null(handle)) return(TRUE)

  if (!identical(class(handle), "xgb.Booster.handle"))
    stop("argument type must be xgb.Booster.handle")

  if (is.null(handle) || .Call(XGCheckNullPtr_R, handle))
  if (.Call(XGCheckNullPtr_R, handle))
    return(TRUE)

  return(FALSE)
}

@@ -126,11 +129,13 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
#' logistic regression would result in predictions for log-odds instead of probabilities.
#' @param ntreelimit limit the number of model's trees or boosting iterations used in prediction (see Details).
#' It will use all the trees by default (\code{NULL} value).
#' @param predleaf whether predict leaf index instead.
#' @param predcontrib whether to return feature contributions to individual predictions instead (see Details).
#' @param predleaf whether predict leaf index.
#' @param predcontrib whether to return feature contributions to individual predictions (see Details).
#' @param approxcontrib whether to use a fast approximation for feature contributions (see Details).
#' @param predinteraction whether to return contributions of feature interactions to individual predictions (see Details).
#' @param reshape whether to reshape the vector of predictions to a matrix form when there are several
#' prediction outputs per case. This option has no effect when \code{predleaf = TRUE}.
#' prediction outputs per case. This option has no effect when either of predleaf, predcontrib,
#' or predinteraction flags is TRUE.
#' @param ... Parameters passed to \code{predict.xgb.Booster}
#'
#' @details
@@ -155,6 +160,11 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
#' Setting \code{approxcontrib = TRUE} approximates these values following the idea explained
#' in \url{http://blog.datadive.net/interpreting-random-forests/}.
#'
#' With \code{predinteraction = TRUE}, SHAP values of contributions of interaction of each pair of features
#' are computed. Note that this operation might be rather expensive in terms of compute and memory.
#' Since it quadratically depends on the number of features, it is recommended to perform selection
#' of the most important features first. See below about the format of the returned results.
#'
#' @return
#' For regression or binary classification, it returns a vector of length \code{nrows(newdata)}.
#' For multiclass classification, either a \code{num_class * nrows(newdata)} vector or
@@ -170,6 +180,14 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
#' such a matrix. The contribution values are on the scale of untransformed margin
#' (e.g., for binary classification would mean that the contributions are log-odds deviations from bias).
#'
#' When \code{predinteraction = TRUE} and it is not a multiclass setting, the output is a 3d array with
#' dimensions \code{c(nrow, num_features + 1, num_features + 1)}. The off-diagonal (in the last two dimensions)
#' elements represent different features' interaction contributions. The array is symmetric WRT the last
#' two dimensions. The "+ 1" column corresponds to bias. Summing this array along the last dimension should
#' produce practically the same result as predict with \code{predcontrib = TRUE}.
#' For a multiclass case, a list of \code{num_class} elements is returned, where each element is
#' such an array.
#'
#' @seealso
#' \code{\link{xgb.train}}.
#'
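The consistency property stated above can be checked directly; a minimal sketch, assuming a trained non-multiclass booster `bst` and a feature matrix `x` (both hypothetical here, not part of the diff):

# Sketch: interaction contributions should sum back to per-feature contributions.
contrib <- predict(bst, x, predcontrib = TRUE)    # matrix [nrow, nfeat + 1]
inter <- predict(bst, x, predinteraction = TRUE)  # array  [nrow, nfeat + 1, nfeat + 1]
max(abs(apply(inter, c(1, 2), sum) - contrib))    # expected to be ~0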
@@ -266,7 +284,8 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
#' @rdname predict.xgb.Booster
#' @export
predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FALSE, ntreelimit = NULL,
                                predleaf = FALSE, predcontrib = FALSE, approxcontrib = FALSE, reshape = FALSE, ...) {
                                predleaf = FALSE, predcontrib = FALSE, approxcontrib = FALSE, predinteraction = FALSE,
                                reshape = FALSE, ...) {

  object <- xgb.Booster.complete(object, saveraw = FALSE)
  if (!inherits(newdata, "xgb.DMatrix"))
@@ -282,7 +301,8 @@ predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FA
  if (ntreelimit < 0)
    stop("ntreelimit cannot be negative")

  option <- 0L + 1L * as.logical(outputmargin) + 2L * as.logical(predleaf) + 4L * as.logical(predcontrib) + 8L * as.logical(approxcontrib)
  option <- 0L + 1L * as.logical(outputmargin) + 2L * as.logical(predleaf) + 4L * as.logical(predcontrib) +
    8L * as.logical(approxcontrib) + 16L * as.logical(predinteraction)

  ret <- .Call(XGBoosterPredict_R, object$handle, newdata, option[1], as.integer(ntreelimit))

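Since each flag contributes a distinct power of two, any combination of prediction modes maps to a unique integer for the C API; for example, outputmargin plus predcontrib gives 1 + 4 = 5:

# Worked example of the bitmask above (flag values chosen arbitrarily):
1L * TRUE + 2L * FALSE + 4L * TRUE + 8L * FALSE + 16L * FALSE
#> [1] 5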
@@ -302,17 +322,28 @@ predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FA
  } else if (predcontrib) {
    n_col1 <- ncol(newdata) + 1
    n_group <- npred_per_case / n_col1
    dnames <- if (!is.null(colnames(newdata))) list(NULL, c(colnames(newdata), "BIAS")) else NULL
    cnames <- if (!is.null(colnames(newdata))) c(colnames(newdata), "BIAS") else NULL
    ret <- if (n_ret == n_row) {
      matrix(ret, ncol = 1, dimnames = dnames)
      matrix(ret, ncol = 1, dimnames = list(NULL, cnames))
    } else if (n_group == 1) {
      matrix(ret, nrow = n_row, byrow = TRUE, dimnames = dnames)
      matrix(ret, nrow = n_row, byrow = TRUE, dimnames = list(NULL, cnames))
    } else {
      grp_mask <- rep(seq_len(n_col1), n_row) +
                  rep((seq_len(n_row) - 1) * n_col1 * n_group, each = n_col1)
      lapply(seq_len(n_group), function(g) {
        matrix(ret[grp_mask + n_col1 * (g - 1)], nrow = n_row, byrow = TRUE, dimnames = dnames)
      })
      arr <- array(ret, c(n_col1, n_group, n_row),
                   dimnames = list(cnames, NULL, NULL)) %>% aperm(c(2,3,1)) # [group, row, col]
      lapply(seq_len(n_group), function(g) arr[g,,])
    }
  } else if (predinteraction) {
    n_col1 <- ncol(newdata) + 1
    n_group <- npred_per_case / n_col1^2
    cnames <- if (!is.null(colnames(newdata))) c(colnames(newdata), "BIAS") else NULL
    ret <- if (n_ret == n_row) {
      matrix(ret, ncol = 1, dimnames = list(NULL, cnames))
    } else if (n_group == 1) {
      array(ret, c(n_col1, n_col1, n_row), dimnames = list(cnames, cnames, NULL)) %>% aperm(c(3,1,2))
    } else {
      arr <- array(ret, c(n_col1, n_col1, n_group, n_row),
                   dimnames = list(cnames, cnames, NULL, NULL)) %>% aperm(c(3,4,1,2)) # [group, row, col1, col2]
      lapply(seq_len(n_group), function(g) arr[g,,,])
    }
  } else if (reshape && npred_per_case > 1) {
    ret <- matrix(ret, nrow = n_row, byrow = TRUE)
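The array/aperm idiom used in the new branches may be easier to see on a toy vector: array() reinterprets the flat buffer column-major as [col, group, row], and aperm() reorders the dimensions to [group, row, col]. A standalone sketch with made-up dimensions:

# Sketch: reshaping a flat prediction buffer the way the new code does.
v <- seq_len(2 * 3 * 4)                      # pretend buffer: 2 cols, 3 groups, 4 rows
a <- aperm(array(v, c(2, 3, 4)), c(2, 3, 1)) # -> dims [group, row, col]
dim(a)
#> [1] 3 4 2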
@@ -537,7 +568,7 @@ xgb.ntree <- function(bst) {
print.xgb.Booster <- function(x, verbose = FALSE, ...) {
  cat('##### xgb.Booster\n')

  valid_handle <- is.null.handle(x$handle)
  valid_handle <- !is.null.handle(x$handle)
  if (!valid_handle)
    cat("Handle is invalid! Suggest using xgb.Booster.complete\n")

@@ -52,9 +52,9 @@
#' dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label)
#'
#' param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic')
#' nround = 4
#' nrounds = 4
#'
#' bst = xgb.train(params = param, data = dtrain, nrounds = nround, nthread = 2)
#' bst = xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)
#'
#' # Model accuracy without new features
#' accuracy.before <- sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label) /
@@ -68,7 +68,7 @@
#' new.dtrain <- xgb.DMatrix(data = new.features.train, label = agaricus.train$label)
#' new.dtest <- xgb.DMatrix(data = new.features.test, label = agaricus.test$label)
#' watchlist <- list(train = new.dtrain)
#' bst <- xgb.train(params = param, data = new.dtrain, nrounds = nround, nthread = 2)
#' bst <- xgb.train(params = param, data = new.dtrain, nrounds = nrounds, nthread = 2)
#'
#' # Model accuracy with new features
#' accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) /
@@ -83,5 +83,5 @@ xgb.create.features <- function(model, data, ...){
  check.deprecation(...)
  pred_with_leaf <- predict(model, data, predleaf = TRUE)
  cols <- lapply(as.data.frame(pred_with_leaf), factor)
  cBind(data, sparse.model.matrix( ~ . -1, cols))
  cbind(data, sparse.model.matrix( ~ . -1, cols))
}

@@ -34,6 +34,7 @@
#' \item \code{rmse} Root mean square error
#' \item \code{logloss} negative log-likelihood function
#' \item \code{auc} Area under curve
#' \item \code{aucpr} Area under PR curve
#' \item \code{merror} Exact matching error, used to evaluate multi-class classification
#' }
#' @param obj customized objective function. Returns gradient and second order
@@ -82,12 +83,13 @@
#' \item \code{params} parameters that were passed to the xgboost library. Note that it does not
#' capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
#' \item \code{callbacks} callback functions that were either automatically assigned or
#' explicitely passed.
#' explicitly passed.
#' \item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
#' first column corresponding to iteration number and the rest corresponding to the
#' CV-based evaluation means and standard deviations for the training and test CV-sets.
#' It is created by the \code{\link{cb.evaluation.log}} callback.
#' \item \code{niter} number of boosting iterations.
#' \item \code{nfeatures} number of features in training data.
#' \item \code{folds} the list of CV folds' indices - either those passed through the \code{folds}
#' parameter or randomly generated.
#' \item \code{best_iteration} iteration number with the best evaluation metric value
@@ -184,6 +186,7 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
    handle <- xgb.Booster.handle(params, list(dtrain, dtest))
    list(dtrain = dtrain, bst = handle, watchlist = list(train = dtrain, test=dtest), index = folds[[k]])
  })
  rm(dall)
  # a "basket" to collect some results from callbacks
  basket <- list()

@@ -221,6 +224,7 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
    callbacks = callbacks,
    evaluation_log = evaluation_log,
    niter = end_iteration,
    nfeatures = ncol(data),
    folds = folds
  )
  ret <- c(ret, basket)

@@ -30,7 +30,8 @@
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' # save the model in file 'xgb.model.dump'
#' xgb.dump(bst, 'xgb.model.dump', with_stats = TRUE)
#' dump_path = file.path(tempdir(), 'model.dump')
#' xgb.dump(bst, dump_path, with_stats = TRUE)
#'
#' # print the model without saving it to a file
#' print(xgb.dump(bst, with_stats = TRUE))

@@ -22,7 +22,7 @@ xgb.ggplot.importance <- function(importance_matrix = NULL, top_n = NULL, measur

  plot <-
    ggplot2::ggplot(importance_matrix,
                    ggplot2::aes(x = factor(Feature, levels = rev(Feature)), y = Importance, width = 0.05),
                    ggplot2::aes(x = factor(Feature, levels = rev(Feature)), y = Importance, width = 0.5),
                    environment = environment()) +
    ggplot2::geom_bar(ggplot2::aes(fill = Cluster), stat = "identity", position = "identity") +
    ggplot2::coord_flip() +

@@ -27,7 +27,7 @@
#' a tree's median absolute leaf weight changes through the iterations.
#'
#' This function was inspired by the blog post
#' \url{http://aysent.github.io/2015/11/08/random-forest-leaf-visualization.html}.
#' \url{https://github.com/aysent/random-forest-leaf-visualization}.
#'
#' @return
#'

@@ -212,6 +212,7 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
  }
  if (plot && which == "2d") {
    # TODO
    warning("Bivariate plotting is currently not available.")
  }
  invisible(list(data = data, shap_contrib = shap_contrib))
}

@@ -22,10 +22,11 @@
#' \item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. The larger, the more conservative the algorithm will be.
#' \item \code{max_depth} maximum depth of a tree. Default: 6
#' \item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
#' \item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nround}. Default: 1
#' \item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
#' \item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
#' \item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through Xgboost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
#' \item \code{monotone_constraints} A numerical vector consisting of \code{1}, \code{0} and \code{-1} with its length equal to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
#' \item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave argument unspecified for no interaction constraints.
#' }
#'
#' 2.2. Parameter for Linear Booster
@@ -121,12 +122,13 @@
#' \itemize{
#' \item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
#' \item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
#' \item \code{mlogloss} multiclass logloss. \url{https://www.kaggle.com/wiki/MultiClassLogLoss/}
#' \item \code{mlogloss} multiclass logloss. \url{http://wiki.fast.ai/index.php/Log_Loss}
#' \item \code{error} Binary classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
#' By default, it uses the 0.5 threshold for predicted values to define negative and positive instances.
#' Different threshold (e.g., 0.) could be specified as "error@0."
#' \item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
#' \item \code{auc} Area under the curve. \url{http://en.wikipedia.org/wiki/Receiver_operating_characteristic#'Area_under_curve} for ranking evaluation.
#' \item \code{aucpr} Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation.
#' \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{http://en.wikipedia.org/wiki/NDCG}
#' }
#'
@@ -162,6 +164,7 @@
#' (only available with early stopping).
#' \item \code{feature_names} names of the training dataset features
#' (only when column names were defined in training data).
#' \item \code{nfeatures} number of features in training data.
#' }
#'
#' @seealso
@@ -351,8 +354,8 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
  if (inherits(xgb_model, 'xgb.Booster') &&
      !is_update &&
      !is.null(xgb_model$evaluation_log) &&
      all.equal(colnames(evaluation_log),
                colnames(xgb_model$evaluation_log))) {
      isTRUE(all.equal(colnames(evaluation_log),
                       colnames(xgb_model$evaluation_log)))) {
    evaluation_log <- rbindlist(list(xgb_model$evaluation_log, evaluation_log))
  }
  bst$evaluation_log <- evaluation_log
@@ -363,6 +366,7 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
  bst$callbacks <- callbacks
  if (!is.null(colnames(dtrain)))
    bst$feature_names <- colnames(dtrain)

  bst$nfeatures <- ncol(dtrain)

  return(bst)
}

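The isTRUE() wrapper added here guards against a classic R pitfall: all.equal() returns either TRUE or a character description of the difference, so using it bare inside && can error out. A two-line illustration:

all.equal(c("a", "b"), c("a", "c"))          # "1 string mismatch" -- not FALSE
isTRUE(all.equal(c("a", "b"), c("a", "c")))  # FALSE, safe inside &&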
@@ -77,10 +77,11 @@ NULL

# Various imports
#' @importClassesFrom Matrix dgCMatrix dgeMatrix
#' @importFrom Matrix cBind
#' @importFrom Matrix colSums
#' @importFrom Matrix sparse.model.matrix
#' @importFrom Matrix sparseVector
#' @importFrom Matrix sparseMatrix
#' @importFrom Matrix t
#' @importFrom data.table data.table
#' @importFrom data.table is.data.table
#' @importFrom data.table as.data.table

@@ -30,4 +30,4 @@ Examples
Development
-----------

* See the [R Package section](https://xgboost.readthedocs.io/en/latest/how_to/contribute.html#r-package) of the contributiors guide.
* See the [R Package section](https://xgboost.readthedocs.io/en/latest/how_to/contribute.html#r-package) of the contributors guide.

R-package/configure (vendored, 4 lines changed)
@@ -1667,12 +1667,12 @@ OPENMP_CXXFLAGS=""

if test `uname -s` = "Linux"
then
  OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CFLAGS)"
  OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CXXFLAGS)"
fi

if test `uname -s` = "Darwin"
then
  OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CFLAGS)"
  OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CXXFLAGS)"
  ac_pkg_openmp=no
  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether OpenMP will work in a package" >&5
$as_echo_n "checking whether OpenMP will work in a package... " >&6; }

@@ -8,12 +8,12 @@ OPENMP_CXXFLAGS=""

if test `uname -s` = "Linux"
then
  OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CFLAGS)"
  OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CXXFLAGS)"
fi

if test `uname -s` = "Darwin"
then
  OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CFLAGS)"
  OPENMP_CXXFLAGS="\$(SHLIB_OPENMP_CXXFLAGS)"
  ac_pkg_openmp=no
  AC_MSG_CHECKING([whether OpenMP will work in a package])
  AC_LANG_CONFTEST(

R-package/configure.win (new, empty file)
@@ -11,4 +11,5 @@ early_stopping Early Stop in training
poisson_regression Poisson Regression on count data
tweedie_regression Tweedie Regression
gpu_accelerated GPU-accelerated tree building algorithms
interaction_constraints Interaction constraints among features

@@ -99,7 +99,8 @@ err <- as.numeric(sum(as.integer(pred > 0.5) != label))/length(label)
print(paste("test-error=", err))

# You can dump the tree you learned using xgb.dump into a text file
xgb.dump(bst, "dump.raw.txt", with_stats = T)
dump_path = file.path(tempdir(), 'dump.raw.txt')
xgb.dump(bst, dump_path, with_stats = T)

# Finally, you can check which features are the most important.
print("Most important features (look at column Gain):")

@@ -5,20 +5,20 @@ data(agaricus.test, package='xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)

nround <- 2
nrounds <- 2
param <- list(max_depth=2, eta=1, silent=1, nthread=2, objective='binary:logistic')

cat('running cross validation\n')
# do cross validation, this will print result out as
# [iteration] metric_name:mean_value+std_value
# std_value is standard deviation of the metric
xgb.cv(param, dtrain, nround, nfold=5, metrics={'error'})
xgb.cv(param, dtrain, nrounds, nfold=5, metrics={'error'})

cat('running cross validation, disable standard deviation display\n')
# do cross validation, this will print result out as
# [iteration] metric_name:mean_value+std_value
# std_value is standard deviation of the metric
xgb.cv(param, dtrain, nround, nfold=5,
xgb.cv(param, dtrain, nrounds, nfold=5,
       metrics='error', showsd = FALSE)

###
@@ -43,9 +43,9 @@ evalerror <- function(preds, dtrain) {
param <- list(max_depth=2, eta=1, silent=1,
              objective = logregobj, eval_metric = evalerror)
# train with customized objective
xgb.cv(params = param, data = dtrain, nrounds = nround, nfold = 5)
xgb.cv(params = param, data = dtrain, nrounds = nrounds, nfold = 5)

# do cross validation with prediction values for each fold
res <- xgb.cv(params = param, data = dtrain, nrounds = nround, nfold = 5, prediction = TRUE)
res <- xgb.cv(params = param, data = dtrain, nrounds = nrounds, nfold = 5, prediction = TRUE)
res$evaluation_log
length(res$pred)

@@ -33,7 +33,7 @@ evalerror <- function(preds, dtrain) {
  return(list(metric = "error", value = err))
}

param <- list(max_depth=2, eta=1, nthread = 2, silent=1,
param <- list(max_depth=2, eta=1, nthread = 2, verbosity=0,
              objective=logregobj, eval_metric=evalerror)
print ('start training with user customized objective')
# training with customized objective, we can also do step by step training
@@ -57,7 +57,7 @@ logregobjattr <- function(preds, dtrain) {
  hess <- preds * (1 - preds)
  return(list(grad = grad, hess = hess))
}
param <- list(max_depth=2, eta=1, nthread = 2, silent=1,
param <- list(max_depth=2, eta=1, nthread = 2, verbosity=0,
              objective=logregobjattr, eval_metric=evalerror)
print ('start training with user customized objective, with additional attributes in DMatrix')
# training with customized objective, we can also do step by step training

@@ -7,7 +7,7 @@ dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
# note: for customized objective function, we leave objective as default
# note: what we are getting is margin value in prediction
# you must know what you are doing
param <- list(max_depth=2, eta=1, nthread = 2, silent=1)
param <- list(max_depth=2, eta=1, nthread=2, verbosity=0)
watchlist <- list(eval = dtest)
num_round <- 20
# user define objective function, given prediction, return gradient and second order gradient
@@ -32,9 +32,9 @@ evalerror <- function(preds, dtrain) {
}
print ('start training with early Stopping setting')

bst <- xgb.train(param, dtrain, num_round, watchlist,
                 objective = logregobj, eval_metric = evalerror, maximize = FALSE,
                 early_stopping_round = 3)
bst <- xgb.cv(param, dtrain, num_round, nfold = 5,
              objective = logregobj, eval_metric = evalerror,
              maximize = FALSE, early_stopping_rounds = 3)

R-package/demo/interaction_constraints.R (new file, 105 lines)
@@ -0,0 +1,105 @@
library(xgboost)
library(data.table)

set.seed(1024)

# Function to obtain a list of interactions fitted in trees, requires input of maximum depth
treeInteractions <- function(input_tree, input_max_depth){
  trees <- copy(input_tree)  # copy tree input to prevent overwriting
  if (input_max_depth < 2) return(list())  # no interactions if max depth < 2
  if (nrow(input_tree) == 1) return(list())

  # Attach parent nodes
  for (i in 2:input_max_depth){
    if (i == 2) trees[, ID_merge:=ID] else trees[, ID_merge:=get(paste0('parent_',i-2))]
    parents_left <- trees[!is.na(Split), list(i.id=ID, i.feature=Feature, ID_merge=Yes)]
    parents_right <- trees[!is.na(Split), list(i.id=ID, i.feature=Feature, ID_merge=No)]

    setorderv(trees, 'ID_merge')
    setorderv(parents_left, 'ID_merge')
    setorderv(parents_right, 'ID_merge')

    trees <- merge(trees, parents_left, by='ID_merge', all.x=T)
    trees[!is.na(i.id), c(paste0('parent_', i-1), paste0('parent_feat_', i-1)):=list(i.id, i.feature)]
    trees[, c('i.id','i.feature'):=NULL]

    trees <- merge(trees, parents_right, by='ID_merge', all.x=T)
    trees[!is.na(i.id), c(paste0('parent_', i-1), paste0('parent_feat_', i-1)):=list(i.id, i.feature)]
    trees[, c('i.id','i.feature'):=NULL]
  }

  # Extract nodes with interactions
  interaction_trees <- trees[!is.na(Split) & !is.na(parent_1),
                             c('Feature',paste0('parent_feat_',1:(input_max_depth-1))), with=F]
  interaction_trees_split <- split(interaction_trees, 1:nrow(interaction_trees))
  interaction_list <- lapply(interaction_trees_split, as.character)

  # Remove NAs (no parent interaction)
  interaction_list <- lapply(interaction_list, function(x) x[!is.na(x)])

  # Remove non-interactions (same variable)
  interaction_list <- lapply(interaction_list, unique)  # remove same variables
  interaction_length <- sapply(interaction_list, length)
  interaction_list <- interaction_list[interaction_length > 1]
  interaction_list <- unique(lapply(interaction_list, sort))
  return(interaction_list)
}

# Generate sample data
x <- list()
for (i in 1:10){
  x[[i]] = i*rnorm(1000, 10)
}
x <- as.data.table(x)

y = -1*x[, rowSums(.SD)] + x[['V1']]*x[['V2']] + x[['V3']]*x[['V4']]*x[['V5']] + rnorm(1000, 0.001) + 3*sin(x[['V7']])

train = as.matrix(x)

# Interaction constraint list (column names form)
interaction_list <- list(c('V1','V2'),c('V3','V4','V5'))

# Convert interaction constraint list into feature index form
cols2ids <- function(object, col_names) {
  LUT <- seq_along(col_names) - 1
  names(LUT) <- col_names
  rapply(object, function(x) LUT[x], classes="character", how="replace")
}
interaction_list_fid = cols2ids(interaction_list, colnames(train))

# Fit model with interaction constraints
bst = xgboost(data = train, label = y, max_depth = 4,
              eta = 0.1, nthread = 2, nrounds = 1000,
              interaction_constraints = interaction_list_fid)

bst_tree <- xgb.model.dt.tree(colnames(train), bst)
bst_interactions <- treeInteractions(bst_tree, 4)  # interactions constrained to combinations of V1*V2 and V3*V4*V5

# Fit model without interaction constraints
bst2 = xgboost(data = train, label = y, max_depth = 4,
               eta = 0.1, nthread = 2, nrounds = 1000)

bst2_tree <- xgb.model.dt.tree(colnames(train), bst2)
bst2_interactions <- treeInteractions(bst2_tree, 4)  # many more interactions

# Fit model with both interaction and monotonicity constraints
bst3 = xgboost(data = train, label = y, max_depth = 4,
               eta = 0.1, nthread = 2, nrounds = 1000,
               interaction_constraints = interaction_list_fid,
               monotone_constraints = c(-1,0,0,0,0,0,0,0,0,0))

bst3_tree <- xgb.model.dt.tree(colnames(train), bst3)
bst3_interactions <- treeInteractions(bst3_tree, 4)  # interactions still constrained to combinations of V1*V2 and V3*V4*V5

# Show monotonic constraints still apply by checking scores after incrementing V1
x1 <- sort(unique(x[['V1']]))
for (i in 1:length(x1)){
  testdata <- copy(x[, -c('V1')])
  testdata[['V1']] <- x1[i]
  testdata <- testdata[, paste0('V',1:10), with=F]
  pred <- predict(bst3, as.matrix(testdata))

  # Should not print out anything due to monotonic constraints
  if (i > 1) if (any(pred > prev_pred)) print(i)
  prev_pred <- pred
}
@@ -7,10 +7,10 @@ dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)

param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic')
watchlist <- list(eval = dtest, train = dtrain)
nround = 2
nrounds = 2

# training the model for two rounds
bst = xgb.train(param, dtrain, nround, nthread = 2, watchlist)
bst = xgb.train(param, dtrain, nrounds, nthread = 2, watchlist)
cat('start testing prediction from first n trees\n')
labels <- getinfo(dtest,'label')

@@ -11,10 +11,10 @@ dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label)

param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic')
nround = 4
nrounds = 4

# training the model for four rounds
bst = xgb.train(params = param, data = dtrain, nrounds = nround, nthread = 2)
bst = xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)

# Model accuracy without new features
accuracy.before <- sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)
@@ -32,7 +32,7 @@ create.new.tree.features <- function(model, original.features){
    leaf.id <- sort(unique(pred_with_leaf[,i]))
    cols[[i]] <- factor(x = pred_with_leaf[,i], level = leaf.id)
  }
  cBind(original.features, sparse.model.matrix( ~ . -1, as.data.frame(cols)))
  cbind(original.features, sparse.model.matrix( ~ . -1, as.data.frame(cols)))
}

# Convert previous features to one hot encoding
@@ -43,7 +43,7 @@ new.features.test <- create.new.tree.features(bst, agaricus.test$data)
new.dtrain <- xgb.DMatrix(data = new.features.train, label = agaricus.train$label)
new.dtest <- xgb.DMatrix(data = new.features.test, label = agaricus.test$label)
watchlist <- list(train = new.dtrain)
bst <- xgb.train(params = param, data = new.dtrain, nrounds = nround, nthread = 2)
bst <- xgb.train(params = param, data = new.dtrain, nrounds = nrounds, nthread = 2)

# Model accuracy with new features
accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)

R-package/man/cb.gblinear.history.Rd (new file, 95 lines)
@@ -0,0 +1,95 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callbacks.R
\name{cb.gblinear.history}
\alias{cb.gblinear.history}
\title{Callback closure for collecting the model coefficients history of a gblinear booster
during its training.}
\usage{
cb.gblinear.history(sparse = FALSE)
}
\arguments{
\item{sparse}{when set to FALSE/TRUE, a dense/sparse matrix is used to store the result.
Sparse format is useful when one expects only a subset of coefficients to be non-zero,
when using the "thrifty" feature selector with a fairly small number of top features
selected per iteration.}
}
\value{
Results are stored in the \code{coefs} element of the closure.
The \code{\link{xgb.gblinear.history}} convenience function provides an easy way to access it.
With \code{xgb.train}, it is either a dense or a sparse matrix.
With \code{xgb.cv}, it is a list (one element per fold) of such matrices.
}
\description{
Callback closure for collecting the model coefficients history of a gblinear booster
during its training.
}
\details{
To keep things fast and simple, gblinear booster does not internally store the history of linear
model coefficients at each boosting iteration. This callback provides a workaround for storing
the coefficients' path, by extracting them after each training iteration.

Callback function expects the following values to be set in its calling frame:
\code{bst} (or \code{bst_folds}).
}
\examples{
#### Binary classification:
#
# In the iris dataset, it is hard to linearly separate Versicolor class from the rest
# without considering the 2nd order interactions:
require(magrittr)
x <- model.matrix(Species ~ .^2, iris)[,-1]
colnames(x)
dtrain <- xgb.DMatrix(scale(x), label = 1*(iris$Species == "versicolor"))
param <- list(booster = "gblinear", objective = "reg:logistic", eval_metric = "auc",
              lambda = 0.0003, alpha = 0.0003, nthread = 2)
# For 'shotgun', which is the default linear updater, using high eta values may result in
# unstable behaviour in some datasets. With this simple dataset, however, the high learning
# rate does not break the convergence, but allows us to illustrate the typical pattern of
# "stochastic explosion" behaviour of this lock-free algorithm at early boosting iterations.
bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 200, eta = 1.,
                 callbacks = list(cb.gblinear.history()))
# Extract the coefficients' path and plot them vs boosting iteration number:
coef_path <- xgb.gblinear.history(bst)
matplot(coef_path, type = 'l')

# With the deterministic coordinate descent updater, it is safer to use higher learning rates.
# Will try the classical componentwise boosting which selects a single best feature per round:
bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 200, eta = 0.8,
                 updater = 'coord_descent', feature_selector = 'thrifty', top_k = 1,
                 callbacks = list(cb.gblinear.history()))
xgb.gblinear.history(bst) \%>\% matplot(type = 'l')
# Componentwise boosting is known to have similar effect to Lasso regularization.
# Try experimenting with various values of top_k, eta, nrounds,
# as well as different feature_selectors.

# For xgb.cv:
bst <- xgb.cv(param, dtrain, nfold = 5, nrounds = 100, eta = 0.8,
              callbacks = list(cb.gblinear.history()))
# coefficients in the CV fold #3
xgb.gblinear.history(bst)[[3]] \%>\% matplot(type = 'l')


#### Multiclass classification:
#
dtrain <- xgb.DMatrix(scale(x), label = as.numeric(iris$Species) - 1)
param <- list(booster = "gblinear", objective = "multi:softprob", num_class = 3,
              lambda = 0.0003, alpha = 0.0003, nthread = 2)
# For the default linear updater 'shotgun' it sometimes is helpful
# to use smaller eta to reduce instability
bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 70, eta = 0.5,
                 callbacks = list(cb.gblinear.history()))
# Will plot the coefficient paths separately for each class:
xgb.gblinear.history(bst, class_index = 0) \%>\% matplot(type = 'l')
xgb.gblinear.history(bst, class_index = 1) \%>\% matplot(type = 'l')
xgb.gblinear.history(bst, class_index = 2) \%>\% matplot(type = 'l')

# CV:
bst <- xgb.cv(param, dtrain, nfold = 5, nrounds = 70, eta = 0.5,
              callbacks = list(cb.gblinear.history(FALSE)))
# 1st fold of 1st class
xgb.gblinear.history(bst, class_index = 0)[[1]] \%>\% matplot(type = 'l')

}
\seealso{
\code{\link{callbacks}}, \code{\link{xgb.gblinear.history}}.
}
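The examples above only exercise the dense default; a minimal sketch of the sparse variant, reusing `param` and `dtrain` from the examples (it pairs naturally with the 'thrifty' selector, since only a few coefficients move per iteration):

# Sketch, not part of the generated .Rd: sparse storage of the coefficients' path.
bst <- xgb.train(param, dtrain, list(tr = dtrain), nrounds = 100, eta = 0.8,
                 updater = 'coord_descent', feature_selector = 'thrifty', top_k = 1,
                 callbacks = list(cb.gblinear.history(sparse = TRUE)))
is(xgb.gblinear.history(bst), "sparseMatrix")  # TRUE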
@@ -22,7 +22,7 @@ This is a "pre-iteration" callback function used to reset booster's parameters
at the beginning of each iteration.

Note that when training is resumed from some previous model, and a function is used to
reset a parameter value, the \code{nround} argument in this function would be the
reset a parameter value, the \code{nrounds} argument in this function would be the
number of boosting rounds in the current training.

Callback function expects the following values to be set in its calling frame:

|
||||
\usage{
|
||||
\method{predict}{xgb.Booster}(object, newdata, missing = NA,
|
||||
outputmargin = FALSE, ntreelimit = NULL, predleaf = FALSE,
|
||||
predcontrib = FALSE, approxcontrib = FALSE, reshape = FALSE, ...)
|
||||
predcontrib = FALSE, approxcontrib = FALSE,
|
||||
predinteraction = FALSE, reshape = FALSE, ...)
|
||||
|
||||
\method{predict}{xgb.Booster.handle}(object, ...)
|
||||
}
|
||||
@@ -26,14 +27,17 @@ logistic regression would result in predictions for log-odds instead of probabil
|
||||
\item{ntreelimit}{limit the number of model's trees or boosting iterations used in prediction (see Details).
|
||||
It will use all the trees by default (\code{NULL} value).}
|
||||
|
||||
\item{predleaf}{whether predict leaf index instead.}
|
||||
\item{predleaf}{whether predict leaf index.}
|
||||
|
||||
\item{predcontrib}{whether to return feature contributions to individual predictions instead (see Details).}
|
||||
\item{predcontrib}{whether to return feature contributions to individual predictions (see Details).}
|
||||
|
||||
\item{approxcontrib}{whether to use a fast approximation for feature contributions (see Details).}
|
||||
|
||||
\item{predinteraction}{whether to return contributions of feature interactions to individual predictions (see Details).}
|
||||
|
||||
\item{reshape}{whether to reshape the vector of predictions to a matrix form when there are several
|
||||
prediction outputs per case. This option has no effect when \code{predleaf = TRUE}.}
|
||||
prediction outputs per case. This option has no effect when either of predleaf, predcontrib,
|
||||
or predinteraction flags is TRUE.}
|
||||
|
||||
\item{...}{Parameters passed to \code{predict.xgb.Booster}}
|
||||
}
|
||||
@@ -51,6 +55,14 @@ When \code{predcontrib = TRUE} and it is not a multiclass setting, the output is
|
||||
For a multiclass case, a list of \code{num_class} elements is returned, where each element is
|
||||
such a matrix. The contribution values are on the scale of untransformed margin
|
||||
(e.g., for binary classification would mean that the contributions are log-odds deviations from bias).
|
||||
|
||||
When \code{predinteraction = TRUE} and it is not a multiclass setting, the output is a 3d array with
|
||||
dimensions \code{c(nrow, num_features + 1, num_features + 1)}. The off-diagonal (in the last two dimensions)
|
||||
elements represent different features interaction contributions. The array is symmetric WRT the last
|
||||
two dimensions. The "+ 1" columns corresponds to bias. Summing this array along the last dimension should
|
||||
produce practically the same result as predict with \code{predcontrib = TRUE}.
|
||||
For a multiclass case, a list of \code{num_class} elements is returned, where each element is
|
||||
such an array.
|
||||
}
|
||||
\description{
|
||||
Predicted values based on either xgboost model or model handle object.
|
||||
@@ -76,6 +88,11 @@ values (Lundberg 2017) that sum to the difference between the expected output
|
||||
of the model and the current prediction (where the hessian weights are used to compute the expectations).
|
||||
Setting \code{approxcontrib = TRUE} approximates these values following the idea explained
|
||||
in \url{http://blog.datadive.net/interpreting-random-forests/}.
|
||||
|
||||
With \code{predinteraction = TRUE}, SHAP values of contributions of interaction of each pair of features
|
||||
are computed. Note that this operation might be rather expensive in terms of compute and memory.
|
||||
Since it quadratically depends on the number of features, it is recommended to perfom selection
|
||||
of the most important features first. See below about the format of the returned results.
|
||||
}
|
||||
\examples{
|
||||
## binary classification:
|
||||
|
||||
@@ -63,9 +63,9 @@ dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label)

param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic')
nround = 4
nrounds = 4

bst = xgb.train(params = param, data = dtrain, nrounds = nround, nthread = 2)
bst = xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)

# Model accuracy without new features
accuracy.before <- sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label) /
@@ -79,7 +79,7 @@ new.features.test <- xgb.create.features(model = bst, agaricus.test$data)
new.dtrain <- xgb.DMatrix(data = new.features.train, label = agaricus.train$label)
new.dtest <- xgb.DMatrix(data = new.features.test, label = agaricus.test$label)
watchlist <- list(train = new.dtrain)
bst <- xgb.train(params = param, data = new.dtrain, nrounds = nround, nthread = 2)
bst <- xgb.train(params = param, data = new.dtrain, nrounds = nrounds, nthread = 2)

# Model accuracy with new features
accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) /

@@ -4,11 +4,12 @@
\alias{xgb.cv}
\title{Cross Validation}
\usage{
xgb.cv(params = list(), data, nrounds, nfold, label = NULL, missing = NA,
  prediction = FALSE, showsd = TRUE, metrics = list(), obj = NULL,
  feval = NULL, stratified = TRUE, folds = NULL, verbose = TRUE,
  print_every_n = 1L, early_stopping_rounds = NULL, maximize = NULL,
  callbacks = list(), ...)
xgb.cv(params = list(), data, nrounds, nfold, label = NULL,
  missing = NA, prediction = FALSE, showsd = TRUE,
  metrics = list(), obj = NULL, feval = NULL, stratified = TRUE,
  folds = NULL, verbose = TRUE, print_every_n = 1L,
  early_stopping_rounds = NULL, maximize = NULL, callbacks = list(),
  ...)
}
\arguments{
\item{params}{the list of parameters. Commonly used ones are:
@@ -51,6 +52,7 @@ from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callb
\item \code{rmse} Root mean square error
\item \code{logloss} negative log-likelihood function
\item \code{auc} Area under curve
\item \code{aucpr} Area under PR curve
\item \code{merror} Exact matching error, used to evaluate multi-class classification
}}

@@ -98,12 +100,13 @@ An object of class \code{xgb.cv.synchronous} with the following elements:
\item \code{params} parameters that were passed to the xgboost library. Note that it does not
capture parameters changed by the \code{\link{cb.reset.parameters}} callback.
\item \code{callbacks} callback functions that were either automatically assigned or
explicitely passed.
explicitly passed.
\item \code{evaluation_log} evaluation history stored as a \code{data.table} with the
first column corresponding to iteration number and the rest corresponding to the
CV-based evaluation means and standard deviations for the training and test CV-sets.
It is created by the \code{\link{cb.evaluation.log}} callback.
\item \code{niter} number of boosting iterations.
\item \code{nfeatures} number of features in training data.
\item \code{folds} the list of CV folds' indices - either those passed through the \code{folds}
parameter or randomly generated.
\item \code{best_iteration} iteration number with the best evaluation metric value

@@ -44,7 +44,8 @@ test <- agaricus.test
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
               eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
# save the model in file 'xgb.model.dump'
xgb.dump(bst, 'xgb.model.dump', with_stats = TRUE)
dump_path = file.path(tempdir(), 'model.dump')
xgb.dump(bst, dump_path, with_stats = TRUE)

# print the model without saving it to a file
print(xgb.dump(bst, with_stats = TRUE))

R-package/man/xgb.gblinear.history.Rd (new file, 29 lines)
@@ -0,0 +1,29 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callbacks.R
\name{xgb.gblinear.history}
\alias{xgb.gblinear.history}
\title{Extract gblinear coefficients history.}
\usage{
xgb.gblinear.history(model, class_index = NULL)
}
\arguments{
\item{model}{either an \code{xgb.Booster} or a result of \code{xgb.cv()}, trained
using the \code{cb.gblinear.history()} callback.}

\item{class_index}{zero-based class index to extract the coefficients for only that
specific class in a multinomial multiclass model. When it is NULL, all the
coefficients are returned. Has no effect in non-multiclass models.}
}
\value{
For an \code{xgb.train} result, a matrix (either dense or sparse) with the columns
corresponding to iteration's coefficients (in the order as \code{xgb.dump()} would
return) and the rows corresponding to boosting iterations.

For an \code{xgb.cv} result, a list of such matrices is returned with the elements
corresponding to CV folds.
}
\description{
A helper function to extract the matrix of linear coefficients' history
from a gblinear model created while using the \code{cb.gblinear.history()}
callback.
}

@@ -5,11 +5,11 @@
\alias{xgb.plot.deepness}
\title{Plot model trees deepness}
\usage{
xgb.ggplot.deepness(model = NULL, which = c("2x1", "max.depth", "med.depth",
  "med.weight"))
xgb.ggplot.deepness(model = NULL, which = c("2x1", "max.depth",
  "med.depth", "med.weight"))

xgb.plot.deepness(model = NULL, which = c("2x1", "max.depth", "med.depth",
  "med.weight"), plot = TRUE, ...)
xgb.plot.deepness(model = NULL, which = c("2x1", "max.depth",
  "med.depth", "med.weight"), plot = TRUE, ...)
}
\arguments{
\item{model}{either an \code{xgb.Booster} model generated by the \code{xgb.train} function
@@ -50,7 +50,7 @@ per tree with respect to tree number are created. And \code{which="med.weight"}
a tree's median absolute leaf weight changes through the iterations.

This function was inspired by the blog post
\url{http://aysent.github.io/2015/11/08/random-forest-leaf-visualization.html}.
\url{https://github.com/aysent/random-forest-leaf-visualization}.
}
\examples{

@@ -9,8 +9,8 @@ xgb.ggplot.importance(importance_matrix = NULL, top_n = NULL,
  measure = NULL, rel_to_first = FALSE, n_clusters = c(1:10), ...)

xgb.plot.importance(importance_matrix = NULL, top_n = NULL,
  measure = NULL, rel_to_first = FALSE, left_margin = 10, cex = NULL,
  plot = TRUE, ...)
  measure = NULL, rel_to_first = FALSE, left_margin = 10,
  cex = NULL, plot = TRUE, ...)
}
\arguments{
\item{importance_matrix}{a \code{data.table} returned by \code{\link{xgb.importance}}.}

@@ -6,8 +6,8 @@
\usage{
xgb.plot.shap(data, shap_contrib = NULL, features = NULL, top_n = 1,
  model = NULL, trees = NULL, target_class = NULL,
  approxcontrib = FALSE, subsample = NULL, n_col = 1, col = rgb(0, 0, 1,
  0.2), pch = ".", discrete_n_uniq = 5, discrete_jitter = 0.01,
  approxcontrib = FALSE, subsample = NULL, n_col = 1, col = rgb(0,
  0, 1, 0.2), pch = ".", discrete_n_uniq = 5, discrete_jitter = 0.01,
  ylab = "SHAP", plot_NA = TRUE, col_NA = rgb(0.7, 0, 1, 0.6),
  pch_NA = ".", pos_NA = 1.07, plot_loess = TRUE, col_loess = 2,
  span_loess = 0.5, which = c("1d", "2d"), plot = TRUE, ...)

@@ -5,15 +5,17 @@
|
||||
\alias{xgboost}
|
||||
\title{eXtreme Gradient Boosting Training}
|
||||
\usage{
|
||||
xgb.train(params = list(), data, nrounds, watchlist = list(), obj = NULL,
|
||||
feval = NULL, verbose = 1, print_every_n = 1L,
|
||||
xgb.train(params = list(), data, nrounds, watchlist = list(),
|
||||
obj = NULL, feval = NULL, verbose = 1, print_every_n = 1L,
|
||||
early_stopping_rounds = NULL, maximize = NULL, save_period = NULL,
|
||||
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(), ...)
|
||||
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(),
|
||||
...)
|
||||
|
||||
xgboost(data = NULL, label = NULL, missing = NA, weight = NULL,
|
||||
params = list(), nrounds, verbose = 1, print_every_n = 1L,
|
||||
early_stopping_rounds = NULL, maximize = NULL, save_period = NULL,
|
||||
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(), ...)
|
||||
save_name = "xgboost.model", xgb_model = NULL, callbacks = list(),
|
||||
...)
|
||||
}
|
||||
\arguments{
|
||||
\item{params}{the list of parameters.
|
||||
@@ -35,7 +37,7 @@ xgboost(data = NULL, label = NULL, missing = NA, weight = NULL,
|
||||
\item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. the larger, the more conservative the algorithm will be.
|
||||
\item \code{max_depth} maximum depth of a tree. Default: 6
|
||||
\item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
|
||||
\item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nround}. Default: 1
|
||||
\item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
|
||||
\item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
|
||||
\item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through Xgboost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
|
||||
\item \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length equals to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
|
||||
@@ -155,6 +157,7 @@ An object of class \code{xgb.Booster} with the following elements:
|
||||
(only available with early stopping).
|
||||
\item \code{feature_names} names of the training dataset features
|
||||
(only when comun names were defined in training data).
|
||||
\item \code{nfeatures} number of features in training data.
|
||||
}
|
||||
}
\description{
@@ -179,12 +182,13 @@ The following is the list of built-in metrics for which XGBoost provides optimized implementation:
\itemize{
\item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
\item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
\item \code{mlogloss} multiclass logloss. \url{https://www.kaggle.com/wiki/MultiClassLogLoss/}
\item \code{mlogloss} multiclass logloss. \url{http://wiki.fast.ai/index.php/Log_Loss}
\item \code{error} Binary classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
By default, it uses the 0.5 threshold for predicted values to define negative and positive instances.
A different threshold (e.g., 0.) could be specified as "error@0."
\item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
\item \code{auc} Area under the curve. \url{http://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_curve} for ranking evaluation.
\item \code{aucpr} Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation.
\item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{http://en.wikipedia.org/wiki/NDCG}
}
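For illustration, a hedged sketch of requesting one of these built-in metrics during training; the `error@0.6` string uses an example threshold of 0.6 (the exact digit in the truncated doc line above is not filled in here) and assumes the agaricus data bundled with the package:

```r
library(xgboost)
data(agaricus.train, package = "xgboost")
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)

# track binary classification error at a non-default 0.6 threshold
bst <- xgb.train(params = list(objective = "binary:logistic",
                               eval_metric = "error@0.6"),
                 data = dtrain, nrounds = 2,
                 watchlist = list(train = dtrain))
```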
14
R-package/remove_warning_suppression_pragma.sh
Executable file
@@ -0,0 +1,14 @@
#!/bin/bash
# remove all #pragma's that suppress compiler warnings
set -e
set -x
for file in xgboost/src/dmlc-core/include/dmlc/*.h
do
  sed -i.bak -e 's/^.*#pragma GCC diagnostic.*$//' -e 's/^.*#pragma clang diagnostic.*$//' -e 's/^.*#pragma warning.*$//' "${file}"
done
for file in xgboost/src/dmlc-core/include/dmlc/*.h.bak
do
  rm "${file}"
done
set +x
set +e
@@ -10,9 +10,15 @@ XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\
        -DDMLC_LOG_CUSTOMIZE=1 -DXGBOOST_CUSTOMIZE_LOGGER=1\
        -DRABIT_CUSTOMIZE_MSG_ -DRABIT_STRICT_CXX98_

# disable the use of thread_local for 32 bit windows:
ifeq ($(R_OSTYPE)$(WIN),windows)
        XGB_RFLAGS += -DDMLC_CXX11_THREAD_LOCAL=0
endif
$(foreach v, $(XGB_RFLAGS), $(warning $(v)))

PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
PKG_CXXFLAGS= @OPENMP_CXXFLAGS@ $(SHLIB_PTHREAD_FLAGS)
PKG_LIBS = @OPENMP_CXXFLAGS@ $(SHLIB_PTHREAD_FLAGS)
PKG_CXXFLAGS= @OPENMP_CXXFLAGS@ -pthread
PKG_LIBS = @OPENMP_CXXFLAGS@ -pthread
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o\
        $(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\
        $(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o
@@ -4,7 +4,7 @@ ENABLE_STD_THREAD=0
# _*_ mode: Makefile; _*_

# This file is only used for windows compilation from github
# It will be replaced by Makevars in CRAN version
# It will be replaced with Makevars.in for the CRAN version
.PHONY: all xgblib
all: $(SHLIB)
$(SHLIB): xgblib
@@ -22,9 +22,15 @@ XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\
        -DDMLC_LOG_CUSTOMIZE=1 -DXGBOOST_CUSTOMIZE_LOGGER=1\
        -DRABIT_CUSTOMIZE_MSG_ -DRABIT_STRICT_CXX98_

# disable the use of thread_local for 32 bit windows:
ifeq ($(R_OSTYPE)$(WIN),windows)
        XGB_RFLAGS += -DDMLC_CXX11_THREAD_LOCAL=0
endif
$(foreach v, $(XGB_RFLAGS), $(warning $(v)))

PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
PKG_CXXFLAGS= $(SHLIB_OPENMP_CFLAGS) $(SHLIB_PTHREAD_FLAGS)
PKG_LIBS = $(SHLIB_OPENMP_CFLAGS) $(SHLIB_PTHREAD_FLAGS)
PKG_CXXFLAGS= $(SHLIB_OPENMP_CXXFLAGS) $(SHLIB_PTHREAD_FLAGS)
PKG_LIBS = $(SHLIB_OPENMP_CXXFLAGS) $(SHLIB_PTHREAD_FLAGS)
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o\
        $(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\
        $(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o
@@ -1,5 +1,5 @@
/* Copyright (c) 2015 by Contributors
 *
 *
 * This file was initially generated using the following R command:
 * tools::package_native_routine_registration_skeleton('.', con = 'src/init.c', character_only = F)
 * and edited to conform to xgboost C linter requirements. For details, see
@@ -10,7 +10,7 @@
#include <stdlib.h>
#include <R_ext/Rdynload.h>

/* FIXME:
/* FIXME:
  Check these declarations against the C/Fortran source code.
*/
@@ -19,10 +19,10 @@ extern SEXP XGBoosterBoostOneIter_R(SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterCreate_R(SEXP);
extern SEXP XGBoosterDumpModel_R(SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterEvalOneIter_R(SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterGetAttr_R(SEXP, SEXP);
extern SEXP XGBoosterGetAttrNames_R(SEXP);
extern SEXP XGBoosterLoadModel_R(SEXP, SEXP);
extern SEXP XGBoosterGetAttr_R(SEXP, SEXP);
extern SEXP XGBoosterLoadModelFromRaw_R(SEXP, SEXP);
extern SEXP XGBoosterLoadModel_R(SEXP, SEXP);
extern SEXP XGBoosterModelToRaw_R(SEXP);
extern SEXP XGBoosterPredict_R(SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterSaveModel_R(SEXP, SEXP);
@@ -45,10 +45,10 @@ static const R_CallMethodDef CallEntries[] = {
  {"XGBoosterCreate_R", (DL_FUNC) &XGBoosterCreate_R, 1},
  {"XGBoosterDumpModel_R", (DL_FUNC) &XGBoosterDumpModel_R, 4},
  {"XGBoosterEvalOneIter_R", (DL_FUNC) &XGBoosterEvalOneIter_R, 4},
  {"XGBoosterGetAttr_R", (DL_FUNC) &XGBoosterGetAttr_R, 2},
  {"XGBoosterGetAttrNames_R", (DL_FUNC) &XGBoosterGetAttrNames_R, 1},
  {"XGBoosterLoadModel_R", (DL_FUNC) &XGBoosterLoadModel_R, 2},
  {"XGBoosterGetAttr_R", (DL_FUNC) &XGBoosterGetAttr_R, 2},
  {"XGBoosterLoadModelFromRaw_R", (DL_FUNC) &XGBoosterLoadModelFromRaw_R, 2},
  {"XGBoosterLoadModel_R", (DL_FUNC) &XGBoosterLoadModel_R, 2},
  {"XGBoosterModelToRaw_R", (DL_FUNC) &XGBoosterModelToRaw_R, 1},
  {"XGBoosterPredict_R", (DL_FUNC) &XGBoosterPredict_R, 4},
  {"XGBoosterSaveModel_R", (DL_FUNC) &XGBoosterSaveModel_R, 2},
@@ -70,7 +70,7 @@ static const R_CallMethodDef CallEntries[] = {

#if defined(_WIN32)
__declspec(dllexport)
#endif
#endif  // defined(_WIN32)
void R_init_xgboost(DllInfo *dll) {
  R_registerRoutines(dll, NULL, CallEntries, NULL, NULL);
  R_useDynamicSymbols(dll, FALSE);
@@ -32,7 +32,10 @@ extern "C" {

namespace xgboost {
ConsoleLogger::~ConsoleLogger() {
  dmlc::CustomLogMessage::Log(log_stream_.str());
  if (cur_verbosity_ == LogVerbosity::kIgnore ||
      cur_verbosity_ <= global_verbosity_) {
    dmlc::CustomLogMessage::Log(log_stream_.str());
  }
}
TrackerLogger::~TrackerLogger() {
  dmlc::CustomLogMessage::Log(log_stream_.str());
@@ -46,10 +49,11 @@ namespace common {
bool CheckNAN(double v) {
  return ISNAN(v);
}
#if !defined(XGBOOST_USE_CUDA)
double LogGamma(double v) {
  return lgammafn(v);
}

#endif  // !defined(XGBOOST_USE_CUDA)
// customize random engine.
void CustomGlobalRandomEngine::seed(CustomGlobalRandomEngine::result_type val) {
  // ignore the seed
@@ -11,6 +11,7 @@ set.seed(1994)
# disable some tests for Win32
windows_flag = .Platform$OS.type == "windows" &&
  .Machine$sizeof.pointer != 8
solaris_flag = (Sys.info()['sysname'] == "SunOS")

test_that("train and predict binary classification", {
  nrounds = 2
@@ -152,20 +153,20 @@ test_that("training continuation works", {
  bst1 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0)
  # continue for two more:
  bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, xgb_model = bst1)
  if (!windows_flag)
  if (!windows_flag && !solaris_flag)
    expect_equal(bst$raw, bst2$raw)
  expect_false(is.null(bst2$evaluation_log))
  expect_equal(dim(bst2$evaluation_log), c(4, 2))
  expect_equal(bst2$evaluation_log, bst$evaluation_log)
  # test continuing from raw model data
  bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, xgb_model = bst1$raw)
  if (!windows_flag)
  if (!windows_flag && !solaris_flag)
    expect_equal(bst$raw, bst2$raw)
  expect_equal(dim(bst2$evaluation_log), c(2, 2))
  # test continuing from a model in file
  xgb.save(bst1, "xgboost.model")
  bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, xgb_model = "xgboost.model")
  if (!windows_flag)
  if (!windows_flag && !solaris_flag)
    expect_equal(bst$raw, bst2$raw)
  expect_equal(dim(bst2$evaluation_log), c(2, 2))
})
@@ -181,7 +182,7 @@ test_that("xgb.cv works", {
  expect_is(cv, 'xgb.cv.synchronous')
  expect_false(is.null(cv$evaluation_log))
  expect_lt(cv$evaluation_log[, min(test_error_mean)], 0.03)
  expect_lt(cv$evaluation_log[, min(test_error_std)], 0.004)
  expect_lt(cv$evaluation_log[, min(test_error_std)], 0.008)
  expect_equal(cv$niter, 2)
  expect_false(is.null(cv$folds) && is.list(cv$folds))
  expect_length(cv$folds, 5)
@@ -222,3 +223,42 @@ test_that("train and predict with non-strict classes", {
  expect_error(pr <- predict(bst, train_dense), regexp = NA)
  expect_equal(pr0, pr)
})

test_that("max_delta_step works", {
  dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
  watchlist <- list(train = dtrain)
  param <- list(objective = "binary:logistic", eval_metric="logloss", max_depth = 2, nthread = 2, eta = 0.5)
  nrounds = 5
  # model with no restriction on max_delta_step
  bst1 <- xgb.train(param, dtrain, nrounds, watchlist, verbose = 1)
  # model with restricted max_delta_step
  bst2 <- xgb.train(param, dtrain, nrounds, watchlist, verbose = 1, max_delta_step = 1)
  # the no-restriction model is expected to have consistently lower loss during the initial iterations
  expect_true(all(bst1$evaluation_log$train_logloss < bst2$evaluation_log$train_logloss))
  expect_lt(mean(bst1$evaluation_log$train_logloss)/mean(bst2$evaluation_log$train_logloss), 0.8)
})

test_that("colsample_bytree works", {
  # Randomly generate data matrix by sampling from uniform distribution [-1, 1]
  set.seed(1)
  train_x <- matrix(runif(1000, min = -1, max = 1), ncol = 100)
  train_y <- as.numeric(rowSums(train_x) > 0)
  test_x <- matrix(runif(1000, min = -1, max = 1), ncol = 100)
  test_y <- as.numeric(rowSums(test_x) > 0)
  colnames(train_x) <- paste0("Feature_", sprintf("%03d", 1:100))
  colnames(test_x) <- paste0("Feature_", sprintf("%03d", 1:100))
  dtrain <- xgb.DMatrix(train_x, label = train_y)
  dtest <- xgb.DMatrix(test_x, label = test_y)
  watchlist <- list(train = dtrain, eval = dtest)
  # Use colsample_bytree = 0.01, so that roughly one out of 100 features is
  # chosen for each tree
  param <- list(max_depth = 2, eta = 0, silent = 1, nthread = 2,
                colsample_bytree = 0.01, objective = "binary:logistic",
                eval_metric = "auc")
  set.seed(2)
  bst <- xgb.train(param, dtrain, nrounds = 100, watchlist, verbose = 0)
  xgb.importance(model = bst)
  # If colsample_bytree works properly, a variety of features should be used
  # in the 100 trees
  expect_gte(nrow(xgb.importance(model = bst)), 30)
})
@@ -282,7 +282,7 @@ test_that("prediction in xgb.cv works for gblinear too", {
})

test_that("prediction in early-stopping xgb.cv works", {
  set.seed(1)
  set.seed(11)
  expect_output(
    cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.1, nrounds = 20,
                 early_stopping_rounds = 5, maximize = FALSE, prediction = TRUE)
@@ -77,6 +77,18 @@ test_that("xgb.DMatrix: slice, dim", {
  expect_equal(getinfo(dsub1, 'label'), getinfo(dsub2, 'label'))
})

test_that("xgb.DMatrix: slice, trailing empty rows", {
  data(agaricus.train, package='xgboost')
  train_data <- agaricus.train$data
  train_label <- agaricus.train$label
  dtrain <- xgb.DMatrix(data=train_data, label=train_label)
  slice(dtrain, 6513L)
  train_data[6513, ] <- 0
  dtrain <- xgb.DMatrix(data=train_data, label=train_label)
  slice(dtrain, 6513L)
  expect_equal(nrow(dtrain), 6513)
})

test_that("xgb.DMatrix: colnames", {
  dtest <- xgb.DMatrix(test_data, label=test_label)
  expect_equal(colnames(dtest), colnames(test_data))
@@ -9,7 +9,7 @@ test_that("train and prediction when gctorture is on", {
  test <- agaricus.test
  gctorture(TRUE)
  bst <- xgboost(data = train$data, label = train$label, max.depth = 2,
                 eta = 1, nthread = 2, nround = 2, objective = "binary:logistic")
                 eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
  pred <- predict(bst, test$data)
  gctorture(FALSE)
})
@@ -2,18 +2,47 @@ context('Test generalized linear models')

require(xgboost)

test_that("glm works", {
test_that("gblinear works", {
  data(agaricus.train, package='xgboost')
  data(agaricus.test, package='xgboost')
  dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
  dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
  expect_equal(class(dtrain), "xgb.DMatrix")
  expect_equal(class(dtest), "xgb.DMatrix")

  param <- list(objective = "binary:logistic", booster = "gblinear",
                nthread = 2, alpha = 0.0001, lambda = 1)
                nthread = 2, eta = 0.8, alpha = 0.0001, lambda = 0.0001)
  watchlist <- list(eval = dtest, train = dtrain)
  num_round <- 2
  bst <- xgb.train(param, dtrain, num_round, watchlist)

  n <- 5  # iterations
  ERR_UL <- 0.005  # upper limit for the test set error
  VERB <- 0  # chatterbox switch

  param$updater = 'shotgun'
  bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'shuffle')
  ypred <- predict(bst, dtest)
  expect_equal(length(getinfo(dtest, 'label')), 1611)
  expect_lt(bst$evaluation_log$eval_error[n], ERR_UL)

  bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'cyclic',
                   callbacks = list(cb.gblinear.history()))
  expect_lt(bst$evaluation_log$eval_error[n], ERR_UL)
  h <- xgb.gblinear.history(bst)
  expect_equal(dim(h), c(n, ncol(dtrain) + 1))
  expect_is(h, "matrix")

  param$updater = 'coord_descent'
  bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'cyclic')
  expect_lt(bst$evaluation_log$eval_error[n], ERR_UL)

  bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'shuffle')
  expect_lt(bst$evaluation_log$eval_error[n], ERR_UL)

  bst <- xgb.train(param, dtrain, 2, watchlist, verbose = VERB, feature_selector = 'greedy')
  expect_lt(bst$evaluation_log$eval_error[2], ERR_UL)

  bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'thrifty',
                   top_n = 50, callbacks = list(cb.gblinear.history(sparse = TRUE)))
  expect_lt(bst$evaluation_log$eval_error[n], ERR_UL)
  h <- xgb.gblinear.history(bst)
  expect_equal(dim(h), c(n, ncol(dtrain) + 1))
  expect_s4_class(h, "dgCMatrix")
})
@@ -5,6 +5,11 @@ require(data.table)
require(Matrix)
require(vcd, quietly = TRUE)

float_tolerance = 5e-6

# disable some tests for Win32
win32_flag = .Platform$OS.type == "windows" && .Machine$sizeof.pointer != 8

set.seed(1982)
data(Arthritis)
df <- data.table(Arthritis, keep.rownames = F)
@@ -39,15 +44,18 @@ mbst.GLM <- xgboost(data = as.matrix(iris[, -5]), label = mlabel, verbose = 0,


test_that("xgb.dump works", {
  expect_length(xgb.dump(bst.Tree), 200)
  expect_true(xgb.dump(bst.Tree, 'xgb.model.dump', with_stats = T))
  expect_true(file.exists('xgb.model.dump'))
  expect_gt(file.size('xgb.model.dump'), 8000)
  if (!win32_flag)
    expect_length(xgb.dump(bst.Tree), 200)
  dump_file = file.path(tempdir(), 'xgb.model.dump')
  expect_true(xgb.dump(bst.Tree, dump_file, with_stats = T))
  expect_true(file.exists(dump_file))
  expect_gt(file.size(dump_file), 8000)

  # JSON format
  dmp <- xgb.dump(bst.Tree, dump_format = "json")
  expect_length(dmp, 1)
  expect_length(grep('nodeid', strsplit(dmp, '\n')[[1]]), 188)
  if (!win32_flag)
    expect_length(grep('nodeid', strsplit(dmp, '\n')[[1]]), 188)
})

test_that("xgb.dump works for gblinear", {
@@ -85,7 +93,8 @@ test_that("predict feature contributions works", {
  X <- sparse_matrix
  colnames(X) <- NULL
  expect_error(pred_contr_ <- predict(bst.Tree, X, predcontrib = TRUE), regexp = NA)
  expect_equal(pred_contr, pred_contr_, check.attributes = FALSE)
  expect_equal(pred_contr, pred_contr_, check.attributes = FALSE,
               tolerance = float_tolerance)

  # gbtree binary classifier (approximate method)
  expect_error(pred_contr <- predict(bst.Tree, sparse_matrix, predcontrib = TRUE, approxcontrib = TRUE), regexp = NA)
@@ -104,7 +113,8 @@ test_that("predict feature contributions works", {
  coefs <- xgb.dump(bst.GLM)[-c(1,2,4)] %>% as.numeric
  coefs <- c(coefs[-1], coefs[1])  # intercept must be the last
  pred_contr_manual <- sweep(cbind(sparse_matrix, 1), 2, coefs, FUN="*")
  expect_equal(as.numeric(pred_contr), as.numeric(pred_contr_manual), 1e-5)
  expect_equal(as.numeric(pred_contr), as.numeric(pred_contr_manual),
               tolerance = float_tolerance)

  # gbtree multiclass
  pred <- predict(mbst.Tree, as.matrix(iris[, -5]), outputmargin = TRUE, reshape = TRUE)
@@ -123,11 +133,12 @@ test_that("predict feature contributions works", {
  coefs_all <- xgb.dump(mbst.GLM)[-c(1,2,6)] %>% as.numeric %>% matrix(ncol = 3, byrow = TRUE)
  for (g in seq_along(pred_contr)) {
    expect_equal(colnames(pred_contr[[g]]), c(colnames(iris[, -5]), "BIAS"))
    expect_lt(max(abs(rowSums(pred_contr[[g]]) - pred[, g])), 2e-6)
    expect_lt(max(abs(rowSums(pred_contr[[g]]) - pred[, g])), float_tolerance)
    # manual calculation of linear terms
    coefs <- c(coefs_all[-1, g], coefs_all[1, g])  # intercept needs to be the last
    pred_contr_manual <- sweep(as.matrix(cbind(iris[,-5], 1)), 2, coefs, FUN="*")
    expect_equal(as.numeric(pred_contr[[g]]), as.numeric(pred_contr_manual), 2e-6)
    expect_equal(as.numeric(pred_contr[[g]]), as.numeric(pred_contr_manual),
                 tolerance = float_tolerance)
  }
})
@@ -171,14 +182,16 @@ if (grepl('Windows', Sys.info()[['sysname']]) ||
  # check that lossless conversion works with 17 digits
  # numeric -> character -> numeric
  X <- 10^runif(100, -20, 20)
  X2X <- as.numeric(format(X, digits = 17))
  expect_identical(X, X2X)
  if (capabilities('long.double')) {
    X2X <- as.numeric(format(X, digits = 17))
    expect_identical(X, X2X)
  }
  # retrieved attributes to be the same as written
  for (x in X) {
    xgb.attr(bst.Tree, "x") <- x
    expect_identical(as.numeric(xgb.attr(bst.Tree, "x")), x)
    expect_equal(as.numeric(xgb.attr(bst.Tree, "x")), x, tolerance = float_tolerance)
    xgb.attributes(bst.Tree) <- list(a = "A", b = x)
    expect_identical(as.numeric(xgb.attr(bst.Tree, "b")), x)
    expect_equal(as.numeric(xgb.attr(bst.Tree, "b")), x, tolerance = float_tolerance)
  }
})
}
@@ -187,7 +200,7 @@ test_that("xgb.Booster serializing as R object works", {
  saveRDS(bst.Tree, 'xgb.model.rds')
  bst <- readRDS('xgb.model.rds')
  dtrain <- xgb.DMatrix(sparse_matrix, label = label)
  expect_equal(predict(bst.Tree, dtrain), predict(bst, dtrain))
  expect_equal(predict(bst.Tree, dtrain), predict(bst, dtrain), tolerance = float_tolerance)
  expect_equal(xgb.dump(bst.Tree), xgb.dump(bst))
  xgb.save(bst, 'xgb.model')
  nil_ptr <- new("externalptr")
@@ -195,14 +208,15 @@ test_that("xgb.Booster serializing as R object works", {
  expect_true(identical(bst$handle, nil_ptr))
  bst <- xgb.Booster.complete(bst)
  expect_true(!identical(bst$handle, nil_ptr))
  expect_equal(predict(bst.Tree, dtrain), predict(bst, dtrain))
  expect_equal(predict(bst.Tree, dtrain), predict(bst, dtrain), tolerance = float_tolerance)
})
test_that("xgb.model.dt.tree works with and without feature names", {
|
||||
names.dt.trees <- c("Tree", "Node", "ID", "Feature", "Split", "Yes", "No", "Missing", "Quality", "Cover")
|
||||
dt.tree <- xgb.model.dt.tree(feature_names = feature.names, model = bst.Tree)
|
||||
expect_equal(names.dt.trees, names(dt.tree))
|
||||
expect_equal(dim(dt.tree), c(188, 10))
|
||||
if (!win32_flag)
|
||||
expect_equal(dim(dt.tree), c(188, 10))
|
||||
expect_output(str(dt.tree), 'Feature.*\\"Age\\"')
|
||||
|
||||
dt.tree.0 <- xgb.model.dt.tree(model = bst.Tree)
|
||||
@@ -228,18 +242,20 @@ test_that("xgb.model.dt.tree throws error for gblinear", {
|
||||
|
||||
test_that("xgb.importance works with and without feature names", {
|
||||
importance.Tree <- xgb.importance(feature_names = feature.names, model = bst.Tree)
|
||||
expect_equal(dim(importance.Tree), c(7, 4))
|
||||
if (!win32_flag)
|
||||
expect_equal(dim(importance.Tree), c(7, 4))
|
||||
expect_equal(colnames(importance.Tree), c("Feature", "Gain", "Cover", "Frequency"))
|
||||
expect_output(str(importance.Tree), 'Feature.*\\"Age\\"')
|
||||
|
||||
importance.Tree.0 <- xgb.importance(model = bst.Tree)
|
||||
expect_equal(importance.Tree, importance.Tree.0)
|
||||
expect_equal(importance.Tree, importance.Tree.0, tolerance = float_tolerance)
|
||||
|
||||
# when model contains no feature names:
|
||||
bst.Tree.x <- bst.Tree
|
||||
bst.Tree.x$feature_names <- NULL
|
||||
importance.Tree.x <- xgb.importance(model = bst.Tree)
|
||||
expect_equal(importance.Tree[, -1, with=FALSE], importance.Tree.x[, -1, with=FALSE])
|
||||
expect_equal(importance.Tree[, -1, with=FALSE], importance.Tree.x[, -1, with=FALSE],
|
||||
tolerance = float_tolerance)
|
||||
|
||||
imp2plot <- xgb.plot.importance(importance_matrix = importance.Tree)
|
||||
expect_equal(colnames(imp2plot), c("Feature", "Gain", "Cover", "Frequency", "Importance"))
|
||||
|
||||
38
R-package/tests/testthat/test_interaction_constraints.R
Normal file
@@ -0,0 +1,38 @@
require(xgboost)

context("interaction constraints")

set.seed(1024)
x1 <- rnorm(1000, 1)
x2 <- rnorm(1000, 1)
x3 <- sample(c(1,2,3), size=1000, replace=TRUE)
y <- x1 + x2 + x3 + x1*x2*x3 + rnorm(1000, 0.001) + 3*sin(x1)
train <- matrix(c(x1,x2,x3), ncol = 3)

test_that("interaction constraints for regression", {
  # Fit a model that only allows interaction between x1 and x2
  bst <- xgboost(data = train, label = y, max_depth = 3,
                 eta = 0.1, nthread = 2, nrounds = 100, verbose = 0,
                 interaction_constraints = list(c(0,1)))

  # Set all observations to have the same x3 values then increment
  # by the same amount
  preds <- lapply(c(1,2,3), function(x){
    tmat <- matrix(c(x1,x2,rep(x,1000)), ncol=3)
    return(predict(bst, tmat))
  })

  # Check incrementing x3 has the same effect on all observations
  # since x3 is constrained to be independent of x1 and x2
  # and all observations start off from the same x3 value
  diff1 <- preds[[2]] - preds[[1]]
  test1 <- all(abs(diff1 - diff1[1]) < 1e-4)

  diff2 <- preds[[3]] - preds[[2]]
  test2 <- all(abs(diff2 - diff2[1]) < 1e-4)

  expect_true({
    test1 & test2
  }, "Interaction Constraint Satisfied")

})
141
R-package/tests/testthat/test_interactions.R
Normal file
@@ -0,0 +1,141 @@
context('Test prediction of feature interactions')

require(xgboost)
require(magrittr)

set.seed(123)

test_that("predict feature interactions works", {
  # simulate some binary data and a linear outcome with an interaction term
  N <- 1000
  P <- 5
  X <- matrix(rbinom(N * P, 1, 0.5), ncol=P, dimnames = list(NULL, letters[1:P]))
  # center the data (as contributions are computed WRT feature means)
  X <- scale(X, scale=FALSE)

  # outcome without any interactions, without any noise:
  f <- function(x) 2 * x[, 1] - 3 * x[, 2]
  # outcome with interactions, without noise:
  f_int <- function(x) f(x) + 2 * x[, 2] * x[, 3]
  # outcome with interactions, with noise:
  #f_int_noise <- function(x) f_int(x) + rnorm(N, 0, 0.3)

  y <- f_int(X)

  dm <- xgb.DMatrix(X, label = y)
  param <- list(eta=0.1, max_depth=4, base_score=mean(y), lambda=0, nthread=2)
  b <- xgb.train(param, dm, 100)

  pred = predict(b, dm, outputmargin=TRUE)

  # SHAP contributions:
  cont <- predict(b, dm, predcontrib=TRUE)
  expect_equal(dim(cont), c(N, P+1))
  # make sure for each row they add up to marginal predictions
  max(abs(rowSums(cont) - pred)) %>% expect_lt(0.001)
  # Hand-construct the 'ground truth' feature contributions:
  gt_cont <- cbind(
    2. * X[, 1],
    -3. * X[, 2] + 1. * X[, 2] * X[, 3],  # attribute a HALF of the interaction term to feature #2
    1. * X[, 2] * X[, 3]                  # and another HALF of the interaction term to feature #3
  )
  gt_cont <- cbind(gt_cont, matrix(0, nrow=N, ncol=P + 1 - 3))
  # These should be relatively close:
  expect_lt(max(abs(cont - gt_cont)), 0.05)

  # SHAP interaction contributions:
  intr <- predict(b, dm, predinteraction=TRUE)
  expect_equal(dim(intr), c(N, P+1, P+1))
  # check assigned colnames
  cn <- c(letters[1:P], "BIAS")
  expect_equal(dimnames(intr), list(NULL, cn, cn))

  # check the symmetry
  max(abs(aperm(intr, c(1,3,2)) - intr)) %>% expect_lt(0.00001)

  # sums WRT columns must be close to feature contributions
  max(abs(apply(intr, c(1,2), sum) - cont)) %>% expect_lt(0.00001)

  # diagonal terms for features 3,4,5 must be close to zero
  Reduce(max, sapply(3:P, function(i) max(abs(intr[, i, i])))) %>% expect_lt(0.05)

  # BIAS must have no interactions
  max(abs(intr[, 1:P, P+1])) %>% expect_lt(0.00001)

  # interactions other than 2 x 3 must be close to zero
  intr23 <- intr
  intr23[,2,3] <- 0
  Reduce(max, sapply(1:P, function(i) max(abs(intr23[, i, (i+1):(P+1)])))) %>% expect_lt(0.05)

  # Construct the 'ground truth' contributions of interactions directly from the linear terms:
  gt_intr <- array(0, c(N, P+1, P+1))
  gt_intr[,2,3] <- 1. * X[, 2] * X[, 3]  # attribute a HALF of the interaction term to each symmetric element
  gt_intr[,3,2] <- gt_intr[, 2, 3]
  # merge-in the diagonal based on 'ground truth' feature contributions
  intr_diag = gt_cont - apply(gt_intr, c(1,2), sum)
  for(j in seq_len(P)) {
    gt_intr[,j,j] = intr_diag[,j]
  }
  # These should be relatively close:
  expect_lt(max(abs(intr - gt_intr)), 0.1)
})

test_that("SHAP contribution values are not NAN", {
  d <- data.frame(
    x1 = c(-2.3, 1.4, 5.9, 2, 2.5, 0.3, -3.6, -0.2, 0.5, -2.8, -4.6, 3.3, -1.2,
           -1.1, -2.3, 0.4, -1.5, -0.2, -1, 3.7),
    x2 = c(291.179171, 269.198331, 289.942097, 283.191669, 269.673332,
           294.158346, 287.255835, 291.530838, 285.899586, 269.290833,
           268.649586, 291.530841, 280.074593, 269.484168, 293.94042,
           294.327506, 296.20709, 295.441669, 283.16792, 270.227085),
    y = c(9, 15, 5.7, 9.2, 22.4, 5, 9, 3.2, 7.2, 13.1, 7.8, 16.9, 6.5, 22.1,
          5.3, 10.4, 11.1, 13.9, 11, 20.5),
    fold = c(2, 2, 2, 1, 2, 2, 1, 2, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2))

  ivs <- c("x1", "x2")

  fit <- xgboost(
    verbose = 0,
    params = list(
      objective = "reg:linear",
      eval_metric = "rmse"),
    data = as.matrix(subset(d, fold == 2)[, ivs]),
    label = subset(d, fold == 2)$y,
    nthread = 1,
    nrounds = 3)

  shaps <- as.data.frame(predict(fit,
    newdata = as.matrix(subset(d, fold == 1)[, ivs]),
    predcontrib = T))
  result <- cbind(shaps, sum = rowSums(shaps), pred = predict(fit,
    newdata = as.matrix(subset(d, fold == 1)[, ivs])))

  expect_true(identical(TRUE, all.equal(result$sum, result$pred, tol = 1e-6)))
})

test_that("multiclass feature interactions work", {
  dm <- xgb.DMatrix(as.matrix(iris[,-5]), label=as.numeric(iris$Species)-1)
  param <- list(eta=0.1, max_depth=4, objective='multi:softprob', num_class=3)
  b <- xgb.train(param, dm, 40)
  pred = predict(b, dm, outputmargin=TRUE) %>% array(c(3, 150)) %>% t

  # SHAP contributions:
  cont <- predict(b, dm, predcontrib=TRUE)
  expect_length(cont, 3)
  # rewrap them as a 3d array
  cont <- unlist(cont) %>% array(c(150, 5, 3))
  # make sure for each row they add up to marginal predictions
  max(abs(apply(cont, c(1,3), sum) - pred)) %>% expect_lt(0.001)

  # SHAP interaction contributions:
  intr <- predict(b, dm, predinteraction=TRUE)
  expect_length(intr, 3)
  # rewrap them as a 4d array
  intr <- unlist(intr) %>% array(c(150, 5, 5, 3)) %>% aperm(c(4, 1, 2, 3))  # [grp, row, col, col]
  # check the symmetry
  max(abs(aperm(intr, c(1,2,4,3)) - intr)) %>% expect_lt(0.00001)
  # sums WRT columns must be close to feature contributions
  max(abs(apply(intr, c(1,2,3), sum) - aperm(cont, c(3,1,2)))) %>% expect_lt(0.00001)
})
@@ -7,6 +7,10 @@ data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)

# Disable flaky tests for 32-bit Windows.
# See https://github.com/dmlc/xgboost/issues/3720
win32_flag = .Platform$OS.type == "windows" && .Machine$sizeof.pointer != 8

test_that("updating the model works", {
  watchlist = list(train = dtrain, test = dtest)

@@ -29,7 +33,9 @@ test_that("updating the model works", {
  tr1r <- xgb.model.dt.tree(model = bst1r)
  # all should be the same when no subsampling
  expect_equal(bst1$evaluation_log, bst1r$evaluation_log)
  expect_equal(tr1, tr1r, tolerance = 0.00001, check.attributes = FALSE)
  if (!win32_flag) {
    expect_equal(tr1, tr1r, tolerance = 0.00001, check.attributes = FALSE)
  }

  # the same boosting with subsampling with an extra 'refresh' updater:
  p2r <- modifyList(p2, list(updater = 'grow_colmaker,prune,refresh', refresh_leaf = FALSE))
@@ -38,7 +44,9 @@ test_that("updating the model works", {
  tr2r <- xgb.model.dt.tree(model = bst2r)
  # should be the same evaluation but different gains and larger cover
  expect_equal(bst2$evaluation_log, bst2r$evaluation_log)
  expect_equal(tr2[Feature == 'Leaf']$Quality, tr2r[Feature == 'Leaf']$Quality)
  if (!win32_flag) {
    expect_equal(tr2[Feature == 'Leaf']$Quality, tr2r[Feature == 'Leaf']$Quality)
  }
  expect_gt(sum(abs(tr2[Feature != 'Leaf']$Quality - tr2r[Feature != 'Leaf']$Quality)), 100)
  expect_gt(sum(tr2r$Cover) / sum(tr2$Cover), 1.5)

@@ -61,7 +69,9 @@ test_that("updating the model works", {
  expect_gt(sum(tr2u$Cover) / sum(tr2$Cover), 1.5)
  # the results should be the same as for the model with an extra 'refresh' updater
  expect_equal(bst2r$evaluation_log, bst2u$evaluation_log)
  expect_equal(tr2r, tr2u, tolerance = 0.00001, check.attributes = FALSE)
  if (!win32_flag) {
    expect_equal(tr2r, tr2u, tolerance = 0.00001, check.attributes = FALSE)
  }

  # process type 'update' for no-subsampling model, refreshing only the tree stats from TEST data:
  p1ut <- modifyList(p1, list(process_type = 'update', updater = 'refresh', refresh_leaf = FALSE))
39
README.md
@@ -1,51 +1,34 @@
<img src=https://raw.githubusercontent.com/dmlc/dmlc.github.io/master/img/logo-m/xgboost.png width=135/> eXtreme Gradient Boosting
===========
[](https://xgboost-ci.net/blue/organizations/jenkins/xgboost/activity)
[](https://travis-ci.org/dmlc/xgboost)
[](https://ci.appveyor.com/project/tqchen/xgboost)
[](https://xgboost.readthedocs.org)
[](./LICENSE)
[](http://cran.r-project.org/web/packages/xgboost)
[](https://pypi.python.org/pypi/xgboost/)
[](https://gitter.im/dmlc/xgboost?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

[Community](https://xgboost.ai/community) |
[Documentation](https://xgboost.readthedocs.org) |
[Resources](demo/README.md) |
[Installation](https://xgboost.readthedocs.org/en/latest/build.html) |
[Release Notes](NEWS.md) |
[RoadMap](https://github.com/dmlc/xgboost/issues/873)
[Contributors](CONTRIBUTORS.md) |
[Release Notes](NEWS.md)

XGBoost is an optimized distributed gradient boosting library designed to be highly ***efficient***, ***flexible*** and ***portable***.
It implements machine learning algorithms under the [Gradient Boosting](https://en.wikipedia.org/wiki/Gradient_boosting) framework.
XGBoost provides a parallel tree boosting (also known as GBDT, GBM) that solves many data science problems in a fast and accurate way.
The same code runs on major distributed environments (Hadoop, SGE, MPI) and can solve problems beyond billions of examples.

What's New
----------
* [XGBoost GPU support with fast histogram algorithm](https://github.com/dmlc/xgboost/tree/master/plugin/updater_gpu)
* [XGBoost4J: Portable Distributed XGBoost in Spark, Flink and Dataflow](http://dmlc.ml/2016/03/14/xgboost4j-portable-distributed-xgboost-in-spark-flink-and-dataflow.html), see [JVM-Package](https://github.com/dmlc/xgboost/tree/master/jvm-packages)
* [Story and Lessons Behind the Evolution of XGBoost](http://homes.cs.washington.edu/~tqchen/2016/03/10/story-and-lessons-behind-the-evolution-of-xgboost.html)
* [Tutorial: Distributed XGBoost on AWS with YARN](https://xgboost.readthedocs.io/en/latest/tutorials/aws_yarn.html)
* [XGBoost brick](NEWS.md) Release

Ask a Question
--------------
* For reporting bugs please use the [xgboost/issues](https://github.com/dmlc/xgboost/issues) page.
* For generic questions or to share your experience using XGBoost, please use the [XGBoost User Group](https://groups.google.com/forum/#!forum/xgboost-user/)

Help to Make XGBoost Better
---------------------------
XGBoost has been developed and used by a group of active community members. Your help is very valuable to make the package better for everyone.
- Check out [call for contributions](https://github.com/dmlc/xgboost/issues?q=is%3Aissue+label%3Acall-for-contribution+is%3Aopen) and [Roadmap](https://github.com/dmlc/xgboost/issues/873) to see what can be improved, or open an issue if you want something.
- Contribute to the [documents and examples](https://github.com/dmlc/xgboost/blob/master/doc/) to share your experience with other users.
- Add your stories and experience to [Awesome XGBoost](demo/README.md).
- Please add your name to [CONTRIBUTORS.md](CONTRIBUTORS.md) after your patch has been merged.
- Please also update [NEWS.md](NEWS.md) on changes and improvements in API and docs.

License
-------
© Contributors, 2016. Licensed under an [Apache-2](https://github.com/dmlc/xgboost/blob/master/LICENSE) license.

Contribute to XGBoost
---------------------
XGBoost has been developed and used by a group of active community members. Your help is very valuable to make the package better for everyone.
Check out the [Community Page](https://xgboost.ai/community)

Reference
---------
- Tianqi Chen and Carlos Guestrin. [XGBoost: A Scalable Tree Boosting System](http://arxiv.org/abs/1603.02754). In 22nd SIGKDD Conference on Knowledge Discovery and Data Mining, 2016
- XGBoost originates from a research project at the University of Washington, see also the [Project Page at UW](http://dmlc.cs.washington.edu/xgboost.html).
- Tianqi Chen and Carlos Guestrin. [XGBoost: A Scalable Tree Boosting System](http://arxiv.org/abs/1603.02754). In 22nd SIGKDD Conference on Knowledge Discovery and Data Mining, 2016
- XGBoost originates from a research project at the University of Washington.
@@ -7,6 +7,8 @@
#include "../dmlc-core/src/io/recordio_split.cc"
#include "../dmlc-core/src/io/input_split_base.cc"
#include "../dmlc-core/src/io/local_filesys.cc"
#include "../dmlc-core/src/io/filesys.cc"
#include "../dmlc-core/src/io/indexed_recordio_split.cc"
#include "../dmlc-core/src/data.cc"
#include "../dmlc-core/src/io.cc"
#include "../dmlc-core/src/recordio.cc"
@@ -20,6 +20,7 @@
#include "../src/objective/regression_obj.cc"
#include "../src/objective/multiclass_obj.cc"
#include "../src/objective/rank_obj.cc"
#include "../src/objective/hinge.cc"

// gbms
#include "../src/gbm/gbm.cc"
@@ -43,20 +44,27 @@
#endif

// trees
#include "../src/tree/split_evaluator.cc"
#include "../src/tree/tree_model.cc"
#include "../src/tree/tree_updater.cc"
#include "../src/tree/updater_colmaker.cc"
#include "../src/tree/updater_fast_hist.cc"
#include "../src/tree/updater_quantile_hist.cc"
#include "../src/tree/updater_prune.cc"
#include "../src/tree/updater_refresh.cc"
#include "../src/tree/updater_sync.cc"
#include "../src/tree/updater_histmaker.cc"
#include "../src/tree/updater_skmaker.cc"

// linear
#include "../src/linear/linear_updater.cc"
#include "../src/linear/updater_coordinate.cc"
#include "../src/linear/updater_shotgun.cc"

// global
#include "../src/learner.cc"
#include "../src/logging.cc"
#include "../src/common/common.cc"
#include "../src/common/host_device_vector.cc"
#include "../src/common/hist_util.cc"

// c_api
17
appveyor.yml
@@ -44,16 +44,18 @@ install:
  - set DO_PYTHON=off
  - if /i "%target%" == "mingw" set DO_PYTHON=on
  - if /i "%target%_%ver%_%configuration%" == "msvc_2015_Release" set DO_PYTHON=on
  - if /i "%DO_PYTHON%" == "on" conda install -y numpy scipy pandas matplotlib nose scikit-learn graphviz python-graphviz
  - if /i "%DO_PYTHON%" == "on" conda install -y numpy scipy pandas matplotlib pytest scikit-learn graphviz python-graphviz
  # R: based on https://github.com/krlmlr/r-appveyor
  - ps: |
      if($env:target -eq 'rmingw' -or $env:target -eq 'rmsvc') {
        #$ErrorActionPreference = "Stop"
        Invoke-WebRequest http://raw.github.com/krlmlr/r-appveyor/master/scripts/appveyor-tool.ps1 -OutFile "$Env:TEMP\appveyor-tool.ps1"
        Invoke-WebRequest https://raw.githubusercontent.com/krlmlr/r-appveyor/master/scripts/appveyor-tool.ps1 -OutFile "$Env:TEMP\appveyor-tool.ps1"
        Import-Module "$Env:TEMP\appveyor-tool.ps1"
        Bootstrap
        $DEPS = "c('data.table','magrittr','stringi','ggplot2','DiagrammeR','Ckmeans.1d.dp','vcd','testthat','igraph','knitr','rmarkdown')"
        cmd /c "R.exe -q -e ""install.packages($DEPS, repos='$CRAN', type='win.binary')"" 2>&1"
        $DEPS = "c('data.table','magrittr','stringi','ggplot2','DiagrammeR','Ckmeans.1d.dp','vcd','testthat','lintr','knitr','rmarkdown')"
        cmd.exe /c "R.exe -q -e ""install.packages($DEPS, repos='$CRAN', type='both')"" 2>&1"
        $BINARY_DEPS = "c('XML','igraph')"
        cmd.exe /c "R.exe -q -e ""install.packages($BINARY_DEPS, repos='$CRAN', type='win.binary')"" 2>&1"
      }

build_script:
@@ -81,7 +83,7 @@ build_script:
  - if /i "%target%" == "rmingw" (
      make Rbuild &&
      ls -l &&
      R.exe CMD INSTALL --no-multiarch xgboost*.tar.gz
      R.exe CMD INSTALL xgboost*.tar.gz
    )
  # R package: cmake + VC2015
  - if /i "%target%" == "rmsvc" (
@@ -94,14 +96,13 @@ build_script:

test_script:
  - cd %APPVEYOR_BUILD_FOLDER%
  - if /i "%DO_PYTHON%" == "on" python -m nose tests/python
  - if /i "%DO_PYTHON%" == "on" python -m pytest tests/python
  # mingw R package: run the R check (which includes unit tests), and also keep the built binary package
  - if /i "%target%" == "rmingw" (
      set _R_CHECK_CRAN_INCOMING_=FALSE&&
      R.exe CMD check xgboost*.tar.gz --no-manual --no-build-vignettes --as-cran --install-args=--build --no-multiarch
      R.exe CMD check xgboost*.tar.gz --no-manual --no-build-vignettes --as-cran --install-args=--build
    )
  # MSVC R package: run only the unit tests
  # TODO: create a binary msvc-built package to keep as an artifact
  - if /i "%target%" == "rmsvc" (
      cd build_rmsvc%ver%\R-package &&
      R.exe -q -e "library(testthat); setwd('tests'); source('testthat.R')"
14
build.sh
@@ -15,25 +15,21 @@ else

if [[ ! -e ./rabit/Makefile ]]; then
  echo ""
  echo "Please clone the rabit repository into this directory."
  echo "Here are the commands:"
  echo "rm -rf rabit"
  echo "git clone https://github.com/dmlc/rabit.git rabit"
  echo "Please init the rabit submodule:"
  echo "git submodule update --init --recursive -- rabit"
  not_ready=1
fi

if [[ ! -e ./dmlc-core/Makefile ]]; then
  echo ""
  echo "Please clone the dmlc-core repository into this directory."
  echo "Here are the commands:"
  echo "rm -rf dmlc-core"
  echo "git clone https://github.com/dmlc/dmlc-core.git dmlc-core"
  echo "Please init the dmlc-core submodule:"
  echo "git submodule update --init --recursive -- dmlc-core"
  not_ready=1
fi

if [[ "${not_ready}" == "1" ]]; then
  echo ""
  echo "Please fix the errors above and retry the build or reclone the repository with:"
  echo "Please fix the errors above and retry the build, or reclone the repository with:"
  echo "git clone --recursive https://github.com/dmlc/xgboost.git"
  echo ""
  exit 1
58
cmake/Sanitizer.cmake
Normal file
@@ -0,0 +1,58 @@
# Set appropriate compiler and linker flags for sanitizers.
#
# Usage of this module:
#  enable_sanitizers("address;leak")

# Add flags
macro(enable_sanitizer santizer)
  if(${santizer} MATCHES "address")
    find_package(ASan REQUIRED)
    set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=address")
    link_libraries(${ASan_LIBRARY})

  elseif(${santizer} MATCHES "thread")
    find_package(TSan REQUIRED)
    set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=thread")
    link_libraries(${TSan_LIBRARY})

  elseif(${santizer} MATCHES "leak")
    find_package(LSan REQUIRED)
    set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=leak")
    link_libraries(${LSan_LIBRARY})

  else()
    message(FATAL_ERROR "Sanitizer ${santizer} not supported.")
  endif()
endmacro()

macro(enable_sanitizers SANITIZERS)
  # Check sanitizers compatibility.
  # Ideally, we should use if(san IN_LIST SANITIZERS) ... endif()
  # But I haven't figured out how to make it work.
  foreach ( _san ${SANITIZERS} )
    string(TOLOWER ${_san} _san)
    if (_san MATCHES "thread")
      if (${_use_other_sanitizers})
        message(FATAL_ERROR
          "thread sanitizer is not compatible with ${_san} sanitizer.")
      endif()
      set(_use_thread_sanitizer 1)
    else ()
      if (${_use_thread_sanitizer})
        message(FATAL_ERROR
          "${_san} sanitizer is not compatible with thread sanitizer.")
      endif()
      set(_use_other_sanitizers 1)
    endif()
  endforeach()

  message("Sanitizers: ${SANITIZERS}")

  foreach( _san ${SANITIZERS} )
    string(TOLOWER ${_san} _san)
    enable_sanitizer(${_san})
  endforeach()
  message("Sanitizers compile flags: ${SAN_COMPILE_FLAGS}")
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_COMPILE_FLAGS}")
  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_COMPILE_FLAGS}")
endmacro()
@@ -54,10 +54,25 @@ function(set_default_configuration_release)
  endif()
endfunction(set_default_configuration_release)

# Generate nvcc compiler flags given a list of architectures
# Also generates PTX for the most recent architecture for forwards compatibility
function(format_gencode_flags flags out)
  # Set up architecture flags
  if(NOT flags)
    if((CUDA_VERSION_MAJOR EQUAL 9) OR (CUDA_VERSION_MAJOR GREATER 9))
      set(flags "35;50;52;60;61;70")
    else()
      set(flags "35;50;52;60;61")
    endif()
  endif()
  # Generate SASS
  foreach(ver ${flags})
    set(${out} "${${out}}-gencode arch=compute_${ver},code=sm_${ver};")
  endforeach()
  # Generate PTX for last architecture
  list(GET flags -1 ver)
  set(${out} "${${out}}-gencode arch=compute_${ver},code=compute_${ver};")

  set(${out} "${${out}}" PARENT_SCOPE)
endfunction(format_gencode_flags flags)
11
cmake/build_config.h.in
Normal file
@@ -0,0 +1,11 @@
/*!
 * Copyright 2019 by Contributors
 * \file build_config.h
 */
#ifndef XGBOOST_BUILD_CONFIG_H_
#define XGBOOST_BUILD_CONFIG_H_

#cmakedefine XGBOOST_MM_PREFETCH_PRESENT
#cmakedefine XGBOOST_BUILTIN_PREFETCH_PRESENT

#endif  // XGBOOST_BUILD_CONFIG_H_
13
cmake/modules/FindASan.cmake
Normal file
@@ -0,0 +1,13 @@
set(ASan_LIB_NAME ASan)

find_library(ASan_LIBRARY
  NAMES libasan.so libasan.so.4 libasan.so.3 libasan.so.2 libasan.so.1 libasan.so.0
  PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(ASan DEFAULT_MSG
  ASan_LIBRARY)

mark_as_advanced(
  ASan_LIBRARY
  ASan_LIB_NAME)
@@ -1,79 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Tries to find GTest headers and libraries.
#
# Usage of this module as follows:
#
#  find_package(GTest)
#
# Variables used by this module, they can change the default behaviour and need
# to be set before calling find_package:
#
#  GTest_HOME - When set, this path is inspected instead of standard library
#               locations as the root of the GTest installation.
#               The environment variable GTEST_HOME overrides this variable.
#
# This module defines
#  GTEST_INCLUDE_DIR, directory containing headers
#  GTEST_LIBS, directory containing gtest libraries
#  GTEST_STATIC_LIB, path to libgtest.a
#  GTEST_SHARED_LIB, path to libgtest's shared library
#  GTEST_FOUND, whether gtest has been found

find_path(GTEST_INCLUDE_DIR NAMES gtest/gtest.h gtest.h PATHS ${CMAKE_SOURCE_DIR}/gtest/include NO_DEFAULT_PATH)
find_library(GTEST_LIBRARIES NAMES gtest PATHS ${CMAKE_SOURCE_DIR}/gtest/lib NO_DEFAULT_PATH)

if (GTEST_INCLUDE_DIR)
  message(STATUS "Found the GTest includes: ${GTEST_INCLUDE_DIR}")
endif ()

if (GTEST_INCLUDE_DIR AND GTEST_LIBRARIES)
  set(GTEST_FOUND TRUE)
  get_filename_component( GTEST_LIBS ${GTEST_LIBRARIES} PATH )
  set(GTEST_LIB_NAME gtest)
  set(GTEST_STATIC_LIB ${GTEST_LIBS}/${CMAKE_STATIC_LIBRARY_PREFIX}${GTEST_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
  set(GTEST_MAIN_STATIC_LIB ${GTEST_LIBS}/${CMAKE_STATIC_LIBRARY_PREFIX}${GTEST_LIB_NAME}_main${CMAKE_STATIC_LIBRARY_SUFFIX})
  set(GTEST_SHARED_LIB ${GTEST_LIBS}/${CMAKE_SHARED_LIBRARY_PREFIX}${GTEST_LIB_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX})
else ()
  set(GTEST_FOUND FALSE)
endif ()

if (GTEST_FOUND)
  if (NOT GTest_FIND_QUIETLY)
    message(STATUS "Found the GTest library: ${GTEST_LIBRARIES}")
  endif ()
else ()
  if (NOT GTest_FIND_QUIETLY)
    set(GTEST_ERR_MSG "Could not find the GTest library. Looked in ")
    if ( _gtest_roots )
      set(GTEST_ERR_MSG "${GTEST_ERR_MSG} in ${_gtest_roots}.")
    else ()
      set(GTEST_ERR_MSG "${GTEST_ERR_MSG} system search paths.")
    endif ()
    if (GTest_FIND_REQUIRED)
      message(FATAL_ERROR "${GTEST_ERR_MSG}")
    else (GTest_FIND_REQUIRED)
      message(STATUS "${GTEST_ERR_MSG}")
    endif (GTest_FIND_REQUIRED)
  endif ()
endif ()

mark_as_advanced(
  GTEST_INCLUDE_DIR
  GTEST_LIBS
  GTEST_LIBRARIES
  GTEST_STATIC_LIB
  GTEST_SHARED_LIB
)
13
cmake/modules/FindLSan.cmake
Normal file
@@ -0,0 +1,13 @@
set(LSan_LIB_NAME lsan)

find_library(LSan_LIBRARY
  NAMES liblsan.so liblsan.so.0 liblsan.so.0.0.0
  PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(LSan DEFAULT_MSG
  LSan_LIBRARY)

mark_as_advanced(
  LSan_LIBRARY
  LSan_LIB_NAME)
@@ -117,7 +117,7 @@ else()
  # ask R for R_HOME
  if(LIBR_EXECUTABLE)
    execute_process(
      COMMAND ${LIBR_EXECUTABLE} "--slave" "--no-save" "-e" "cat(normalizePath(R.home(), winslash='/'))"
      COMMAND ${LIBR_EXECUTABLE} "--slave" "--no-save" "-e" "cat(normalizePath(R.home(),winslash='/'))"
      OUTPUT_VARIABLE LIBR_HOME)
  endif()
  # if R executable not available, query R_HOME path from registry
58
cmake/modules/FindNccl.cmake
Normal file
@@ -0,0 +1,58 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Tries to find NCCL headers and libraries.
#
# Usage of this module as follows:
#
#  find_package(NCCL)
#
# Variables used by this module, they can change the default behaviour and need
# to be set before calling find_package:
#
#  NCCL_ROOT - When set, this path is inspected instead of standard library
#              locations as the root of the NCCL installation.
#              The environment variable NCCL_ROOT overrides this variable.
#
# This module defines
#  Nccl_FOUND, whether nccl has been found
#  NCCL_INCLUDE_DIR, directory containing header
#  NCCL_LIBRARY, directory containing nccl library
#  NCCL_LIB_NAME, nccl library name
#
# This module assumes that the user has already called find_package(CUDA)

set(NCCL_LIB_NAME nccl_static)

find_path(NCCL_INCLUDE_DIR
  NAMES nccl.h
  PATHS $ENV{NCCL_ROOT}/include ${NCCL_ROOT}/include ${CUDA_INCLUDE_DIRS} /usr/include)

find_library(NCCL_LIBRARY
  NAMES ${NCCL_LIB_NAME}
  PATHS $ENV{NCCL_ROOT}/lib ${NCCL_ROOT}/lib ${CUDA_INCLUDE_DIRS}/../lib /usr/lib)

if (NCCL_INCLUDE_DIR AND NCCL_LIBRARY)
  get_filename_component(NCCL_LIBRARY ${NCCL_LIBRARY} PATH)
endif ()

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Nccl DEFAULT_MSG
  NCCL_INCLUDE_DIR NCCL_LIBRARY)

mark_as_advanced(
  NCCL_INCLUDE_DIR
  NCCL_LIBRARY
  NCCL_LIB_NAME
)
13
cmake/modules/FindTSan.cmake
Normal file
@@ -0,0 +1,13 @@
set(TSan_LIB_NAME tsan)

find_library(TSan_LIBRARY
  NAMES libtsan.so libtsan.so.0 libtsan.so.0.0.0
  PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(TSan DEFAULT_MSG
  TSan_LIBRARY)

mark_as_advanced(
  TSan_LIBRARY
  TSan_LIB_NAME)
@@ -135,6 +135,7 @@ Send a PR to add a one sentence description:)

## Awards
- [John Chambers Award](http://stat-computing.org/awards/jmc/winners.html) - 2016 Winner: XGBoost R Package, by Tong He (Simon Fraser University) and Tianqi Chen (University of Washington)
- [InfoWorld’s 2019 Technology of the Year Award](https://www.infoworld.com/article/3336072/application-development/infoworlds-2019-technology-of-the-year-award-winners.html)

## Windows Binaries
Unofficial Windows binaries and instructions on how to use them are hosted on [Guido Tapia's blog](http://www.picnet.com.au/blogs/guido/post/2016/09/22/xgboost-windows-x64-binaries-for-download/)
@@ -62,7 +62,7 @@ test:data = "agaricus.txt.test"
|
||||
We use the tree booster and logistic regression objective in our setting. This indicates that we accomplish our task using classic gradient boosting regression tree(GBRT), which is a promising method for binary classification.
|
||||
|
||||
The parameters shown in the example gives the most common ones that are needed to use xgboost.
|
||||
If you are interested in more parameter settings, the complete parameter settings and detailed descriptions are [here](../../doc/parameter.md). Besides putting the parameters in the configuration file, we can set them by passing them as arguments as below:
|
||||
If you are interested in more parameter settings, the complete parameter settings and detailed descriptions are [here](../../doc/parameter.rst). Besides putting the parameters in the configuration file, we can set them by passing them as arguments as below:
|
||||
|
||||
```
|
||||
../../xgboost mushroom.conf max_depth=6
|
||||
@@ -80,12 +80,6 @@ booster = gblinear
# L2 regularization term on weights, default 0
lambda = 0.01
# L1 regularization term on weights, default 0
-If ```agaricus.txt.test.buffer``` exists, and automatically loads from binary buffer if possible, this can speedup training process when you do training many times. You can disable it by setting ```use_buffer=0```.
-  - Buffer file can also be used as standalone input, i.e if buffer file exists, but original agaricus.txt.test was removed, xgboost will still run
-* Deviation from LibSVM input format: xgboost is compatible with LibSVM format, with the following minor differences:
-  - xgboost allows feature index starts from 0
-  - for binary classification, the label is 1 for positive, 0 for negative, instead of +1,-1
-  - the feature indices in each line *do not* need to be sorted
alpha = 0.01
# L2 regularization term on bias, default 0
lambda_bias = 0.01
@@ -102,7 +96,7 @@ After training, we can use the output model to get the prediction of the test data
For binary classification, the output predictions are probability confidence scores in [0,1], corresponding to the probability that the label is positive.

#### Dump Model
-This is a preliminary feature, so far only tree model support text dump. XGBoost can display the tree models in text files and we can scan the model in an easy way:
+This is a preliminary feature, so only tree models support text dump. XGBoost can display the tree models in text or JSON files, and we can scan the model in an easy way:
```
../../xgboost mushroom.conf task=dump model_in=0002.model name_dump=dump.raw.txt
../../xgboost mushroom.conf task=dump model_in=0002.model fmap=featmap.txt name_dump=dump.nice.txt
```
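For readers working from the Python package rather than the command line, the same dump can be produced with `Booster.dump_model`. A minimal sketch, assuming the `0002.model` and `featmap.txt` files produced by the demo above (the JSON variant requires a release that supports `dump_format='json'`):

```python
import xgboost as xgb

# Load the model saved by the CLI run above.
bst = xgb.Booster(model_file='0002.model')

# Plain-text dump; pass a feature map to get readable feature names.
bst.dump_model('dump.raw.txt')
bst.dump_model('dump.nice.txt', fmap='featmap.txt')

# JSON dump, matching the "text or JSON" note above.
bst.dump_model('dump.json', dump_format='json')
```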
@@ -18,7 +18,7 @@ def loadfmap( fname ):
        if it.strip() == '':
            continue
        k, v = it.split('=')
-        fmap[idx][v] = len(nmap) + 1
+        fmap[idx][v] = len(nmap)
        nmap[len(nmap)] = ftype + '=' + k
    return fmap, nmap
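The one-line change above is an off-by-one fix: the feature index stored in `fmap` must equal the key under which the feature's name is recorded in `nmap` on the following line. A standalone sketch of the invariant, with illustrative values that are not taken from the demo data:

```python
# Hypothetical values for illustration only.
fmap = {0: {}}
nmap = {}
idx, ftype, k, v = 0, 'cap-shape', 'bell', 'b'

fmap[idx][v] = len(nmap)             # value 'b' -> feature index 0 ...
nmap[len(nmap)] = ftype + '=' + k    # ... the same index that names it here
assert fmap[idx][v] in nmap          # with the old `+ 1`, this would fail
```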
@@ -2,8 +2,6 @@
This demo shows how to train a model on the [forest cover type](https://archive.ics.uci.edu/ml/datasets/covertype) dataset using GPU acceleration. The forest cover type dataset has 581,012 rows and 54 features, making it time consuming to process. We compare the run-time and accuracy of the GPU and CPU histogram algorithms.

-This demo requires the [GPU plug-in](https://github.com/dmlc/xgboost/tree/master/plugin/updater_gpu) to be built and installed.
+This demo requires the [GPU plug-in](https://xgboost.readthedocs.io/en/latest/gpu/index.html) to be built and installed.

The dataset is automatically loaded via the sklearn script.
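A minimal sketch of the comparison this demo performs, assuming a CUDA-capable device and the `gpu_hist`/`hist` values of `tree_method` available in this release; the real script's parameters may differ:

```python
import time

import xgboost as xgb
from sklearn.datasets import fetch_covtype
from sklearn.model_selection import train_test_split

data = fetch_covtype()
X_train, X_test, y_train, y_test = train_test_split(
    data.data, data.target, test_size=0.25, random_state=42)

# Labels must start at 0 for multi:softmax, so shift the 1..7 cover types.
dtrain = xgb.DMatrix(X_train, label=y_train - 1)
dtest = xgb.DMatrix(X_test, label=y_test - 1)

param = {'objective': 'multi:softmax', 'num_class': 7, 'max_depth': 8}
for tree_method in ('gpu_hist', 'hist'):
    param['tree_method'] = tree_method
    start = time.time()
    xgb.train(param, dtrain, num_boost_round=20, evals=[(dtest, 'test')])
    print('%s took %.1f seconds' % (tree_method, time.time() - start))
```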
@@ -1,7 +1,7 @@
XGBoost Python Feature Walkthrough
==================================
* [Basic walkthrough of wrappers](basic_walkthrough.py)
-* [Cutomize loss function, and evaluation metric](custom_objective.py)
+* [Customize loss function, and evaluation metric](custom_objective.py)
* [Boosting from existing prediction](boost_from_prediction.py)
* [Predicting using first n trees](predict_first_ntree.py)
* [Generalized Linear Model](generalized_linear_model.py)
@@ -42,7 +42,7 @@ xgb.cv(param, dtrain, num_round, nfold=5,
       metrics={'auc'}, seed=0, fpreproc=fpreproc)

###
-# you can also do cross validation with cutomized loss function
+# you can also do cross validation with customized loss function
# See custom_objective.py
##
print('running cross validation, with cutomsized loss function')
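`xgb.cv` accepts the same `obj` and `feval` hooks as `xgb.train`, so the customized loss can be cross validated directly. A short sketch, assuming the `param`, `dtrain`, `num_round`, `logregobj`, and `evalerror` names defined in custom_objective.py:

```python
# Cross validation with a customized objective and evaluation metric.
xgb.cv(param, dtrain, num_round, nfold=5, seed=0,
       obj=logregobj, feval=evalerror)
```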
@@ -33,10 +33,10 @@ def logregobj(preds, dtrain):
# Take this in mind when you use the customization, and maybe you need write customized evaluation function
def evalerror(preds, dtrain):
    labels = dtrain.get_label()
-    # return a pair metric_name, result
+    # return a pair metric_name, result. The metric name must not contain a colon (:) or a space
    # since preds are margin(before logistic transformation, cutoff at 0)
-    return 'error', float(sum(labels != (preds > 0.0))) / len(labels)
+    return 'my-error', float(sum(labels != (preds > 0.0))) / len(labels)

# training with customized objective, we can also do step by step training
# simply look at xgboost.py's implementation of train
-bst = xgb.train(param, dtrain, num_round, watchlist, logregobj, evalerror)
+bst = xgb.train(param, dtrain, num_round, watchlist, obj=logregobj, feval=evalerror)
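For context, the customized objective referenced by this hunk returns the gradient and hessian of the logistic loss with respect to the margin. A sketch of such an objective (custom_objective.py's own definition may differ in detail):

```python
import numpy as np

def logregobj(preds, dtrain):
    labels = dtrain.get_label()
    preds = 1.0 / (1.0 + np.exp(-preds))  # margin -> probability (sigmoid)
    grad = preds - labels                  # first-order gradient
    hess = preds * (1.0 - preds)           # second-order gradient (hessian)
    return grad, hess
```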
@@ -1,5 +1,5 @@
#!/bin/bash
-export PYTHONPATH=PYTHONPATH:../../python-package
+export PYTHONPATH=$PYTHONPATH:../../python-package
python basic_walkthrough.py
python custom_objective.py
python boost_from_prediction.py
@@ -24,9 +24,9 @@ param <- list("objective" = "binary:logitraw",
              "silent" = 1,
              "nthread" = 16)
watchlist <- list("train" = xgmat)
-nround = 120
+nrounds = 120
print ("loading data end, start to boost trees")
-bst = xgb.train(param, xgmat, nround, watchlist );
+bst = xgb.train(param, xgmat, nrounds, watchlist );
# save out model
xgb.save(bst, "higgs.model")
print ('finish training')
@@ -39,9 +39,9 @@ for (i in 1:length(threads)){
              "silent" = 1,
              "nthread" = thread)
watchlist <- list("train" = xgmat)
-nround = 120
+nrounds = 120
print ("loading data end, start to boost trees")
-bst = xgb.train(param, xgmat, nround, watchlist );
+bst = xgb.train(param, xgmat, nrounds, watchlist );
# save out model
xgb.save(bst, "higgs.model")
print ('finish training')
@@ -1,4 +1,4 @@
-Benckmark for Otto Group Competition
+Benchmark for Otto Group Competition
=========

This is a folder containing the benchmark for the [Otto Group Competition on Kaggle](http://www.kaggle.com/c/otto-group-product-classification-challenge).
@@ -20,5 +20,3 @@ devtools::install_github('tqchen/xgboost',subdir='R-package')
```

Windows users may need to install [RTools](http://cran.r-project.org/bin/windows/Rtools/) first.
@@ -23,13 +23,13 @@ param <- list("objective" = "multi:softprob",
              "nthread" = 8)

# Run Cross Validation
-cv.nround = 50
+cv.nrounds = 50
bst.cv = xgb.cv(param=param, data = x[trind,], label = y,
-               nfold = 3, nrounds=cv.nround)
+               nfold = 3, nrounds=cv.nrounds)

# Train the model
-nround = 50
-bst = xgboost(param=param, data = x[trind,], label = y, nrounds=nround)
+nrounds = 50
+bst = xgboost(param=param, data = x[trind,], label = y, nrounds=nrounds)

# Make prediction
pred = predict(bst,x[teind,])
@@ -121,19 +121,19 @@ param <- list("objective" = "multi:softprob",
              "eval_metric" = "mlogloss",
              "num_class" = numberOfClasses)

-cv.nround <- 5
+cv.nrounds <- 5
cv.nfold <- 3

bst.cv = xgb.cv(param=param, data = trainMatrix, label = y,
-               nfold = cv.nfold, nrounds = cv.nround)
+               nfold = cv.nfold, nrounds = cv.nrounds)
```
> As we can see, the error rate is low on the test dataset (for a model trained in five minutes).

Finally, we are ready to train the real model!!!

```{r modelTraining}
-nround = 50
-bst = xgboost(param=param, data = trainMatrix, label = y, nrounds=nround)
+nrounds = 50
+bst = xgboost(param=param, data = trainMatrix, label = y, nrounds=nrounds)
```

Model understanding
@@ -142,7 +142,7 @@ Model understanding
Feature importance
------------------

-So far, we have built a model made of **`r nround`** trees.
+So far, we have built a model made of **`r nrounds`** trees.

To build a tree, the dataset is divided recursively several times. At the end of the process, you get groups of observations (here, these observations are properties regarding **Otto** products).
@@ -1,10 +1,10 @@
Demonstrating how to use XGBoost to accomplish a multi-class classification task on the [UCI Dermatology dataset](https://archive.ics.uci.edu/ml/datasets/Dermatology)

-Make sure you make make xgboost python module in ../../python
+Make sure you make xgboost python module in ../../python

1. Run runexp.sh
```bash
./runexp.sh
```

**R version**: please see `train.R`.
64 demo/multiclass_classification/train.R Normal file
@@ -0,0 +1,64 @@
library(data.table)
library(xgboost)

if (!file.exists("./dermatology.data")) {
  download.file(
    "https://archive.ics.uci.edu/ml/machine-learning-databases/dermatology/dermatology.data",
    "dermatology.data",
    method = "curl"
  )
}

df <- fread("dermatology.data", sep = ",", header = FALSE)

# '?' marks missing values in V34; the class label V35 is shifted to start at 0
df[, `:=`(V34 = as.integer(ifelse(V34 == "?", 0L, V34)),
          V35 = V35 - 1L)]

idx <- sample(nrow(df), size = round(0.7 * nrow(df)), replace = FALSE)

train <- df[idx, ]
test <- df[-idx, ]

train_x <- train[, 1:34]
train_y <- train[, V35]

test_x <- test[, 1:34]
test_y <- test[, V35]

xg_train <- xgb.DMatrix(data = as.matrix(train_x), label = train_y)
xg_test <- xgb.DMatrix(as.matrix(test_x), label = test_y)

params <- list(
  objective = 'multi:softmax',
  num_class = 6,
  max_depth = 6,
  nthread = 4,
  eta = 0.1
)

watchlist <- list(train = xg_train, test = xg_test)

bst <- xgb.train(
  params = params,
  data = xg_train,
  watchlist = watchlist,
  nrounds = 5
)

pred <- predict(bst, xg_test)
error_rate <- sum(pred != test_y) / length(test_y)
print(paste("Test error using softmax =", error_rate))

# do the same thing again, but output probabilities
params$objective <- 'multi:softprob'
bst <- xgb.train(params, xg_train, nrounds = 5, watchlist)

pred_prob <- predict(bst, xg_test)

# each row of pred_mat is a probability distribution over the 6 classes;
# rowSums(pred_mat) should all be 1
pred_mat <- matrix(pred_prob, ncol = 6, byrow = TRUE)

pred_label <- apply(pred_mat, 1, which.max) - 1L
error_rate <- sum(pred_label != test_y) / length(test_y)
print(paste("Test error using softprob =", error_rate))
@@ -1,6 +1,6 @@
Learning to rank
====
-XGBoost supports accomplishing ranking tasks. In ranking scenario, data are often grouped and we need the [group information file](../../doc/input_format.md#group-input-format) to specify ranking tasks. The model used in XGBoost for ranking is the LambdaRank, this function is not yet completed. Currently, we provide pairwise rank.
+XGBoost supports accomplishing ranking tasks. In the ranking scenario, data are often grouped, and we need the [group information file](../../doc/tutorials/input_format.rst#group-input-format) to specify ranking tasks. The model used in XGBoost for ranking is LambdaRank; this function is not yet completed. Currently, we provide pairwise rank.

### Parameters
The configuration setting is similar to the regression and binary classification setting, except the user needs to specify the objectives:
@@ -14,8 +14,28 @@ For more usage details please refer to the [binary classification demo](../binar

Instructions
====
-The dataset for ranking demo is from LETOR04 MQ2008 fold1,
-You can use the following command to run the example
+The dataset for the ranking demo is from LETOR04 MQ2008 fold1.
+Before running the examples, you need to get the data by running:

-Get the data: ./wgetdata.sh
-Run the example: ./runexp.sh
+```
+./wgetdata.sh
+```

+### Command Line
+Run the example:
+```
+./runexp.sh
+```

+### Python
+There are two ways of doing ranking in Python.

+Run the example using `xgboost.train`:
+```
+python rank.py
+```

+Run the example using `XGBRanker`:
+```
+python rank_sklearn.py
+```
41 demo/rank/rank.py Normal file
@@ -0,0 +1,41 @@
#!/usr/bin/python
import xgboost as xgb
from xgboost import DMatrix
from sklearn.datasets import load_svmlight_file


# This script demonstrates how to do ranking with xgboost.train.
x_train, y_train = load_svmlight_file("mq2008.train")
x_valid, y_valid = load_svmlight_file("mq2008.vali")
x_test, y_test = load_svmlight_file("mq2008.test")

# Each line of a .group file gives the number of consecutive rows that
# belong to the same query.
group_train = []
with open("mq2008.train.group", "r") as f:
    data = f.readlines()
    for line in data:
        group_train.append(int(line.split("\n")[0]))

group_valid = []
with open("mq2008.vali.group", "r") as f:
    data = f.readlines()
    for line in data:
        group_valid.append(int(line.split("\n")[0]))

group_test = []
with open("mq2008.test.group", "r") as f:
    data = f.readlines()
    for line in data:
        group_test.append(int(line.split("\n")[0]))

train_dmatrix = DMatrix(x_train, y_train)
valid_dmatrix = DMatrix(x_valid, y_valid)
test_dmatrix = DMatrix(x_test)

train_dmatrix.set_group(group_train)
valid_dmatrix.set_group(group_valid)

params = {'objective': 'rank:pairwise', 'eta': 0.1, 'gamma': 1.0,
          'min_child_weight': 0.1, 'max_depth': 6}
xgb_model = xgb.train(params, train_dmatrix, num_boost_round=4,
                      evals=[(valid_dmatrix, 'validation')])
pred = xgb_model.predict(test_dmatrix)
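Each line of a `.group` file is the number of consecutive rows that belong to one query. If you instead have a per-row query-id column, as in the raw LETOR text files, the group sizes can be derived directly; a hypothetical helper, not part of the demo:

```python
from itertools import groupby

def qid_to_groups(qids):
    """Return group sizes from a per-row query-id sequence.

    Assumes rows belonging to the same query are already contiguous.
    """
    return [len(list(g)) for _, g in groupby(qids)]

# e.g. qid_to_groups([1, 1, 1, 2, 2, 3]) == [3, 2, 1]
```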
35 demo/rank/rank_sklearn.py Normal file
@@ -0,0 +1,35 @@
#!/usr/bin/python
import xgboost as xgb
from sklearn.datasets import load_svmlight_file


# This script demonstrates how to do ranking with XGBRanker.
x_train, y_train = load_svmlight_file("mq2008.train")
x_valid, y_valid = load_svmlight_file("mq2008.vali")
x_test, y_test = load_svmlight_file("mq2008.test")

group_train = []
with open("mq2008.train.group", "r") as f:
    data = f.readlines()
    for line in data:
        group_train.append(int(line.split("\n")[0]))

group_valid = []
with open("mq2008.vali.group", "r") as f:
    data = f.readlines()
    for line in data:
        group_valid.append(int(line.split("\n")[0]))

group_test = []
with open("mq2008.test.group", "r") as f:
    data = f.readlines()
    for line in data:
        group_test.append(int(line.split("\n")[0]))

params = {'objective': 'rank:pairwise', 'learning_rate': 0.1,
          'gamma': 1.0, 'min_child_weight': 0.1,
          'max_depth': 6, 'n_estimators': 4}
model = xgb.sklearn.XGBRanker(**params)
model.fit(x_train, y_train, group_train,
          eval_set=[(x_valid, y_valid)], eval_group=[group_valid])
pred = model.predict(x_test)
@@ -1,11 +1,5 @@
-python trans_data.py train.txt mq2008.train mq2008.train.group
-
-python trans_data.py test.txt mq2008.test mq2008.test.group
-
-python trans_data.py vali.txt mq2008.vali mq2008.vali.group
#!/bin/bash

../../xgboost mq2008.conf

../../xgboost mq2008.conf task=pred model_in=0004.model
@@ -1,4 +1,10 @@
#!/bin/bash
-wget http://research.microsoft.com/en-us/um/beijing/projects/letor/LETOR4.0/Data/MQ2008.rar
+wget https://s3-us-west-2.amazonaws.com/xgboost-examples/MQ2008.rar
unrar x MQ2008.rar
mv -f MQ2008/Fold1/*.txt .

+python trans_data.py train.txt mq2008.train mq2008.train.group
+
+python trans_data.py test.txt mq2008.test mq2008.test.group
+
+python trans_data.py vali.txt mq2008.vali mq2008.vali.group
Submodule dmlc-core updated: b5bec5481d...ac983092ee
Some files were not shown because too many files have changed in this diff.