Compare commits

...

1104 Commits

Author SHA1 Message Date
Philip Hyunsu Cho
a78d0d4110 Release patch release 1.3.1 (#6543) 2020-12-21 23:22:32 -08:00
Jiaming Yuan
76c361431f Remove cupy.array_equal, since it's not compatible with cuPy 7.8 (#6528) (#6535)
Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-12-20 15:11:50 +08:00
Jiaming Yuan
d95d02132a Fix handling of print period in EvaluationMonitor (#6499) (#6534)
Co-authored-by: ShvetsKS <33296480+ShvetsKS@users.noreply.github.com>
Co-authored-by: Kirill Shvets <kirill.shvets@intel.com>
2020-12-20 15:07:42 +08:00
Jiaming Yuan
7109c6c1f2 [backport] Move metric configuration into booster. (#6504) (#6533) 2020-12-20 10:36:32 +08:00
Jiaming Yuan
bce7ca313c [backport] Fix save_best. (#6523) 2020-12-18 20:00:29 +08:00
Jiaming Yuan
8be2cd8c91 Enable loading model from <1.0.0 trained with objective='binary:logitraw' (#6517) (#6524)
* Enable loading model from <1.0.0 trained with objective='binary:logitraw'

* Add binary:logitraw in model compatibility testing suite

* Feedback from @trivialfis: Override ProbToMargin() for LogisticRaw

Co-authored-by: Jiaming Yuan <jm.yuan@outlook.com>

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-12-18 04:10:09 +08:00
Philip Hyunsu Cho
c5f0cdbc72 Hot fix for libgomp vendoring (#6482)
* Hot fix for libgomp vendoring

* Set post0 in setup.py
2020-12-09 10:04:45 -08:00
Jiaming Yuan
1bf3899983 Fix dask ip resolution. (#6475)
This adopts the solution used in dask/dask-xgboost#40, which employs get_host_ip from the dmlc-core tracker.
2020-12-07 16:38:16 -08:00
Jiaming Yuan
c39f6b25f0 Fix filtering callable objects in skl xgb param. (#6466)
Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
2020-12-07 16:38:16 -08:00
Philip Hyunsu Cho
2b3e301543 [CI] Fix CentOS 6 Docker images (#6467) 2020-12-07 16:38:16 -08:00
Hyunsu Cho
10d3419fa6 Release 1.3.0 2020-12-03 21:35:09 -08:00
Philip Hyunsu Cho
b273e5bd4c Vendor libgomp in the manylinux Python wheel (#6461)
* Vendor libgomp in the manylinux2014_aarch64 wheel

* Use vault repo, since CentOS 6 has reached End-of-Life on Nov 30

* Vendor libgomp in the manylinux2010_x86_64 wheel

* Run verification step inside the container
2020-12-03 21:29:40 -08:00
Philip Hyunsu Cho
3a83fcb0eb Enforce row-major order in cuPy array (#6459) 2020-12-03 21:29:24 -08:00
hzy001
3efc4ea0d1 Fix broken links. (#6455)
Co-authored-by: Hao Ziyu <haoziyu@qiyi.com>
Co-authored-by: fis <jm.yuan@outlook.com>
2020-12-03 21:29:03 -08:00
Jiaming Yuan
a2c778e2d1 Fix period in evaluation monitor. (#6441) 2020-12-03 21:28:45 -08:00
Jiaming Yuan
8a0db293c5 Fix CLI ranking demo. (#6439)
Save model at final round.
2020-12-03 21:28:28 -08:00
Honza Sterba
028ec5f028 Optionally fail when gpu_id is set to an invalid value (#6342) 2020-12-03 21:27:58 -08:00
ShvetsKS
38c80bcec4 Thread local memory allocation for BuildHist (#6358)
* thread mem locality

* fix apply

* cleanup

* fix lint

* fix tests

* simple try

* fix

* fix

* apply comments

* fix comments

* fix

* apply simple comment

Co-authored-by: ShvetsKS <kirill.shvets@intel.com>
2020-12-03 21:27:31 -08:00
Philip Hyunsu Cho
16ff63905d [CI] Upgrade cuDF and RMM to 0.17 nightlies (#6434) 2020-12-03 21:27:01 -08:00
Philip Hyunsu Cho
a9b09919f9 [R] Fix R package installation via CMake (#6423) 2020-12-03 21:26:29 -08:00
Hyunsu Cho
f3b060401a Release 1.3.0 RC1 2020-11-21 11:36:08 -08:00
Jiaming Yuan
42d31d9dcb Fix MPI build. (#6403) 2020-11-21 13:38:21 +08:00
Jiaming Yuan
2ce2a1a4d8 [SKL] Propagate parameters to booster during set_param. (#6416) 2020-11-20 20:37:35 +08:00
zhang_jf
cc581b3b6b Misleading exception information: no such param of "allow_non_zero_missing" (#6418) 2020-11-20 19:33:34 +08:00
Jiaming Yuan
00218d065a [dask] Update document. [skip ci] (#6413) 2020-11-20 19:16:19 +08:00
Jiaming Yuan
c120822a24 Fix flaky sparse page dmatrix test. (#6417) 2020-11-20 19:15:45 +08:00
Jiaming Yuan
a7b42adb74 Fix dask predict (#6412) 2020-11-20 10:10:52 +08:00
Jiaming Yuan
44a9d69efb Small cleanup to evaluator. (#6400) 2020-11-20 09:33:51 +08:00
Philip Hyunsu Cho
9c9070aea2 Use pytest conventions consistently (#6337)
* Do not derive from unittest.TestCase (not needed for pytest)

* assertRaises -> pytest.raises

* Simplify test_empty_dmatrix with test parametrization

* setUpClass -> setup_class, tearDownClass -> teardown_class

* Don't import unittest; import pytest

* Use plain assert

* Use parametrized tests in more places

* Fix test_gpu_with_sklearn.py

* Put back run_empty_dmatrix_reg / run_empty_dmatrix_cls

* Fix test_eta_decay_gpu_hist

* Add parametrized tests for monotone constraints

* Fix test names

* Remove test parametrization

* Revise test_slice to be not flaky
2020-11-19 17:00:15 -08:00
Philip Hyunsu Cho
c763b50dd0 [CI] Upgrade to MacOS Mojave image (#6406) 2020-11-18 20:29:10 -08:00
Nan Zhu
4d1d5d4010 [jvm-packages] fix potential unit test suites aborted issue (#6373)
* fix race condition

* code cleaning

rm pom.xml-e

* clean again

* fix compilation issue

* recover

* avoid using getOrCreate

* interrupt zombie threads

* safe guard

* fix deadlock

* Update SparkParallelismTracker.scala
2020-11-17 10:59:26 -08:00
Philip Hyunsu Cho
e426b6e040 [R] Do not convert continuous labels to factors (#6380)
* [R] Do not convert continuous labels to factors

* Address reviewer's comment
2020-11-17 09:19:16 -08:00
James Lamb
3cca1c5fa1 [R] remove uses of exists() (#6387) 2020-11-17 15:06:23 +08:00
Jiaming Yuan
3ac173fc8b Fix typo. (#6399) 2020-11-16 16:59:12 -08:00
Nikhil Choudhary
ae1662028a Fixed few grammatical mistakes in doc (#6393) 2020-11-15 13:48:08 +08:00
Philip Hyunsu Cho
5cb24d0d39 Fix broken link in CLI doc (#6396) 2020-11-14 17:58:07 -08:00
ShvetsKS
512b464cfa Disable HT for DMatrix creation (#6386)
Co-authored-by: SHVETS, KIRILL <kirill.shvets@intel.com>
2020-11-14 22:18:33 +08:00
Jiaming Yuan
fcd6fad822 [dask] Small cleanup. (#6391) 2020-11-14 22:15:05 +08:00
Jiaming Yuan
4ccf92ea34 [dask] Fix union of workers. (#6375) 2020-11-13 16:55:05 +08:00
Jiaming Yuan
fcfeb4959c Deprecate positional arguments. (#6365)
Deprecate positional arguments in following functions:

- `__init__` for all classes in sklearn module.
- `fit` method for all classes in sklearn module.
- dask interface.
- `set_info` for `DMatrix` class.

Refactor the evaluation matrices handling.
2020-11-13 11:10:30 +08:00
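For illustration, a minimal sketch of how such a deprecation can be enforced in Python; the decorator name `require_keyword_args` is hypothetical, and the actual helper inside xgboost may differ:

```python
import functools
import inspect
import warnings

def require_keyword_args(func):
    """Hypothetical helper: warn when arguments are passed positionally."""
    sig = inspect.signature(func)
    # Names of parameters after `self` that should become keyword-only.
    names = [p.name for p in sig.parameters.values()][1:]

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if args:
            passed = ", ".join(names[:len(args)])
            warnings.warn(f"Pass `{passed}` as keyword arguments; "
                          "positional use is deprecated.", FutureWarning)
        return func(self, *args, **kwargs)

    return wrapper
```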
Philip Hyunsu Cho
e5193c21a1 [dask] Allow empty data matrix in AFT survival (#6379)
* [dask] Allow empty data matrix in AFT survival

* Add unit test
2020-11-12 17:49:58 -08:00
Philip Hyunsu Cho
5a33c2f3a0 [CI] Add noLD R test (#6382)
* [CI] Add noLD test

* Make noLD test only trigger with a PR comment

* [CI] Don't install stringi

* Add the Titanic example as a unit test

* Document trigger

* add to index

* Clarify that it needs to be a review comment
2020-11-12 12:41:25 -08:00
Jiaming Yuan
c1a62b5fa2 Expect gpu external memory to fail. (#6381) 2020-11-12 19:24:48 +08:00
Jiaming Yuan
c90f968d92 Update Python documents. (#6376) 2020-11-12 17:51:32 +08:00
Philip Hyunsu Cho
c5645180a6 [R] Fix a crash that occurs with noLD R (#6378) 2020-11-11 21:09:08 -08:00
James Lamb
12d27f43ff [doc] make Dask distributed example copy-pastable (#6345) 2020-11-11 20:22:17 -08:00
Jiaming Yuan
d711d648cb Fix label errors in graph visualization (#6369) 2020-11-11 17:44:59 -08:00
Jiaming Yuan
debeae2509 [R] Fix warnings from R check --as-cran (#6374)
* Remove exit and printf.

* Fix warnings.
2020-11-11 18:39:37 +08:00
Jiaming Yuan
6e12c2a6f8 [dask] Support running on GKE. (#6343)
* Avoid accessing `scheduler_info()['workers']`.
* Avoid calling `client.gather` inside task.
* Avoid using `client.scheduler_address`.
2020-11-11 18:04:34 +08:00
Jiaming Yuan
8a17610666 Implement GPU predict leaf. (#6187) 2020-11-11 17:33:47 +08:00
Philip Hyunsu Cho
7f101d1b33 [CI] Remove R check from Jenkins (#6372)
* Remove R check from Jenkins

* Print stacktrace when CRAN test fail in GitHub Actions

* Add verbose flag in tests/ci_build/print_r_stacktrace.sh

* Fix path in tests/ci_build/print_r_stacktrace.sh
2020-11-10 22:46:54 -08:00
Jiaming Yuan
a5cfa7841e Run R check as cran on action. [skip ci] (#6371)
Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-11-11 12:02:53 +08:00
Jiaming Yuan
43efadea2e Deterministic data partitioning for external memory (#6317)
* Make external memory data partitioning deterministic.

* Change the meaning of `page_size` from bytes to number of rows.

* Design a data pool.

* Note for external memory.

* Enable unity build on Windows CI.

* Force garbage collect on test.
2020-11-11 06:11:06 +08:00
Jean Lescut-Muller
9564886d9f Update custom_metric_obj.rst (#6367) 2020-11-10 22:29:22 +08:00
Jiaming Yuan
e65e3cf36e Support shared library in system path. (#6362) 2020-11-10 16:04:25 +08:00
Jiaming Yuan
184e2eac7d Add period to evaluation monitor. (#6348) 2020-11-10 07:47:48 +08:00
ShvetsKS
d411f98d26 simple fix for static schedule in predict (#6357)
Co-authored-by: ShvetsKS <kirill.shvets@intel.com>
2020-11-09 17:01:30 +08:00
Jiaming Yuan
519cee115a Avoid resetting seed for every configuration. (#6349) 2020-11-06 10:28:35 +08:00
James Lamb
f3a4253984 Ignore files from local Dask development (#6346) 2020-11-05 13:54:46 +08:00
Jack Dunn
51e6531315 Fix missing space in warning message (#6340) 2020-11-04 06:03:16 -05:00
Jiaming Yuan
2cc9662005 Support slicing tree model (#6302)
This PR is meant to end the confusion around best_ntree_limit and to unify model slicing. With multi-class models and random forests, asking users to understand how to set ntree_limit is difficult and error-prone.

* Implement the save_best option in early stopping.

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-11-02 23:27:39 -08:00
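Roughly, the slicing interface introduced here lets users take a sub-forest directly instead of reasoning about ntree_limit; a sketch, with exact semantics possibly differing between versions:

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(100, 10)
y = np.random.randint(2, size=100)
dtrain = xgb.DMatrix(X, label=y)
booster = xgb.train({"objective": "binary:logistic"}, dtrain, num_boost_round=10)

# Take the first 5 boosting rounds as a standalone model; multi-class and
# random-forest tree layouts are handled internally, unlike with ntree_limit.
first_half = booster[:5]
preds = first_half.predict(dtrain)
```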
Rory Mitchell
29745c6df2 Fix inclusive scan for large sizes (#6234) 2020-11-03 17:01:43 +13:00
Jiaming Yuan
7756192906 [dask] Fix prediction on DaskDMatrix with multiple meta data. (#6333)
* Unify the meta handling methods.
2020-11-02 19:18:44 -05:00
Jiaming Yuan
5a7b3592ed Optional find_package for sanitizers. (#6329) 2020-11-02 19:17:17 -05:00
Jiaming Yuan
048acf81cd Enable shap sparse test. (#6332) 2020-11-01 20:59:27 +08:00
Igor Moura
5e1e972aea Clean up warnings (#6325) 2020-10-30 23:50:29 +08:00
nabokovas
f0fe18fc28 Add a new github actions badge (#6321) 2020-10-30 17:57:21 +08:00
Jiaming Yuan
6ff331b705 Fix Python callback. (#6320) 2020-10-30 05:03:44 +08:00
Sergio Gavilán
b181a88f9f Reduced some C++ compiler warnings (#6197)
* Removed some warnings

* Rebase with master

* Solved C++ Google Tests errors made by refactoring in order to remove warnings

* Undo renaming path -> path_

* Fix style check

Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
2020-10-29 12:36:00 -07:00
Jiaming Yuan
c80657b542 Fix flaky data initialization test. (#6318) 2020-10-30 03:11:22 +08:00
Naveed Ahmed Saleem Janvekar
608bda7052 [jvm-packages] add example to handle missing value other than 0 (#5677)
Add an example of handling a missing value other than 0 under the "Dealing with missing values" section
2020-10-28 17:24:35 -07:00
Jiaming Yuan
74ea82209b Lazy import dask libraries. (#6309)
* Lazy import dask libraries.

* Lint && fix.

* Use short name.
2020-10-28 15:50:11 -07:00
Jiaming Yuan
dfac5f89e9 Group CLI demo into subdirectory. (#6258)
The CLI is not the most developed interface. Grouping its demos into their own directory helps new users avoid it, as most use cases go through a language binding.
2020-10-28 14:40:44 -07:00
James Lamb
6383757dca [R] allow xgb.plot.importance() calls to fill a grid (#6294) 2020-10-28 14:37:28 -07:00
Tanuja Kirthi Doddapaneni
d261ba029a Added USE_NCCL_LIB_PATH option to enable user to set NCCL_LIBRARY during build (#6310)
Description: enable the user to set NCCL_LIBRARY during the build
2020-10-28 14:36:31 -07:00
vcarpani
671971e12e Compiler warnings (#6286)
* Fix warnings for json.h

* Fix warnings for metric.h

* Fix warnings for updater_quantile_hist.cc.

* Fix warnings for updater_histmaker.cc.

Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
2020-10-28 13:46:15 -07:00
Jiaming Yuan
e8884c4637 Document tree method for feature weights. (#6312) 2020-10-28 13:42:13 -07:00
Philip Hyunsu Cho
143b278267 Mark flaky tests as XFAIL (#6299)
* Temporarily skip TestGPUUpdaters::test_categorical

* Temporarily skip test_boost_from_prediction[approx]
2020-10-28 11:50:57 -07:00
Jiaming Yuan
c4da967b5c Support unity build. (#6295)
* Support unity build.

* Setup on Windows Jenkins.

* Revert "Setup on Windows Jenkins."

This reverts commit 8345cb8d2b009eec8ae9fa6f16412a7c9b6ec12c.
2020-10-28 11:49:28 -07:00
Philip Hyunsu Cho
f6169c0b16 [CI] Use separate Docker cache for each CUDA version (#6305) 2020-10-28 11:07:00 -07:00
Jiaming Yuan
3310e208fd Fix inplace prediction interval. (#6259)
* Add back the interval in call.
* Make the interval non-optional.
2020-10-28 13:13:59 +08:00
Jiaming Yuan
cc76724762 Reduce warning. (#6273) 2020-10-27 12:24:19 -07:00
DIVYA CHAUHAN
4e9c4f2d73 Create a tutorial for using the C API in a C/C++ application (#6285)
Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-10-27 12:19:20 -07:00
James Lamb
e1de390e6e [ci] replace 'egrep' with 'grep -E' (#6287) 2020-10-27 12:05:48 -07:00
Rory Mitchell
f0c3ff313f Update GPUTreeShap, add docs (#6281)
* Update GPUTreeShap, add docs

* Fix test

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-10-27 18:22:12 +13:00
Jiaming Yuan
b180223d18 Cleanup RABIT. (#6290)
* Remove recovery and MPI speed tests.
* Remove readme.
* Remove Python binding.
* Add checks in C API.
2020-10-27 08:48:22 +08:00
Akira Funahashi
8e0f5a6fc7 Update plugin instructions for CMake build (#6289) 2020-10-26 17:42:07 -07:00
Philip Hyunsu Cho
c8ec62103a Deprecate LabelEncoder in XGBClassifier; Enable cuDF/cuPy inputs in XGBClassifier (#6269)
* Deprecate LabelEncoder in XGBClassifier; skip LabelEncoder for cuDF/cuPy inputs

* Add unit tests for cuDF and cuPy inputs with XGBClassifier

* Fix lint

* Clarify warning

* Move use_label_encoder option to XGBClassifier constructor

* Add a test for cudf.Series

* Add use_label_encoder to XGBRFClassifier doc

* Address reviewer feedback
2020-10-26 13:20:51 -07:00
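In practice, users can opt out of the deprecated encoder by passing labels that are already encoded; a sketch based on the 1.3-era behavior:

```python
import numpy as np
from xgboost import XGBClassifier

X = np.random.rand(100, 4)
y = np.random.randint(3, size=100)  # labels already encoded as 0, 1, 2

# Skip the deprecated scikit-learn LabelEncoder; cuDF/cuPy inputs also
# become usable since no host-side label encoding is performed.
clf = XGBClassifier(use_label_encoder=False, eval_metric="mlogloss")
clf.fit(X, y)
```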
Jiaming Yuan
bcfab4d726 Revert "Disable JSON full serialization for now. (#6248)" (#6266)
This reverts commit 6d293020fb.
2020-10-27 03:30:47 +08:00
Jiaming Yuan
d61b628bf5 Remove RABIT CMake targets. (#6275)
* Now it's built as part of libxgboost.
* Set correct C API error in RABIT initialization and finalization.
* Remove redundant message.
* Guard the tracker print C API.
2020-10-27 01:30:20 +08:00
Jiaming Yuan
2686d32a36 Skip dask tests on ARM. (#6267)
Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
2020-10-26 15:09:05 +08:00
Philip Hyunsu Cho
677f676172 Use UserWarning for old callback, as DeprecationWarning is not visible (#6270) 2020-10-22 01:10:52 -07:00
Philip Hyunsu Cho
1300467d36 Fix a typo in is_arm() in testing.py [skip ci] (#6271) 2020-10-22 13:07:14 +08:00
Jiaming Yuan
b5c2a47b20 Drop single point model recovery (#6262)
* Pass rabit params in JVM package.
* Implement timeout using poll timeout parameter.
* Remove OOB data check.
2020-10-21 15:27:03 +08:00
Jiaming Yuan
81c37c28d5 Time the CPU tests on Jenkins. (#6257)
* Time the CPU tests on Jenkins.
* Reduce thread contention.
* Add doc.
* Skip heavy tests on ARM.
2020-10-20 17:19:07 -07:00
Igor Moura
d1254808d5 Clean up C++ warnings (#6213) 2020-10-19 23:02:33 +08:00
Jiaming Yuan
ddf37cca30 Unify thread configuration. (#6186) 2020-10-19 16:05:42 +08:00
Philip Hyunsu Cho
7f6ed5780c [CI] Build a Python wheel for aarch64 platform (#6253) 2020-10-18 22:35:19 -07:00
Jiaming Yuan
5037abeb86 Fix linear gpu input (#6255) 2020-10-19 12:02:36 +08:00
Yuan Tang
cdcdab98b8 Add sponsors link to FUNDING.yml (#6252) 2020-10-18 19:17:11 -07:00
Philip Hyunsu Cho
65ea42bd42 [CI] Reduce testing load with RMM (#6249)
* [CI] Reduce testing load with RMM

* Address reviewer's comment
2020-10-18 19:16:46 -07:00
Manikya Bardhan
549f361b71 Updated winning solutions list (#6254) 2020-10-19 04:06:48 +08:00
Jiaming Yuan
6d293020fb Disable JSON full serialization for now. (#6248)
* Disable JSON serialization for now.

* Multi-class classification checkpoints at each iteration, which brings significant overhead.

Revert: 90355b4f00

* Set R tests to use binary.
2020-10-16 17:59:54 +08:00
Jiaming Yuan
52452bebb9 Fix cls typo. (#6247) 2020-10-16 16:40:44 +08:00
Yuan Tang
3098d7cee0 Add link to XGBoost's Twitter handle (#6244) 2020-10-15 16:54:34 -07:00
Jiaming Yuan
3da5a69dc9 Fix typo in dask interface. (#6240) 2020-10-15 15:26:29 +08:00
dependabot[bot]
06e453ddf4 Bump junit from 4.11 to 4.13.1 in /jvm-packages/xgboost4j (#6230)
Bumps [junit](https://github.com/junit-team/junit4) from 4.11 to 4.13.1.
- [Release notes](https://github.com/junit-team/junit4/releases)
- [Changelog](https://github.com/junit-team/junit4/blob/main/doc/ReleaseNotes4.11.md)
- [Commits](https://github.com/junit-team/junit4/compare/r4.11...r4.13.1)

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2020-10-13 19:46:19 -07:00
dependabot[bot]
b51a717deb Bump junit from 4.11 to 4.13.1 in /jvm-packages/xgboost4j-gpu (#6233)
Bumps [junit](https://github.com/junit-team/junit4) from 4.11 to 4.13.1.
- [Release notes](https://github.com/junit-team/junit4/releases)
- [Changelog](https://github.com/junit-team/junit4/blob/main/doc/ReleaseNotes4.11.md)
- [Commits](https://github.com/junit-team/junit4/compare/r4.11...r4.13.1)

Signed-off-by: dependabot[bot] <support@github.com>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2020-10-13 19:44:56 -07:00
Jiaming Yuan
bed7ae4083 Loop over thrust::reduce. (#6229)
* Check input chunk size of dqdm.
* Add doc for current limitation.
2020-10-14 10:40:56 +13:00
Rory Mitchell
734a911a26 Loop over copy_if (#6201)
* Loop over copy_if

* Catch OOM.

Co-authored-by: fis <jm.yuan@outlook.com>
2020-10-14 10:23:16 +13:00
Wittty-Panda
0fc263ead5 Update the list of winning solutions (#6222) 2020-10-13 20:05:12 +08:00
Jiaming Yuan
b05073bda5 [dask] Test for data initialization. (#6226) 2020-10-13 11:08:35 +08:00
Jiaming Yuan
2443275891 Cleanup Python code. (#6223)
* Remove pathlike as XGBoost 1.2 requires Python 3.6.
* Move conditional import of dask/distributed into dask module.
2020-10-12 15:44:41 +08:00
Jiaming Yuan
70c2039748 Catch all standard exceptions in C API. (#6220)
* `std::bad_alloc` is not guaranteed to be caught.
2020-10-12 14:01:46 +08:00
Jiaming Yuan
2241563f23 Handle duplicated values in sketching. (#6178)
* Accumulate weights in duplicated values.
* Fix device id in iterative dmatrix.
2020-10-10 19:32:44 +08:00
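Conceptually, the fix merges duplicated feature values by summing their weights before they enter the quantile sketch; a small illustrative sketch, not the actual C++ implementation:

```python
from itertools import groupby

def accumulate_duplicates(values, weights):
    """Merge duplicated values, summing their weights (illustration only)."""
    pairs = sorted(zip(values, weights))
    return [(v, sum(w for _, w in grp))
            for v, grp in groupby(pairs, key=lambda p: p[0])]

# The value 2.0 appears twice and ends up with weight 0.5 + 0.25 = 0.75.
print(accumulate_duplicates([2.0, 1.0, 2.0], [0.5, 1.0, 0.25]))
# [(1.0, 1.0), (2.0, 0.75)]
```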
Jiaming Yuan
ab5b35134f Rework Python callback functions. (#6199)
* Define a new callback interface for Python.
* Deprecate the old callbacks.
* Enable early stopping on dask.
2020-10-10 17:52:36 +08:00
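The new interface is class-based; a minimal sketch following the 1.3 API, with method signatures possibly differing slightly across versions:

```python
import xgboost as xgb

class PrintLastEval(xgb.callback.TrainingCallback):
    """Print the latest metric value for each evaluation dataset."""
    def after_iteration(self, model, epoch, evals_log):
        for data, metrics in evals_log.items():
            for name, history in metrics.items():
                print(f"[{epoch}] {data}-{name}: {history[-1]}")
        return False  # returning True requests early termination

# Usage: xgb.train(params, dtrain, evals=[(dvalid, "valid")],
#                  callbacks=[PrintLastEval()])
```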
Jiaming Yuan
b5b24354b8 More categorical tests and disable shap sparse test. (#6219)
* Fix tree load with 32 category.
2020-10-10 16:12:37 +08:00
Philip Hyunsu Cho
c991eb612d [jvm-packages] Fix up build for xgboost4j-gpu, xgboost4j-spark-gpu (#6216)
* [CI] Clean up build for JVM packages

* Use correct path for saving native lib

* Fix groupId of maven-surefire-plugin

* Fix stashing of xgboost4j_jar_gpu

* [CI] Don't run xgboost4j-tester with GPU, since it doesn't use gpu_hist
2020-10-09 14:08:15 -07:00
Jiaming Yuan
70ce5216b5 Add high level tests for categorical data. (#6179)
* Fix unique.
2020-10-09 09:27:23 +08:00
vcarpani
6bc9747df5 Reduce compile warnings (#6198)
Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
2020-10-08 23:14:59 +08:00
ShvetsKS
a4ce0eae43 CPU predict performance improvement (#6127)
Co-authored-by: ShvetsKS <kirill.shvets@intel.com>
2020-10-08 15:50:21 +03:00
Jiaming Yuan
4cfdcaaf7b Move non-OpenMP gtest to GitHub Actions (#6210) 2020-10-08 00:58:21 -07:00
Jiaming Yuan
ddc4f20e54 Add JSON schema for categorical splits. (#6194) 2020-10-07 17:33:31 +08:00
odidev
a2fea33103 Added arm64 job in Travis-CI (#6200)
Signed-off-by: odidev <odidev@puresoftware.com>
2020-10-07 15:02:09 +08:00
Igor Moura
5908598666 [Doc] Add info on GPU compiler (#6204)
* Add a note about the required compiler version for CUDA.
* Add a link that gives a short explanation of compute capability versions.
2020-10-06 11:35:18 +08:00
Yuan Tang
1013224888 Consistent style for build status badge (#6203) 2020-10-05 18:23:21 -07:00
Philip Hyunsu Cho
f121f2738f [CI] Fix Docker build for CUDA 11 (#6202) 2020-10-05 17:54:14 -07:00
Jiaming Yuan
fd58005edf Ignore cachedir by joblib. [skip ci] (#6193) 2020-10-04 14:54:32 +08:00
DIVYA CHAUHAN
750bd0ae9a Update the list of winning solutions using XGBoost (#6192)
Co-authored-by: divya <divyachauhan661@gmail.com>
Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-10-03 13:39:58 -07:00
Christian Lorentzen
cf4f019ed6 [Breaking] Change default evaluation metric for classification to logloss / mlogloss (#6183)
* Change DefaultEvalMetric of classification from error to logloss

* Change default binary metric in plugin/example/custom_obj.cc

* Set old error metric in python tests

* Set old error metric in R tests

* Fix missed eval metrics and typos in R tests

* Fix setting eval_metric twice in R tests

* Add warning for empty eval_metric for classification

* Fix Dask tests

Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
2020-10-02 12:06:47 -07:00
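Users who depend on the old behavior can request the previous metric explicitly; a sketch:

```python
import xgboost as xgb

# After this change, binary classification defaults to logloss and
# multi-class to mlogloss. To keep the pre-1.3 default, set it explicitly:
params = {"objective": "binary:logistic", "eval_metric": "error"}
```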
John Quitto-Graham
e0e4f15d0e Fix a comment in demo to use correct reference (#6190)
Co-authored-by: John Quitto Graham <johnq@dgx07.aselab.nvidia.com>
2020-10-01 13:16:04 -07:00
Philip Hyunsu Cho
eb7946ff25 Hide C++ symbols from dmlc-core (#6188) 2020-10-01 10:07:13 -07:00
lacrosse91
6bc41df2fe [Doc] Add list of winning solutions in data science competitions using XGBoost (#6177) 2020-09-30 14:41:29 -07:00
Jiaming Yuan
f0c63902ff Use default allocator in sketching. (#6182) 2020-09-30 14:55:59 +08:00
Jiaming Yuan
444131a2e6 Add categorical data support to GPU Hist. (#6164) 2020-09-29 11:27:25 +08:00
Jiaming Yuan
798af22ff4 Add categorical data support to GPU predictor. (#6165) 2020-09-29 11:25:34 +08:00
Jiaming Yuan
7622b8cdb8 Enable categorical data support on Python DMatrix. (#6166)
* Only pandas is recognized.
2020-09-29 11:22:56 +08:00
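A sketch of the pandas path, assuming the experimental `enable_categorical` flag added around this time:

```python
import pandas as pd
import xgboost as xgb

df = pd.DataFrame({
    "color": pd.Categorical(["red", "green", "red"]),  # pandas categorical dtype
    "size": [1.0, 2.0, 3.0],
})
# Only pandas categorical columns are recognized; the feature was experimental.
dtrain = xgb.DMatrix(df, label=[0, 1, 0], enable_categorical=True)
```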
Jiaming Yuan
52c0b3f100 Fix error message. (#6176) 2020-09-29 11:18:25 +08:00
Rory Mitchell
dda9e1e487 Update GPUTreeshap (#6163)
* Reduce shap test duration

* Test interoperability with shap package

* Add feature interactions

* Update GPUTreeShap
2020-09-28 09:43:47 +13:00
Jiaming Yuan
434a3f35a3 Add TAGS to gitignore. [skip ci] (#6175) 2020-09-27 21:27:40 +08:00
Jiaming Yuan
07355599c2 Option for generating device debug info. (#6168)
* Supply `-G;-src-in-ptx` when `USE_DEVICE_DEBUG` is set and debug mode is selected.
* Refactor CMake script to gather all CUDA configuration.
* Use CMAKE_CUDA_ARCHITECTURES.  Close #6029.
* Add compute 80.  Close #5999
2020-09-27 03:26:56 +08:00
Kyle Nicholson
e6a238c020 Update base margin dask (#6155)
* Add `base-margin`
* Add `output_margin` to regressor.

Co-authored-by: fis <jm.yuan@outlook.com>
2020-09-26 21:30:52 +08:00
Alexander Gugel
03b8fdec74 Add DMatrix usage examples to c-api-demo (#5854)
* Add DMatrix usage examples to c-api-demo

* Add XGDMatrixCreateFromCSREx example

* Add XGDMatrixCreateFromCSCEx example
2020-09-26 02:10:12 -07:00
Philip Hyunsu Cho
2c4dedb7a0 [CI] Test C API demo (#6159)
* Fix CMake install config to use dependencies

* [CI] Test C API demo

* Explicitly cast num_feature, to avoid warning in Linux
2020-09-25 14:49:01 -07:00
Philip Hyunsu Cho
bd2b1eabd0 Add back support for scipy.sparse.coo_matrix (#6162) 2020-09-25 00:49:49 -07:00
Philip Hyunsu Cho
72ef553550 Fall back to CUB allocator if RMM memory pool is not set up (#6150)
* Fall back to CUB allocator if RMM memory pool is not set up

* Fix build

* Prevent memory leak

* Add note about lack of memory initialisation

* Add check for other fast allocators

* Set use_cub_allocator_ to true when RMM is not enabled

* Fix clang-tidy

* Do not demangle symbol; add check to ensure Linux+Clang/GCC combo
2020-09-24 11:04:50 -07:00
Zeno Gantner
5b05f88ba9 Cosmetic fixes in faq.rst (#6161) 2020-09-24 21:05:10 +08:00
Jiaming Yuan
14afdb4d92 Support categorical data in ellpack. (#6140) 2020-09-24 19:28:57 +08:00
Jiaming Yuan
78d72ef936 Add DaskDeviceQuantileDMatrix demo. (#6156) 2020-09-24 14:08:28 +08:00
Philip Hyunsu Cho
678ea40b24 [CI] Upgrade cuDF and RMM to 0.16 nightlies; upgrade to Ubuntu 18.04 (#6157)
* [CI] Upgrade cuDF and RMM to 0.16 nightlies

* Use Ubuntu 18.04 in RMM test, since RMM needs GCC 7+
2020-09-23 19:48:44 -07:00
James Lamb
c686bc0461 [R] remove warning in configure.ac (fixes #6151) (#6152)
* [R] remove warning in configure.ac (fixes #6151)

* update configure
2020-09-22 22:47:38 -07:00
Jiaming Yuan
e033caa3ba Remove linking RMM library. (#6146)
* Remove linking RMM library.

* RMM is now header only.

* Remove remaining reference.
2020-09-22 16:59:33 -07:00
Jiaming Yuan
452ac8ea62 Time GPU tests on CI. (#6141) 2020-09-22 14:25:10 +08:00
Jiaming Yuan
33d80ffad0 [dask] Support more meta data on functional interface. (#6132)
* Add base_margin, label_(lower|upper)_bound.
* Test survival training with dask.
2020-09-21 16:56:37 +08:00
Jiaming Yuan
7065779afa Improve JSON format for categorical features. (#6128)
* Gather categories for all nodes.
2020-09-21 15:35:05 +08:00
Jiaming Yuan
210c131ce7 Support categorical data in GPU sketching. (#6137) 2020-09-21 13:53:06 +08:00
Nan Zhu
c932fb50a1 [jvm-packages]add xgboost4j-gpu/xgboost4j-spark-gpu module to facilitate release (#6136)
* add xgboost4j-gpu/xgboost4j-spark-gpu module to facilitate release

* Update pom.xml
2020-09-20 09:20:38 -07:00
Jiaming Yuan
a069a21e03 Implement intrusive ptr (#6129)
* Use intrusive ptr for JSON.
2020-09-20 20:07:16 +08:00
Jiaming Yuan
e319b63f9e Merge extract cuts into QuantileContainer. (#6125)
* Use pruning for initial summary construction.
2020-09-18 16:36:39 +08:00
Jiaming Yuan
cc82ca167a [dask] Refactor meta data handling. (#6130) 2020-09-18 13:26:40 +08:00
Jiaming Yuan
5384ed85c8 Use caching allocator from RMM, when RMM is enabled (#6131) 2020-09-17 21:51:49 -07:00
neko
6bc9b9dc4f Fix doc for CMake requirement. (#6123) 2020-09-16 17:59:43 +08:00
Philip Hyunsu Cho
9e955fb9b0 [R] Check warnings explicitly for model compatibility tests (#6114)
* [R] Check warnings explicitly for model compatibility tests

* Address reviewer's feedback
2020-09-15 10:49:48 -07:00
Philip Hyunsu Cho
33577ef5d3 Add MAPE metric (#6119) 2020-09-14 18:45:27 -07:00
Rory Mitchell
47350f6acb Allow kwargs in dask predict (#6117) 2020-09-15 13:04:03 +12:00
Jiaming Yuan
b5f52f0b1b Validate weights are positive values. (#6115) 2020-09-15 09:03:55 +08:00
Jiaming Yuan
c6f2b8c841 Upgrade gputreeshap. (#6099)
* Upgrade gputreeshap.

Co-authored-by: Rory Mitchell <r.a.mitchell.nz@gmail.com>
2020-09-15 12:57:22 +12:00
Vitalie Spinu
1453bee3e7 [R] Remove stringi dependency (#6109)
* [R] Fix empty empty tests and a test warnings

* [R] Remove stringi dependency (fix #5905)

* Fix R lint check

* [R] Fix automatic conversion to factor in R < 4.0.0 in xgb.model.dt.tree

* Add `R` Makefile variable

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-09-12 13:18:08 -07:00
Jiaming Yuan
07945290a2 Remove unused RABIT targets. (#6110)
* Remove rabit mock.
* Remove rabit base.
2020-09-11 14:09:44 +08:00
Jiaming Yuan
c92d751ad1 Enable building rabit on Windows (#6105) 2020-09-11 11:54:46 +08:00
Jiaming Yuan
08bdb2efc8 Fix dask doc. [skip ci] (#6108) 2020-09-11 10:56:12 +08:00
Bobby Wang
00b0ad1293 [Doc] add doc for kill_spark_context_on_worker_failure parameter (#6097)
* [Doc] add doc for kill_spark_context_on_worker_failure parameter

* resolve comments
2020-09-09 21:28:44 -07:00
Philip Hyunsu Cho
d0ccb13d09 Work around a compiler bug in MacOS AppleClang 11 (#6103)
* Workaround a compiler bug in MacOS AppleClang

* [CI] Run C++ test with MacOS Catalina + AppleClang 11.0.3

* [CI] Migrate cmake_test on MacOS from Travis CI to GitHub Actions

* Install OpenMP runtime

* [CI] Use CMake to locate lz4 lib
2020-09-09 21:21:55 -07:00
Philip Hyunsu Cho
9338582d79 [CI] Fix CTest by running it in a correct directory (#6104)
* [CI] Fix CTest by running it in a correct directory

* [CI] Do not run dmlc-core unit tests with sanitizer
2020-09-09 10:31:09 -07:00
Jiaming Yuan
3dcd85fab5 Refactor rabit tests (#6096)
* Merge rabit tests into XGBoost.
* Run them On CI.
* Simplification for CMake scripts.
2020-09-09 12:30:29 +08:00
Jiaming Yuan
318bffaa10 Fix custom obj link. [skip ci] (#6100) 2020-09-09 10:55:38 +08:00
Jiaming Yuan
b0001a6e29 Correct style warnings from clang-tidy for rabit. (#6095) 2020-09-08 12:13:58 +08:00
Hristo Iliev
da61d9460b [jvm-packages] Add getNumFeature method (#6075)
* Add getNumFeature to the Java API
* Add getNumFeature to the Scala API
* Add unit tests for getNumFeature

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-09-07 20:57:46 -07:00
Jiaming Yuan
93e9af43bb Unify set index data. (#6062) 2020-09-08 11:38:41 +08:00
Jiaming Yuan
e5d40b39cd [Breaking] Don't save leaf child count in JSON. (#6094)
The field is deprecated and not used anywhere in XGBoost.
2020-09-08 11:11:13 +08:00
Jiaming Yuan
5994f3b14c Don't link imported target. (#6093) 2020-09-07 02:51:09 -07:00
Philip Hyunsu Cho
974ba12f38 Fix CMake build with BUILD_STATIC_LIB option (#6090)
* Fix CMake build with BUILD_STATIC_LIB option

* Disable BUILD_STATIC_LIB option when R/JVM pkg is enabled

* Add objxgboost to install target only when BUILD_STATIC_LIB=ON
2020-09-07 02:38:29 -07:00
Daniel Steinberg
68c55a37d9 Add cache name back to external_memory.py files. (#6088) 2020-09-06 16:01:09 +08:00
Boris Feld
24ca9348f7 Fix typo in xgboost.callback.early_stop docstring (#6071) 2020-09-06 13:37:07 +08:00
Rory Mitchell
2e907abdb8 Updates to GPUTreeShap (#6087)
* Extract paths on device

* Update GPUTreeShap
2020-09-06 13:39:08 +12:00
Bobby Wang
0e2d5669f6 [jvm-packages] cancel job instead of killing SparkContext (#6019)
* cancel job instead of killing SparkContext

This PR changes the default behavior of killing the SparkContext. Instead, this PR cancels the affected jobs when a task fails. That means the SparkContext stays alive even when some exceptions happen.

* add a parameter to control if killing SparkContext

* cancel the jobs the failed task belongs to

* remove the jobId from the map when one job failed.

* resolve comments
2020-09-02 14:20:59 -07:00
Tong He
3912f3de06 Updates from 1.2.0 cran submission (#6077)
* update for 1.2.0 cran submission

* recover cmakelists

* fix unittest from the shap PR

* trigger CI
2020-09-02 20:50:23 +08:00
Philip Hyunsu Cho
9be969cc7a Add release note for 1.2.0 in NEWS.md (#6063)
* Update query_contributors.py to account for pagination

* Add the release note for 1.2.0

* Add release note for patch releases 

* Apply suggestions from code review 

* Fix typo 

Co-authored-by: Jiaming Yuan <jm.yuan@outlook.com>
Co-authored-by: John Zedlewski <904524+JohnZed@users.noreply.github.com>
2020-09-02 00:49:02 -07:00
Anthony D'Amato
ada964f16e Clean the way deterministic partitioning is computed (#6033)
We propose to use only the rowHashCode to compute the partitionKey; adding the FeatureValue hashCode does not bring more value and would make the computation slower. Even though collisions would appear at a rate of 0.2% with MurmurHash3, this is bearable for partitioning and won't have any impact on data balancing.
2020-08-30 14:38:23 -07:00
ShvetsKS
c1ca872d1e Modin DF support (#6055)
* Modin DF support

* mode change

* tests were added, ci env was extended

* mode change

* Remove redundant installation of modin

* Add a pytest skip marker for modin

* Install Modin[ray] from PyPI

* fix interfering

* avoid extra conversion

* delete cv test for modin

* revert cv function

Co-authored-by: ShvetsKS <kirill.shvets@intel.com>
Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
2020-08-29 22:33:30 +03:00
FelixYBW
3a990433f9 set maxBins to 256. Align with c code in src/tree/param.h (#6066) 2020-08-28 15:06:11 +03:00
Rory Mitchell
9bddecee05 Update GPUTreeShap (#6064)
* Update GPUTreeShap

* Update src/CMakeLists.txt

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-08-27 12:01:53 -07:00
Jiaming Yuan
2fcc4f2886 Unify evaluation functions. (#6037) 2020-08-26 14:23:27 +08:00
Jiaming Yuan
80c8547147 Make binary bin search reusable. (#6058)
* Move binary search row to hist util.
* Remove dead code.
2020-08-26 05:05:11 +08:00
Philip Hyunsu Cho
9c14e430af [CI] Improve JVM test in GitHub Actions (#5930)
* [CI] Improve JVM test in GitHub Actions

* Use env var for Wagon options [skip ci]

* Move the retry flag to pom.xml [skip ci]

* Export env var RABIT_MOCK to run Spark tests [skip ci]

* Correct location of env var

* Re-try up to 5 times [skip ci]

* Don't run distributed training test on Windows

* Fix typo

* Update main.yml
2020-08-25 10:14:46 -07:00
Jiaming Yuan
81d8dd79ca Bump header version. (#6056) 2020-08-26 00:29:00 +08:00
Jiaming Yuan
20c95be625 Expand categorical node. (#6028)
Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-08-25 18:53:57 +08:00
Rory Mitchell
9a4e8b1d81 GPUTreeShap (#6038) 2020-08-25 12:47:41 +12:00
Philip Hyunsu Cho
b3193052b3 Bump version to 1.3.0 snapshot in master (#6052) 2020-08-23 17:13:46 -07:00
Philip Hyunsu Cho
4729458a36 [jvm-packages] [doc] Update install doc for JVM packages (#6051) 2020-08-23 14:14:53 -07:00
Philip Hyunsu Cho
cfced58c1c [CI] Port CI fixes from the 1.2.0 branch (#6050)
* Fix a unit test on CLI, to handle RC versions

* [CI] Use mgpu machine to run gpu hist unit tests

* [CI] Build GPU-enabled JAR artifact and deploy to xgboost-maven-repo
2020-08-22 23:24:46 -07:00
Jiaming Yuan
a144daf034 Limit tree depth for GPU hist. (#6045) 2020-08-22 19:34:52 +08:00
Jiaming Yuan
b9ebbffc57 Fix plotting test. (#6040)
Previously the test loaded a model generated by `test_basic.py`; now we generate the model explicitly.

* Cleanup saved files for basic tests.
2020-08-22 13:18:48 +08:00
Jiaming Yuan
7a46515d3d Remove win2016 jvm github action test. (#6042) 2020-08-20 19:39:46 -07:00
Jiaming Yuan
7be2e04bd4 Fix scikit learn cls doc. (#6041) 2020-08-20 19:23:06 -07:00
Philip Hyunsu Cho
1fd29edf66 [CI] Migrate linters to GitHub Actions (#6035)
* [CI] Move lint to GitHub Actions

* [CI] Move Doxygen to GitHub Actions

* [CI] Move Sphinx build test to GitHub Actions

* [CI] Reduce workload for Windows R tests

* [CI] Move clang-tidy to Build stage
2020-08-19 12:33:51 -07:00
ShvetsKS
24f2e6c97e Optimize DMatrix build time. (#5877)
Co-authored-by: SHVETS, KIRILL <kirill.shvets@intel.com>
2020-08-20 01:37:03 +08:00
Jiaming Yuan
29b7fea572 Optimize cpu sketch allreduce for sparse data. (#6009)
* Bypass RABIT serialization reducer and use custom allgather based merging.
2020-08-19 10:03:45 +08:00
Jiaming Yuan
90355b4f00 Make JSON the default full serialization format. (#6027) 2020-08-19 09:57:43 +08:00
Anthony D'Amato
f58e41bad8 Fix deterministic partitioning with dataset containing Double.NaN (#5996)
The functions featureValueOfSparseVector and featureValueOfDenseVector could return Float.NaN if the input vector contained any missing values. This would make the partition key computation fail, and most of the vectors would end up in the same partition. We fix this by avoiding returning NaN and simply using the row hashCode in this case.
We added a test to ensure that the repartitioning is now indeed uniform on an input dataset containing missing values, by checking that the variance of partition sizes is below a certain threshold.

Signed-off-by: Anthony D'Amato <anthony.damato@hotmail.fr>
2020-08-18 18:55:37 -07:00
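The real code is Scala inside xgboost4j-spark; as a conceptual sketch of the fallback, in Python with hypothetical names:

```python
import math

def partition_key(row_hash: int, feature_value: float, num_partitions: int) -> int:
    """Fall back to the row hash alone when the feature value is NaN,
    so rows with missing values still spread evenly (illustration only)."""
    if math.isnan(feature_value):
        return row_hash % num_partitions
    return (row_hash ^ hash(feature_value)) % num_partitions
```

The later change in #6033 (listed above) simplifies this further by always deriving the key from the row hash alone.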
Cuong Duong
e51cba6195 Add SHAP summary plot using ggplot2 (#5882)
* add SHAP summary plot using ggplot2

* Update xgb.plot.shap

* Update example in xgb.plot.shap documentation

* update logic, add tests

* whitespace fixes

* whitespace fixes for test_helpers

* namespace for sd function

* explicitly declare variables that are automatically evaluated by data.table

* Fix R lint

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-08-18 18:04:09 -07:00
Qi Zhang
989ddd036f Swap byte-order in binary serializer to support big-endian arch (#5813)
* fixed some endian issues

* Use dmlc::ByteSwap() to simplify code

* Fix lint check

* [CI] Add test for s390x

* Download latest CMake on s390x

* Fix a bug in my code

* Save magic number in dmatrix with byteswap on big-endian machine

* Save version in binary with byteswap on big-endian machine

* Load scalar with byteswap in MetaInfo

* Add a debugging message

* Handle arrays correctly when byteswapping

* EOF can also be 255

* Handle magic number in MetaInfo carefully

* Skip Tree.Load test for big-endian, since the test manually builds little-endian binary model

* Handle missing packages in Python tests

* Don't use boto3 in model compatibility tests

* Add s390 Docker file for local testing

* Add model compatibility tests

* Add R compatibility test

* Revert "Add R compatibility test"

This reverts commit c2d2bdcb7dbae133cbb927fcd20f7e83ee2b18a8.

Co-authored-by: Qi Zhang <q.zhang@ibm.com>
Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
2020-08-18 14:47:17 -07:00
Jiaming Yuan
4d99c58a5f Feature weights (#5962) 2020-08-18 19:55:41 +08:00
Jiaming Yuan
a418278064 Merge pull request #6023 from trivialfis/merge-rabit
Merge rabit
2020-08-18 09:01:56 +08:00
Philip Hyunsu Cho
14d5ce712c [CI] Fix Dask Pytest fixture (#6024) 2020-08-17 16:45:22 -07:00
fis
111968ca58 Merge rabit 2020-08-18 03:52:33 +08:00
fis
1c5904df3f Remove rabit. 2020-08-18 03:48:36 +08:00
Jiaming Yuan
d240463b38 Revert "Remove warning about memset. (#6003)" (#6020)
This reverts commit 12e3fb6a6c.
2020-08-17 20:10:15 +08:00
Philip Hyunsu Cho
511bb22ffd [Doc] Add dtreeviz as a showcase example of integration with 3rd-party software (#6013) 2020-08-13 20:53:59 -07:00
Philip Hyunsu Cho
e3ec7b01df [CI] Cancel builds on subsequent pushes (#6011)
* [CI] Cancel builds on subsequent pushes

* Use a more secure method

* test commit
2020-08-13 11:17:39 -07:00
Jiaming Yuan
674c409e9d Remove rabit dependency on public headers. (#6005) 2020-08-13 08:26:20 +08:00
Jiaming Yuan
12e3fb6a6c Remove warning about memset. (#6003) 2020-08-13 08:25:46 +08:00
Philip Hyunsu Cho
9adb812a0a RMM integration plugin (#5873)
* [CI] Add RMM as an optional dependency

* Replace caching allocator with pool allocator from RMM

* Revert "Replace caching allocator with pool allocator from RMM"

This reverts commit e15845d4e72e890c2babe31a988b26503a7d9038.

* Use rmm::mr::get_default_resource()

* Try setting default resource (doesn't work yet)

* Allocate pool_mr in the heap

* Prevent leaking pool_mr handle

* Separate EXPECT_DEATH() in separate test suite suffixed DeathTest

* Turn off death tests for RMM

* Address reviewer's feedback

* Prevent leaking of cuda_mr

* Fix Jenkinsfile syntax

* Remove unnecessary function in Jenkinsfile

* [CI] Install NCCL into RMM container

* Run Python tests

* Try building with RMM, CUDA 10.0

* Do not use RMM for CUDA 10.0 target

* Actually test for test_rmm flag

* Fix TestPythonGPU

* Use CNMeM allocator, since pool allocator doesn't yet support multiGPU

* Use 10.0 container to build RMM-enabled XGBoost

* Revert "Use 10.0 container to build RMM-enabled XGBoost"

This reverts commit 789021fa31112e25b683aef39fff375403060141.

* Fix Jenkinsfile

* [CI] Assign larger /dev/shm to NCCL

* Use 10.2 artifact to run multi-GPU Python tests

* Add CUDA 10.0 -> 11.0 cross-version test; remove CUDA 10.0 target

* Rename Conda env rmm_test -> gpu_test

* Use env var to opt into CNMeM pool for C++ tests

* Use identical CUDA version for RMM builds and tests

* Use Pytest fixtures to enable RMM pool in Python tests

* Move RMM to plugin/CMakeLists.txt; use PLUGIN_RMM

* Use per-device MR; use command arg in gtest

* Set CMake prefix path to use Conda env

* Use 0.15 nightly version of RMM

* Remove unnecessary header

* Fix a unit test when cudf is missing

* Add RMM demos

* Remove print()

* Use HostDeviceVector in GPU predictor

* Simplify pytest setup; use LocalCUDACluster fixture

* Address reviewers' commments

Co-authored-by: Hyunsu Cho <chohyu01@cs.wasshington.edu>
2020-08-12 01:26:02 -07:00
Jiaming Yuan
c3ea3b7e37 Fix nightly build doc. [skip ci] (#6004)
* Fix nightly build doc. [skip ci]

* Fix title too short. [skip ci]
2020-08-12 15:00:40 +08:00
Jiaming Yuan
ee70a2380b Unify CPU hist sketching (#5880) 2020-08-12 01:33:06 +08:00
jameskrach
bd6b7f4aa7 [Breaking] Fix .predict() method and add .predict_proba() in xgboost.dask.DaskXGBClassifier (#5986) 2020-08-11 16:11:28 +08:00
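After this change, the scikit-learn-style Dask classifier behaves like its single-node counterpart; a sketch, assuming a running dask.distributed `client` and dask arrays `X` and `y`:

```python
import xgboost as xgb

clf = xgb.dask.DaskXGBClassifier(n_estimators=10)
clf.client = client           # attach an existing dask.distributed client
clf.fit(X, y)
proba = clf.predict_proba(X)  # class probabilities, as in scikit-learn
labels = clf.predict(X)       # predicted class labels (the breaking fix)
```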
Jiaming Yuan
6f7112a848 Move warning about empty dataset. (#5998) 2020-08-11 14:10:51 +08:00
Jiaming Yuan
f93f1c03fc Rabit update. (#5978)
* Remove parameter on JVM Packages.
2020-08-11 09:17:32 +08:00
Jiaming Yuan
0b2a26fa74 Remove skmaker. (#5971) 2020-08-09 15:23:31 +08:00
Vladislav Epifanov
388f975cf5 Introducing DPC++-based plugin (predictor, objective function) supporting oneAPI programming model (#5825)
* Added plugin with DPC++-based predictor and objective function

* Update CMakeLists.txt

* Update regression_obj_oneapi.cc

* Added README.md for OneAPI plugin

* Added OneAPI predictor support to gbtree

* Update README.md

* Merged kernels in gradient computation. Enabled multiple loss functions with DPC++ backend

* Aligned plugin CMake files with latest master changes. Fixed whitespace typos

* Removed debug output

* [CI] Make oneapi_plugin a CMake target

* Added tests for OneAPI plugin for predictor and obj. functions

* Temporarily switched to the default selector for device dispatching in the OneAPI plugin, to enable execution in environments without GPUs

* Updated readme file.

* Fixed USM usage in predictor

* Removed workaround with explicit templated names for DPC++ kernels

* Fixed warnings in plugin tests

* Fix CMake build of gtest

Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
2020-08-08 18:40:40 -07:00
Anthony D'Amato
7cf3e9be59 Fix typo in tracker logging (#5994) 2020-08-09 03:45:46 +08:00
James Lamb
589b385ec6 [R] fix uses of 1:length(x) and other small things (#5992) 2020-08-09 03:31:33 +08:00
Jiaming Yuan
801e6b6800 Fix dask predict shape infer. (#5989) 2020-08-08 14:29:22 +08:00
Jiaming Yuan
4acdd7c6f6 Remove stop process. (#143) 2020-08-05 10:12:00 -07:00
Jiaming Yuan
9c6e791e64 Enforce tree order in JSON. (#5974)
* Make JSON model IO more future-proof by using the tree id in model loading.
2020-08-05 16:44:52 +08:00
Jiaming Yuan
dde9c5aaff Fix missing data warning. (#5969)
* Fix data warning.

* Add numpy/scipy test.
2020-08-05 16:19:12 +08:00
Jiaming Yuan
8599f87597 Update JSON schema. (#5982)
* Update JSON schema for pseudo huber.
* Update JSON model schema.
2020-08-05 15:21:11 +08:00
Jiaming Yuan
9c93531709 Update Python custom objective demo. (#5981) 2020-08-05 12:27:19 +08:00
Jiaming Yuan
1149a7a292 Fix sklearn doc. (#5980) 2020-08-05 12:26:19 +08:00
Jiaming Yuan
b069431c28 Export DaskDeviceQuantileDMatrix in doc. [skip ci] (#5975) 2020-08-05 00:48:10 +08:00
Shaochen Shi
71197d1dfa [jvm-packages] Fix wrong method name setAllowZeroForMissingValue. (#5740)
* Allow non-zero for missing value when training.

* Fix wrong method names.

* Add a unit test

* Move the getter/setter unit test to MissingValueHandlingSuite

Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
2020-08-01 17:16:42 -07:00
Philip Hyunsu Cho
5a2dcd1c33 [R] Provide better guidance for persisting XGBoost model (#5964)
* [R] Provide better guidance for persisting XGBoost model

* Update saving_model.rst

* Add a paragraph about xgb.serialize()
2020-07-31 20:00:26 -07:00
Philip Hyunsu Cho
bf2990e773 Add missing Pytest marks to AsyncIO unit test (#5968) 2020-08-01 10:56:24 +08:00
Philip Hyunsu Cho
5f3c811e84 [CI] Assign larger /dev/shm to NCCL (#5966)
* [CI] Assign larger /dev/shm to NCCL

* Use 10.2 artifact to run multi-GPU Python tests

* Add CUDA 10.0 -> 11.0 cross-version test; remove CUDA 10.0 target
2020-07-31 10:05:04 -07:00
Philip Hyunsu Cho
3fcfaad577 Add CMake flag to log C API invocations, to aid debugging (#5925)
* Add CMake flag to log C API invocations, to aid debugging

* Remove unnecessary parentheses
2020-07-30 19:24:28 -07:00
James Bourbeau
3b88bc948f Update XGBoost + Dask overview documentation (#5961)
* Add imports to code snippet

* Better writing.
2020-07-31 09:58:50 +08:00
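In the spirit of the copy-pastable example, a minimal end-to-end sketch:

```python
import dask.array as da
from dask.distributed import Client, LocalCluster
import xgboost as xgb

if __name__ == "__main__":
    with Client(LocalCluster(n_workers=2)) as client:
        X = da.random.random((100_000, 20), chunks=(10_000, 20))
        y = da.random.random(100_000, chunks=10_000)

        dtrain = xgb.dask.DaskDMatrix(client, X, y)
        output = xgb.dask.train(client, {"tree_method": "hist"}, dtrain,
                                num_boost_round=10)
        booster = output["booster"]  # trained model
        history = output["history"]  # evaluation history (empty without evals)
```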
Jiaming Yuan
70903c872f Force colored output for ninja build. (#5959) 2020-07-30 20:48:03 +08:00
boxdot
d268a2a463 Thread-safe prediction by making the prediction cache thread-local. (#5853)
Co-authored-by: Jiaming Yuan <jm.yuan@outlook.com>
2020-07-30 12:33:50 +08:00
Jiaming Yuan
fa3715f584 [Dask] Asyncio support. (#5862) 2020-07-30 06:23:58 +08:00
Jiaming Yuan
e4a273e1da Fix evaluate root split. (#5948) 2020-07-29 19:33:29 +08:00
Philip Hyunsu Cho
071e10c1d1 [CI] Fix broken Docker container 'cpu' (#5956) 2020-07-29 04:29:57 -07:00
Jiaming Yuan
f5fdcbe194 Disable feature validation on sklearn predict prob. (#5953)
* Fix issue when scikit learn interface receives transformed inputs.
2020-07-29 19:26:44 +08:00
Jiaming Yuan
18349a7ccf [Breaking] Fix custom metric for multi output. (#5954)
* Set output margin to true for custom metric.  This fixes only R and Python.
2020-07-29 19:25:27 +08:00
Jiaming Yuan
75b8c22b0b Fix prediction heuristic (#5955)
* Relax check for prediction.
* Relax test in spark test.
* Add tests in C++.
2020-07-29 19:24:07 +08:00
Philip Hyunsu Cho
5879acde9a [CI] Improve R linter script (#5944)
* [CI] Move lint to a separate script

* [CI] Improved lintr launcher

* Add lintr as a separate action

* Add custom parsing logic to print out logs

* Fix lintr issues in demos

* Run R demos

* Fix CRAN checks

* Install XGBoost into R env before running lintr

* Install devtools (needed to run demos)
2020-07-27 00:55:35 -07:00
Bobby Wang
8943eb4314 [BLOCKING] [jvm-packages] add gpu_hist and enable gpu scheduling (#5171)
* [jvm-packages] add gpu_hist tree method

* change updater hist to grow_quantile_histmaker

* add gpu scheduling

* pass correct parameters to xgboost library

* remove debug info

* add use.cuda for pom

* add CI for gpu_hist for jvm

* add gpu unit tests

* use gpu node to build jvm

* use nvidia-docker

* Add CLI interface to create_jni.py using argparse

Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
2020-07-26 21:53:24 -07:00
Philip Hyunsu Cho
6347fa1c2e [R] Enable weighted learning to rank (#5945)
* [R] enable weighted learning to rank

* Add R unit test for ranking

* Fix lint
2020-07-26 21:10:36 -07:00
Philip Hyunsu Cho
ace7fd328b [R] Add a compatibility layer to load Booster object from an old RDS file (#5940)
* [R] Add a compatibility layer to load Booster from an old RDS
* Modify QuantileHistMaker::LoadConfig() to be backward compatible with 1.1.x
* Add a big warning about compatibility in QuantileHistMaker::LoadConfig()
* Add testing suite
* Discourage use of saveRDS() in CRAN doc
2020-07-26 00:06:49 -07:00
Jiaming Yuan
40361043ae [BLOCKING] Remove to_string. (#5934) 2020-07-26 10:21:26 +08:00
Philip Hyunsu Cho
12110c900e [CI] Make Python model compatibility test runnable locally (#5941) 2020-07-25 16:58:02 -07:00
Philip Hyunsu Cho
487ab0ce73 [BLOCKING] Handle empty rows in data iterators correctly (#5929)
* [jvm-packages] Handle empty rows in data iterators correctly

* Fix clang-tidy error

* last empty row

* Add comments [skip ci]

Co-authored-by: Nan Zhu <nanzhu@uber.com>
2020-07-25 13:46:19 -07:00
FelixYBW
e6cd74ead3 Set a minimal reducer size and parent_down size (#139)
* Set a minimal reducer msg size. Receive the same data size from the parent each time.

* When a parent reads from a child, check that it receives the minimal reduce size.
 Fix bug. Rewrite the minimal reducer size check; make sure it is 1~N times the minimal reduce size.

 Assume the minimal reduce size is X; the logic here is:
 1: each child uploads total_size of message
 2: each parent receives at least X of message, up to total_size
 3: the parent reduces X, NxX, or total_size of message
 4: the parent sends X, NxX, or total_size of message to its parent
 5: the parent's parent receives at least X of message, up to total_size, then reduces X, NxX, or total_size of message
 6: the parent's parent sends X, NxX, or total_size of message to its children
 7: the parent receives X, NxX, or total_size of message and sends it to its children
 8: each child receives X, NxX, or total_size of message.

 During the whole process, each transfer is a (1~N)xX-byte message, or up to total_size.

 If X is larger than total_size, then allreduce always reduces the whole message and passes it down.

* Follow style check rule

* fix the cpplint check

* fix allreduce_base header seq

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-07-25 12:46:45 -07:00
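A small sketch of the sizing rule described above; illustration only, since the actual logic lives in rabit's C++ allreduce:

```python
def reducible_size(buffered: int, total_size: int, min_reduce: int) -> int:
    """How many bytes a node may reduce and pass on, given `buffered` bytes
    received so far: a multiple of X (= min_reduce), or everything once the
    full message has arrived (illustration only)."""
    if buffered >= total_size:
        return total_size                         # final chunk may be < X
    return (buffered // min_reduce) * min_reduce  # 0, X, 2X, ...
```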
Jiaming Yuan
a4de2f68e4 Use cudaOccupancyMaxPotentialBlockSize to calculate the block size. (#5926) 2020-07-23 14:24:42 +08:00
Jiaming Yuan
fbfbd525d8 Cache dependencies on Github Action. (#5928) 2020-07-23 14:00:19 +08:00
Philip Hyunsu Cho
4af857f95d Add explicit template specialization for portability (#5921)
* Add explicit template specializations

* Adding Specialization for FileAdapterBatch
2020-07-22 12:31:17 -07:00
Jiaming Yuan
bc1d3ee230 Fix r early stop with custom objective. (#5923)
* Specify `ntreelimit`.
2020-07-23 03:28:17 +08:00
Jiaming Yuan
30363d9c35 Remove R and JVM from appveyor. (#5922) 2020-07-23 03:26:48 +08:00
Jiaming Yuan
66cc1e02aa Setup github action. (#5917) 2020-07-22 15:05:25 +08:00
Philip Hyunsu Cho
627cf41a60 Add option to enable all compiler warnings in GCC/Clang (#5897)
* Add option to enable all compiler warnings in GCC/Clang

* Fix -Wall for CUDA sources

* Make -Wall private req for xgboost-r
2020-07-21 23:34:03 -07:00
Jiaming Yuan
9b688aca3b Fix mingw build with R. (#5918) 2020-07-22 02:56:49 +08:00
Philip Hyunsu Cho
8d7702766a [Doc] Document new objectives and metrics available on GPUs (#5909) 2020-07-21 02:10:59 -07:00
Jiaming Yuan
03fb98fbde Fix typo in CI. [skip ci] (#5919) 2020-07-21 14:25:27 +08:00
Jiaming Yuan
8b1afce316 Add Github Action for R. (#5911)
* Fix lintr errors.
2020-07-20 19:23:36 +08:00
Andy Adinets
b3d2e7644a Support building XGBoost with CUDA 11 (#5808)
* Change serialization test.
* Add CUDA 11 tests on Linux CI.

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-07-20 07:58:41 +08:00
Philip Hyunsu Cho
ac9136ee49 Further improvements and savings in Jenkins pipeline (#5904)
* Publish artifacts only on the master and release branches

* Build CUDA only for Compute Capability 7.5 when building PRs

* Run all Windows jobs in a single worker image

* Build nightly XGBoost4J SNAPSHOT JARs with Scala 2.12 only

* Show skipped Python tests on Windows

* Make Graphviz optional for Python tests

* Add back C++ tests

* Unstash xgboost_cpp_tests

* Fix label to CUDA 10.1

* Install cuPy for CUDA 10.1

* Install jsonschema

* Address reviewer's feedback
2020-07-18 03:30:40 -07:00
Jiaming Yuan
6c0c87216f Fix Windows 2016 build. (#5902) 2020-07-18 05:50:17 +08:00
Philip Hyunsu Cho
71b0528a2f GPU implementation of AFT survival objective and metric (#5714)
* Add interval accuracy

* De-virtualize AFT functions

* Lint

* Refactor AFT metric using GPU-CPU reducer

* Fix R build

* Fix build on Windows

* Fix copyright header

* Clang-tidy

* Fix crashing demo

* Fix typos in comment; explain GPU ID

* Remove unnecessary #include

* Add C++ test for interval accuracy

* Fix a bug in accuracy metric: use log pred

* Refactor AFT objective using GPU-CPU Transform

* Lint

* Fix lint

* Use Ninja to speed up build

* Use time, not /usr/bin/time

* Add cpu_build worker class, with concurrency = 1

* Use concurrency = 1 only for CUDA build

* concurrency = 1 for clang-tidy

* Address reviewer's feedback

* Update link to AFT paper
2020-07-17 01:18:13 -07:00
Jiaming Yuan
7c2686146e Dask device dmatrix (#5901)
* Fix softprob with empty dmatrix.
2020-07-17 13:17:43 +08:00
Jiaming Yuan
e471056ec4 Fix sketch size calculation. (#5898) 2020-07-17 08:33:16 +08:00
Bobby Wang
730866a7bc [CI] update spark version to 3.0.0 (#5890)
* [CI] update spark version to 3.0.0

* Update Dockerfile.jvm_cross

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-07-16 00:23:44 -07:00
Jiaming Yuan
029a8b533f Simplify the data backends. (#5893) 2020-07-16 15:17:31 +08:00
Philip Hyunsu Cho
7aee0e51ed Fix R package build with CMake 3.13 (#5895)
* Fix R package build with CMake 3.13

* Require OpenMP for xgboost-r target
2020-07-15 20:22:11 -07:00
Philip Hyunsu Cho
3c40f4a7f5 [CI] Reduce load on Windows CI pipeline (#5892) 2020-07-14 18:47:05 -07:00
Jiaming Yuan
3cae287dea Fix NDK Build. (#5886)
* Explicit cast for slice.
2020-07-14 18:34:19 +08:00
Alexander Gugel
970b4b3fa2 Add XGBoosterGetNumFeature (#5856)
- add GetNumFeature to Learner
- add XGBoosterGetNumFeature to C API
- update c-api-demo accordingly
2020-07-13 23:25:17 -07:00
Philip Hyunsu Cho
e0c179c7cc [CI] Enforce daily budget in Jenkins CI (#5884)
* [CI] Throttle Jenkins CI

* Don't use Jenkins master instance
2020-07-13 21:51:11 -07:00
Jiaming Yuan
dd445af56e Cleanup on device sketch. (#5874)
* Remove old functions.

* Merge weighted and un-weighted into a common interface.
2020-07-14 10:15:54 +08:00
Bobby Wang
9f85e92602 [jvm-packages] update spark dependency to 3.0.0 (#5836) 2020-07-12 20:58:30 -07:00
Philip Hyunsu Cho
23e2c6ec91 Upgrade Rabit (#5876) 2020-07-09 16:18:33 -07:00
Zhang Zhang
1813804e36 Add new parameter singlePrecisionHistogram to xgboost4j-spark (#5811)
Expose the existing 'singlePrecisionHistogram' param to the Spark layer.
2020-07-08 16:29:35 -07:00
Philip Hyunsu Cho
0d411b0397 [CI] Simplify CMake build with modern CMake techniques (#5871)
* [CI] Simplify CMake build

* Make sure that plugins can be built

* [CI] Install lz4 on Mac
2020-07-08 04:23:24 -07:00
Philip Hyunsu Cho
22a31b1faa [Doc] Document that CUDA 10.0 is required [skip ci] (#5872) 2020-07-07 18:55:19 -07:00
Rong Ou
06320729d4 fix device sketch with weights in external memory mode (#5870) 2020-07-08 08:44:07 +08:00
Jiaming Yuan
d0a29c3135 Remove print. (#5867) 2020-07-08 04:12:14 +08:00
Jiaming Yuan
a3ec964346 Accept iterator in device dmatrix. (#5783)
* Remove Device DMatrix.
2020-07-07 21:44:48 +08:00
Jiaming Yuan
048d969be4 Implement GK sketching on GPU. (#5846)
* Implement GK sketching on GPU.
* Strong tests on quantile building.
* Handle sparse dataset by binary searching the column index.
* Hypothesis test on dask.
2020-07-07 12:16:21 +08:00
Andy Adinets
ac3f0e78dc Split Features into Groups to Compute Histograms in Shared Memory (#5795) 2020-07-07 15:04:35 +12:00
Jiaming Yuan
93c44a9a64 Move feature names and types of DMatrix from Python to C++. (#5858)
* Add thread local return entry for DMatrix.
* Save feature name and feature type in binary file.

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-07-07 09:40:13 +08:00
Jiaming Yuan
4b0852ee41 Use dmlc stream when URI protocol is not local file. (#5857) 2020-07-07 03:07:12 +08:00
Alexander Gugel
0f17e35bce Add c-api-demo to .gitignore (#5855) 2020-07-05 04:35:22 +08:00
Philip Hyunsu Cho
efe3e48ae2 Ensure that LoadSequentialFile() actually read the whole file (#5831) 2020-07-04 16:17:11 +08:00
Jiaming Yuan
1a0801238e Implement iterative DMatrix. (#5837) 2020-07-03 11:44:52 +08:00
Jiaming Yuan
4d277d750d Relax linear test. (#5849)
* Increased error in coordinate is mostly due to floating-point error.
* Shotgun uses Hogwild!, which is non-deterministic and can have even greater floating-point error.
2020-07-03 07:49:53 +08:00
Jiaming Yuan
eb067c1c34 Relax test for shotgun. (#5835) 2020-07-01 19:20:29 +08:00
Jiaming Yuan
90a9c68874 Implement a DMatrix Proxy. (#5803) 2020-06-29 15:03:10 +08:00
Philip Hyunsu Cho
74bf00a5ab De-duplicate macro _CRT_SECURE_NO_WARNINGS / _CRT_SECURE_NO_DEPRECATE (#136)
* De-duplicate macro _CRT_SECURE_NO_WARNINGS / _CRT_SECURE_NO_DEPRECATE

* Move all macros to base.h

* Fix CI
2020-06-28 09:51:50 -07:00
Jiaming Yuan
47c89775d6 Accept string for ArrayInterface constructor. (#5799) 2020-06-27 00:06:54 +08:00
Yuan Tang
95f11ed27e Rename Ant Financial to Ant Group (#5827) 2020-06-25 15:25:36 -04:00
Jiaming Yuan
8234091368 Remove unweighted GK quantile. (#5816) 2020-06-23 14:27:46 +08:00
Philip Hyunsu Cho
dcff96ed27 [Doc] Fix rendering of Markdown docs, e.g. R doc (#5821) 2020-06-21 23:49:22 -07:00
Jiaming Yuan
8104f10328 Update document for model dump. (#5818)
* Clarify the relationship between dump and save.
* Mention the schema.
2020-06-22 14:33:54 +08:00
Jiaming Yuan
26143ad0b1 Update rabit. (#5680) 2020-06-22 14:32:43 +08:00
Jiaming Yuan
c4d721200a Implement extend method for meta info. (#5800)
* Implement extend for host device vector.
2020-06-20 03:32:03 +08:00
Philip Hyunsu Cho
a6d9a06b7b [CI] Fix cuDF install; merge 'gpu' and 'cudf' test suite (#5814) 2020-06-19 16:42:57 +08:00
Philip Hyunsu Cho
a67bc64819 Add an option to run brute-force test for JSON round-trip (#5804)
* Add an option to run brute-force test for JSON round-trip

* Apply reviewer's feedback

* Remove unneeded objects

* Parallel run.

* Max.

* Use signed 64-bit loop var, to support MSVC

* Add exhaustive test to CI

* Run JSON test in Win build worker

* Revert "Run JSON test in Win build worker"

This reverts commit c97b2c7dda37b3585b445d36961605b79552ca89.

* Revert "Add exhaustive test to CI"

This reverts commit c149c2ce9971a07a7289f9b9bc247818afd5a667.

Co-authored-by: fis <jm.yuan@outlook.com>
2020-06-17 23:46:02 -07:00
Rory Mitchell
abdf894fcf Add cupy to Windows CI (#5797)
* Add cupy to Windows CI

* Update Jenkinsfile-win64

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>

* Update Jenkinsfile-win64

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>

* Update tests/python-gpu/test_gpu_prediction.py

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-06-17 21:55:09 -07:00
Jiaming Yuan
38ee514787 Implement fast number serialization routines. (#5772)
* Implement ryu algorithm.
* Implement integer printing.
* Full coverage roundtrip test.
2020-06-17 12:39:23 +08:00
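The guarantee behind a ryu-style printer is that the shortest decimal form of every finite float parses back to the identical bits. A sketch of that round-trip property using Python's `repr` (which already emits shortest representations), not the C++ routines from this PR:

```python
import json
import random
import struct

random.seed(42)
for _ in range(10_000):
    # Draw a random bit pattern and reinterpret it as a double.
    bits = random.getrandbits(64)
    x = struct.unpack("<d", struct.pack("<Q", bits))[0]
    if x != x:
        continue  # NaN never compares equal, so skip it.
    # Shortest-form printing must parse back to the identical value.
    assert json.loads(json.dumps(x)) == x
```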
fis
7c3a168ffd Revert "Accept string for ArrayInterface constructor."
This reverts commit e8ecafb8dc.
2020-06-16 20:02:35 +08:00
fis
e8ecafb8dc Accept string for ArrayInterface constructor. 2020-06-16 20:00:24 +08:00
Rory Mitchell
b47b5ac771 Use hypothesis (#5759)
* Use hypothesis

* Allow int64 array interface for groups

* Add packages to Windows CI

* Add to travis

* Make sure device index is set correctly

* Fix dask-cudf test

* appveyor
2020-06-16 12:45:59 +12:00
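Hypothesis generates randomized inputs against declared properties instead of fixed test cases. A minimal sketch in the style this PR adopts — the strategy and the property here are illustrative, not taken from the PR:

```python
import numpy as np
import xgboost as xgb
from hypothesis import given, settings, strategies as st

@given(st.integers(min_value=16, max_value=128),
       st.integers(min_value=1, max_value=8))
@settings(deadline=None, max_examples=20)
def test_dmatrix_dimensions(n_rows, n_cols):
    # Property: DMatrix reports the shape of whatever data it was fed.
    X = np.random.rand(n_rows, n_cols)
    dtrain = xgb.DMatrix(X, label=np.random.rand(n_rows))
    assert dtrain.num_row() == n_rows
    assert dtrain.num_col() == n_cols
```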
Ram Rachum
02884b08aa Fix exception causes all over the codebase (#5787) 2020-06-15 21:06:07 +08:00
Alex
ae18a094b0 Add new skl model attribute for number of features (#5780) 2020-06-15 18:01:59 +08:00
James Lamb
d39da42e69 [R] Remove dependency on gendef for Visual Studio builds (fixes #5608) (#5764)
* [R-package] Remove dependency on gendef for Visual Studio builds (fixes #5608)

* clarify docs

* removed debugging print statement

* Make R CMake install more robust

* Fix doc format; add ToC

* Update build.rst

* Fix AppVeyor

Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
2020-06-15 00:20:44 +00:00
Jiaming Yuan
529b5c2cfd [DOC] Mention dask blog post in doc. [skip ci] (#5789) 2020-06-14 13:00:19 +08:00
anttisaukko
1bcbe1fc14 Bump com.esotericsoftware to 4.0.2 (#5690)
Co-authored-by: Antti Saukko <antti.saukko@verizonmedia.com>
2020-06-13 21:06:14 -07:00
Jiaming Yuan
1fa84b61c1 Implement Empty method for host device vector. (#5781)
* Fix accessing nullptr.
2020-06-13 19:02:26 +08:00
Jiaming Yuan
306e38ff31 Avoid including c_api.h in header files. (#5782) 2020-06-12 16:24:24 +08:00
Jiaming Yuan
3028fa6b42 Implement weighted sketching for adapter. (#5760)
* Bounded memory tests.
* Fixed memory estimation.
2020-06-12 06:20:39 +08:00
James Lamb
c35be9dc40 [R] replace uses of T and F with TRUE and FALSE (#5778)
* [R-package] replace uses of T and F with TRUE and FALSE

* enable linting

* Remove skip

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-06-11 06:08:02 -04:00
Elliot Hershberg
cb7f7e542c Added conda environment file for building docs (#5773) 2020-06-11 16:51:24 +08:00
James Lamb
c96e1ef283 [python-package] remove unused imports (#5776) 2020-06-11 16:50:27 +08:00
Philip Hyunsu Cho
1d22a9be1c Revert "Reorder includes. (#5749)" (#5771)
This reverts commit d3a0efbf16.
2020-06-09 10:29:28 -07:00
Philip Hyunsu Cho
d087a12b04 Add release note for 1.1.0 in NEWS.md (#5763)
* Add release note for 1.1.0 in NEWS.md

* Address reviewer's feedback
2020-06-08 14:16:10 -07:00
Philip Hyunsu Cho
b5ab009c19 Document addition of new committer @SmirnovEgorRu (#5762) 2020-06-07 22:57:49 -07:00
Jiaming Yuan
cacff9232a Remove column major specialization. (#5755)
Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
2020-06-05 16:19:14 +08:00
Philip Hyunsu Cho
8fe7f5dc43 [CI] Pass new cpplint check (#141) 2020-06-05 00:45:53 -07:00
Jiaming Yuan
bd9d57f579 Add helper for generating batches of data. (#5756)
* Add helper for generating batches of data.

* VC keyword clash.

* Another clash.
2020-06-05 09:53:56 +08:00
Rory Mitchell
359023c0fa Speed up python test (#5752)
* Speed up tests

* Prevent DeviceQuantileDMatrix initialisation with numpy

* Use joblib.memory

* Use RandomState
2020-06-05 11:39:24 +12:00
Jiaming Yuan
cfc23c6a6b Remove max.depth in R gblinear example. (#5753) 2020-06-04 02:59:22 +08:00
Jiaming Yuan
d3a0efbf16 Reorder includes. (#5749)
* Reorder includes.

* R.
2020-06-03 17:30:47 +12:00
ShvetsKS
cd3d14ad0e Add float32 histogram (#5624)
* new single_precision_histogram param was added.

Co-authored-by: SHVETS, KIRILL <kirill.shvets@intel.com>
Co-authored-by: fis <jm.yuan@outlook.com>
2020-06-03 11:24:53 +08:00
Jiaming Yuan
e49607af19 Add Python binding for rabit ops. (#5743) 2020-06-02 19:47:23 +08:00
Jiaming Yuan
e533908922 Expose device sketching in header. (#5747) 2020-06-02 13:02:53 +08:00
Peter Jung
0be0e6fd88 Add pkgconfig to cmake (#5744)
* Add pkgconfig to cmake

* Move xgboost.pc.in to cmake/

Co-authored-by: Peter Jung <peter.jung@heureka.cz>
Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
2020-06-01 18:22:33 -07:00
Philip Hyunsu Cho
b77e3e3fcc [CI] Remove CUDA 9.0 from CI (#5745) 2020-06-01 18:15:45 -07:00
Jiaming Yuan
325156c7a9 Bump version in header. (#5742) 2020-06-01 18:21:18 +08:00
Jiaming Yuan
d19cec70f1 Don't use mask in array interface. (#5730) 2020-06-01 12:17:24 +08:00
Peter Jung
267c1ed784 Add swift package reference (#5728)
Co-authored-by: Peter Jung <peter.jung@heureka.cz>
2020-06-01 15:29:23 +12:00
Philip Hyunsu Cho
073b625bde Bump version to 1.2.0 snapshot in master (#5733) 2020-05-31 00:11:34 -07:00
Jiaming Yuan
9e1b29944e Fix loading old model. (#5724)
* Add test.
2020-05-31 14:55:32 +08:00
ShvetsKS
057c762ecd Fix release degradation (#5720)
* fix release degradation, related to #5666

* less resizes

Co-authored-by: SHVETS, KIRILL <kirill.shvets@intel.com>
2020-05-31 04:37:54 +03:00
Peter Jung
251dc8a663 Allow pass fmap to importance plot (#5719)
Co-authored-by: Peter Jung <peter.jung@heureka.cz>
Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
2020-05-29 19:55:35 +08:00
Rory Mitchell
f779980f7e gpu_hist performance tweaks (#5707)
* Remove device vectors

* Remove allreduce synchronize

* Remove double buffer
2020-05-29 16:48:53 +12:00
Philip Hyunsu Cho
ca0d605b34 [Doc] Fix typos in AFT tutorial (#5716) 2020-05-28 14:04:34 -07:00
Jiaming Yuan
35e2205256 [dask] Return GPU Series when input is from cuDF. (#5710)
* Refactor predict function.
2020-05-28 17:51:20 +08:00
Philip Hyunsu Cho
91c646392d Require Python 3.6+; drop Python 3.5 from CI (#5715) 2020-05-27 16:19:30 -07:00
Philip Hyunsu Cho
fdbb6ae856 Require CUDA 10.0+ in CMake build (#5718) 2020-05-27 16:18:18 -07:00
Jiaming Yuan
75a0025a3d [CI] Remove CUDA 9.0 from Windows CI. (#5674)
* Remove CUDA 9.0 on Windows CI.

* Require cuda10 tag, to differentiate

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-05-27 12:23:36 -07:00
Dmitry Mottl
78b4e95f25 Changed build.rst (binary wheels are supported for macOS also) (#5711) 2020-05-27 07:18:45 -07:00
Philip Hyunsu Cho
e3aa7f1441 Define _CRT_SECURE_NO_WARNINGS to remove unneeded warnings in MSVC (#5434) 2020-05-25 22:46:07 -07:00
Jiaming Yuan
f145241593 Let XGBoostError inherit ValueError. (#5696) 2020-05-26 08:34:56 +08:00
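With this change, library errors can be caught with a plain `except ValueError`. A small sketch; the invalid-file trigger is just an illustration:

```python
import xgboost as xgb

try:
    xgb.DMatrix("no-such-file.does-not-exist")  # raises XGBoostError
except ValueError as err:
    # XGBoostError now subclasses ValueError, so this handler fires.
    print("caught", type(err).__name__)
```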
Jiaming Yuan
8438c7d0e4 Fix IsDense. (#5702) 2020-05-26 08:24:37 +08:00
Philip Hyunsu Cho
e35ad8a074 [R] Fix duplicated libomp.dylib error on Mac OSX (#5701) 2020-05-24 23:37:33 -07:00
Jiaming Yuan
1ba24a7597 Remove redundant sketching. (#5700) 2020-05-24 08:47:20 +08:00
James Lamb
f656ef2fed [R-package] Reduce duplication in configure.ac (#5693)
* updated configure
2020-05-22 12:15:22 +08:00
Jiaming Yuan
5af8161a1a Implement Python data handler. (#5689)
* Define data handlers for DMatrix.
* Throw ValueError in scikit learn interface.
2020-05-22 11:53:55 +08:00
Andy Adinets
646def51e0 C++14 for xgboost (#5664) 2020-05-21 12:26:40 +12:00
Lorenz Walthert
60511a3222 Document more objective parameters in R package (#5682) 2020-05-20 14:00:55 +08:00
ShvetsKS
dd01e4ba8d Distributed optimizations for 'hist' method with CPUs (#5557)
Co-authored-by: SHVETS, KIRILL <kirill.shvets@intel.com>
2020-05-20 06:03:03 +03:00
Rong Ou
e21a608552 add pointers to the gpu external memory paper (#5684) 2020-05-19 19:46:16 -07:00
Jiaming Yuan
7903286961 Remove silent from R demos. (#5675)
* Remove silent from R demos.

* Vignettes.
2020-05-19 18:20:46 +08:00
Jiaming Yuan
a6008d5d93 Add RABIT_DLL tag to definitions of rabit APIs. (#140)
* Add RABIT_DLL tag to definitions of rabit APIs.
* Fix Travis tests.
2020-05-19 18:20:31 +08:00
Jiaming Yuan
dd9aeb60ae [JVM Packages] Catch dmlc error by ref. (#5678) 2020-05-19 13:00:12 +08:00
LionOrCatThatIsTheQuestion
83981a9ce3 Pseudo-huber loss metric added (#5647)
- Add pseudo huber loss objective.
- Add pseudo huber loss metric.

Co-authored-by: Reetz <s02reetz@iavgroup.local>
2020-05-18 21:08:07 +08:00
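Pseudo-Huber behaves quadratically near zero and linearly for large residuals: L(r) = δ²(√(1 + (r/δ)²) − 1). A hedged sketch of the same math written as a custom objective and metric through the era-appropriate `obj`/`feval` hooks, with slope δ = 1 assumed; the built-in implementation added here lives in C++:

```python
import numpy as np
import xgboost as xgb

def pseudo_huber_obj(predt, dtrain, delta=1.0):
    # L(r) = delta^2 * (sqrt(1 + (r/delta)^2) - 1), with r = predt - label.
    r = predt - dtrain.get_label()
    scale = 1.0 + (r / delta) ** 2
    grad = r / np.sqrt(scale)   # dL/dr
    hess = scale ** -1.5        # d2L/dr2
    return grad, hess

def pseudo_huber_metric(predt, dtrain, delta=1.0):
    r = predt - dtrain.get_label()
    loss = delta ** 2 * (np.sqrt(1.0 + (r / delta) ** 2) - 1.0)
    return "pseudo-huber", float(np.mean(loss))

X = np.random.rand(256, 4)
dtrain = xgb.DMatrix(X, label=X.sum(axis=1))
xgb.train({"tree_method": "hist"}, dtrain, num_boost_round=10,
          obj=pseudo_huber_obj, feval=pseudo_huber_metric,
          evals=[(dtrain, "train")])
```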
Jiaming Yuan
535479e69f Add JSON schema to model dump. (#5660) 2020-05-15 10:18:43 +08:00
Jiaming Yuan
2c1a439869 Update Python demos with tests. (#5651)
* Remove GPU memory usage demo.
* Add tests for demos.
* Remove `silent`.
* Remove shebang as it's not portable.
2020-05-12 12:04:42 +08:00
Oleksandr Kuvshynov
4e64e2ef8e skip missing lookup if nothing is missing in CPU hist partition kernel. (#5644)
* [xgboost] skip missing lookup if nothing is missing
2020-05-12 05:50:08 +03:00
Jiaming Yuan
9ad40901a8 Upgrade to CUDA 10.0 (#5649) (#5652)
Co-authored-by: fis <jm.yuan@outlook.com>

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-05-11 22:27:36 +08:00
Rory Mitchell
fcf57823b6 Reduce device synchronisation (#5631)
* Reduce device synchronisation

* Initialise pinned memory
2020-05-07 21:19:46 +12:00
Rory Mitchell
9910265064 Resolve vector<bool>::iterator crash (#5642) 2020-05-07 21:18:01 +12:00
Jiaming Yuan
21ed1f0c6d Support 64bit seed. (#5643) 2020-05-07 14:52:38 +08:00
Jiaming Yuan
eaf2a00b5c Enhance nvtx support. (#5636) 2020-05-06 22:54:24 +08:00
Jiaming Yuan
67d267f9da Move device dmatrix construction code into ellpack. (#5623) 2020-05-06 19:43:59 +08:00
Jiaming Yuan
33e052b1e5 Remove dead code. (#5635) 2020-05-06 17:03:48 +08:00
Philip Hyunsu Cho
8de7f1928e Fix build on big endian CPUs (#5617)
* Fix build on big endian CPUs

* Clang-tidy
2020-04-29 21:56:34 -07:00
Rory Mitchell
b9649e7b8e Refactor gpu_hist split evaluation (#5610)
* Refactor

* Rewrite evaluate splits

* Add more tests
2020-04-30 08:58:12 +12:00
Yuan Tang
dfcdfabf1f Move dask tutorial closer other distributed tutorials (#5613) 2020-04-28 02:24:00 +08:00
Jiaming Yuan
c90457f489 Refactor the CLI. (#5574)
* Enable parameter validation.
* Enable JSON.
* Catch `dmlc::Error`.
* Show help message.
2020-04-26 10:56:33 +08:00
Jiaming Yuan
7d93932423 Better message when no GPU is found. (#5594) 2020-04-26 10:00:57 +08:00
Jason E. Aten, Ph.D
8dfe7b3686 Clarify meaning of training parameter in XGBoosterPredict() (#5604)
Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
Co-authored-by: Jiaming Yuan <jm.yuan@outlook.com>
2020-04-25 16:48:42 -07:00
Philip Hyunsu Cho
4fd95272c8 Instruct Mac users to install libomp (#5606) 2020-04-25 15:50:30 -07:00
Philip Hyunsu Cho
474cfddf91 [R] Address warnings to comply with CRAN submission policy (#5600)
* [R] Address warnings to comply with CRAN submission policy

* Include <xgboost/logging.h>
2020-04-25 13:34:36 -07:00
Philip Hyunsu Cho
a23de1c108 [CI] Grant public read access to Mac OSX wheels (#5602) 2020-04-25 11:51:26 -07:00
Philip Hyunsu Cho
f68155de6c Fix compilation on Mac OSX High Sierra (10.13) (#5597)
* Fix compilation on Mac OSX High Sierra

* [CI] Build Mac OSX binary wheel using Travis CI
2020-04-25 10:53:03 -07:00
Jiaming Yuan
e726dd9902 Set device in device dmatrix. (#5596) 2020-04-25 13:42:53 +08:00
Philip Hyunsu Cho
ef26bc45bf Hide C++ symbols in libxgboost.so when building Python wheel (#5590)
* Hide C++ symbols in libxgboost.so when building Python wheel

* Update Jenkinsfile

* Add test

* Upgrade rabit

* Add setup.py option.

Co-authored-by: fis <jm.yuan@outlook.com>
2020-04-24 13:32:05 -07:00
Philip Hyunsu Cho
4fb34a008d Use 'default' visibility for C symbols (#138) 2020-04-23 20:48:52 -07:00
Rory Mitchell
660be66207 Avoid rabit calls in learner configuration (#5581) 2020-04-24 14:59:29 +12:00
Philip Hyunsu Cho
92913aaf7f [CI] Use Vault repository to re-gain access to devtoolset-4 (#5589)
* [CI] Use Vault repository to re-gain access to devtoolset-4

* Use manylinux2010 tag

* Update Dockerfile.jvm

* Fix rename_whl.py

* Upgrade Pip, to handle manylinux2010 tag

* Update insert_vcomp140.py

* Update test_python.sh
2020-04-23 18:53:54 -07:00
Philip Hyunsu Cho
e4f5b6c84f Port R compatibility patches from 1.0.0 release branch (#5577)
* Don't use memset to set struct when compiling for R

* Support 32-bit Solaris target for R package
2020-04-21 22:51:18 -07:00
Jiaming Yuan
f27b6f9ba6 Update document. (#5572) 2020-04-22 02:37:37 +08:00
Jiaming Yuan
c355ab65ed Enable parameter validation for R. (#5569)
* Enable parameter validation for R.

* Add test.
2020-04-21 11:19:09 -07:00
Jiaming Yuan
564b22cee5 Restore attributes in complete. (#5573) 2020-04-21 11:06:55 -07:00
Rory Mitchell
a734f52807 Use cudaDeviceGetAttribute instead of cudaGetDeviceProperties (#5570) 2020-04-21 14:58:29 +12:00
Andy Adinets
73142041b9 For histograms, opting into maximum shared memory available per block. (#5491) 2020-04-21 14:56:42 +12:00
Jiaming Yuan
9c1103e06c [Breaking] Set output margin to True for custom objective. (#5564)
* Set output margin to True for custom objective in Python and R.

* Add a demo for writing multi-class custom objective function.

* Run tests on selected demos.
2020-04-20 20:44:12 +08:00
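After this breaking change a custom objective always receives raw margin scores, so a multi-class objective must apply the softmax itself. A sketch modeled on the demo this PR adds; the class count and parameter choices are assumptions:

```python
import numpy as np
import xgboost as xgb

kClasses = 3  # assumed known for this sketch

def softprob_obj(predt, dtrain):
    # `predt` is now the raw margin; apply softmax inside the objective.
    margin = predt.reshape(-1, kClasses)
    e = np.exp(margin - margin.max(axis=1, keepdims=True))
    prob = e / e.sum(axis=1, keepdims=True)
    labels = dtrain.get_label().astype(int)
    grad = prob.copy()
    grad[np.arange(labels.size), labels] -= 1.0
    hess = 2.0 * prob * (1.0 - prob)
    return grad.reshape(-1), hess.reshape(-1)

X = np.random.rand(300, 5)
y = np.random.randint(0, kClasses, size=300)
dtrain = xgb.DMatrix(X, label=y)
xgb.train({"num_class": kClasses, "disable_default_eval_metric": 1},
          dtrain, num_boost_round=5, obj=softprob_obj)
```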
Jiaming Yuan
fcbedcedf8 Fix configuration in load model. (#5562) 2020-04-20 17:25:11 +08:00
Jiaming Yuan
29a4cfe400 Group aware GPU sketching. (#5551)
* Group aware GPU weighted sketching.

* Distribute group weights to each data point.
* Relax the test.
* Validate input meta info.
* Fix metainfo copy ctor.
2020-04-20 17:18:52 +08:00
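In ranking data, weights arrive per query group while sketching consumes per-row weights. A two-line NumPy sketch of the distribution step this PR performs (the numbers are illustrative):

```python
import numpy as np

group_sizes = np.array([3, 2, 4])          # rows per query group
group_weights = np.array([0.5, 2.0, 1.0])  # one weight per group

# Distribute each group's weight to every row inside it.
row_weights = np.repeat(group_weights, group_sizes)
print(row_weights)  # [0.5 0.5 0.5 2.  2.  1.  1.  1.  1. ]
```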
Liang-Chi Hsieh
397d8f0ee7 [jvm-packages] XGBoost Spark should deal with NaN when parsing evaluation output (#5546) 2020-04-19 23:10:30 -07:00
Jiaming Yuan
b809f5d8b8 Don't set seed on CLI interface. (#5563) 2020-04-20 12:17:03 +08:00
Jiaming Yuan
ccd30e4491 Fix non-openmp build. (#5566)
* Add test to Jenkins.
* Fix threading utils tests.
* Require thread library.
2020-04-20 12:16:38 +08:00
Rory Mitchell
b2827a80e1 Use non-synchronising scan (#5560) 2020-04-20 15:51:34 +12:00
Rory Mitchell
d6d1035950 gpu_hist performance fixes (#5558)
* Remove unnecessary cuda API calls

* Fix histogram memory growth
2020-04-19 12:21:13 +12:00
Jiaming Yuan
e1f22baf8c Fix slice and get info. (#5552) 2020-04-18 18:00:13 +08:00
Jiaming Yuan
c245eb8755 Fix r interaction constraints (#5543)
* Unify the parsing code.

* Cleanup.
2020-04-18 06:53:51 +08:00
Jiaming Yuan
93df871c8c Assert matching length of evaluation inputs. (#5540) 2020-04-18 06:52:55 +08:00
Jiaming Yuan
c69a19e2b1 Fix skl nan tag. (#5538) 2020-04-18 06:52:17 +08:00
Jiaming Yuan
cfee9fae91 Don't use uint for threads. (#5542) 2020-04-17 09:45:42 +08:00
Jiaming Yuan
bb29ce2818 Add missing aft parameters. [skip ci] (#5553) 2020-04-16 12:08:55 -07:00
ShvetsKS
a2d86b8e4b Optimizations for RNG in InitData kernel (#5522)
* optimizations for subsampling in InitData

* optimizations for subsampling in InitData

Co-authored-by: SHVETS, KIRILL <kirill.shvets@intel.com>
2020-04-16 18:24:32 +03:00
Rory Mitchell
e268fb0093 Use thrust functions instead of custom functions (#5544) 2020-04-16 21:41:16 +12:00
Melissa Kohl
6a169cd41a Fix uninitialized value bug in xgboost callback (#5463)
Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2020-04-16 07:50:54 +08:00
Jiaming Yuan
468b1594d3 Fix CLI model IO. (#5535)
* Add test for comparing Python and CLI training result.
2020-04-16 07:48:47 +08:00
Philip Hyunsu Cho
0676a19e70 [jvm-packages] [CI] Publish XGBoost4J JARs with Scala 2.11 and 2.12 (#5539) 2020-04-15 09:32:02 -07:00
Philip Hyunsu Cho
ec02f40d42 [CI] Use Ubuntu 18.04 LTS in JVM CI, because 19.04 is EOL (#5537) 2020-04-15 07:32:46 -07:00
Jiaming Yuan
8b04736b81 [dask] dask cudf inplace prediction. (#5512)
* Add inplace prediction for dask-cudf.

* Remove Dockerfile.release, since it's not used anywhere

* Use Conda exclusively in CUDF and GPU containers

* Improve cupy memory copying.

* Add skip marks to tests.

* Add mgpu-cudf category on the CI to run all distributed tests.

Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
2020-04-15 18:15:51 +08:00
Rory Mitchell
ca4e05660e Purge device_helpers.cuh (#5534)
* Simplifications with caching_device_vector

* Purge device helpers
2020-04-15 21:51:56 +12:00
Jiaming Yuan
a2f54963b6 Write binary header. (#5532) 2020-04-15 17:47:57 +08:00
Philip Hyunsu Cho
1b1969f20d [jvm-packages] [CI] Create a Maven repository to host SNAPSHOT JARs (#5533) 2020-04-14 19:33:32 -07:00
Kamil A. Kaczmarek
2809fb8b6f Add Neptune and Optuna to list of examples (#5528) 2020-04-14 11:00:50 -07:00
Jiaming Yuan
c90119eb67 Update Python doc. [skip ci] (#5517)
* Update doc for copying booster. [skip ci]

The issue is resolved in #5312.

* Add version for new APIs. [skip ci]
2020-04-14 16:25:20 +08:00
Philip Hyunsu Cho
88b64c8162 Ensure that configured dmlc/build_config.h is picked up by Rabit and XGBoost (#5514)
* Ensure that configured header (build_config.h) from dmlc-core is picked up by Rabit and XGBoost

* Check which Rabit target is being used

* Use CMake 3.13 in all Jenkins tests

* Upgrade CMake in Travis CI

* Install CMake using Kitware installer

* Remove existing CMake (3.12.4)
2020-04-11 23:48:28 -07:00
Nicolas Scozzaro
04f69b43e6 fix typo "customized" (#5515) 2020-04-12 14:43:48 +08:00
Liang-Chi Hsieh
449ab79e0c [CI] Use devtoolset-6 because devtoolset-4 is EOL and no longer available (#5506)
* Use devtoolset-6.

* [CI] Use devtoolset-6 because devtoolset-4 is EOL and no longer available

* CUDA 9.0 doesn't work with devtoolset-6; use devtoolset-4 for GPU build only

Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
2020-04-11 19:49:06 -07:00
Jiaming Yuan
b56c902841 [R] R raw serialization. (#5123)
* Add bindings for serialization.
* Change `xgb.save.raw` into full serialization instead of simple model.
* Add `xgb.load.raw` for deserialization.
* Run devtools.
2020-04-11 17:16:54 +08:00
Jiaming Yuan
a3db79df22 Remove makefiles. (#5513) 2020-04-11 13:25:53 +08:00
Rory Mitchell
093e2227e3 Serialise booster after training to reset state (#5484)
* Serialise booster after training to reset state

* Prevent process_type being set on load

* Check for correct updater sequence
2020-04-11 16:27:12 +12:00
Jiaming Yuan
4a0c8ef237 Update doc for parameter validation. (#5508)
* Update doc for parameter validation.

* Fix github rebase.
2020-04-11 00:43:46 +08:00
Jiaming Yuan
1334aca437 Fix github merge. (#5509) 2020-04-10 22:17:38 +08:00
Jiaming Yuan
866a477319 Unify max nodes. (#5497) 2020-04-10 19:26:35 +08:00
Jiaming Yuan
bd653fad4c Remove distcol updater. (#5507)
Closes #5498.
2020-04-10 12:52:56 +08:00
Jiaming Yuan
7d52c0b8c2 Requires setting leaf stat when expanding tree. (#5501)
* Fix GPU Hist feature importance.
2020-04-10 12:27:03 +08:00
Jiaming Yuan
dc2950fd90 Fix checking booster. (#5505)
* Use `get_params()` instead of `getattr` intrinsic.
2020-04-10 12:21:21 +08:00
Jiaming Yuan
6671b42dd4 Use ellpack for prediction only when sparsepage doesn't exist. (#5504) 2020-04-10 12:15:46 +08:00
Bobby Wang
ad826e913f [jvm-packages]add feature size for LabelPoint and DataBatch (#5303)
* fix type error

* Validate number of features.

* resolve comments

* add feature size for LabelPoint and DataBatch

* pass the feature size to native

* move feature size validating tests into a separate suite

* resolve comments

Co-authored-by: fis <jm.yuan@outlook.com>
2020-04-07 16:49:52 -07:00
Zhang Zhang
8bc595ea1e Fix out-of-bound array access in WQSummary::SetPrune() (#5493) 2020-04-08 10:02:31 +12:00
Rong Ou
a1085396e2 add reference to gpu external memory (#5490) 2020-04-07 11:15:58 +12:00
Yuan Tang
9097e8f0d9 Edits on tutorial for XGBoost job on Kubernetes (#5487) 2020-04-05 07:36:33 -04:00
Paul Kaefer
c362125d7b corrected spelling of 'list' (#5482) 2020-04-05 09:15:08 +08:00
Jiaming Yuan
0012f2ef93 Upgrade clang-tidy on CI. (#5469)
* Correct all clang-tidy errors.
* Upgrade clang-tidy to 10 on CI.

Co-authored-by: Hyunsu Cho <chohyu01@cs.washington.edu>
2020-04-05 04:42:29 +08:00
Philip Hyunsu Cho
30e94ddd04 Add R code to AFT tutorial [skip ci] (#5486) 2020-04-04 13:06:12 -07:00
Rory Mitchell
15800107ad Small updates to GPU documentation (#5483) 2020-04-04 13:02:27 -07:00
Jiaming Yuan
a9313802ea Fix dump model. (#5485) 2020-04-05 03:52:54 +08:00
Philip Hyunsu Cho
5fc5ec539d Implement robust regularization in 'survival:aft' objective (#5473)
* Robust regularization of AFT gradient and hessian

* Fix AFT doc; expose it to tutorial TOC

* Apply robust regularization to uncensored case too

* Revise unit test slightly

* Fix lint

* Update test_survival.py

* Use GradientPairPrecise

* Remove unused variables
2020-04-04 12:21:24 -07:00
Jiaming Yuan
939973630d Accept other gradient types for split entry. (#5467) 2020-04-03 10:38:44 +08:00
Jiaming Yuan
86beb68ce8 Implement host span. (#5459) 2020-04-03 10:37:51 +08:00
Jiaming Yuan
459b175dc6 Split up test helpers header. (#5455) 2020-04-03 10:36:53 +08:00
Jiaming Yuan
c218d8ffbf Enable parameter validation for skl. (#5477) 2020-04-03 10:23:58 +08:00
Jiaming Yuan
d0b86c75d9 Remove silent parameter. (#5476) 2020-04-03 08:03:26 +08:00
Jiaming Yuan
29c6ad943a Prevent copying SimpleDMatrix. (#5453)
* Set default dtor for SimpleDMatrix to initialize the default copy ctor, which is
deleted due to the unique ptr.

* Remove commented code.
* Remove warning for calling host function (std::max).
* Remove warning for initialization order.
* Remove warning for unused variables.
2020-04-02 07:01:49 +08:00
Jiaming Yuan
e86030c360 Update dmlc-core. (#5466)
* Copy dmlc travis script to XGBoost.
2020-04-02 04:16:39 +08:00
Jiaming Yuan
babcb996e7 Reduce span check overhead. (#5464) 2020-04-01 22:07:24 +08:00
Rory Mitchell
15f40e51e9 Add support for dlpack, expose python docs for DeviceQuantileDMatrix (#5465) 2020-04-01 23:34:32 +13:00
Jiaming Yuan
6601a641d7 Thread safe, inplace prediction. (#5389)
Normal prediction with DMatrix is now thread safe with locks. The newly added inplace prediction is lock-free and thread safe.

When data is on device (cupy, cudf), the returned data is also on device.

* Implementation for numpy, csr, cudf and cupy.

* Implementation for dask.

* Remove sync in simple dmatrix.
2020-03-30 15:35:28 +08:00
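A sketch of the lock-free path this PR adds: several threads calling `inplace_predict` on one Booster without constructing DMatrix objects. Dimensions and parameters here are illustrative:

```python
from concurrent.futures import ThreadPoolExecutor

import numpy as np
import xgboost as xgb

X = np.random.rand(1000, 10)
booster = xgb.train({"tree_method": "hist"},
                    xgb.DMatrix(X, label=np.random.rand(1000)),
                    num_boost_round=10)

def predict_chunk(chunk):
    # No DMatrix construction and no lock: data is consumed in place.
    return booster.inplace_predict(chunk)

with ThreadPoolExecutor(max_workers=4) as pool:
    parts = list(pool.map(predict_chunk, np.array_split(X, 4)))
print(np.concatenate(parts).shape)  # (1000,)
```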
James Lamb
7f980e9f83 [R-package] fixed inconsistency in R -e calls in FindLibR.cmake (#5438) 2020-03-28 19:24:21 +08:00
ShvetsKS
27a8e36fc3 Reducing memory consumption for 'hist' method on CPU (#5334) 2020-03-28 14:45:52 +13:00
Rory Mitchell
13b10a6370 Device dmatrix (#5420) 2020-03-28 14:42:21 +13:00
Jiaming Yuan
780de49ddb Resolve travis failure. (#5445)
* Install dependencies by pip.
2020-03-27 19:37:58 +08:00
Jiaming Yuan
4942da64ae Refactor tests with data generator. (#5439) 2020-03-27 06:44:44 +08:00
Jiaming Yuan
7146b91d5a Force compressed buffer to be 4 bytes aligned. (#5441) 2020-03-27 06:43:52 +08:00
Avinash Barnwal
dcf439932a Add Accelerated Failure Time loss for survival analysis task (#4763)
* [WIP] Add lower and upper bounds on the label for survival analysis

* Update test MetaInfo.SaveLoadBinary to account for extra two fields

* Don't clear qids_ for version 2 of MetaInfo

* Add SetInfo() and GetInfo() method for lower and upper bounds

* changes to aft

* Add parameter class for AFT; use enum's to represent distribution and event type

* Add AFT metric

* changes to neg grad to grad

* changes to binomial loss

* changes to overflow

* changes to eps

* changes to code refactoring

* changes to code refactoring

* changes to code refactoring

* Re-factor survival analysis

* Remove aft namespace

* Move function bodies out of AFTNormal and AFTLogistic, to reduce clutter

* Move function bodies out of AFTLoss, to reduce clutter

* Use smart pointer to store AFTDistribution and AFTLoss

* Rename AFTNoiseDistribution enum to AFTDistributionType for clarity

The enum class was not a distribution itself but a distribution type

* Add AFTDistribution::Create() method for convenience

* changes to extreme distribution

* changes to extreme distribution

* changes to extreme

* changes to extreme distribution

* changes to left censored

* deleted cout

* changes to x,mu and sd and code refactoring

* changes to print

* changes to hessian formula in censored and uncensored

* changes to variable names and pow

* changes to Logistic Pdf

* changes to parameter

* Expose lower and upper bound labels to R package

* Use example weights; normalize log likelihood metric

* changes to CHECK

* changes to logistic hessian to standard formula

* changes to logistic formula

* Comply with coding style guideline

* Revert back Rabit submodule

* Revert dmlc-core submodule

* Comply with coding style guideline (clang-tidy)

* Fix an error in AFTLoss::Gradient()

* Add missing files to amalgamation

* Address @RAMitchell's comment: minimize future change in MetaInfo interface

* Fix lint

* Fix compilation error on 32-bit target, when size_t == bst_uint

* Allocate sufficient memory to hold extra label info

* Use OpenMP to speed up

* Fix compilation on Windows

* Address reviewer's feedback

* Add unit tests for probability distributions

* Make Metric subclass of Configurable

* Address reviewer's feedback: Configure() AFT metric

* Add a dummy test for AFT metric configuration

* Complete AFT configuration test; remove debugging print

* Rename AFT parameters

* Clarify test comment

* Add a dummy test for AFT loss for uncensored case

* Fix a bug in AFT loss for uncensored labels

* Complete unit test for AFT loss metric

* Simplify unit tests for AFT metric

* Add unit test to verify aggregate output from AFT metric

* Use EXPECT_* instead of ASSERT_*, so that we run all unit tests

* Use aft_loss_param when serializing AFTObj

This is to be consistent with AFT metric

* Add unit tests for AFT Objective

* Fix OpenMP bug; clarify semantics for shared variables used in OpenMP loops

* Add comments

* Remove AFT prefix from probability distribution; put probability distribution in separate source file

* Add comments

* Define kPI and kEulerMascheroni in probability_distribution.h

* Add probability_distribution.cc to amalgamation

* Remove unnecessary diff

* Address reviewer's feedback: define variables where they're used

* Eliminate all INFs and NANs from AFT loss and gradient

* Add demo

* Add tutorial

* Fix lint

* Use 'survival:aft' to be consistent with 'survival:cox'

* Move sample data to demo/data

* Add visual demo with 1D toy data

* Add Python tests

Co-authored-by: Philip Cho <chohyu01@cs.washington.edu>
2020-03-25 13:52:51 -07:00
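The AFT objective models ln(t) = ŷ + σZ and maximizes the likelihood of uncensored, right-, left-, or interval-censored labels. A hedged SciPy sketch of the negative log likelihood for the normal-distribution case — the function name and the σ default are assumptions, and the shipped code also supports logistic and extreme distributions:

```python
import numpy as np
from scipy.stats import norm

def aft_nll(pred, y_lower, y_upper, sigma=1.0):
    # Model: ln(t) = pred + sigma * Z with Z ~ Normal(0, 1).
    z_lo = (np.log(y_lower) - pred) / sigma
    nll = np.empty_like(pred)
    uncensored = y_lower == y_upper
    # Uncensored label: negative log density of t itself.
    nll[uncensored] = -(norm.logpdf(z_lo[uncensored])
                        - np.log(sigma * y_lower[uncensored]))
    # Censored label: negative log mass inside [y_lower, y_upper].
    censored = ~uncensored
    z_hi = (np.log(y_upper) - pred) / sigma
    nll[censored] = -np.log(norm.cdf(z_hi[censored]) - norm.cdf(z_lo[censored]))
    return nll

pred = np.ones(3)
print(aft_nll(pred,
              y_lower=np.array([2.0, 2.0, 2.0]),
              y_upper=np.array([2.0, 4.0, np.inf])))  # exact, interval, right
```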
Rory Mitchell
1de36cdf1e Add link to GPU documentation (#5437) 2020-03-24 09:29:29 +13:00
sriramch
d2231fc840 Ranking metric acceleration on the gpu (#5398) 2020-03-22 19:38:48 +13:00
Jiaming Yuan
cd7d6f7d59 [dask] Fix missing value for scikit-learn interface. (#5435) 2020-03-20 10:56:01 -04:00
James Lamb
4b7e2b7bff [R-package] fixed uses of class() (#5426)
2020-03-20 14:51:20 +01:00
Jiaming Yuan
abca9908ba Support pandas SparseArray. (#5431) 2020-03-20 21:40:22 +08:00
James Lamb
3cf665d3ec [R-package] changed FindLibR to take advantage of CMake cache (#5427) 2020-03-20 03:32:15 +08:00
Jiaming Yuan
760d5d0c3c [dask] Accept other inputs for prediction. (#5428)
* Returns a series when input is dataframe.

* Merge assert client.
2020-03-19 17:05:55 +08:00
Jiaming Yuan
8ca06ab329 [dask] Check non-equal when setting threads. (#5421)
* Check non-equal.

`nthread` can be restored from an internal parameter, which is misinterpreted as a
user-defined parameter.

* Check None.
2020-03-17 13:07:20 +08:00
Jiaming Yuan
b51124c158 [dask] Enable gridsearching with skl. (#5417) 2020-03-16 04:51:51 +08:00
Jiaming Yuan
761a5dbdfc [dask] Honor nthreads from dask worker. (#5414) 2020-03-16 04:51:24 +08:00
Jiaming Yuan
21b671aa06 [dask] Order the prediction result. (#5416) 2020-03-15 19:34:04 +08:00
Jiaming Yuan
668e432e2d [dask] Use DMLC_TASK_ID. (#5415) 2020-03-15 16:47:03 +08:00
Jiaming Yuan
fc88105620 Better error message for updating. (#5418) 2020-03-15 16:46:21 +08:00
Jiaming Yuan
ab7a46a1a4 Check whether current updater can modify a tree. (#5406)
* Check whether current updater can modify a tree.

* Fix tree model JSON IO for pruned trees.
2020-03-14 09:24:08 +08:00
Rory Mitchell
b745b7acce Fix memory usage of device sketching (#5407) 2020-03-14 13:43:24 +13:00
Jan Borchmann
bb8c8df39d [dask] passed through verbose for dask fit (#5413) 2020-03-14 06:33:53 +08:00
Jiaming Yuan
45a97ddf32 Split up LearnerImpl. (#5350) 2020-03-12 16:30:23 +08:00
Rory Mitchell
3ad4333b0e Partial rewrite EllpackPage (#5352) 2020-03-11 10:15:53 +13:00
Darby Payne
7a99f8f27f Adding static library option (#5397) 2020-03-10 18:22:15 +08:00
Bart Broere
a931589c96 Fix typo (#5399) 2020-03-09 19:41:39 +08:00
Rory Mitchell
a38e7bd19c Sketching from adapters (#5365)
* Sketching from adapters

* Add weights test
2020-03-07 21:07:58 +13:00
Jiaming Yuan
0dd97c206b Move thread local entry into Learner. (#5396)
* Move thread local entry into Learner.

This is an attempt to workaround CUDA context issue in static variable, where
the CUDA context can be released before device vector.

* Add PredictionEntry to thread local entry.

This eliminates one copy of prediction vector.

* Don't define CUDA C API in a namespace.
2020-03-07 15:37:39 +08:00
sriramch
1ba6706167 - create a gpu metrics (internal) registry (#5387)
* - create a gpu metrics (internal) registry
  - the objective is to separate the cpu and gpu implementations such that they evolve
    independently. to that end, this approach will:
    - preserve the same metrics configuration (from the end user perspective)
    - internally delegate the responsibility to the gpu metrics builder when there is a
      valid device present
    - decouple the gpu metrics builder from the cpu ones to prevent misuse
    - move away from including the cuda file from within the cc file and segregate the code
      via ifdef's
2020-03-07 15:31:35 +13:00
Jiaming Yuan
8d06878bf9 Deterministic GPU histogram. (#5361)
* Use pre-rounding based method to obtain reproducible floating point
  summation.
* GPU Hist for regression and classification are bit-by-bit reproducible.
* Add doc.
* Switch to thrust reduce for `node_sum_gradient`.
2020-03-04 15:13:28 +08:00
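The pre-rounding trick: snap every term onto a power-of-two grid coarse enough that all partial sums are exact, which makes the total independent of summation order. A sketch under assumed constants — the real implementation derives the rounding factor more carefully and applies it to GPU gradients:

```python
import numpy as np

def pre_round(values):
    # Snap values to a power-of-two grid coarse enough that every
    # partial sum is exact, so the total is order-independent.
    n = len(values)
    m = 2.0 ** np.ceil(np.log2(np.max(np.abs(values)) * n))
    delta = m * np.finfo(values.dtype).eps  # assumed factor; see the paper
    return (np.round(values / delta) * delta).astype(values.dtype)

rng = np.random.default_rng(0)
x = rng.normal(size=100_000).astype(np.float32)
r = pre_round(x)
sums = {np.sum(rng.permutation(r), dtype=np.float32) for _ in range(5)}
print(len(sums), "distinct result(s)")  # 1: identical bits for every order
```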
Philip Hyunsu Cho
9775da02d9 Add release note for 1.0.0 in NEWS.md (#5329)
* Add release note for 1.0.0

* Fix a small bug in the Python script that compiles the list of contributors

* Clarify governance of CI infrastructure; now PMC is formally in charge

* Address reviewer comment

* Fix typo
2020-03-03 21:35:43 -08:00
sriramch
5dc8e894c9 Fixes and changes to the ranking metrics computed on cpu (#5380)
* - fixes and changes to the ranking metrics computed on cpu
  - auc/aucpr ranking metric accelerated on cpu
  - fixes to the auc/aucpr metrics
2020-03-03 15:56:36 +13:00
Darius Kharazi
71a8b8c65a Fix simple typo: information.c -> information (#5384)
Closes #5383
2020-03-03 08:50:14 +08:00
Egor Smirnov
1b97eaf7a7 Optimized ApplySplit, BuildHist and UpdatePredictCache functions on CPU (#5244)
* Split up sparse and dense build hist kernels.
* Add `PartitionBuilder`.
2020-02-29 16:11:42 +08:00
sriramch
b81f8cbbc0 Move segment sorter to common (#5378)
- move segment sorter to common
- this is the first of a handful of PRs that split the larger PR #5326
- it moves this facility to common (from ranking objective class), so that it can be
    used for metric computation
- it also wraps all the bare device pointers into span.
2020-02-29 15:42:07 +08:00
Jiaming Yuan
2ba8c13b69 Revert "Enable rabit test (#5358)" (#5377)
This reverts commit 9a5efffebe.
2020-02-29 04:25:03 +08:00
Chen Qin
9a5efffebe Enable rabit test (#5358) 2020-02-28 22:29:02 +08:00
Samrat Pandiri
2d76d40dfd Update dask.rst to correct a spelling mistake (#5371)
Change `signle-node` to `single-node`
2020-02-27 20:46:41 +08:00
Jiaming Yuan
a461a9a90a Define lazy isinstance for Python compat. (#5364)
* Avoid importing datatable.
* Fix #5363.
2020-02-26 14:23:33 +08:00
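The idea of a lazy isinstance check is to compare class identity by name, so an optional heavy dependency such as datatable never has to be imported just for a type check. A sketch close to what such a helper looks like:

```python
import numpy as np

def lazy_isinstance(instance, module, name):
    # Compare the class's module and name instead of importing the
    # module just to get a class object for isinstance().
    cls = instance.__class__
    return cls.__module__ == module and cls.__name__ == name

print(lazy_isinstance(np.arange(3), "numpy", "ndarray"))    # True
print(lazy_isinstance(np.arange(3), "datatable", "Frame"))  # False, no import
```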
Jiaming Yuan
0fd455e162 Restore loading model from buffer. (#5360) 2020-02-26 11:30:13 +08:00
Jiaming Yuan
f2b8cd2922 Add number of columns to native data iterator. (#5202)
* Change native data iter into an adapter.
2020-02-25 23:42:01 +08:00
Jiaming Yuan
e0509b3307 Fix pruner. (#5335)
* Honor the tree depth.
* Prevent pruning pruned node.
2020-02-25 08:32:46 +08:00
Rory Mitchell
b0ed3f0a66 Remove unnecessary DMatrix methods (#5324) 2020-02-25 12:40:39 +13:00
Jiaming Yuan
655cf17b60 Predict on Ellpack. (#5327)
* Unify GPU prediction node.
* Add `PageExists`.
* Dispatch prediction on input data for GPU Predictor.
2020-02-23 06:27:03 +08:00
daiki katsuragawa
70a91ec3ba Update README.md (#5346) 2020-02-23 02:52:37 +08:00
Philip Hyunsu Cho
cfae247231 Fix a small typo in sklearn.py that broke multiple eval metrics (#5341) 2020-02-22 19:02:37 +08:00
Rong Ou
d6b31df449 update docs for gpu external memory (#5332)
* update docs for gpu external memory

* add hist limitation
2020-02-22 14:57:40 +08:00
Philip Hyunsu Cho
7ac7e8778f Port patches from 1.0.0 branch (#5336)
* Remove f-string, since it's not supported by Python 3.5 (#5330)

* Remove f-string, since it's not supported by Python 3.5

* Add Python 3.5 to CI, to ensure compatibility

* Remove duplicated matplotlib

* Show deprecation notice for Python 3.5

* Fix lint

* Fix lint

* Fix a unit test that mistook MINOR ver for PATCH ver

* Enforce only major version in JSON model schema

* Bump version to 1.1.0-SNAPSHOT
2020-02-21 13:13:21 -08:00
Philip Hyunsu Cho
8aa8ef1031 Display Sponsor button, link to OpenCollective (#5325) 2020-02-19 01:58:21 -08:00
Rory Mitchell
bc96ceb8b2 Refactor SparsePageSource, delete cache files after use (#5321)
* Refactor sparse page source

* Delete temporary cache files

* Log fatal if cache exists

* Log fatal if multiple threads used with prefetcher
2020-02-19 16:43:41 +13:00
Rory Mitchell
b2b2c4e231 Remove SimpleCSRSource (#5315) 2020-02-18 16:49:17 +13:00
Jiaming Yuan
9f77c18b0d Add JVM_CHECK_CALL. (#5199)
* Added a check call macro in the jvm package that prevents executing other functions
from jvm when an error occurred in XGBoost. For example, when prediction fails jvm
should not try to allocate memory based on the output prediction size.
2020-02-18 11:10:55 +08:00
Jiaming Yuan
0110754a76 Remove update prediction cache from predictors. (#5312)
Move this function into gbtree, using only the updater to do so. Since the predictor now knows exactly how many trees to predict, there's no need for it to update the prediction cache.
2020-02-17 11:35:47 +08:00
Jiaming Yuan
e433a379e4 Fix changing locale. (#5314)
* Fix changing locale.

* Don't use locale guard.

As number parsing is implemented in house, we don't need locale.

* Update doc.
2020-02-17 11:31:13 +08:00
Rory Mitchell
7e32af5c21 Wide dataset quantile performance improvement (#5306) 2020-02-16 10:24:42 +13:00
Jiaming Yuan
ed2465cce4 Add configuration to R interface. (#5217)
* Save and load internal parameter configuration as JSON.
2020-02-16 03:01:58 +08:00
Jiaming Yuan
8ca9744b07 Use scikit-learn in extra dependencies. (#5310) 2020-02-15 07:12:51 +08:00
Jiaming Yuan
c35cdecddd Move prediction cache to Learner. (#5220)
* Move prediction cache into Learner.

* Clean-ups

- Remove duplicated cache in Learner and GBM.
- Remove ad-hoc fix of invalid cache.
- Remove `PredictFromCache` in predictors.
- Remove prediction cache for linear altogether, as it's only moving the
  prediction into training process but doesn't provide any actual overall speed
  gain.
- The cache is now unique to Learner, which means the ownership is no longer
  shared by any other components.

* Changes

- Add version to prediction cache.
- Use weak ptr to check expired DMatrix.
- Pass shared pointer instead of raw pointer.
2020-02-14 13:04:23 +08:00
Rory Mitchell
24ad9dec0b Testing hist_util (#5251)
* Rank tests

* Remove categorical split specialisation

* Extend tests to multiple features, switch to WQSketch

* Add tests for SparseCuts

* Add external memory quantile tests, fix some existing tests
2020-02-14 14:36:43 +13:00
Jiaming Yuan
911a902835 Merge model compatibility fixes from 1.0rc branch. (#5305)
* Port test model compatibility.
* Port logit model fix.

https://github.com/dmlc/xgboost/pull/5248
https://github.com/dmlc/xgboost/pull/5281
2020-02-13 20:41:58 +08:00
Jiaming Yuan
29eeea709a Pass shared pointer instead of raw pointer to Learner. (#5302)
Extracted from https://github.com/dmlc/xgboost/pull/5220 .
2020-02-11 14:16:38 +08:00
Philip Hyunsu Cho
2e0067e790 Update affiliation of @hcho3 (#5292) 2020-02-06 20:58:39 -08:00
Andrew Kane
94828a7c0c Updated Windows build docs (#5283) 2020-02-05 12:19:54 +08:00
Jiaming Yuan
84e395d91e Fix CMake build on Windows with setuptools. (#5280) 2020-02-05 10:47:39 +08:00
Jiaming Yuan
595a00466d Rewrite setup.py. (#5271)
The setup.py is rewritten. This new script uses only Python code and provides customized
implementations of setuptools commands. This way users can run most setuptools commands
just like with any other Python library.

* Remove setup_pip.py
* Remove soft links.
* Define customized commands.
* Remove shell script.
* Remove makefile script.
* Update the doc for building from source.
2020-02-04 13:35:42 +08:00
Rong Ou
e4b74c4d22 Gradient based sampling for GPU Hist (#5093)
* Implement gradient based sampling for GPU Hist tree method.
* Add samplers and handle compacted page in GPU Hist.
2020-02-04 10:31:27 +08:00
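Gradient-based sampling keeps a row with probability tied to its gradient magnitude and reweights survivors so histogram sums stay unbiased in expectation. A hedged sketch of that idea; the PR follows the probabilities derived in the referenced paper, which differ from this simple |g| scheme:

```python
import numpy as np

def gradient_based_sample(grad, hess, sample_rate, rng):
    # Keep row i with probability p_i tied to |g_i| (capped at 1), then
    # rescale by 1/p_i so histogram sums stay unbiased in expectation.
    score = np.abs(grad)
    p = np.minimum(1.0, sample_rate * len(grad) * score / score.sum())
    keep = rng.random(len(grad)) < p
    scale = 1.0 / p[keep]
    return keep, grad[keep] * scale, hess[keep] * scale

rng = np.random.default_rng(0)
g = rng.normal(size=100_000)
keep, gs, _ = gradient_based_sample(g, np.ones_like(g), 0.3, rng)
print(keep.sum(), gs.sum(), g.sum())  # ~30% kept; sums roughly agree
```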
Philip Hyunsu Cho
c74216f22c Declare Python 3.8 support in setup.py (#5274) 2020-02-03 10:38:52 -08:00
David Díaz Vico
71e7e3b96f Improved sklearn compatibility (#5255) 2020-02-03 13:30:45 +08:00
Jiaming Yuan
a5cc112eea Export JSON config in get_params. (#5256) 2020-02-03 12:46:51 +08:00
Jiaming Yuan
ed0216642f Avoid dask test fixtures. (#5270)
* Fix Travis OSX timeout.

* Fix classifier.
2020-02-03 12:39:20 +08:00
Jiaming Yuan
856b81c727 Ignore gdb_history. [skip ci] (#5257) 2020-02-02 20:40:09 +08:00
Nan Zhu
d7b45fbcaf [jvm-packages] do not use multiple jobs to make checkpoints (#5082)
* temp

* temp

* tep

* address the comments

* fix stylistic issues

* fix

* external checkpoint
2020-02-01 19:36:39 -08:00
Philip Hyunsu Cho
fa26313feb Remove use of std::cout from R package (#5261) 2020-02-01 05:52:19 -08:00
Philip Hyunsu Cho
2f7fcff4d7 Fix build on FreeBSD (#133) 2020-01-27 12:15:32 -08:00
Nan Zhu
6e563951af fix hanging trainings (#132)
* fix hanging connections

* remove logging
2020-01-27 09:12:02 -08:00
Chen Qin
0d6a853212 fix xgboost build failure introduced by allgather interface (#129)
* fix missing allgather rabit declaration

* fix allgather signature mismatch

* fix type conversion

* fix GetRingPrevRank
2020-01-01 22:45:14 +08:00
Chen Qin
493ad834a1 allow duplicated bootstrap allreduce to overwrite previous results (#128)
* allow timeout of 0 to enable immediate exit

* disable duplicated signature check, overwrite results with same key
2019-11-13 10:19:58 +08:00
nateagr
1907b25cd0 Expose RabitAllGatherRing and RabitGetRingPrevRank (#113)
* add unittests

* Expose RabitAllGatherRing and RabitGetRingPrevRank

* Enabled TCP_NODELAY to decrease latency
2019-11-12 19:55:32 +08:00
Jiaming Yuan
90e2239372 Fix cmake variable. (#126) 2019-11-05 01:27:08 -05:00
Chen Qin
2f25347168 allow timeout of 0 to enable immediate exit (#125) 2019-10-22 14:38:55 -07:00
Chen Qin
d22e0809a8 throw dmlc::Error (#120)
* throw dmlc::Error handled by xgboost jni
2019-10-16 13:12:15 -04:00
Philip Hyunsu Cho
33dbc10aab Fix compilation failure on Windows (#119)
* Fix compilation failure on Windows

* Fix lint
2019-10-15 23:37:42 +07:00
Chen Qin
8e2c201d23 fix assert timeout_sec (#117) 2019-10-14 04:44:26 -04:00
Jiaming Yuan
ed9328ceae Fix lint. (#115) 2019-10-13 07:38:29 -04:00
Jiaming Yuan
6dab74689c Add SeekEnd to MemoryFixSizeBuffer. (#109)
* Don't assert buffer size.
2019-10-13 00:09:25 -04:00
Chen Qin
5d1b613910 exit when allreduce/broadcast error causes timeout (#112)
* keep async timeout task

* add missing pthread to cmake

* add tests

* Add a sleep period to avoid flushing the tracker.
2019-10-11 03:39:39 -04:00
Chen Qin
af7281afe3 unittests mock, cleanup (#111)
* cleanup, fix issue introduced after removing is_bootstrap parameter

* misc

* clean

* add unittests
2019-10-01 13:36:11 -07:00
Chen Qin
ddcc2d85da Clean up cmake script and code includes (#106)
* Clean up CMake scripts and related include paths.
* Add unittests.
2019-09-26 02:29:04 -04:00
Xu Xiao
e92641887b remove unreachable code of AllreduceRobust::CheckAndRecover (#108) 2019-09-18 23:06:59 -04:00
Jiaming Yuan
d4ce6807c7 Don't use _builtin_FUNCTION. (#107) 2019-09-18 12:05:23 -04:00
Chen Qin
9a7ac85d7e remove is_bootstrap parameter (#102)
* apply openmp simd

* clean __builtin detection, move windows build check from xgboost project, add openmp support for vectorized reduce

* apply openmp only to rabit

* organize rabit signature

* remove is_bootstrap, use load_checkpoint as implicit flag

* visual studio doesn't support latest openmp

* organize omp declarations

* replace memory copy with vector cast

* Revert "replace memory copy with vector cast"

This reverts commit 28de4792dcdff40d83d458510d23b7ef0b191d79.

* Revert "orgnize omp declarations"

This reverts commit 31341233d31ce93ccf34d700262b1f3f6690bbfe.

* remove openmp settings, merge into a upcoming pr

* misc

* per feedback, update comments
2019-09-10 11:45:50 -07:00
Chen Qin
5797dcb64e support bootstrap allreduce/broadcast (#98)
* support running rabit tests as xgboost subproject using xgboost/dmlc-core

* support tracker config set/get

* remove redundant printf

* remove redundant printf

* add c++0x declaration

* log allreduce/broadcast caller, engine should track caller stack for
investigation

* tracker support binary config format

* Revert "tracker support binary config format"

This reverts commit 2a28e5e2b55c200cb621af8d19f17ab1bc62503b.

* remove caller, prototype fetch allreduce/broadcast results from resbuf

* store cached allreduce/broadcast seq_no to tracker

* allow restoring all caches from other nodes

* try new rabit collective cache, todo: recv_link seems down

* link up cache restore with main recovery

* cleanup load cache state

* update cache api

* pass test.mk

* have working tests

* try to unify check into actionsummary

* more logging to debug distributed hist tree method issue

* update rabit interface to support caller signature matching

* split seq_counter from cur_cache_seq into different variables

* still see issue with inf loop

* support debug print caller as well as allreduce op

* cleanup

* remove get/set cache from model_recover, adding recover in
loadcheckpoint

* clarify rabit cache strategy: the cache is set only by a successful collective
call involving all nodes with a unique cache key. if all nodes call
getcache at the same time, we let rabit run the collective call. If some nodes
call getcache while others don't, we backfill the cache from the nodes with the
most entries

* revert caller logs

* fix lint error

* fix engine mpi signature

* support getcache by ref

* allow result buffer persist to filestream

* add logging

* try fix checkpoint failure recovery case

* use int64_t to avoid overflow-caused seg fault

* try avoid int overflow

* try fix checkpoint failure recovery case

* try to avoid seqno overflowing to negative by offsetting the special flag value;
add cache seq no to checkpoint/load checkpoint/checkpoint ack to avoid
confusion from cache recovery

* fix cache seq assert error

* remove logging, handle edge case

* add extensive logging of checkpoint state with different seq no

* fix lint errors

* clean up comments before merge back to master

* add logs to allreduce/broadcast/checkpoint

* use unsigned int32 and give seq no a larger range

* address remove allreduce dropseq code segment

* using caller signature to filter bootstrapallreduces

* remove get/set cache from empty

* apply signature to reducer

* apply signature to broadcast

* add key to broadcast log

* fix broadcast signature

* fix default _line value for non linux system

* adding comments, remove sleep(1)

* fix osx build issue

* try fix mpi

* fix doc

* fix engine_empty api

* logging, adding more logs, restore immutable assertion

* print unsigned int with ud

* fix lint

* rename seqtype to kSeq and KCache indicating its usage
apply kDiffSeq check to load_cache routine

* comment allreduce/broadcast log

* allow tests run on arm

* enable flag to turn on / off cache

* add log info alert if user chooses to enable rabit bootstrap cache

* add rabit_debug setting so user can turn it on via config

* log flags when user turn on rabit_debug

* force rabit restart if tracker assigns -1 rank

* use OPENMP to vectorize reducer

* address comment

* Revert "address comment"

This reverts commit 1dc61f33e7357dad8fa65528abeb81db92c5f9ed.

* fix checkpoint size print 0

* per feedback, remove DISABLEOPEMP, address race condition

* - remove openmp from this PR
- update name from cache to bootstrapcache

* add default value of signature macros

* remove openmp from cmake file

* Update src/allreduce_robust.cc

Co-Authored-By: Philip Hyunsu Cho <chohyu01@cs.washington.edu>

* Update src/allreduce_robust.cc

Co-Authored-By: Philip Hyunsu Cho <chohyu01@cs.washington.edu>

* run test with cmake

* remove openmp

* fix cmake based tests

* use cmake test to fix darwin .dylib issue

* move around rabit_signature definition due to windows build

* misc, add c++ check in CMakeFile

* per feedback

* resolve CMake file

* update rabit version
2019-08-27 18:12:33 -07:00
Nan Zhu
dba32d54d1 shutdown for multiple times (#99) 2019-07-16 12:41:39 -07:00
Nan Zhu
65b718a5e7 return values in Init and Finalize (#96)
* make init function return values

* address the comments
2019-06-25 20:05:54 -07:00
Nan Zhu
fc85f776f4 allow not stopping process on error (#97)
* allow not stopping process on error

* fix merge error
2019-06-25 13:04:39 -07:00
Nan Zhu
a429748e24 allow multi call on init (#92) 2019-04-26 18:41:02 -07:00
Chen Qin
5c3b36f346 Allow using external dmlc-core (#91)
* Set `RABIT_BUILD_DMLC=1` if using dmlc-core in rabit

* remove dmlc-core
2019-04-26 15:28:45 +08:00
Chen Qin
e3d51d3e62 [rabit harden] Enable all tests (#90)
* include osx in tests
* address `time_wait` on port assignment
* increase submit attempts.
* cleanup tests
2019-04-24 19:12:11 +08:00
Chen Qin
ecd4bf7aae [rabit harden] replace hardcopy dmlc-core headers with submodule links (#86)
* backport dmlc header changes to rabit

* use gitmodule to reference latest dmlc header files

* include ref to dmlc-core
fix cmake

* update cmake file, add cmake build travis task

* try force using g++-4.8

* per feedback, update cmake
2019-03-23 13:11:29 +08:00
Chen Qin
785d7e54d3 [mpi] add engine_mpi travis build (#83) 2019-03-15 22:58:47 +08:00
Chen Qin
ed06e0c6af [rabit harden] fix rabit tests (#81)
* enable model recovery tests
* force use gcc4.8 in Travis
2019-03-15 07:16:45 +08:00
Jiaming Yuan
1cc34f01db Fix ssize_t definition. (#80)
* Fix linter.
2019-02-18 19:25:08 +08:00
Jiaming Yuan
0101a4719c Remove dmlc logging. (#78)
* Remove dmlc logging header.

* Fix lint.
2019-02-16 18:37:54 -08:00
Jiaming Yuan
05941a5f96 Try fixing mingw build error when using CMake. (#77)
* Try fixing mingw build error when using CMake.

* Check __MINGW32__ .

* Fix linter.
2019-02-16 22:35:43 +08:00
Chen Qin
eb2590b774 workaround macosx java test race condition (#74)
* fix error in dmlc#57, clean up comments and naming

* include missing packages, disable recovery tests for now

* disable local_recover tests until we have a bug fix

* support larger cluster

* fix lint, merge with master

* fix mac osx test failure in https://github.com/dmlc/xgboost/pull/3818

* Update allreduce_robust.cc
2018-10-26 12:39:31 -07:00
Chen Qin
3a35dabfae support larger cluster (#73)
* fix error in dmlc#57, clean up comments and naming

* include missing packages, disable recovery tests for now

* disable local_recover tests until we have a bug fix

* support larger cluster

* fix lint, merge with master
2018-10-22 10:13:45 -07:00
Chen Qin
69cdfae22f disable travis model_recover tests, fix doc generation failure (#71)
* add missing packages used in dmlc submit

* disable local_recovery tests until we have a code fix

* fix doc gen failure
2018-10-19 18:18:16 -07:00
Chen Qin
785bde6f87 add missing packages used in dmlc submit (#70) 2018-10-19 13:04:33 -07:00
Ruifeng Zheng
edc403fb2c init (#60) 2018-07-04 12:31:24 -07:00
Philip Hyunsu Cho
87143deb4c Don't define DMLC_LOG_STACK_TRACE on Solaris (#59)
DMLC_LOG_STACK_TRACE involves use of non-standard header execinfo.h, which
causes compilation failure on Solaris.
2018-06-15 22:33:46 -07:00
trivialfis
fc5072b100 Fix building shared library. (#58) 2018-05-24 09:05:37 -07:00
Will Storey
7bc46b8c75 Allow compiling with -Werror=strict-prototypes (#56)
Without this, with gcc 7.3.0, we see things like:

/xgboost/include/xgboost/c_api.h:98:1: error: function
declaration isn't a prototype [-Werror=strict-prototypes]
 XGB_DLL const char *XGBGetLastError();
  ^~~~~~~
2018-03-18 22:21:35 -07:00
Dennis O'Brien
440e81db0b Fixed print statements and xrange to be compatibile with Python 2 and 3. (#55) 2018-02-26 12:19:04 -08:00
David Hirvonen
0759d5ed2b add cmake w/ relocatable pkgconfig installation (#53) 2018-01-07 14:49:39 -08:00
snehlatamohite
2eb1a1a371 Use -msse2 flag depending upon architecture while compiling the rabit code (#49) 2017-09-01 08:42:45 -07:00
Qiang Kou (KK)
41c96a25a9 To compile on ARM CPU (#46) 2017-07-12 20:24:19 -07:00
Artem Krylysov
0b406754fa Fix C API header compatibility with C compilers (#44) 2017-06-01 09:21:48 -07:00
Ziyue Huang
ab5f203b44 fix error: ‘nullptr’ was not declared in this scope (#43) 2017-04-23 10:44:11 -07:00
tqchen
a1acf23b60 only doc rabit 2017-03-17 22:09:13 -07:00
tqchen
a764d45cfb sync dmlc headers 2017-03-16 10:16:23 -07:00
AbdealiJK
21b5e12913 allreduce_robust.cc: Allow num_global_replica to be 0 (#38)
In some cases, users may not want to have any global replica of
the data being broadcasted/all-reduced. In such cases, set the
result_buffer_round to -1 as a flag that this is not necessary
and check for it.
2016-11-23 19:34:11 -08:00
Tianqi Chen
032152ad24 Update .travis.yml 2016-11-23 10:14:32 -08:00
kabu4i
af1b7d6e7a Applied FreeBSD support (#37) 2016-11-15 21:10:51 -08:00
tqchen
a9a2a69dc1 Merge branch 'master' of ssh://github.com/tqchen/rabit 2016-08-26 15:06:03 -07:00
tqchen
cd1db1afaa sync dmlc header 2016-08-26 15:05:42 -07:00
tomlaube
1007a26641 Fixing the imports to work with MPI (#30) 2016-08-26 15:04:41 -07:00
elferdo
7e15fdd9c6 FreeBSD does not have fopen64 (as of 10.3). Detect it and replace with fopen. (#29)
2016-08-20 08:35:01 -07:00
Tianqi Chen
2dd7476ad7 Merge pull request #28 from randomjohnnyh/master
Use getaddrinfo instead of gethostbyname for thread safety
2016-07-27 10:40:24 -07:00
Johnny Ho
9d235c31a7 Use getaddrinfo instead of gethostbyname for thread safety 2016-07-27 02:35:02 -04:00
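gethostbyname() typically returns a pointer to static storage in libc, so concurrent resolutions can race; getaddrinfo() is reentrant. The commit itself is C++, but the same pattern in Python for illustration:

```python
import socket
from concurrent.futures import ThreadPoolExecutor

def resolve(host):
    # getaddrinfo is reentrant, so concurrent lookups are safe; classic
    # gethostbyname() in C may hand back a shared static buffer.
    infos = socket.getaddrinfo(host, None, family=socket.AF_INET)
    return infos[0][4][0]  # first resolved IPv4 address

with ThreadPoolExecutor(max_workers=4) as pool:
    print(list(pool.map(resolve, ["localhost"] * 4)))
```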
Tianqi Chen
8f61535b83 Update README.md 2016-05-10 20:14:53 -07:00
Tianqi Chen
b8aec1730c Update README.md 2016-05-10 20:14:29 -07:00
tqchen
e19fced5cb [FIX] rabit on single node 2016-05-10 20:05:59 -07:00
tqchen
849b20b7c8 add distributed checking 2016-04-11 15:43:01 -07:00
tqchen
be50e7b632 Make rabit library thread local 2016-03-01 20:12:51 -08:00
tqchen
aeb4008606 remove connect msg 2016-02-29 16:27:48 -08:00
tqchen
1392e9f3da fix travis 2016-02-29 15:51:36 -08:00
tqchen
225f5258c7 [DMLC] Add dep to dmlc logging 2016-02-29 14:59:44 -08:00
tqchen
56ec4263f9 fix type 2016-02-28 13:19:54 -08:00
tqchen
e3188afbe8 fix 2016-02-28 13:09:18 -08:00
tqchen
c7d53aecc3 add link tag 2016-02-28 09:44:11 -08:00
tqchen
26c87ec6e7 fix test 2016-02-28 09:35:08 -08:00
tqchen
f0f07ecd22 fix 2016-02-27 20:51:00 -08:00
tqchen
e814dc8a4b Fix docstring 2016-02-27 18:13:42 -08:00
tqchen
d45fca0298 fix build 2016-02-27 18:10:58 -08:00
tqchen
7479791f6a refactor: librabit 2016-02-27 10:14:41 -08:00
tqchen
73b6e9bbd0 [TRACKER] remove tracker in rabit, use DMLC 2016-02-27 09:07:40 -08:00
tqchen
112d866dc9 [RABIT] fix rabit in local mode 2016-01-12 21:34:26 -08:00
tqchen
05b958c178 [RABIT] Sync with dmlc 2016-01-09 21:43:29 -08:00
Tianqi Chen
bed63208af Merge pull request #26 from DrAndrey/master
Fix bug with name of sleep function
2015-11-18 09:58:21 -08:00
Andrey
291ab05023 Remove redundant whitespace again 2015-11-18 10:21:03 +03:00
Andrey
de251635b1 Remove redundant whitespace 2015-11-18 00:53:53 +03:00
Andrey
3a6be65a20 Fix bug with name of sleep function 2015-11-17 21:45:52 +03:00
Tianqi Chen
e81a11dd7e Merge pull request #25 from daiyl0320/master
add retry mechanism to ConnectTracker and modify Listen backlog to 128 in rabit_tracker.py
2015-10-20 19:34:01 -07:00
yonglong.dyl
35c3b371ea add retry mechanism to ConnectTracker and modify Listen backlog to 128
in rabit_tracker.py
2015-10-21 10:24:07 +08:00
tqchen
c71ed6fccb try deploy doxygen 2015-08-22 21:37:14 -07:00
tqchen
62e5647a33 try deploy doxygen 2015-08-22 21:33:23 -07:00
tqchen
732f1c634c try 2015-08-21 08:40:55 -07:00
tqchen
2fa6e0245a ok 2015-08-21 08:08:32 -07:00
tqchen
053766503c minor 2015-08-21 08:07:25 -07:00
tqchen
7b59dcb8b8 minor 2015-08-21 07:59:06 -07:00
tqchen
5934950ce2 new doc 2015-08-02 17:54:46 -07:00
tqchen
f5381871a3 ok 2015-08-01 21:40:05 -07:00
tqchen
44b60490f4 new doc 2015-08-01 21:36:09 -07:00
tqchen
387339bf17 add more 2015-07-30 18:16:15 -07:00
tqchen
9d4397aa4a chg 2015-07-30 17:59:16 -07:00
tqchen
2879a4853b chg 2015-07-30 17:58:42 -07:00
tqchen
30e3110170 ok 2015-07-28 23:18:15 -07:00
tqchen
9ff0301515 add link translation 2015-07-28 23:16:48 -07:00
tqchen
6b629c2e81 k 2015-07-27 18:41:17 -07:00
tqchen
32e19558e6 ok 2015-07-27 18:38:22 -07:00
tqchen
8f4839d1d9 fix 2015-07-27 18:34:43 -07:00
tqchen
93137b2e52 ok 2015-07-27 18:34:07 -07:00
tqchen
7eeeb79599 reload recommonmark 2015-07-27 18:33:19 -07:00
tqchen
a8f00cc4a5 minor 2015-07-27 18:16:03 -07:00
tqchen
19b0f019c7 ok 2015-07-27 18:14:01 -07:00
tqchen
dd011849b7 minor 2015-07-27 17:59:22 -07:00
tqchen
c1cdc194e9 minor 2015-07-27 17:50:02 -07:00
tqchen
fcf0f4351a try rst 2015-07-27 17:47:28 -07:00
tqchen
cbc21ae531 try 2015-07-27 17:46:08 -07:00
tqchen
62ddfa7709 tiny 2015-07-26 21:13:35 -07:00
tqchen
aefc05cb91 final change 2015-07-26 21:09:58 -07:00
tqchen
2aee9b4959 minor 2015-07-26 20:57:48 -07:00
tqchen
fe4e7c2b96 ok 2015-07-26 20:56:54 -07:00
tqchen
800198349f change to subtitle 2015-07-26 20:54:10 -07:00
tqchen
5ca33e48ea ok 2015-07-26 20:52:52 -07:00
tqchen
88f7d24de9 update guide 2015-07-26 20:52:34 -07:00
tqchen
29d43ab52f add code 2015-07-26 14:57:24 -07:00
tqchen
fe8bb3b60e minor hack for readthedocs 2015-07-26 14:47:40 -07:00
tqchen
229c71d9b5 Merge branch 'master' of ssh://github.com/dmlc/rabit 2015-07-26 14:46:24 -07:00
tqchen
7424218392 ok 2015-07-26 14:46:16 -07:00
Tianqi Chen
d1d45bbdae Update README.md 2015-07-26 14:43:08 -07:00
Tianqi Chen
1e8813f3bd Update README.md 2015-07-26 14:42:57 -07:00
Tianqi Chen
1ccc9903a1 Update README.md 2015-07-26 14:41:25 -07:00
tqchen
0323e0670e remove readme 2015-07-26 14:22:50 -07:00
tqchen
679a835d38 remove theme 2015-07-26 14:14:19 -07:00
tqchen
7ea5b7c209 switch from numpydoc to napoleon 2015-07-26 14:02:43 -07:00
tqchen
b73e2be55e Merge branch 'master' of ssh://github.com/dmlc/rabit
Conflicts:
	doc/python-requirements.txt
2015-07-26 13:52:31 -07:00
tqchen
174228356d ok 2015-07-26 13:51:56 -07:00
Tianqi Chen
1838e25b8a Update python-requirements.txt 2015-07-26 13:05:52 -07:00
tqchen
bc4e957c39 ok 2015-07-26 13:00:18 -07:00
tqchen
fba6fc208c ok 2015-07-26 12:54:21 -07:00
tqchen
025110185e ok 2015-07-26 12:52:37 -07:00
tqchen
d50b905824 ok 2015-07-26 12:46:19 -07:00
tqchen
d4f2509178 ok 2015-07-26 12:43:49 -07:00
tqchen
cdf401a77c ok 2015-07-26 12:40:21 -07:00
tqchen
fef0ef26f1 new doc 2015-07-26 12:29:18 -07:00
tqchen
cef360d782 ok 2015-07-26 12:15:00 -07:00
tqchen
c125d2a8bb ok 2015-07-26 12:14:54 -07:00
tqchen
270a49ee75 add requirements 2015-07-23 22:22:52 -07:00
tqchen
744f9015bb get the basic doc 2015-07-23 22:14:42 -07:00
tqchen
1cb5cad50c Merge branch 'master' of ssh://github.com/dmlc/rabit 2015-07-03 17:42:00 -07:00
tqchen
8cc07ba391 minor 2015-07-03 17:41:52 -07:00
Tianqi Chen
d74f126592 Update .travis.yml 2015-07-03 15:35:47 -07:00
Tianqi Chen
52b3dcdf07 Update .travis.yml 2015-07-03 15:33:38 -07:00
Tianqi Chen
099581b591 Update .travis.yml 2015-07-03 15:31:43 -07:00
Tianqi Chen
1258046f14 Update .travis.yml 2015-07-03 15:29:30 -07:00
Tianqi Chen
7addac910b Update Makefile 2015-07-03 15:23:26 -07:00
Tianqi Chen
0ea7adff92 Update .travis.yml 2015-07-03 15:21:20 -07:00
Tianqi Chen
f858856586 Update travis_script.sh 2015-07-03 15:20:59 -07:00
Tianqi Chen
d8eac4ae27 Update README.md 2015-07-03 15:17:22 -07:00
tqchen
3cc49ad0e8 lint and travis 2015-07-03 15:15:11 -07:00
tqchen
ceedf4ea96 fix 2015-05-28 12:37:06 -07:00
Tianqi Chen
fd8920c71d fix win32 2015-05-28 12:24:26 -07:00
tqchen
8bbed35736 modify 2015-05-28 10:44:19 -07:00
Tianqi Chen
9520b90c4f Merge pull request #14 from dmlc/hjk41
add kLongLong and kULongLong
2015-05-20 05:38:01 +02:00
Chuntao Hong
df14bb1671 fix type 2015-05-20 11:36:17 +08:00
Chuntao Hong
f441dc7ed8 replace tab with blank space 2015-05-20 11:33:48 +08:00
Chuntao Hong
2467942886 remove unnecessary include 2015-05-20 11:32:16 +08:00
Chuntao Hong
181ef47053 defined long long and ulonglong 2015-05-20 11:27:50 +08:00
Chuntao Hong
1582180e5b use int32_t to define int and int64_t to define long; in VC, long is 32-bit 2015-05-20 10:09:09 +08:00
tqchen
e0b7da0302 fix 2015-05-02 21:47:43 -07:00
tqchen
fa99857467 try fix warning on some platforms 2015-05-01 22:45:11 -07:00
tqchen
24f17df782 ok 2015-04-29 20:23:39 -07:00
tqchen
4fe8d1d66b ok io 2015-04-29 20:21:37 -07:00
tqchen
a5d77ca08d checkin new dmlc interface 2015-04-29 20:17:27 -07:00
tqchen
d1d2ab4599 remove at end 2015-04-28 10:49:44 -07:00
tqchen
e1ddcc2eb7 Merge branch 'master' of ssh://github.com/dmlc/rabit 2015-04-27 15:55:58 -07:00
tqchen
6745667eb0 new dmlc io 2015-04-27 15:55:51 -07:00
tqchen
c5b4610cfe sge scheduler change 2015-04-26 22:08:47 -07:00
tqchen
fed1683b9b minor 2015-04-25 21:24:38 -07:00
Tianqi Chen
c01520f173 change 2015-04-25 21:23:16 -07:00
tqchen
27340f95e4 final minor 2015-04-25 21:19:42 -07:00
Tianqi Chen
e03eabccda allow win32 2015-04-25 21:18:36 -07:00
tqchen
82ca10acb6 better handling at msvc 2015-04-25 20:52:07 -07:00
Tianqi Chen
6601939588 Merge pull request #12 from zjf/patch-2
Update rabit-inl.h
2015-04-23 23:16:49 -07:00
Jianfeng Zhu
df8f917463 Update rabit-inl.h
Fix missing parenthesis
2015-04-24 14:09:47 +08:00
tqchen
c60b284e1f resize during tracker print 2015-04-20 11:37:45 -07:00
tqchen
c67967161e fix io style 2015-04-19 00:21:38 -07:00
tqchen
f52daf9be1 make timer cross platform 2015-04-19 00:01:48 -07:00
tqchen
7568f75f45 new io interface 2015-04-17 20:35:44 -07:00
tqchen
3bf8661ec1 add std before basic 2015-04-13 13:43:34 -07:00
tqchen
18f4d6c0ba remove rabit learn 2015-04-11 20:25:52 -07:00
tqchen
bcfbe51e7e fix dmlc io 2015-04-11 18:16:52 -07:00
tqchen
ad383b084d ok 2015-04-11 17:55:20 -07:00
tqchen
3b8c04a902 Merge branch 'master' of ssh://github.com/dmlc/rabit 2015-04-11 17:35:11 -07:00
tqchen
9dd97cc141 keepup with dmlc core 2015-04-11 17:35:03 -07:00
Ubuntu
ef13aaf379 ch 2015-04-11 05:29:07 +00:00
tqchen
50a66b3855 fix empty engine 2015-04-09 08:44:33 -07:00
tqchen
e08542c635 fix doc 2015-04-08 15:30:56 -07:00
tqchen
e95c96232a remove I prefix from interface, serializable now takes in pointer 2015-04-08 15:25:58 -07:00
tqchen
b15f6cd2ac rabit unifies with dmlc 2015-04-05 09:55:24 -07:00
tqchen
5634ec3008 ok 2015-04-03 22:25:33 -07:00
tqchen
2dd6c2f0c9 Merge branch 'master' of ssh://github.com/dmlc/rabit 2015-03-30 22:18:20 -07:00
tqchen
38d7f999a7 checkin wormhole spliter 2015-03-30 22:18:02 -07:00
Tianqi Chen
8acb96a627 Merge pull request #10 from ryanzz/master
fixed a mistake
2015-03-30 08:46:15 -07:00
ryanzz
911a1f0ce2 fixed a mistake 2015-03-30 16:25:36 +08:00
tqchen
732d8c33d1 interface changing 2015-03-29 22:00:37 -07:00
tqchen
684ea0ad26 interface changing 2015-03-29 22:00:33 -07:00
tqchen
8cb4c02165 add dmlc support 2015-03-28 22:44:10 -07:00
tqchen
be2ff703bc allow adapting wormhole 2015-03-27 17:33:51 -07:00
tqchen
16975b447c try pass on tokens during application submission 2015-03-27 11:04:19 -07:00
tqchen
eb1f4a4003 change auto to ip 2015-03-26 23:26:30 -07:00
tqchen
59e63bc135 minor 2015-03-21 00:38:37 -07:00
tqchen
62330505e1 ok 2015-03-21 00:37:59 -07:00
tqchen
14477f9f5a add namenode 2015-03-21 00:35:30 -07:00
tqchen
75a6d349c6 add libhdfs opts 2015-03-21 00:26:30 -07:00
tqchen
e3c76bfafb minimum fix 2015-03-21 00:25:16 -07:00
tqchen
8b3c435241 chg 2015-03-20 15:11:50 -07:00
tqchen
2035799817 test code 2015-03-20 13:02:46 -07:00
tqchen
7751b2b320 add debug 2015-03-15 23:52:16 -07:00
tqchen
769031375a ok 2015-03-15 23:47:24 -07:00
tqchen
bd346b4844 ok 2015-03-15 23:44:32 -07:00
tqchen
faba1dca6c add testload 2015-03-15 23:42:18 -07:00
tqchen
6f7783e4f6 add testload 2015-03-15 23:42:17 -07:00
tqchen
e5f034040e ok 2015-03-15 23:20:30 -07:00
tqchen
3ed9ec808f chg 2015-03-15 23:19:54 -07:00
tqchen
e552ac401e ask for more ram in am 2015-03-15 23:14:56 -07:00
tqchen
b2505e3d6f only stop nm on success 2015-03-15 23:02:15 -08:00
tqchen
bc696c9273 add queue info 2015-03-15 22:54:09 -07:00
tqchen
f3e867ed97 add option queue 2015-03-15 22:38:51 -07:00
tqchen
5dc843cff3 refactor fileio 2015-03-14 16:46:54 -07:00
tqchen
cd9c81be91 quick fix 2015-03-14 09:20:04 -07:00
tqchen
1e23af2adc add virtual destructor to iseekstream 2015-03-14 00:20:37 -07:00
tqchen
f165ffbc95 fix hdfs 2015-03-13 22:59:04 -07:00
tqchen
8cc650847a allow demo to pass in env 2015-03-13 22:27:36 -07:00
tqchen
fad4d69ee4 ok 2015-03-13 21:38:03 -07:00
tqchen
0fd6197b8b fix more 2015-03-13 21:36:09 -07:00
tqchen
7423837303 fix more 2015-03-13 21:36:08 -07:00
tqchen
d25de54008 add temporary solution, run_yarn_prog.py 2015-03-13 21:13:19 -07:00
tqchen
e5a9e31d13 final attempt 2015-03-13 00:04:51 -07:00
tqchen
ed3bee84c2 add command back 2015-03-12 22:48:30 -07:00
tqchen
07740003b8 add hdfs to resource 2015-03-12 22:43:41 -07:00
tqchen
9b66e7edf2 fix hadoop 2015-03-12 20:57:49 -07:00
tqchen
6812f14886 ok 2015-03-12 09:44:43 -07:00
tqchen
08e1c16dd2 change hadoop prefix back to hadoop home 2015-03-12 09:06:42 -07:00
Tianqi Chen
d6b68286ee Update build.sh 2015-03-12 09:03:02 -07:00
tqchen
146e069000 bugfix: logical boundary for ring buffer 2015-03-11 20:28:34 -07:00
tqchen
19cb685c40 ok 2015-03-12 02:59:50 +00:00
tqchen
4cf3c13750 Merge branch 'master' of ssh://github.com/tqchen/rabit
Conflicts:
	tracker/rabit_tracker.py
2015-03-11 13:35:35 -07:00
tqchen
20daddbeda add tracker 2015-03-11 13:27:23 -07:00
tqchen
c57dad8b17 add ringbased passing and batch schedule 2015-03-11 12:00:19 -07:00
tqchen
295d8a12f1 update 2015-03-10 15:28:10 -07:00
tqchen
994cb02a66 add sge 2015-03-10 15:26:40 -07:00
tqchen
014c86603d OK 2015-03-10 10:51:39 -07:00
tqchen
091634b259 fix 2015-03-09 14:56:01 -07:00
tqchen
d558f6f550 redefine distributed means 2015-03-09 14:43:05 -07:00
tqchen
c8efc01367 more complicated yarn script 2015-03-09 14:36:44 -07:00
tqchen
28ca7becbd add linear readme 2015-03-09 13:12:40 -07:00
tqchen
ca4b20fad1 add linear readme 2015-03-09 13:12:04 -07:00
tqchen
1133628c01 add linear readme 2015-03-09 13:11:17 -07:00
tqchen
6a1167611c update docs 2015-03-09 13:00:34 -07:00
Tianqi Chen
a607047aa1 Update build.sh 2015-03-08 23:55:42 -07:00
tqchen
2c1cfd8be6 complete yarn 2015-03-08 23:51:42 -07:00
tqchen
4f28e32ebd change formatter 2015-03-08 12:29:07 -07:00
tqchen
2fbda812bc fix stdin input 2015-03-08 12:22:11 -07:00
tqchen
3258bcf531 checkin yarn master 2015-03-08 11:03:13 -07:00
tqchen
67ebf81e7a allow setup from env variables 2015-03-07 16:45:31 -08:00
tqchen
9b6bf57e79 fix hdfs 2015-03-07 09:08:21 -08:00
tqchen
395d5c29d5 add make system 2015-03-06 22:30:23 -08:00
tqchen
88ce76767e refactor io, initial hdfs file access need test 2015-03-06 22:17:27 -08:00
tqchen
19be870562 chgs 2015-03-06 21:12:04 -08:00
tqchen
a1bd3c64f0 Merge branch 'master' of ssh://github.com/tqchen/rabit 2015-03-06 21:09:59 -08:00
tqchen
1a573f987b introduce input split 2015-03-06 21:08:04 -08:00
tqchen
29476f1c6b fix timer issue 2015-03-06 20:59:10 -08:00
tqchen
d4ec037f2e fix rabit 2015-03-03 13:12:05 -08:00
tqchen
6612fcf36c Merge branch 'master' of ssh://github.com/tqchen/rabit 2015-03-02 16:10:15 -08:00
tqchen
d29892cb22 add mock option statis 2015-03-02 16:10:08 -08:00
tqchen
4fa054e26e new tracker 2015-03-02 07:32:25 +00:00
tqchen
75c647cd84 update tracker for host IP 2015-03-01 23:27:59 -08:00
tqchen
e4ce8efab5 add hadoop linear example 2015-03-02 04:36:48 +00:00
tqchen
76ecb4a031 add hadoop linear example 2015-03-02 04:35:56 +00:00
Ubuntu
2e1c4c945e add hadoop linear example 2015-03-02 04:35:01 +00:00
tqchen
4db0a62a06 bugfix of lazy prepare 2015-02-11 20:31:46 -08:00
tqchen
87017bd4cd license 2015-02-11 14:49:51 -08:00
tqchen
dc703e1b62 license 2015-02-11 14:48:59 -08:00
tqchen
c171440324 change license to bsd 2015-02-11 14:44:26 -08:00
Tianqi Chen
7db2070598 Update README.md 2015-02-09 20:53:29 -08:00
tqchen
581fe06a9b add mocktest 2015-02-09 20:46:38 -08:00
tqchen
d2f252f87a ok 2015-02-09 20:35:30 -08:00
tqchen
4a5b9e5f78 add all 2015-02-09 20:26:39 -08:00
tqchen
12ee049a74 init version of lbfgs 2015-02-09 17:44:32 -08:00
tqchen
37a28376bb complete lbfgs solver 2015-02-09 11:04:19 -08:00
tqchen
6ade7cba94 complete lbfgs 2015-02-08 23:08:59 -08:00
tqchen
1bb8fe9615 chg makefile 2015-01-30 16:46:10 -08:00
tqchen
fb13cab216 change makefile 2015-01-30 16:30:45 -08:00
Tyler
1479e370f8 fixed small bug in mpi submission script 2015-01-25 00:12:46 -08:00
Tianqi Chen
0ca7a63670 Update README.md 2015-01-22 09:16:46 -08:00
tqchen
5ef4830b55 ok 2015-01-20 20:30:22 -08:00
tqchen
93a13381c1 chg note 2015-01-20 20:27:43 -08:00
tqchen
4ebe657dd7 fix in cxx11 2015-01-19 21:37:02 -08:00
tqchen
85b746394e change def of reducer to take function ptr 2015-01-19 21:24:52 -08:00
tqchen
fe6366eb40 add engine base 2015-01-19 19:11:15 -08:00
Tianqi Chen
a98720ebc9 more deps 2015-01-19 08:20:43 -08:00
tqchen
1db6449b01 remove include in -I, make things easier to direct compile 2015-01-18 21:30:19 -08:00
tqchen
c7282acb2a doc 2015-01-18 19:55:04 -08:00
tqchen
f332750359 minor fix 2015-01-18 18:17:41 -08:00
tqchen
9edb3b306f update doc 2015-01-18 18:14:20 -08:00
tqchen
c46120a46b add win32 ver 2015-01-16 21:10:47 -08:00
Tianqi Chen
537497f520 changes 2015-01-16 21:10:01 -08:00
Tianqi Chen
56a80f431b check in windows solutions, pass small test in windows 2015-01-16 20:56:34 -08:00
tqchen
774d501c1f add languages 2015-01-16 11:13:27 -08:00
tqchen
7396c87249 chg 2015-01-16 10:53:31 -08:00
tqchen
c7533f92bb design goal 2015-01-16 10:50:05 -08:00
tqchen
38b7fec37a ok 2015-01-16 10:46:55 -08:00
tqchen
c798fc2a29 change toolkit to rabitlearn 2015-01-16 10:45:54 -08:00
tqchen
f5245c615c ok 2015-01-16 10:12:47 -08:00
nachocano
aebb7998a3 updating doc 2015-01-16 00:45:04 -08:00
nachocano
b87da8fe9a small typo 2015-01-15 10:52:39 -08:00
tqchen
1f35478b82 chg docs 2015-01-15 10:29:32 -08:00
tqchen
6d5ac6446c chg test folder 2015-01-15 10:24:58 -08:00
tqchen
8f23eb11d7 change convention 2015-01-15 10:22:59 -08:00
tqchen
0617281863 phrase python as a lib 2015-01-15 10:09:14 -08:00
nachocano
7d67f6f26d removing section 2015-01-15 01:24:04 -08:00
nachocano
34c8253ad6 Merge branch 'master' of https://github.com/tqchen/allreduce 2015-01-15 01:22:21 -08:00
nachocano
86e61ad6a5 adding changes suggested by Tianqi 2015-01-15 01:21:40 -08:00
tqchen
6dbaddd2b9 ok 2015-01-14 22:11:00 -08:00
tqchen
a7faac2f09 ok 2015-01-14 21:59:45 -08:00
tqchen
f161d2f1e5 fix bug in initialization of routing 2015-01-14 19:40:41 -08:00
tqchen
797fe27efe struct return type version 2015-01-14 15:43:28 -08:00
tqchen
a57c5c5425 add more error reporting when things go wrong, needs review 2015-01-14 15:32:36 -08:00
tqchen
968b33ec79 set all tracker threads to daemon 2015-01-14 12:05:00 -08:00
tqchen
87c7817124 add lazy check, need test, find a race condition 2015-01-14 11:58:43 -08:00
Tianqi Chen
bddfa2fc24 Merge pull request #7 from lqhl/master
update the fault tolerance section
2015-01-14 10:09:04 -08:00
Tianqi Chen
d05df9836b Merge pull request #8 from cblsjtu/master
correct a mistake
2015-01-14 10:07:17 -08:00
Boliang Chen
2f2e481fc3 correct a mistake 2015-01-14 20:53:34 +08:00
Qin Liu
1dda51f1fa update the fault tolerance section 2015-01-14 17:07:30 +08:00
tqchen
348a1e7619 change default behavior to behave normal 2015-01-13 22:21:15 -08:00
tqchen
478d250818 minor change 2015-01-13 20:01:15 -08:00
tqchen
532575b752 ok 2015-01-13 14:41:37 -08:00
tqchen
c127f9650c Merge branch 'master' of ssh://github.com/tqchen/rabit 2015-01-13 14:29:20 -08:00
tqchen
3419cf9aa7 add auto caching of python in hadoop script, mock test module to python, with checkpt 2015-01-13 14:29:10 -08:00
tqchen
877fc42e40 add data 2015-01-13 12:51:55 -08:00
nachocano
f79e5fc041 adding more stuff 2015-01-13 01:00:58 -08:00
nachocano
95c6d7398f adding more stuff 2015-01-13 00:59:20 -08:00
nachocano
5c7967e863 adding link 2015-01-13 00:49:57 -08:00
nachocano
54e2f7e90d adding wrapper section 2015-01-13 00:48:37 -08:00
nachocano
48c42bf189 fixing stuff 2015-01-13 00:18:46 -08:00
nachocano
92c94176c1 adding some changes to kmeans 2015-01-13 00:13:05 -08:00
tqchen
15e085cd32 basic allreduce lib ready 2015-01-12 22:59:36 -08:00
tqchen
2d72c853df checkin broadcast python module 2015-01-12 22:32:13 -08:00
tqchen
9a4a81f100 add wrapper 2015-01-12 21:33:01 -08:00
tqchen
61626aaf85 add more data types 2015-01-12 20:45:07 -08:00
tqchen
5a457d69fc Merge branch 'master' of ssh://github.com/tqchen/rabit
Conflicts:
	tracker/rabit_hadoop.py
2015-01-12 12:03:00 -08:00
tqchen
7572794add add stacklevel for rabit 2015-01-12 12:02:28 -08:00
Tianqi Chen
60a10b3322 Merge pull request #6 from cblsjtu/master
modify some explanation
2015-01-12 08:55:41 -08:00
Boliang Chen
ec3fd9bd2a modify some explanation 2015-01-12 23:46:20 +08:00
Boliang Chen
34cde09b2b modify some explanation 2015-01-12 23:41:45 +08:00
nachocano
8dd94461e1 guide 2015-01-12 00:24:46 -08:00
nachocano
9e04ab62fb adding breaks 2015-01-12 00:23:42 -08:00
nachocano
9907bafa1d fix 2015-01-12 00:20:43 -08:00
nachocano
30f3971bee adding more description to toolkit 2015-01-12 00:14:40 -08:00
tqchen
6b651176a3 yarn is part of hadoop script 2015-01-11 21:28:13 -08:00
tqchen
a120edc56e shorter 2015-01-11 11:48:08 -08:00
tqchen
5146409a1d simpler 2015-01-11 11:47:37 -08:00
tqchen
db2ebf7410 use unified script, auto detect hadoop version 2015-01-11 11:46:12 -08:00
tqchen
bfc3f61010 minor 2015-01-11 11:15:12 -08:00
tqchen
78bfe867e6 unify hadoop and yarn script 2015-01-11 11:13:02 -08:00
Tianqi Chen
03dca6d6b3 Merge pull request #5 from EricChenDM/master
add yarn script
2015-01-11 10:41:08 -08:00
chenshuaihua
b2dec95862 yarn script 2015-01-12 00:09:00 +08:00
chenshuaihua
26b5fdac40 yarn script 2015-01-11 23:54:31 +08:00
chenshuaihua
00323f462a yarn script 2015-01-11 23:32:14 +08:00
chenshuaihua
981f69ff55 yarn script 2015-01-11 23:23:58 +08:00
chenshuaihua
5e843cfbbd yarn script 2015-01-11 23:22:26 +08:00
chenshuaihua
b5ac85f103 yarn script 2015-01-11 23:19:04 +08:00
chenshuaihua
d81fb6a9e6 test 2015-01-11 21:59:38 +08:00
nachocano
d269cb9c50 guide stuff 2015-01-11 01:43:32 -08:00
nachocano
2d97833f48 slightly change 2015-01-11 01:35:04 -08:00
nachocano
eef79067a8 more cosmetic stuff 2015-01-11 01:31:10 -08:00
nachocano
aea4c10847 cosmetic changes to tutorial 2015-01-11 01:07:51 -08:00
Tianqi Chen
7eb4258951 Merge pull request #4 from cblsjtu/master
explain time out
2015-01-10 23:43:01 -08:00
Boliang Chen
c6d0be57d4 explain timeout 2015-01-11 15:39:50 +08:00
Boliang Chen
80b0d06b7e merge from tqchen 2015-01-11 14:56:20 +08:00
Boliang Chen
8685b740cc Merge remote-tracking branch 'tqchen/master' 2015-01-11 14:53:10 +08:00
Boliang Chen
7fa23f2d2f modify default jobname 2015-01-11 14:52:48 +08:00
tqchen
ed264002a0 Merge branch 'master' of ssh://github.com/tqchen/rabit
Conflicts:
	tracker/rabit_hadoop.py
2015-01-10 22:50:38 -08:00
tqchen
2e3361f0e0 fix -f 2015-01-10 22:49:56 -08:00
Boliang Chen
363994f29d Merge remote-tracking branch 'tqchen/master' 2015-01-11 13:46:32 +08:00
Boliang Chen
3f4bf96c5d temp 2015-01-11 13:46:18 +08:00
tqchen
0100fdd18d auto jobname 2015-01-10 21:21:39 -08:00
cblsjtu
c0f85c681e Merge pull request #1 from tqchen/master
merge from tqchen
2015-01-11 11:00:08 +08:00
tqchen
43c129f431 chg script 2015-01-10 17:49:09 -08:00
tqchen
500a57697d chg script 2015-01-10 17:45:53 -08:00
tqchen
c2ab64afe3 fix comment 2015-01-10 10:01:31 -08:00
tqchen
6b30fb2bea update cache script 2015-01-10 09:58:10 -08:00
Tianqi Chen
9d34d2e036 Merge pull request #1 from cblsjtu/master
fix several bugs
2015-01-10 09:29:55 -08:00
Boliang Chen
76c15dffde remove blank 2015-01-11 00:16:05 +08:00
Boliang Chen
d986693fbd fix bugs 2015-01-11 00:14:37 +08:00
Boliang Chen
7f5cb3aa0e modify hs 2015-01-10 10:58:53 +08:00
Boliang Chen
697a01bfb4 har -> jar 2015-01-10 10:54:12 +08:00
tqchen
1b4921977f update doc 2015-01-03 05:20:18 -08:00
tqchen
be355c1e60 minor 2015-01-01 06:06:55 -08:00
tqchen
d10a435d64 correct 2015-01-01 06:06:02 -08:00
tqchen
eb2b086b65 ok 2015-01-01 06:04:02 -08:00
tqchen
08ca3b0849 add more links 2015-01-01 06:02:32 -08:00
tqchen
61f21859d9 add api 2015-01-01 05:57:46 -08:00
tqchen
2bfbbfb381 checkin API doc 2015-01-01 05:48:34 -08:00
tqchen
31a3d22af4 add broadcast 2015-01-01 05:42:38 -08:00
tqchen
90a8505208 update guide 2015-01-01 05:42:03 -08:00
tqchen
06206e1d03 start checkin guides 2014-12-30 06:22:54 -08:00
tqchen
bfb9aa3d77 add native script 2014-12-30 04:37:50 -08:00
tqchen
1bcea65117 change nslave to nworker 2014-12-29 18:44:30 -08:00
tqchen
bdfa1a0220 change nslave to nworker 2014-12-29 18:42:24 -08:00
tqchen
39504825d8 add kmeans example 2014-12-29 18:32:56 -08:00
tqchen
76abd80cb7 change indentation 2014-12-29 18:17:20 -08:00
tqchen
b1340bf310 add auto cache 2014-12-29 06:50:17 -08:00
tqchen
c731e82fae add command 2014-12-29 06:37:07 -08:00
tqchen
491716c418 chg 2014-12-29 06:21:34 -08:00
tqchen
d64d0ef1dc cleanup submission script 2014-12-29 06:11:58 -08:00
tqchen
27d6977a3e cpplint pass 2014-12-28 05:12:07 -08:00
tqchen
15836eb98e add task id 2014-12-22 04:17:23 -08:00
tqchen
0dd51d5dd0 add attempt id for hadoop 2014-12-22 04:12:38 -08:00
tqchen
6e6031cbe9 add mock 2014-12-22 03:59:01 -08:00
tqchen
d82a6ed811 add file command 2014-12-22 03:48:14 -08:00
tqchen
ab7492dbc2 add support for yarn 2014-12-22 03:24:00 -08:00
tqchen
d3433c5946 change script 2014-12-22 01:54:11 -08:00
tqchen
975bcc8261 fix 2014-12-22 01:26:59 -08:00
tqchen
dd8d9646c4 rm mpi dep 2014-12-22 01:25:06 -08:00
tqchen
bb2ecc6ad5 remove c++11 2014-12-22 01:10:14 -08:00
tqchen
7a2ae105ea fix script 2014-12-22 01:03:12 -08:00
tqchen
fd533d9a76 add kmeans 2014-12-22 00:32:08 -08:00
tqchen
5fe3c58b4a add kmeans hadoop 2014-12-22 00:31:01 -08:00
tqchen
dcb6e22a9e add mapred tasks 2014-12-22 00:20:13 -08:00
tqchen
12399a1d42 add more mocktest 2014-12-21 17:59:12 -08:00
tqchen
a624051b85 add keepalive to socket, fix recover problem when a node is requester and pass data 2014-12-21 17:55:08 -08:00
tqchen
cfea4dbe85 fix rabit for single node without initialization 2014-12-21 04:35:32 -08:00
tqchen
e40047f9c2 new mock test 2014-12-20 18:38:54 -08:00
tqchen
10bb407a2c add mock engine 2014-12-20 18:31:33 -08:00
tqchen
ecf91ee081 change usage 2014-12-20 16:54:15 -08:00
tqchen
925d014271 change file structure 2014-12-20 16:19:54 -08:00
tqchen
77d74f6c0d fix bug in lambda allreduce 2014-12-20 05:04:16 -08:00
tqchen
5570e7ceae add complex types 2014-12-19 21:12:10 -08:00
tqchen
e72a869fd1 add complex reducer in 2014-12-19 20:57:53 -08:00
tqchen
2c0a0671ad skip actions when there is only 1 node 2014-12-19 19:21:21 -08:00
tqchen
6151899ce2 add tracker print 2014-12-19 18:40:06 -08:00
tqchen
6bf282c6c2 isolate iserializable 2014-12-19 17:36:42 -08:00
tqchen
8c35cff02c improve script 2014-12-19 04:21:16 -08:00
tqchen
9f42b78a18 improve tracker script 2014-12-19 04:20:45 -08:00
tqchen
69d7f71ae8 change kmeans to using lambda 2014-12-19 02:12:53 -08:00
tqchen
1754fdbf4e enable support for lambda preprocessing function, and c++11 2014-12-19 02:00:43 -08:00
tqchen
58331067f8 cleanup testcases 2014-12-18 23:50:59 -08:00
tqchen
aa2cb38543 ResetLink still not ok 2014-12-18 21:45:38 -08:00
tqchen
6b18ee9edb Merge branch 'master' of ssh://github.com/tqchen/rabit 2014-12-18 19:02:05 -08:00
tqchen
c8faed0b54 pass local model recover test 2014-12-18 18:53:58 -08:00
tqchen
dbd05a65b5 nice fix, start check local check 2014-12-18 18:39:24 -08:00
Tianqi Chen
31403a41cd Update rabit.h 2014-12-09 21:03:41 -08:00
tqchen
3f22596e3c check in license 2014-12-09 20:57:54 -08:00
tqchen
cc5efb8d81 Merge branch 'master' of ssh://github.com/tqchen/rabit 2014-12-09 20:56:33 -08:00
root
5aff7fab29 adding : 2014-12-08 17:15:49 +00:00
root
dfb3961eea changing port 2014-12-08 17:13:42 +00:00
Tianqi Chen
39f2dcdfef Update rabit_tracker.py 2014-12-08 08:36:55 -08:00
tqchen
2750679270 normal state running ok 2014-12-07 20:57:29 -08:00
tqchen
b38fa40fa6 fix ring passing 2014-12-07 20:25:42 -08:00
tqchen
8d570b54c7 add code to help link reuse, start test numreplica 2014-12-07 16:22:02 -08:00
tqchen
e2adce1cc1 add ring setup version 2014-12-07 16:09:28 -08:00
tqchen
322e40c72e Merge branch 'master' of ssh://github.com/tqchen/rabit 2014-12-06 23:00:18 -08:00
tqchen
328cf187ba check in the ring passing 2014-12-06 23:00:10 -08:00
nachocano
20b03e781c to run all executables 2014-12-06 15:37:09 -08:00
nachocano
fcf2f0a03d to stderr 2014-12-06 15:22:29 -08:00
nachocano
cd8ab469ff Merge branch 'master' of https://github.com/tqchen/allreduce 2014-12-06 15:14:19 -08:00
nachocano
659b9cd517 changing number of repetitions 2014-12-06 15:14:14 -08:00
root
52d472c209 using hostfile 2014-12-06 20:30:35 +00:00
nachocano
9ed59e71f6 speed runner 2014-12-06 12:09:40 -08:00
nachocano
e0053c62e1 adding executable 2014-12-06 12:05:08 -08:00
nachocano
8f0d7d1d3e changing to -ho not to conflict with help 2014-12-06 12:01:05 -08:00
nachocano
771891491c Merge branch 'master' of https://github.com/tqchen/allreduce 2014-12-06 11:59:22 -08:00
nachocano
f203d13efc speed runner 2014-12-06 11:59:16 -08:00
nachocano
14e400226a submit mpi to include machine file 2014-12-06 11:33:05 -08:00
tqchen
58f80c5675 Merge branch 'master' of ssh://github.com/tqchen/rabit 2014-12-06 11:25:18 -08:00
tqchen
4a7d84e861 chg string bcast 2014-12-06 11:25:08 -08:00
tqchen
1519f74f3c ok 2014-12-06 11:20:52 -08:00
tqchen
0e012cb05e add speed test 2014-12-06 11:05:24 -08:00
tqchen
19631ecef6 more tracker renaming 2014-12-06 09:24:12 -08:00
tqchen
a569bf2698 change gitignore 2014-12-06 09:19:08 -08:00
tqchen
dc12958fc7 rename master to tracker, to emphasize rabit is p2p in computing 2014-12-06 09:15:31 -08:00
nachocano
67b68ceae6 adding timing 2014-12-05 16:00:47 -08:00
nachocano
54eb5623cb worked on my machine !!! finally 2014-12-05 15:24:00 -08:00
nachocano
d9c22e54de closer, but still does not work... stays in map 100%. I think an exception is being thrown 2014-12-05 13:28:42 -08:00
tqchen
7765e2dc55 add status report 2014-12-05 09:49:26 -08:00
tqchen
ab278513ab ok 2014-12-05 09:39:51 -08:00
Tianqi Chen
e7a22792ac Update submit_job_hadoop.py 2014-12-05 09:14:44 -08:00
Tianqi Chen
e05098cacb Update submit_job_hadoop.py 2014-12-05 09:10:26 -08:00
Tianqi Chen
f9e95ab522 Update submit_job_hadoop.py 2014-12-05 09:09:20 -08:00
nachocano
bb7d6814a7 creating initial version of hadoop submit script. Not working.
Not sure how to get the master uri and port. I believe I cannot do it before I launch the job.

Updating the name from submit_job to submit_job_mpi
2014-12-05 03:27:02 -08:00
nachocano
e00fb99e7b cosmetic 2014-12-04 19:02:11 -08:00
nachocano
e9a3f5169e cosmetic changes 2014-12-04 18:02:07 -08:00
tqchen
1af3e81ada chg robust to reliable 2014-12-04 17:32:22 -08:00
tqchen
7cd5474f1a chg interface 2014-12-04 17:31:40 -08:00
tqchen
821eb21ae2 before make rabit public 2014-12-04 17:30:58 -08:00
tqchen
cc410b8c90 add local model in checkpoint interface, a new goal 2014-12-04 11:09:15 -08:00
tqchen
79e7862583 change note 2014-12-04 09:09:56 -08:00
tqchen
f9d634ce06 change notes 2014-12-04 09:09:29 -08:00
tqchen
65a1cdf8e5 remove doc from main repo 2014-12-04 09:07:36 -08:00
tqchen
67229fd7a9 change model 2014-12-04 09:05:48 -08:00
tqchen
3033177e9e ok 2014-12-03 22:36:16 -08:00
tqchen
656a8fa3a2 ok 2014-12-03 22:32:30 -08:00
tqchen
0e9b64649a ok 2014-12-03 22:30:23 -08:00
tqchen
9da3c6c573 Merge branch 'master' of ssh://github.com/tqchen/rabit 2014-12-03 22:28:59 -08:00
tqchen
09a1305628 chg readme 2014-12-03 22:27:52 -08:00
nachocano
7d314fef78 open for writing 2014-12-03 21:58:58 -08:00
nachocano
dece767084 Revert "open for writing"
This reverts commit 63bf9c7995.
2014-12-03 21:58:33 -08:00
nachocano
63bf9c7995 open for writing 2014-12-03 21:58:17 -08:00
tqchen
1c76483b4b ok 2014-12-03 21:53:34 -08:00
tqchen
9abe6ad4d8 checkin makefile 2014-12-03 21:30:11 -08:00
tqchen
8175df1002 bug fix in kmeans 2014-12-03 20:05:16 -08:00
tqchen
a1a1a8895e add kmeans 2014-12-03 18:23:58 -08:00
tqchen
69af79d45d sparse kmeans 2014-12-03 18:15:28 -08:00
nachocano
e3a95b2d1a Merge branch 'master' of https://github.com/tqchen/allreduce 2014-12-03 15:39:05 -08:00
nachocano
5c23b94069 updating kmeans based on Tianqi feedback. More efficient now 2014-12-03 15:38:58 -08:00
tqchen
85bb6cd027 Merge branch 'master' of ssh://github.com/tqchen/rabit 2014-12-03 15:13:09 -08:00
tqchen
90b9f1a98a add keepalive script 2014-12-03 15:04:30 -08:00
nachocano
55c2a5dc83 Merge branch 'master' of https://github.com/tqchen/allreduce 2014-12-03 14:21:42 -08:00
nachocano
1d0d5bb141 kmeans seems to be working.. not restarting anything though 2014-12-03 14:21:10 -08:00
tqchen
7a983a4079 add keepalive 2014-12-03 13:21:30 -08:00
tqchen
2523288509 basic recovery works 2014-12-03 12:19:08 -08:00
tqchen
8a6768763d bug fixed ver 2014-12-03 11:51:39 -08:00
tqchen
a186f8c3aa ok 2014-12-03 11:19:43 -08:00
tqchen
ceeb6f0690 bug version, check in and rollback 2014-12-03 11:17:39 -08:00
tqchen
f3e5b6e13c ok 2014-12-03 10:00:47 -08:00
tqchen
34f2f887b1 add more broadcast and basic broadcast 2014-12-03 09:59:13 -08:00
nachocano
20b51cc9ce cleaner 2014-12-03 01:44:34 -08:00
nachocano
56aad86231 adding incomplete kmeans.
I'm having a problem with the broadcast, and still need to implement the logic
2014-12-03 01:16:13 -08:00
tqchen
ed1de6df80 change AllReduce to Allreduce 2014-12-02 21:11:48 -08:00
nachocano
8cb5b68cb6 Merge branch 'master' of https://github.com/tqchen/allreduce 2014-12-02 11:28:27 -08:00
nachocano
e4abca9494 changing report folder to doc 2014-12-02 11:28:20 -08:00
tqchen
0a3300d773 rabit run on MPI 2014-12-02 11:20:19 -08:00
nachocano
2fab05c83e adding some design goals. 2014-12-02 11:07:07 -08:00
nachocano
40f7ee1cab adding simple image 2014-12-02 01:49:54 -08:00
nachocano
2c166d7a3a adding some initial skeleton of the report. 2014-12-02 01:19:36 -08:00
tqchen
dcea64c838 check in model recover 2014-12-01 21:41:37 -08:00
tqchen
255218a2f3 change in interface, seems resetlink is still bad 2014-12-01 21:39:51 -08:00
tqchen
b76cd5858c seems ok version 2014-12-01 20:18:25 -08:00
tqchen
46b5d46111 fix one bug, another comes 2014-12-01 19:53:41 -08:00
tqchen
993ff8bb91 find one bug, continue to next one 2014-12-01 19:34:27 -08:00
tqchen
2cde04867f Merge branch 'master' of ssh://github.com/tqchen/rabit 2014-12-01 16:57:33 -08:00
tqchen
337840d29b recover not yet working 2014-12-01 16:57:26 -08:00
Tianqi Chen
fd2c57b8a4 Update engine_robust.cc 2014-12-01 15:32:57 -08:00
tqchen
1c5167d96e rabit seems ready to run 2014-12-01 10:32:30 -08:00
Tianqi Chen
0d63646015 Update README.md 2014-12-01 10:04:10 -08:00
Tianqi Chen
b5367f48f6 Update README.md 2014-12-01 10:03:45 -08:00
Tianqi Chen
62c8ce9657 Update README.md 2014-12-01 10:03:31 -08:00
tqchen
eb2ca06d67 fresh name fresh start 2014-12-01 09:17:05 -08:00
tqchen
16f729115e checkin allreduce recover 2014-11-30 22:41:04 -08:00
tqchen
9355f5faf2 more conservative exception watching 2014-11-30 21:39:22 -08:00
tqchen
8cef2086f5 smarter select for allreduce and bcast 2014-11-30 21:31:45 -08:00
tqchen
f7928c68a3 next round try more careful select design 2014-11-30 21:07:34 -08:00
tqchen
ecb09a23bc add recover data, do a round of review 2014-11-30 20:59:55 -08:00
tqchen
b9b58a1275 bugfix in decide 2014-11-30 17:48:30 -08:00
tqchen
4a6c01c83c minor change in decide 2014-11-30 17:48:02 -08:00
tqchen
27f6f8ea9e bugfix in msg passing 2014-11-30 17:42:18 -08:00
tqchen
d8d648549f finish message passing, do a review on msg passing and decide 2014-11-30 17:40:30 -08:00
tqchen
38cd595235 check in message passing 2014-11-30 16:38:47 -08:00
tqchen
7a60cb7f3e checkin decide request, todo message passing 2014-11-30 16:37:26 -08:00
tqchen
68f13cd739 tight 2014-11-30 11:46:21 -08:00
tqchen
d1ce3c697c inline 2014-11-30 11:45:50 -08:00
tqchen
2e536eda29 check in the recover strategy 2014-11-30 11:42:59 -08:00
tqchen
155ed3a814 seems an OK version of reset, start to work on decide exec 2014-11-29 22:22:51 -08:00
tqchen
5b0bb53184 refactor code style, reset link still need thoughts 2014-11-29 20:15:27 -08:00
tqchen
42505f473d finish reset link log 2014-11-29 15:14:43 -08:00
tqchen
98756c068a livelock in oob send recv 2014-11-28 21:58:15 -08:00
tqchen
aa54a038f2 livelock in oob send recv 2014-11-28 21:56:58 -08:00
tqchen
a30075794b initial version of robust engine, add discard link, need more random mock test, next milestone will be recovery 2014-11-28 15:56:12 -08:00
nachocano
a8128493c2 execute it like this: ./test.sh 4 4000 testcase0.conf ./
Now we are passing the folder where the round instances are saved.
The problem is that calling utils::Check or utils::Assert on 1 or 2 nodes shuts down all of them. Only those should be shut down and this will work. There may be some other mechanism to shut down a particular node. Tianqi?
2014-11-28 01:48:26 -08:00
nachocano
faed8285cd execute it like ./test.sh 4 4000 testcase0.conf to obtain a successful execution
Updating mock. It now wraps the calls to sync and reads config from a configuration file.
I believe it's better not to use the preprocessor directive, i.e. not to put any test code in engine_tcp. I just call the mock in the test_allreduce file. It's a file purely for testing purposes, so it's fine to use the mock there.
2014-11-28 00:16:35 -08:00
nachocano
21f3f3eec4 adding const to variables to comply with Google code convention...
may need to change more stuff though. Tianqi, what else do you mean? Spaces, tabs, names?
2014-11-27 17:03:31 -08:00
tqchen
2f1ba40786 change in socket, to pass out error code 2014-11-27 16:17:07 -08:00
nachocano
c565104491 adding some references to mock inside the TEST preprocessor directive.
It shouldn't be an assert because it shuts down the process. Instead it should check the value and return some sort of error, so that we can recover.
The mock contains queues, indexed by the rank of the process. For each node, you can configure the behavior you expect (success or failure for now) when you call any of the methods (AllReduce, Broadcast, LoadCheckPoint and CheckPoint)... If you call AllReduce several times, the outputs will pop from the queue, i.e., first you can retrieve a success, then a failure and so on.
Pretty basic for now; needs more tuning.
2014-11-26 17:24:29 -08:00
nachocano
54fcff189f dummy mock for now 2014-11-26 16:37:23 -08:00
tqchen
d37f38c455 initial version of allreduce 2014-11-25 16:15:56 -08:00
Tianqi Chen
5e5bdda491 Initial commit 2014-11-25 14:37:18 -08:00
725 changed files with 53064 additions and 17619 deletions

2
.github/FUNDING.yml vendored Normal file

@@ -0,0 +1,2 @@
open_collective: xgboost
custom: https://xgboost.ai/sponsors

356
.github/workflows/main.yml vendored Normal file

@@ -0,0 +1,356 @@
# This is a basic workflow to help you get started with Actions
name: XGBoost-CI
# Controls when the action will run. Triggers the workflow on push or pull request
# events but only for the master branch
on: [push, pull_request]
env:
R_PACKAGES: c('XML', 'igraph', 'data.table', 'magrittr', 'ggplot2', 'DiagrammeR', 'Ckmeans.1d.dp', 'vcd', 'testthat', 'lintr', 'knitr', 'rmarkdown', 'e1071', 'cplm', 'devtools', 'float', 'titanic')
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
gtest-cpu:
name: Test Google C++ test (CPU)
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [macos-10.15]
steps:
- uses: actions/checkout@v2
with:
submodules: 'true'
- name: Install system packages
run: |
brew install lz4 ninja libomp
- name: Build gtest binary
run: |
mkdir build
cd build
cmake .. -DGOOGLE_TEST=ON -DUSE_OPENMP=ON -DUSE_DMLC_GTEST=ON -DPLUGIN_LZ4=ON -DPLUGIN_DENSE_PARSER=ON -GNinja
ninja -v
- name: Run gtest binary
run: |
cd build
ctest --extra-verbose
gtest-cpu-nonomp:
name: Test Google C++ unittest (CPU Non-OMP)
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v2
with:
submodules: 'true'
- name: Install system packages
run: |
sudo apt-get install -y --no-install-recommends ninja-build
- name: Build and install XGBoost
shell: bash -l {0}
run: |
mkdir build
cd build
cmake .. -GNinja -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON -DUSE_OPENMP=OFF
ninja -v
- name: Run gtest binary
run: |
cd build
ctest --extra-verbose
c-api-demo:
name: Test installing XGBoost lib + building the C API demo
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: ["ubuntu-latest"]
python-version: ["3.8"]
steps:
- uses: actions/checkout@v2
with:
submodules: 'true'
- name: Install system packages
run: |
sudo apt-get install -y --no-install-recommends ninja-build
- uses: conda-incubator/setup-miniconda@v2
with:
auto-update-conda: true
python-version: ${{ matrix.python-version }}
- name: Display Conda env
shell: bash -l {0}
run: |
conda info
conda list
- name: Build and install XGBoost
shell: bash -l {0}
run: |
mkdir build
cd build
cmake .. -DBUILD_STATIC_LIB=ON -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX -GNinja
ninja -v install
- name: Build and run C API demo
shell: bash -l {0}
run: |
cd demo/c-api/
mkdir build
cd build
cmake .. -GNinja -DCMAKE_PREFIX_PATH=$CONDA_PREFIX
ninja -v
cd ..
./build/api-demo
test-with-jvm:
name: Test JVM on OS ${{ matrix.os }}
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [windows-latest, ubuntu-latest]
steps:
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: actions/setup-java@v1
with:
java-version: 1.8
- name: Cache Maven packages
uses: actions/cache@v2
with:
path: ~/.m2
key: ${{ runner.os }}-m2-${{ hashFiles('./jvm-packages/pom.xml') }}
restore-keys: ${{ runner.os }}-m2
- name: Test XGBoost4J
run: |
cd jvm-packages
mvn test -B -pl :xgboost4j_2.12
- name: Test XGBoost4J-Spark
run: |
rm -rfv build/
cd jvm-packages
mvn -B test
if: matrix.os == 'ubuntu-latest' # Distributed training doesn't work on Windows
env:
RABIT_MOCK: ON
lint:
runs-on: ubuntu-latest
name: Code linting for Python and C++
steps:
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: actions/setup-python@v2
with:
python-version: '3.7'
architecture: 'x64'
- name: Install Python packages
run: |
python -m pip install wheel setuptools
python -m pip install pylint cpplint numpy scipy scikit-learn
- name: Run lint
run: |
make lint
doxygen:
runs-on: ubuntu-latest
name: Generate C/C++ API doc using Doxygen
steps:
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: actions/setup-python@v2
with:
python-version: '3.7'
architecture: 'x64'
- name: Install system packages
run: |
sudo apt-get install -y --no-install-recommends doxygen graphviz ninja-build
python -m pip install wheel setuptools
python -m pip install awscli
- name: Run Doxygen
run: |
mkdir build
cd build
cmake .. -DBUILD_C_DOC=ON -GNinja
ninja -v doc_doxygen
- name: Extract branch name
shell: bash
run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
id: extract_branch
if: github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')
- name: Publish
run: |
cd build/
tar cvjf ${{ steps.extract_branch.outputs.branch }}.tar.bz2 doc_doxygen/
python -m awscli s3 cp ./${{ steps.extract_branch.outputs.branch }}.tar.bz2 s3://xgboost-docs/ --acl public-read
if: github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID_IAM_S3_UPLOADER }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY_IAM_S3_UPLOADER }}
sphinx:
runs-on: ubuntu-latest
name: Build docs using Sphinx
steps:
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: actions/setup-python@v2
with:
python-version: '3.7'
architecture: 'x64'
- name: Install system packages
run: |
sudo apt-get install -y --no-install-recommends graphviz
python -m pip install wheel setuptools
python -m pip install -r doc/requirements.txt
- name: Extract branch name
shell: bash
run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
id: extract_branch
if: github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')
- name: Run Sphinx
run: |
make -C doc html
env:
SPHINX_GIT_BRANCH: ${{ steps.extract_branch.outputs.branch }}
lintr:
runs-on: ${{ matrix.config.os }}
name: Run R linters on OS ${{ matrix.config.os }}, R ${{ matrix.config.r }}, Compiler ${{ matrix.config.compiler }}, Build ${{ matrix.config.build }}
strategy:
matrix:
config:
- {os: windows-latest, r: 'release', compiler: 'mingw', build: 'autotools'}
env:
R_REMOTES_NO_ERRORS_FROM_WARNINGS: true
RSPM: ${{ matrix.config.rspm }}
steps:
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: r-lib/actions/setup-r@master
with:
r-version: ${{ matrix.config.r }}
- name: Cache R packages
uses: actions/cache@v2
with:
path: ${{ env.R_LIBS_USER }}
key: ${{ runner.os }}-r-${{ matrix.config.r }}-1-${{ hashFiles('R-package/DESCRIPTION') }}
restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-2-
- name: Install dependencies
shell: Rscript {0}
run: |
install.packages(${{ env.R_PACKAGES }},
repos = 'http://cloud.r-project.org',
dependencies = c('Depends', 'Imports', 'LinkingTo'))
- name: Run lintr
run: |
cd R-package
R.exe CMD INSTALL .
Rscript.exe tests/helper_scripts/run_lint.R
test-with-R:
runs-on: ${{ matrix.config.os }}
name: Test R on OS ${{ matrix.config.os }}, R ${{ matrix.config.r }}, Compiler ${{ matrix.config.compiler }}, Build ${{ matrix.config.build }}
strategy:
fail-fast: false
matrix:
config:
- {os: windows-2016, r: 'release', compiler: 'mingw', build: 'autotools'}
- {os: windows-2016, r: 'release', compiler: 'msvc', build: 'cmake'}
- {os: windows-2016, r: 'release', compiler: 'mingw', build: 'cmake'}
env:
R_REMOTES_NO_ERRORS_FROM_WARNINGS: true
RSPM: ${{ matrix.config.rspm }}
steps:
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: r-lib/actions/setup-r@master
with:
r-version: ${{ matrix.config.r }}
- name: Cache R packages
uses: actions/cache@v2
with:
path: ${{ env.R_LIBS_USER }}
key: ${{ runner.os }}-r-${{ matrix.config.r }}-1-${{ hashFiles('R-package/DESCRIPTION') }}
restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-2-
- name: Install dependencies
shell: Rscript {0}
run: |
install.packages(${{ env.R_PACKAGES }},
repos = 'http://cloud.r-project.org',
dependencies = c('Depends', 'Imports', 'LinkingTo'))
- uses: actions/setup-python@v2
with:
python-version: '3.7'
architecture: 'x64'
- name: Test R
run: |
python tests/ci_build/test_r_package.py --compiler="${{ matrix.config.compiler }}" --build-tool="${{ matrix.config.build }}"
test-R-CRAN:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
config:
- {r: 'release'}
steps:
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: r-lib/actions/setup-r@master
with:
r-version: ${{ matrix.config.r }}
- uses: r-lib/actions/setup-tinytex@master
- name: Cache R packages
uses: actions/cache@v2
with:
path: ${{ env.R_LIBS_USER }}
key: ${{ runner.os }}-r-${{ matrix.config.r }}-1-${{ hashFiles('R-package/DESCRIPTION') }}
restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-2-
- name: Install system packages
run: |
sudo apt-get update && sudo apt-get install libcurl4-openssl-dev libssl-dev libssh2-1-dev libgit2-dev
- name: Install dependencies
shell: Rscript {0}
run: |
install.packages(${{ env.R_PACKAGES }},
repos = 'http://cloud.r-project.org',
dependencies = c('Depends', 'Imports', 'LinkingTo'))
- name: Check R Package
run: |
# Print stacktrace upon success of failure
make Rcheck || tests/ci_build/print_r_stacktrace.sh fail
tests/ci_build/print_r_stacktrace.sh success
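For reference, the gtest-cpu job above can be reproduced outside Actions; the following is a minimal sketch, not an official script, assuming CMake, Ninja, and the system packages the workflow installs (e.g. lz4, libomp on macOS) are present. The commands are taken directly from the job's run steps:
# local approximation of the gtest-cpu job defined above
mkdir build && cd build
cmake .. -DGOOGLE_TEST=ON -DUSE_OPENMP=ON -DUSE_DMLC_GTEST=ON -DPLUGIN_LZ4=ON -DPLUGIN_DENSE_PARSER=ON -GNinja
ninja -v
ctest --extra-verbose   # runs the Google Test binaries built above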

44
.github/workflows/r_nold.yml vendored Normal file

@@ -0,0 +1,44 @@
# Run R tests with noLD R. Only triggered by a pull request review
# See discussion at https://github.com/dmlc/xgboost/pull/6378
name: XGBoost-R-noLD
on:
pull_request_review_comment:
types: [created]
env:
R_PACKAGES: c('XML', 'igraph', 'data.table', 'magrittr', 'ggplot2', 'DiagrammeR', 'Ckmeans.1d.dp', 'vcd', 'testthat', 'lintr', 'knitr', 'rmarkdown', 'e1071', 'cplm', 'devtools', 'float', 'titanic')
jobs:
test-R-noLD:
if: github.event.comment.body == '/gha run r-nold-test' && contains('OWNER,MEMBER,COLLABORATOR', github.event.comment.author_association)
timeout-minutes: 120
runs-on: ubuntu-latest
container: rhub/debian-gcc-devel-nold
steps:
- name: Install git and system packages
shell: bash
run: |
apt-get update && apt-get install -y git libcurl4-openssl-dev libssl-dev libssh2-1-dev libgit2-dev libxml2-dev
- uses: actions/checkout@v2
with:
submodules: 'true'
- name: Install dependencies
shell: bash
run: |
cat > install_libs.R <<EOT
install.packages(${{ env.R_PACKAGES }},
repos = 'http://cloud.r-project.org',
dependencies = c('Depends', 'Imports', 'LinkingTo'))
EOT
/tmp/R-devel/bin/Rscript install_libs.R
- name: Run R tests
shell: bash
run: |
cd R-package && \
/tmp/R-devel/bin/R CMD INSTALL . && \
/tmp/R-devel/bin/R -q -e "library(testthat); setwd('tests'); source('testthat.R')"
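The noLD job only triggers when a pull request review comment reads '/gha run r-nold-test', so it cannot be exercised by a plain push; locally, the same test sequence can be run against any R installation. A minimal sketch, assuming R and the packages in R_PACKAGES are installed (the workflow itself calls /tmp/R-devel/bin/R inside the rhub/debian-gcc-devel-nold container):
# mirror the workflow's test step with a regular R installation
cd R-package
R CMD INSTALL .
R -q -e "library(testthat); setwd('tests'); source('testthat.R')"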

16
.gitignore vendored

@@ -51,6 +51,7 @@ Debug
#.Rbuildignore
R-package.Rproj
*.cache*
.mypy_cache/
# java
java/xgboost4j/target
java/xgboost4j/tmp
@@ -65,12 +66,12 @@ nb-configuration*
.pydevproject
.settings/
build
config.mk
/xgboost
*.data
build_plugin
recommonmark/
tags
TAGS
*.class
target
*.swp
@@ -93,6 +94,7 @@ metastore_db
# files from R-package source install
**/config.status
R-package/src/Makevars
*.lib
# Visual Studio Code
/.vscode/
@@ -101,3 +103,15 @@ R-package/src/Makevars
.idea
*.iml
/cmake-build-debug/
# GDB
.gdb_history
# Python joblib.Memory used in pytest.
cachedir/
# Files from local Dask work
dask-worker-space/
# Jupyter notebook checkpoints
.ipynb_checkpoints/

6
.gitmodules vendored

@@ -1,9 +1,9 @@
[submodule "dmlc-core"]
path = dmlc-core
url = https://github.com/dmlc/dmlc-core
[submodule "rabit"]
path = rabit
url = https://github.com/dmlc/rabit
[submodule "cub"]
path = cub
url = https://github.com/NVlabs/cub
[submodule "gputreeshap"]
path = gputreeshap
url = https://github.com/rapidsai/gputreeshap.git
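Because the build pulls these projects in as git submodules, a fresh checkout must initialize them before CMake can configure. A minimal sketch using standard git commands (not part of this diff):
# clone with all submodules (dmlc-core, rabit, gputreeshap, ...)
git clone --recursive https://github.com/dmlc/xgboost.git
# or, inside an existing checkout:
git submodule update --init --recursive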

.travis.yml

@@ -1,34 +1,40 @@
# disable sudo for container build.
sudo: required
# Enabling test OS X
os:
- linux
- osx
osx_image: xcode10.3
dist: bionic
# Use Build Matrix to do lint and build separately
env:
matrix:
# python package test
- TASK=python_test
# test installation of Python source distribution
- TASK=python_sdist_test
# java package test
- TASK=java_test
# cmake test
- TASK=cmake_test
global:
- secure: "PR16i9F8QtNwn99C5NDp8nptAS+97xwDtXEJJfEiEVhxPaaRkOp0MPWhogCaK0Eclxk1TqkgWbdXFknwGycX620AzZWa/A1K3gAs+GrpzqhnPMuoBJ0Z9qxXTbSJvCyvMbYwVrjaxc/zWqdMU8waWz8A7iqKGKs/SqbQ3rO6v7c="
- secure: "dAGAjBokqm/0nVoLMofQni/fWIBcYSmdq4XvCBX1ZAMDsWnuOfz/4XCY6h2lEI1rVHZQ+UdZkc9PioOHGPZh5BnvE49/xVVWr9c4/61lrDOlkD01ZjSAeoV0fAZq+93V/wPl4QV+MM+Sem9hNNzFSbN5VsQLAiWCSapWsLdKzqA="
matrix:
exclude:
jobs:
include:
- os: linux
arch: amd64
env: TASK=python_sdist_test
- os: linux
arch: arm64
env: TASK=python_sdist_test
- os: linux
arch: arm64
env: TASK=python_test
- os: linux
services:
- docker
- os: osx
arch: amd64
osx_image: xcode10.2
env: TASK=python_test
- os: osx
arch: amd64
osx_image: xcode10.2
env: TASK=python_sdist_test
- os: osx
arch: amd64
osx_image: xcode10.2
env: TASK=java_test
- os: linux
env: TASK=cmake_test
arch: s390x
env: TASK=s390x_test
# dependent brew packages
addons:
@@ -39,12 +45,17 @@ addons:
- graphviz
- openssl
- libgit2
- lz4
- wget
- r
update: true
apt:
packages:
- snapd
- unzip
before_install:
- source dmlc-core/scripts/travis/travis_setup_env.sh
- source tests/travis/travis_setup_env.sh
- if [ "${TASK}" != "python_sdist_test" ]; then export PYTHONPATH=${PYTHONPATH}:${PWD}/python-package; fi
- echo "MAVEN_OPTS='-Xmx2g -XX:MaxPermSize=1024m -XX:ReservedCodeCacheSize=512m -Dorg.slf4j.simpleLogger.defaultLogLevel=error'" > ~/.mavenrc
@@ -60,7 +71,7 @@ cache:
- ${HOME}/.cache/pip
before_cache:
- dmlc-core/scripts/travis/travis_before_cache.sh
- tests/travis/travis_before_cache.sh
after_failure:
- tests/travis/travis_after_failure.sh

CMakeLists.txt

@@ -1,8 +1,11 @@
cmake_minimum_required(VERSION 3.12)
project(xgboost LANGUAGES CXX C VERSION 1.0.0)
cmake_minimum_required(VERSION 3.13)
project(xgboost LANGUAGES CXX C VERSION 1.3.1)
include(cmake/Utils.cmake)
list(APPEND CMAKE_MODULE_PATH "${xgboost_SOURCE_DIR}/cmake/modules")
cmake_policy(SET CMP0022 NEW)
cmake_policy(SET CMP0079 NEW)
set(CMAKE_POLICY_DEFAULT_CMP0063 NEW)
cmake_policy(SET CMP0063 NEW)
if ((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUAL 3.13))
cmake_policy(SET CMP0077 NEW)
@@ -21,19 +24,27 @@ write_version()
set_default_configuration_release()
#-- Options
## User options
option(BUILD_C_DOC "Build documentation for C APIs using Doxygen." OFF)
option(USE_OPENMP "Build with OpenMP support." ON)
option(BUILD_STATIC_LIB "Build static library" OFF)
option(RABIT_BUILD_MPI "Build MPI" OFF)
## Bindings
option(JVM_BINDINGS "Build JVM bindings" OFF)
option(R_LIB "Build shared library for R package" OFF)
## Dev
option(USE_DEBUG_OUTPUT "Dump internal training results like gradients and predictions to stdout.
Should only be used for debugging." OFF)
option(FORCE_COLORED_OUTPUT "Force colored output from compilers, useful when ninja is used instead of make." OFF)
option(ENABLE_ALL_WARNINGS "Enable all compiler warnings. Only effective for GCC/Clang" OFF)
option(LOG_CAPI_INVOCATION "Log all C API invocations for debugging" OFF)
option(GOOGLE_TEST "Build google tests" OFF)
option(USE_DMLC_GTEST "Use google tests bundled with dmlc-core submodule" OFF)
option(USE_DEVICE_DEBUG "Generate CUDA device debug info." OFF)
option(USE_NVTX "Build with cuda profiling annotations. Developers only." OFF)
set(NVTX_HEADER_DIR "" CACHE PATH "Path to the stand-alone nvtx header")
option(RABIT_MOCK "Build rabit with mock" OFF)
option(HIDE_CXX_SYMBOLS "Build shared library and hide all C++ symbols" OFF)
## CUDA
option(USE_CUDA "Build with GPU acceleration" OFF)
option(USE_NCCL "Build with NCCL to enable distributed GPU support." OFF)
@@ -49,10 +60,14 @@ option(USE_SANITIZER "Use sanitizer flags" OFF)
option(SANITIZER_PATH "Path to sanitizers.")
set(ENABLED_SANITIZERS "address" "leak" CACHE STRING
"Semicolon separated list of sanitizer names. E.g 'address;leak'. Supported sanitizers are
address, leak and thread.")
address, leak, undefined and thread.")
## Plugins
option(PLUGIN_LZ4 "Build lz4 plugin" OFF)
option(PLUGIN_DENSE_PARSER "Build dense parser plugin" OFF)
option(PLUGIN_RMM "Build with RAPIDS Memory Manager (RMM)" OFF)
## TODO: 1. Add check if DPC++ compiler is used for building
option(PLUGIN_UPDATER_ONEAPI "DPC++ updater" OFF)
option(ADD_PKGCONFIG "Add xgboost.pc into system." ON)
#-- Checks for building XGBoost
if (USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug)))
@@ -61,6 +76,9 @@ endif (USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug)))
if (USE_NCCL AND NOT (USE_CUDA))
message(SEND_ERROR "`USE_NCCL` must be enabled with `USE_CUDA` flag.")
endif (USE_NCCL AND NOT (USE_CUDA))
if (USE_DEVICE_DEBUG AND NOT (USE_CUDA))
message(SEND_ERROR "`USE_DEVICE_DEBUG` must be enabled with `USE_CUDA` flag.")
endif (USE_DEVICE_DEBUG AND NOT (USE_CUDA))
if (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
message(SEND_ERROR "Build XGBoost with -DUSE_NCCL=ON to enable BUILD_WITH_SHARED_NCCL.")
endif (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
@@ -74,6 +92,23 @@ endif (R_LIB AND GOOGLE_TEST)
if (USE_AVX)
message(SEND_ERROR "The option 'USE_AVX' is deprecated as experimental AVX features have been removed from XGBoost.")
endif (USE_AVX)
if (PLUGIN_RMM AND NOT (USE_CUDA))
message(SEND_ERROR "`PLUGIN_RMM` must be enabled with `USE_CUDA` flag.")
endif (PLUGIN_RMM AND NOT (USE_CUDA))
if (PLUGIN_RMM AND NOT ((CMAKE_CXX_COMPILER_ID STREQUAL "Clang") OR (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")))
message(SEND_ERROR "`PLUGIN_RMM` must be used with GCC or Clang compiler.")
endif (PLUGIN_RMM AND NOT ((CMAKE_CXX_COMPILER_ID STREQUAL "Clang") OR (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")))
if (PLUGIN_RMM AND NOT (CMAKE_SYSTEM_NAME STREQUAL "Linux"))
message(SEND_ERROR "`PLUGIN_RMM` must be used with Linux.")
endif (PLUGIN_RMM AND NOT (CMAKE_SYSTEM_NAME STREQUAL "Linux"))
if (ENABLE_ALL_WARNINGS)
if ((NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang") AND (NOT CMAKE_CXX_COMPILER_ID STREQUAL "GNU"))
message(SEND_ERROR "ENABLE_ALL_WARNINGS is only available for Clang and GCC.")
endif ((NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang") AND (NOT CMAKE_CXX_COMPILER_ID STREQUAL "GNU"))
endif (ENABLE_ALL_WARNINGS)
if (BUILD_STATIC_LIB AND (R_LIB OR JVM_BINDINGS))
message(SEND_ERROR "Cannot build a static library libxgboost.a when R or JVM packages are enabled.")
endif (BUILD_STATIC_LIB AND (R_LIB OR JVM_BINDINGS))
#-- Sanitizer
if (USE_SANITIZER)
@@ -88,11 +123,22 @@ if (USE_CUDA)
message(STATUS "Configured CUDA host compiler: ${CMAKE_CUDA_HOST_COMPILER}")
enable_language(CUDA)
if (${CMAKE_CUDA_COMPILER_VERSION} VERSION_LESS 10.0)
message(FATAL_ERROR "CUDA version must be at least 10.0!")
endif()
set(GEN_CODE "")
format_gencode_flags("${GPU_COMPUTE_VER}" GEN_CODE)
message(STATUS "CUDA GEN_CODE: ${GEN_CODE}")
add_subdirectory(${PROJECT_SOURCE_DIR}/gputreeshap)
endif (USE_CUDA)
if (FORCE_COLORED_OUTPUT AND (CMAKE_GENERATOR STREQUAL "Ninja") AND
((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") OR
(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")))
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-color=always")
endif()
find_package(Threads REQUIRED)
if (USE_OPENMP)
if (APPLE)
# Require CMake 3.16+ on Mac OSX, as previous versions of CMake had trouble locating
@@ -106,40 +152,59 @@ endif (USE_OPENMP)
msvc_use_static_runtime()
add_subdirectory(${xgboost_SOURCE_DIR}/dmlc-core)
set_target_properties(dmlc PROPERTIES
CXX_STANDARD 11
CXX_STANDARD 14
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON)
list(APPEND LINKED_LIBRARIES_PRIVATE dmlc)
if (MSVC)
target_compile_options(dmlc PRIVATE
-D_CRT_SECURE_NO_WARNINGS -D_CRT_SECURE_NO_DEPRECATE)
if (TARGET dmlc_unit_tests)
target_compile_options(dmlc_unit_tests PRIVATE
-D_CRT_SECURE_NO_WARNINGS -D_CRT_SECURE_NO_DEPRECATE)
endif (TARGET dmlc_unit_tests)
endif (MSVC)
if (ENABLE_ALL_WARNINGS)
target_compile_options(dmlc PRIVATE -Wall -Wextra)
endif (ENABLE_ALL_WARNINGS)
# rabit
set(RABIT_BUILD_DMLC OFF)
set(DMLC_ROOT ${xgboost_SOURCE_DIR}/dmlc-core)
set(RABIT_WITH_R_LIB ${R_LIB})
add_subdirectory(rabit)
if (RABIT_MOCK)
list(APPEND LINKED_LIBRARIES_PRIVATE rabit_mock_static)
else()
list(APPEND LINKED_LIBRARIES_PRIVATE rabit)
endif(RABIT_MOCK)
# core xgboost
add_subdirectory(${xgboost_SOURCE_DIR}/src)
target_link_libraries(objxgboost PUBLIC dmlc)
# Exports some R specific definitions and objects
if (R_LIB)
add_subdirectory(${xgboost_SOURCE_DIR}/R-package)
endif (R_LIB)
# core xgboost
# Plugin
add_subdirectory(${xgboost_SOURCE_DIR}/plugin)
add_subdirectory(${xgboost_SOURCE_DIR}/src)
set(XGBOOST_OBJ_SOURCES "${XGBOOST_OBJ_SOURCES};$<TARGET_OBJECTS:objxgboost>")
#-- Shared library
add_library(xgboost SHARED ${XGBOOST_OBJ_SOURCES})
#-- library
if (BUILD_STATIC_LIB)
add_library(xgboost STATIC)
else (BUILD_STATIC_LIB)
add_library(xgboost SHARED)
endif (BUILD_STATIC_LIB)
target_link_libraries(xgboost PRIVATE objxgboost)
if (USE_CUDA)
xgboost_set_cuda_flags(xgboost)
endif (USE_CUDA)
#-- Hide all C++ symbols
if (HIDE_CXX_SYMBOLS)
foreach(target objxgboost xgboost dmlc)
set_target_properties(${target} PROPERTIES CXX_VISIBILITY_PRESET hidden)
endforeach()
endif (HIDE_CXX_SYMBOLS)
target_include_directories(xgboost
INTERFACE
$<INSTALL_INTERFACE:${CMAKE_INSTALL_PREFIX}/include>
$<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}/include>)
target_link_libraries(xgboost PRIVATE ${LINKED_LIBRARIES_PRIVATE})
# This creates its own shared library `xgboost4j'.
if (JVM_BINDINGS)
@@ -148,18 +213,21 @@ endif (JVM_BINDINGS)
#-- End shared library
#-- CLI for xgboost
add_executable(runxgboost ${xgboost_SOURCE_DIR}/src/cli_main.cc ${XGBOOST_OBJ_SOURCES})
add_executable(runxgboost ${xgboost_SOURCE_DIR}/src/cli_main.cc)
target_link_libraries(runxgboost PRIVATE objxgboost)
if (USE_NVTX)
enable_nvtx(runxgboost)
endif (USE_NVTX)
target_include_directories(runxgboost
PRIVATE
${xgboost_SOURCE_DIR}/include
${xgboost_SOURCE_DIR}/dmlc-core/include
${xgboost_SOURCE_DIR}/rabit/include)
target_link_libraries(runxgboost PRIVATE ${LINKED_LIBRARIES_PRIVATE})
set_target_properties(
runxgboost PROPERTIES
OUTPUT_NAME xgboost
CXX_STANDARD 11
CXX_STANDARD 14
CXX_STANDARD_REQUIRED ON)
#-- End CLI for xgboost
@@ -170,11 +238,12 @@ add_dependencies(xgboost runxgboost)
#-- Installing XGBoost
if (R_LIB)
include(cmake/RPackageInstallTargetSetup.cmake)
set_target_properties(xgboost PROPERTIES PREFIX "")
if (APPLE)
set_target_properties(xgboost PROPERTIES SUFFIX ".so")
endif (APPLE)
setup_rpackage_install_target(xgboost ${CMAKE_CURRENT_BINARY_DIR})
setup_rpackage_install_target(xgboost "${CMAKE_CURRENT_BINARY_DIR}/R-package-install")
set(CMAKE_INSTALL_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/dummy_inst")
endif (R_LIB)
if (MINGW)
@@ -191,7 +260,20 @@ include(GNUInstallDirs)
install(DIRECTORY ${xgboost_SOURCE_DIR}/include/xgboost
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
install(TARGETS xgboost runxgboost
# Install libraries. If `xgboost` is a static lib, specify `objxgboost` also, to avoid the
# following error:
#
# > install(EXPORT ...) includes target "xgboost" which requires target "objxgboost" that is not
# > in any export set.
#
# https://github.com/dmlc/xgboost/issues/6085
if (BUILD_STATIC_LIB)
set(INSTALL_TARGETS xgboost runxgboost objxgboost dmlc)
else (BUILD_STATIC_LIB)
set(INSTALL_TARGETS xgboost runxgboost)
endif (BUILD_STATIC_LIB)
install(TARGETS ${INSTALL_TARGETS}
EXPORT XGBoostTargets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
@@ -245,3 +327,12 @@ endif (GOOGLE_TEST)
# replace /MD with /MT. See https://github.com/dmlc/xgboost/issues/4462
# for issues caused by mixing of /MD and /MT flags
msvc_use_static_runtime()
# Add xgboost.pc
if (ADD_PKGCONFIG)
configure_file(${xgboost_SOURCE_DIR}/cmake/xgboost.pc.in ${xgboost_BINARY_DIR}/xgboost.pc @ONLY)
install(
FILES ${xgboost_BINARY_DIR}/xgboost.pc
DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
endif (ADD_PKGCONFIG)
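The options added above change how a build is configured: BUILD_STATIC_LIB produces libxgboost.a (and, per the install comment, exports objxgboost and dmlc as well), while HIDE_CXX_SYMBOLS applies only to the shared library. A minimal sketch of both configurations, assuming CMake 3.13+ and Ninja; the flags are the ones defined in this file:
# static library build; rejected if combined with R_LIB or JVM_BINDINGS (see check above)
mkdir build && cd build
cmake .. -DBUILD_STATIC_LIB=ON -GNinja
ninja -v
# alternative: shared library with C++ symbols hidden
# cmake .. -DHIDE_CXX_SYMBOLS=ON -GNinja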

CONTRIBUTORS.md

@@ -10,14 +10,14 @@ The Project Management Committee(PMC) consists group of active committers that m
- Tianqi is a Ph.D. student working on large-scale machine learning. He is the creator of the project.
* [Michael Benesty](https://github.com/pommedeterresautee)
- Michael is a lawyer and data scientist in France. He is the creator of XGBoost interactive analysis module in R.
* [Yuan Tang](https://github.com/terrytangyuan), Ant Financial
- Yuan is a software engineer in Ant Financial. He contributed mostly in R and Python packages.
* [Yuan Tang](https://github.com/terrytangyuan), Ant Group
- Yuan is a software engineer in Ant Group. He contributed mostly in R and Python packages.
* [Nan Zhu](https://github.com/CodingCat), Uber
- Nan is a software engineer in Uber. He contributed mostly in JVM packages.
* [Jiaming Yuan](https://github.com/trivialfis)
- Jiaming contributed to the GPU algorithms. He has also introduced new abstractions to improve the quality of the C++ codebase.
* [Hyunsu Cho](http://hyunsu-cho.io/), Amazon AI
- Hyunsu is an applied scientist in Amazon AI. He is the maintainer of the XGBoost Python package. He also manages the Jenkins continuous integration system (https://xgboost-ci.net/). He is the initial author of the CPU 'hist' updater.
* [Hyunsu Cho](http://hyunsu-cho.io/), NVIDIA
- Hyunsu is the maintainer of the XGBoost Python package. He also manages the Jenkins continuous integration system (https://xgboost-ci.net/). He is the initial author of the CPU 'hist' updater.
* [Rory Mitchell](https://github.com/RAMitchell), University of Waikato
- Rory is a Ph.D. student at University of Waikato. He is the original creator of the GPU training algorithms. He improved the CMake build system and continuous integration.
* [Hongliang Liu](https://github.com/phunterlau)
@@ -37,6 +37,8 @@ Committers are people who have made substantial contributions to the project and
- Sergei is a software engineer in Criteo. He contributed mostly in JVM packages.
* [Scott Lundberg](http://scottlundberg.com/), University of Washington
- Scott is a Ph.D. student at University of Washington. He is the creator of SHAP, a unified approach to explain the output of machine learning models such as decision tree ensembles. He also helps maintain the XGBoost Julia package.
* [Egor Smirnov](https://github.com/SmirnovEgorRu), Intel
- Egor has led a major effort to improve the performance of XGBoost on multi-core CPUs.
Become a Committer

Jenkinsfile

@@ -6,6 +6,9 @@
// Command to run command inside a docker container
dockerRun = 'tests/ci_build/ci_build.sh'
// Which CUDA version to use when building reference distribution wheel
ref_cuda_ver = '10.0'
import groovy.transform.Field
@Field
@@ -31,29 +34,19 @@ pipeline {
// Build stages
stages {
stage('Jenkins Linux: Get sources') {
agent { label 'linux && cpu' }
stage('Jenkins Linux: Initialize') {
agent { label 'job_initializer' }
steps {
script {
def buildNumber = env.BUILD_NUMBER as int
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
checkoutSrcs()
commit_id = "${GIT_COMMIT}"
}
sh 'python3 tests/jenkins_get_approval.py'
stash name: 'srcs'
milestone ordinal: 1
}
}
stage('Jenkins Linux: Formatting Check') {
agent none
steps {
script {
parallel ([
'clang-tidy': { ClangTidy() },
'lint': { Lint() },
'sphinx-doc': { SphinxDoc() },
'doxygen': { Doxygen() }
])
}
milestone ordinal: 2
}
}
stage('Jenkins Linux: Build') {
@@ -61,16 +54,21 @@ pipeline {
steps {
script {
parallel ([
'clang-tidy': { ClangTidy() },
'build-cpu': { BuildCPU() },
'build-cpu-rabit-mock': { BuildCPUMock() },
'build-gpu-cuda9.0': { BuildCUDA(cuda_version: '9.0') },
// Build reference, distribution-ready Python wheel with CUDA 10.0
// using CentOS 6 image
'build-gpu-cuda10.0': { BuildCUDA(cuda_version: '10.0') },
// The build-gpu-* builds below use Ubuntu image
'build-gpu-cuda10.1': { BuildCUDA(cuda_version: '10.1') },
'build-jvm-packages': { BuildJVMPackages(spark_version: '2.4.3') },
'build-gpu-cuda10.2': { BuildCUDA(cuda_version: '10.2', build_rmm: true) },
'build-gpu-cuda11.0': { BuildCUDA(cuda_version: '11.0') },
'build-jvm-packages-gpu-cuda10.0': { BuildJVMPackagesWithCUDA(spark_version: '3.0.0', cuda_version: '10.0') },
'build-jvm-packages': { BuildJVMPackages(spark_version: '3.0.0') },
'build-jvm-doc': { BuildJVMDoc() }
])
}
milestone ordinal: 3
}
}
stage('Jenkins Linux: Test') {
@@ -79,20 +77,28 @@ pipeline {
script {
parallel ([
'test-python-cpu': { TestPythonCPU() },
'test-python-gpu-cuda9.0': { TestPythonGPU(cuda_version: '9.0') },
'test-python-gpu-cuda10.0': { TestPythonGPU(cuda_version: '10.0') },
'test-python-gpu-cuda10.1': { TestPythonGPU(cuda_version: '10.1') },
'test-python-mgpu-cuda10.1': { TestPythonGPU(cuda_version: '10.1', multi_gpu: true) },
'test-cpp-gpu': { TestCppGPU(cuda_version: '10.1') },
'test-cpp-mgpu': { TestCppGPU(cuda_version: '10.1', multi_gpu: true) },
'test-jvm-jdk8': { CrossTestJVMwithJDK(jdk_version: '8', spark_version: '2.4.3') },
// artifact_cuda_version doesn't apply to RMM tests; RMM tests will always match CUDA version between artifact and host env
'test-python-gpu-cuda10.2': { TestPythonGPU(artifact_cuda_version: '10.0', host_cuda_version: '10.2', test_rmm: true) },
'test-python-gpu-cuda11.0-cross': { TestPythonGPU(artifact_cuda_version: '10.0', host_cuda_version: '11.0') },
'test-python-gpu-cuda11.0': { TestPythonGPU(artifact_cuda_version: '11.0', host_cuda_version: '11.0') },
'test-python-mgpu-cuda10.2': { TestPythonGPU(artifact_cuda_version: '10.0', host_cuda_version: '10.2', multi_gpu: true, test_rmm: true) },
'test-cpp-gpu-cuda10.2': { TestCppGPU(artifact_cuda_version: '10.2', host_cuda_version: '10.2', test_rmm: true) },
'test-cpp-gpu-cuda11.0': { TestCppGPU(artifact_cuda_version: '11.0', host_cuda_version: '11.0') },
'test-jvm-jdk8': { CrossTestJVMwithJDK(jdk_version: '8', spark_version: '3.0.0') },
'test-jvm-jdk11': { CrossTestJVMwithJDK(jdk_version: '11') },
'test-jvm-jdk12': { CrossTestJVMwithJDK(jdk_version: '12') },
'test-r-3.4.4': { TestR(use_r35: false) },
'test-r-3.5.3': { TestR(use_r35: true) }
'test-jvm-jdk12': { CrossTestJVMwithJDK(jdk_version: '12') }
])
}
}
}
stage('Jenkins Linux: Deploy') {
agent none
steps {
script {
parallel ([
'deploy-jvm-packages': { DeployJVMPackages(spark_version: '3.0.0') }
])
}
milestone ordinal: 4
}
}
}
@@ -113,13 +119,17 @@ def checkoutSrcs() {
}
}
def GetCUDABuildContainerType(cuda_version) {
return (cuda_version == ref_cuda_ver) ? 'gpu_build_centos6' : 'gpu_build'
}
def ClangTidy() {
node('linux && cpu') {
node('linux && cpu_build') {
unstash name: 'srcs'
echo "Running clang-tidy job..."
def container_type = "clang_tidy"
def docker_binary = "docker"
def dockerArgs = "--build-arg CUDA_VERSION=9.2"
def dockerArgs = "--build-arg CUDA_VERSION_ARG=10.1"
sh """
${dockerRun} ${container_type} ${docker_binary} ${dockerArgs} python3 tests/ci_build/tidy.py
"""
@@ -127,48 +137,6 @@ def ClangTidy() {
}
}
def Lint() {
node('linux && cpu') {
unstash name: 'srcs'
echo "Running lint..."
def container_type = "cpu"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} make lint
"""
deleteDir()
}
}
def SphinxDoc() {
node('linux && cpu') {
unstash name: 'srcs'
echo "Running sphinx-doc..."
def container_type = "cpu"
def docker_binary = "docker"
def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='-e SPHINX_GIT_BRANCH=${BRANCH_NAME}'"
sh """#!/bin/bash
${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} make -C doc html
"""
deleteDir()
}
}
def Doxygen() {
node('linux && cpu') {
unstash name: 'srcs'
echo "Running doxygen..."
def container_type = "cpu"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/doxygen.sh ${BRANCH_NAME}
"""
echo 'Uploading doc...'
s3Upload file: "build/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "doxygen/${BRANCH_NAME}.tar.bz2"
deleteDir()
}
}
def BuildCPU() {
node('linux && cpu') {
unstash name: 'srcs'
@@ -176,17 +144,22 @@ def BuildCPU() {
def container_type = "cpu"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh
${dockerRun} ${container_type} ${docker_binary} build/testxgboost
${dockerRun} ${container_type} ${docker_binary} rm -fv dmlc-core/include/dmlc/build_config_default.h
# This step is not necessary, but here we include it, to ensure that DMLC_CORE_USE_CMAKE flag is correctly propagated
# We want to make sure that we use the configured header build/dmlc/build_config.h instead of include/dmlc/build_config_default.h.
# See discussion at https://github.com/dmlc/xgboost/issues/5510
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh -DPLUGIN_LZ4=ON -DPLUGIN_DENSE_PARSER=ON
${dockerRun} ${container_type} ${docker_binary} bash -c "cd build && ctest --extra-verbose"
"""
// Sanitizer test
def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='-e ASAN_SYMBOLIZER_PATH=/usr/bin/llvm-symbolizer -e ASAN_OPTIONS=symbolize=1 -e UBSAN_OPTIONS=print_stacktrace=1:log_path=ubsan_error.log --cap-add SYS_PTRACE'"
def docker_args = "--build-arg CMAKE_VERSION=3.12"
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak;undefined" \
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_via_cmake.sh -DUSE_SANITIZER=ON -DENABLED_SANITIZERS="address;leak;undefined" \
-DCMAKE_BUILD_TYPE=Debug -DSANITIZER_PATH=/usr/lib/x86_64-linux-gnu/
${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} build/testxgboost
${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} bash -c "cd build && ctest --exclude-regex AllTestsInDMLCUnitTests --extra-verbose"
"""
stash name: 'xgboost_cli', includes: 'xgboost'
deleteDir()
}
}
@@ -206,28 +179,78 @@ def BuildCPUMock() {
}
}
def BuildCUDA(args) {
node('linux && cpu') {
node('linux && cpu_build') {
unstash name: 'srcs'
echo "Build with CUDA ${args.cuda_version}"
def container_type = "gpu_build"
def container_type = GetCUDABuildContainerType(args.cuda_version)
def docker_binary = "docker"
def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
def docker_args = "--build-arg CUDA_VERSION_ARG=${args.cuda_version}"
def arch_flag = ""
if (env.BRANCH_NAME != 'master' && !(env.BRANCH_NAME.startsWith('release'))) {
arch_flag = "-DGPU_COMPUTE_VER=75"
}
def wheel_tag = "manylinux2010_x86_64"
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_CUDA=ON -DUSE_NCCL=ON -DOPEN_MP:BOOL=ON
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh -DUSE_CUDA=ON -DUSE_NCCL=ON -DOPEN_MP:BOOL=ON -DHIDE_CXX_SYMBOLS=ON ${arch_flag}
${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal"
${dockerRun} ${container_type} ${docker_binary} ${docker_args} python3 tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} manylinux1_x86_64
${dockerRun} ${container_type} ${docker_binary} ${docker_args} python tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} ${wheel_tag}
"""
// Stash wheel for CUDA 9.0 target
if (args.cuda_version == '9.0') {
if (args.cuda_version == ref_cuda_ver) {
sh """
${dockerRun} auditwheel_x86_64 ${docker_binary} auditwheel repair --plat ${wheel_tag} python-package/dist/*.whl
mv -v wheelhouse/*.whl python-package/dist/
# Make sure that libgomp.so is vendored in the wheel
${dockerRun} auditwheel_x86_64 ${docker_binary} bash -c "unzip -l python-package/dist/*.whl | grep libgomp || exit -1"
"""
}
echo 'Stashing Python wheel...'
stash name: 'xgboost_whl_cuda9', includes: 'python-package/dist/*.whl'
stash name: "xgboost_whl_cuda${args.cuda_version}", includes: 'python-package/dist/*.whl'
if (args.cuda_version == ref_cuda_ver && (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release'))) {
echo 'Uploading Python wheel...'
path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
echo 'Stashing C++ test executable (testxgboost)...'
stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost'
}
echo 'Stashing C++ test executable (testxgboost)...'
stash name: "xgboost_cpp_tests_cuda${args.cuda_version}", includes: 'build/testxgboost'
if (args.build_rmm) {
echo "Build with CUDA ${args.cuda_version} and RMM"
container_type = "rmm"
docker_binary = "docker"
docker_args = "--build-arg CUDA_VERSION_ARG=${args.cuda_version}"
sh """
rm -rf build/
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_via_cmake.sh --conda-env=gpu_test -DUSE_CUDA=ON -DUSE_NCCL=ON -DPLUGIN_RMM=ON ${arch_flag}
${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "cd python-package && rm -rf dist/* && python setup.py bdist_wheel --universal"
${dockerRun} ${container_type} ${docker_binary} ${docker_args} python tests/ci_build/rename_whl.py python-package/dist/*.whl ${commit_id} manylinux2010_x86_64
"""
echo 'Stashing Python wheel...'
stash name: "xgboost_whl_rmm_cuda${args.cuda_version}", includes: 'python-package/dist/*.whl'
echo 'Stashing C++ test executable (testxgboost)...'
stash name: "xgboost_cpp_tests_rmm_cuda${args.cuda_version}", includes: 'build/testxgboost'
}
deleteDir()
}
}
def BuildJVMPackagesWithCUDA(args) {
node('linux && mgpu') {
unstash name: 'srcs'
echo "Build XGBoost4J-Spark with Spark ${args.spark_version}, CUDA ${args.cuda_version}"
def container_type = "jvm_gpu_build"
def docker_binary = "nvidia-docker"
def docker_args = "--build-arg CUDA_VERSION_ARG=${args.cuda_version}"
def arch_flag = ""
if (env.BRANCH_NAME != 'master' && !(env.BRANCH_NAME.startsWith('release'))) {
arch_flag = "-DGPU_COMPUTE_VER=75"
}
// Use only 4 CPU cores
def docker_extra_params = "CI_DOCKER_EXTRA_PARAMS_INIT='--cpuset-cpus 0-3'"
sh """
${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_jvm_packages.sh ${args.spark_version} -Duse.cuda=ON $arch_flag
"""
echo "Stashing XGBoost4J JAR with CUDA ${args.cuda_version} ..."
stash name: 'xgboost4j_jar_gpu', includes: "jvm-packages/xgboost4j-gpu/target/*.jar,jvm-packages/xgboost4j-spark-gpu/target/*.jar"
deleteDir()
}
}
@@ -244,7 +267,7 @@ def BuildJVMPackages(args) {
${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_packages.sh ${args.spark_version}
"""
echo 'Stashing XGBoost4J JAR...'
stash name: 'xgboost4j_jar', includes: 'jvm-packages/xgboost4j/target/*.jar,jvm-packages/xgboost4j-spark/target/*.jar,jvm-packages/xgboost4j-example/target/*.jar'
stash name: 'xgboost4j_jar', includes: "jvm-packages/xgboost4j/target/*.jar,jvm-packages/xgboost4j-spark/target/*.jar,jvm-packages/xgboost4j-example/target/*.jar"
deleteDir()
}
}
@@ -258,16 +281,19 @@ def BuildJVMDoc() {
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/build_jvm_doc.sh ${BRANCH_NAME}
"""
if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) {
echo 'Uploading doc...'
s3Upload file: "jvm-packages/${BRANCH_NAME}.tar.bz2", bucket: 'xgboost-docs', acl: 'PublicRead', path: "${BRANCH_NAME}.tar.bz2"
}
deleteDir()
}
}
def TestPythonCPU() {
node('linux && cpu') {
unstash name: 'xgboost_whl_cuda9'
unstash name: "xgboost_whl_cuda${ref_cuda_ver}"
unstash name: 'srcs'
unstash name: 'xgboost_cli'
echo "Test Python CPU"
def container_type = "cpu"
def docker_binary = "docker"
@@ -279,65 +305,51 @@ def TestPythonCPU() {
}
def TestPythonGPU(args) {
nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu'
def nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu'
def artifact_cuda_version = (args.artifact_cuda_version) ?: ref_cuda_ver
node(nodeReq) {
unstash name: 'xgboost_whl_cuda9'
unstash name: "xgboost_whl_cuda${artifact_cuda_version}"
unstash name: "xgboost_cpp_tests_cuda${artifact_cuda_version}"
unstash name: 'srcs'
echo "Test Python GPU: CUDA ${args.cuda_version}"
echo "Test Python GPU: CUDA ${args.host_cuda_version}"
def container_type = "gpu"
def docker_binary = "nvidia-docker"
def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
if (args.multi_gpu) {
echo "Using multiple GPUs"
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh mgpu
"""
} else {
echo "Using a single GPU"
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh gpu
"""
def docker_args = "--build-arg CUDA_VERSION_ARG=${args.host_cuda_version}"
def mgpu_indicator = (args.multi_gpu) ? 'mgpu' : 'gpu'
// Allocate extra space in /dev/shm to enable NCCL
def docker_extra_params = (args.multi_gpu) ? "CI_DOCKER_EXTRA_PARAMS_INIT='--shm-size=4g'" : ''
sh "${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh ${mgpu_indicator}"
if (args.test_rmm) {
sh "rm -rfv build/ python-package/dist/"
unstash name: "xgboost_whl_rmm_cuda${args.host_cuda_version}"
unstash name: "xgboost_cpp_tests_rmm_cuda${args.host_cuda_version}"
sh "${docker_extra_params} ${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/test_python.sh ${mgpu_indicator} --use-rmm-pool"
}
// For CUDA 10.0 target, run cuDF tests too
if (args.cuda_version == '10.0') {
echo "Running tests with cuDF..."
sh """
${dockerRun} cudf ${docker_binary} ${docker_args} tests/ci_build/test_python.sh cudf
"""
}
deleteDir()
}
}
def TestCppRabit() {
node(nodeReq) {
unstash name: 'xgboost_rabit_tests'
unstash name: 'srcs'
echo "Test C++, rabit mock on"
def container_type = "cpu"
def docker_binary = "docker"
sh """
${dockerRun} ${container_type} ${docker_binary} tests/ci_build/runxgb.sh xgboost tests/ci_build/approx.conf.in
"""
deleteDir()
}
}
def TestCppGPU(args) {
nodeReq = (args.multi_gpu) ? 'linux && mgpu' : 'linux && gpu'
def nodeReq = 'linux && mgpu'
def artifact_cuda_version = (args.artifact_cuda_version) ?: ref_cuda_ver
node(nodeReq) {
unstash name: 'xgboost_cpp_tests'
unstash name: "xgboost_cpp_tests_cuda${artifact_cuda_version}"
unstash name: 'srcs'
echo "Test C++, CUDA ${args.cuda_version}"
echo "Test C++, CUDA ${args.host_cuda_version}"
def container_type = "gpu"
def docker_binary = "nvidia-docker"
def docker_args = "--build-arg CUDA_VERSION=${args.cuda_version}"
if (args.multi_gpu) {
echo "Using multiple GPUs"
sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost --gtest_filter=*.MGPU_*"
} else {
echo "Using a single GPU"
sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost --gtest_filter=-*.MGPU_*"
def docker_args = "--build-arg CUDA_VERSION_ARG=${args.host_cuda_version}"
sh "${dockerRun} ${container_type} ${docker_binary} ${docker_args} build/testxgboost"
if (args.test_rmm) {
sh "rm -rfv build/"
unstash name: "xgboost_cpp_tests_rmm_cuda${args.host_cuda_version}"
echo "Test C++, CUDA ${args.host_cuda_version} with RMM"
container_type = "rmm"
docker_binary = "nvidia-docker"
docker_args = "--build-arg CUDA_VERSION_ARG=${args.host_cuda_version}"
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} bash -c "source activate gpu_test && build/testxgboost --use-rmm-pool --gtest_filter=-*DeathTest.*"
"""
}
deleteDir()
}
@@ -365,17 +377,15 @@ def CrossTestJVMwithJDK(args) {
}
}
def TestR(args) {
def DeployJVMPackages(args) {
node('linux && cpu') {
unstash name: 'srcs'
echo "Test R package"
def container_type = "rproject"
def docker_binary = "docker"
def use_r35_flag = (args.use_r35) ? "1" : "0"
def docker_args = "--build-arg USE_R35=${use_r35_flag}"
if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) {
echo 'Deploying to xgboost-maven-repo S3 repo...'
sh """
${dockerRun} ${container_type} ${docker_binary} ${docker_args} tests/ci_build/build_test_rpkg.sh || tests/ci_build/print_r_stacktrace.sh
${dockerRun} jvm_gpu_build docker --build-arg CUDA_VERSION_ARG=10.0 tests/ci_build/deploy_jvm_packages.sh ${args.spark_version}
"""
}
deleteDir()
}
}

Jenkinsfile-win64

@@ -10,17 +10,29 @@ def commit_id // necessary to pass a variable from one stage to another
pipeline {
agent none
// Setup common job properties
options {
timestamps()
timeout(time: 240, unit: 'MINUTES')
buildDiscarder(logRotator(numToKeepStr: '10'))
preserveStashes()
}
// Build stages
stages {
stage('Jenkins Win64: Get sources') {
agent { label 'win64 && build' }
stage('Jenkins Win64: Initialize') {
agent { label 'job_initializer' }
steps {
script {
def buildNumber = env.BUILD_NUMBER as int
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
checkoutSrcs()
commit_id = "${GIT_COMMIT}"
}
sh 'python3 tests/jenkins_get_approval.py'
stash name: 'srcs'
milestone ordinal: 1
}
}
stage('Jenkins Win64: Build') {
@@ -28,10 +40,9 @@ pipeline {
steps {
script {
parallel ([
'build-win64-cuda9.0': { BuildWin64() }
'build-win64-cuda10.1': { BuildWin64() }
])
}
milestone ordinal: 2
}
}
stage('Jenkins Win64: Test') {
@@ -39,13 +50,9 @@ pipeline {
steps {
script {
parallel ([
'test-win64-cpu': { TestWin64CPU() },
'test-win64-gpu-cuda9.0': { TestWin64GPU(cuda_target: 'cuda9') },
'test-win64-gpu-cuda10.0': { TestWin64GPU(cuda_target: 'cuda10_0') },
'test-win64-gpu-cuda10.1': { TestWin64GPU(cuda_target: 'cuda10_1') }
'test-win64-cuda10.1': { TestWin64() },
])
}
milestone ordinal: 3
}
}
}
@@ -67,14 +74,18 @@ def checkoutSrcs() {
}
def BuildWin64() {
node('win64 && build') {
node('win64 && cuda10_unified') {
unstash name: 'srcs'
echo "Building XGBoost for Windows AMD64 target..."
bat "nvcc --version"
def arch_flag = ""
if (env.BRANCH_NAME != 'master' && !(env.BRANCH_NAME.startsWith('release'))) {
arch_flag = "-DGPU_COMPUTE_VER=75"
}
bat """
mkdir build
cd build
cmake .. -G"Visual Studio 15 2017 Win64" -DUSE_CUDA=ON -DCMAKE_VERBOSE_MAKEFILE=ON -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON
cmake .. -G"Visual Studio 15 2017 Win64" -DUSE_CUDA=ON -DCMAKE_VERBOSE_MAKEFILE=ON -DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON ${arch_flag} -DCMAKE_UNITY_BUILD=ON
"""
bat """
cd build
@@ -92,50 +103,41 @@ def BuildWin64() {
"""
echo 'Stashing Python wheel...'
stash name: 'xgboost_whl', includes: 'python-package/dist/*.whl'
if (env.BRANCH_NAME == 'master' || env.BRANCH_NAME.startsWith('release')) {
echo 'Uploading Python wheel...'
path = ("${BRANCH_NAME}" == 'master') ? '' : "${BRANCH_NAME}/"
s3Upload bucket: 'xgboost-nightly-builds', path: path, acl: 'PublicRead', workingDir: 'python-package/dist', includePathPattern:'**/*.whl'
}
echo 'Stashing C++ test executable (testxgboost)...'
stash name: 'xgboost_cpp_tests', includes: 'build/testxgboost.exe'
stash name: 'xgboost_cli', includes: 'xgboost.exe'
deleteDir()
}
}
def TestWin64CPU() {
node('win64 && cpu') {
unstash name: 'srcs'
unstash name: 'xgboost_whl'
echo "Test Win64 CPU"
echo "Installing Python wheel..."
bat "conda activate && (python -m pip uninstall -y xgboost || cd .)"
bat """
conda activate && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i"
"""
echo "Running Python tests..."
bat "conda activate && python -m pytest -v -s --fulltrace tests\\python"
bat "conda activate && python -m pip uninstall -y xgboost"
deleteDir()
}
}
def TestWin64GPU(args) {
node("win64 && gpu && ${args.cuda_target}") {
def TestWin64() {
node('win64 && cuda10_unified') {
unstash name: 'srcs'
unstash name: 'xgboost_whl'
unstash name: 'xgboost_cli'
unstash name: 'xgboost_cpp_tests'
echo "Test Win64 GPU (${args.cuda_target})"
echo "Test Win64"
bat "nvcc --version"
echo "Running C++ tests..."
bat "build\\testxgboost.exe"
echo "Installing Python dependencies..."
def env_name = 'win64_' + UUID.randomUUID().toString().replaceAll('-', '')
bat "conda env create -n ${env_name} --file=tests/ci_build/conda_env/win64_test.yml"
echo "Installing Python wheel..."
bat "conda activate && (python -m pip uninstall -y xgboost || cd .)"
bat """
conda activate && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i"
conda activate ${env_name} && for /R %%i in (python-package\\dist\\*.whl) DO python -m pip install "%%i"
"""
echo "Running Python tests..."
bat "conda activate ${env_name} && python -m pytest -v -s -rxXs --fulltrace tests\\python"
bat """
conda activate && python -m pytest -v -s --fulltrace -m "(not slow) and (not mgpu)" tests\\python-gpu
conda activate ${env_name} && python -m pytest -v -s -rxXs --fulltrace -m "(not slow) and (not mgpu)" tests\\python-gpu
"""
bat "conda activate && python -m pip uninstall -y xgboost"
bat "conda env remove --name ${env_name}"
deleteDir()
}
}

Makefile

@@ -1,11 +1,3 @@
ifndef config
ifneq ("$(wildcard ./config.mk)","")
config = config.mk
else
config = make/config.mk
endif
endif
ifndef DMLC_CORE
DMLC_CORE = dmlc-core
endif
@@ -30,16 +22,6 @@ ifndef MAKE_OK
endif
$(warning MAKE [$(MAKE)] - $(if $(MAKE_OK),checked OK,PROBLEM))
ifeq ($(OS), Windows_NT)
UNAME="Windows"
else
UNAME=$(shell uname)
endif
include $(config)
ifeq ($(USE_OPENMP), 0)
export NO_OPENMP = 1
endif
include $(DMLC_CORE)/make/dmlc.mk
# set compiler defaults for OSX versus *nix
@@ -62,75 +44,21 @@ export CXX = g++
endif
endif
export LDFLAGS= -pthread -lm $(ADD_LDFLAGS) $(DMLC_LDFLAGS)
export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++11 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS)
export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++14 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS)
CFLAGS += -I$(DMLC_CORE)/include -I$(RABIT)/include -I$(GTEST_PATH)/include
#java include path
export JAVAINCFLAGS = -I${JAVA_HOME}/include -I./java
ifeq ($(TEST_COVER), 1)
CFLAGS += -g -O0 -fprofile-arcs -ftest-coverage
else
CFLAGS += -O3 -funroll-loops
ifeq ($(USE_SSE), 1)
CFLAGS += -msse2
endif
endif
ifndef LINT_LANG
LINT_LANG= "all"
endif
ifeq ($(UNAME), Windows)
XGBOOST_DYLIB = lib/xgboost.dll
JAVAINCFLAGS += -I${JAVA_HOME}/include/win32
else
ifeq ($(UNAME), Darwin)
XGBOOST_DYLIB = lib/libxgboost.dylib
CFLAGS += -fPIC
else
XGBOOST_DYLIB = lib/libxgboost.so
CFLAGS += -fPIC
endif
endif
ifeq ($(UNAME), Linux)
LDFLAGS += -lrt
JAVAINCFLAGS += -I${JAVA_HOME}/include/linux
endif
ifeq ($(UNAME), Darwin)
JAVAINCFLAGS += -I${JAVA_HOME}/include/darwin
endif
OPENMP_FLAGS =
ifeq ($(USE_OPENMP), 1)
OPENMP_FLAGS = -fopenmp
else
OPENMP_FLAGS = -DDISABLE_OPENMP
endif
CFLAGS += $(OPENMP_FLAGS)
# specify tensor path
.PHONY: clean all lint clean_all doxygen rcpplint pypack Rpack Rbuild Rcheck java pylint
all: lib/libxgboost.a $(XGBOOST_DYLIB) xgboost
$(DMLC_CORE)/libdmlc.a: $(wildcard $(DMLC_CORE)/src/*.cc $(DMLC_CORE)/src/*/*.cc)
+ cd $(DMLC_CORE); "$(MAKE)" libdmlc.a config=$(ROOTDIR)/$(config); cd $(ROOTDIR)
$(RABIT)/lib/$(LIB_RABIT): $(wildcard $(RABIT)/src/*.cc)
+ cd $(RABIT); "$(MAKE)" lib/$(LIB_RABIT) USE_SSE=$(USE_SSE); cd $(ROOTDIR)
jvm: jvm-packages/lib/libxgboost4j.so
SRC = $(wildcard src/*.cc src/*/*.cc)
ALL_OBJ = $(patsubst src/%.cc, build/%.o, $(SRC))
AMALGA_OBJ = amalgamation/xgboost-all0.o
LIB_DEP = $(DMLC_CORE)/libdmlc.a $(RABIT)/lib/$(LIB_RABIT)
ALL_DEP = $(filter-out build/cli_main.o, $(ALL_OBJ)) $(LIB_DEP)
CLI_OBJ = build/cli_main.o
include tests/cpp/xgboost_test.mk
.PHONY: clean all lint clean_all doxygen rcpplint pypack Rpack Rbuild Rcheck
build/%.o: src/%.cc
@mkdir -p $(@D)
@@ -141,27 +69,6 @@ build/%.o: src/%.cc
amalgamation/xgboost-all0.o: amalgamation/xgboost-all0.cc
$(CXX) -c $(CFLAGS) $< -o $@
# Equivalent to lib/libxgboost_all.so
lib/libxgboost_all.so: $(AMALGA_OBJ) $(LIB_DEP)
@mkdir -p $(@D)
$(CXX) $(CFLAGS) -shared -o $@ $(filter %.o %.a, $^) $(LDFLAGS)
lib/libxgboost.a: $(ALL_DEP)
@mkdir -p $(@D)
ar crv $@ $(filter %.o, $?)
lib/xgboost.dll lib/libxgboost.so lib/libxgboost.dylib: $(ALL_DEP)
@mkdir -p $(@D)
$(CXX) $(CFLAGS) -shared -o $@ $(filter %.o %a, $^) $(LDFLAGS)
jvm-packages/lib/libxgboost4j.so: jvm-packages/xgboost4j/src/native/xgboost4j.cpp $(ALL_DEP)
@mkdir -p $(@D)
$(CXX) $(CFLAGS) $(JAVAINCFLAGS) -shared -o $@ $(filter %.cpp %.o %.a, $^) $(LDFLAGS)
xgboost: $(CLI_OBJ) $(ALL_DEP)
$(CXX) $(CFLAGS) -o $@ $(filter %.o %.a, $^) $(LDFLAGS)
rcpplint:
python3 dmlc-core/scripts/lint.py xgboost ${LINT_LANG} R-package/src
@@ -172,16 +79,6 @@ lint: rcpplint
python-package/xgboost/src --pylint-rc ${PWD}/python-package/.pylintrc xgboost \
${LINT_LANG} include src python-package
pylint:
flake8 --ignore E501 python-package
flake8 --ignore E501 tests/python
test: $(ALL_TEST)
$(ALL_TEST)
check: test
./tests/cpp/xgboost_test
ifeq ($(TEST_COVER), 1)
cover: check
@- $(foreach COV_OBJ, $(COVER_OBJ), \
@@ -202,38 +99,9 @@ clean_all: clean
cd $(DMLC_CORE); "$(MAKE)" clean; cd $(ROOTDIR)
cd $(RABIT); "$(MAKE)" clean; cd $(ROOTDIR)
doxygen:
doxygen doc/Doxyfile
# create standalone python tar file.
pypack: ${XGBOOST_DYLIB}
cp ${XGBOOST_DYLIB} python-package/xgboost
cd python-package; tar cf xgboost.tar xgboost; cd ..
# create pip source dist (sdist) pack for PyPI
pippack: clean_all
rm -rf xgboost-python
# remove symlinked directories in python-package/xgboost
rm -rf python-package/xgboost/lib
rm -rf python-package/xgboost/dmlc-core
rm -rf python-package/xgboost/include
rm -rf python-package/xgboost/make
rm -rf python-package/xgboost/rabit
rm -rf python-package/xgboost/src
cp -r python-package xgboost-python
cp -r CMakeLists.txt xgboost-python/xgboost/
cp -r cmake xgboost-python/xgboost/
cp -r plugin xgboost-python/xgboost/
cp -r make xgboost-python/xgboost/
cp -r src xgboost-python/xgboost/
cp -r tests xgboost-python/xgboost/
cp -r include xgboost-python/xgboost/
cp -r dmlc-core xgboost-python/xgboost/
cp -r rabit xgboost-python/xgboost/
# Use setup_pip.py instead of setup.py
mv xgboost-python/setup_pip.py xgboost-python/setup.py
# Build sdist tarball
cd xgboost-python; python setup.py sdist; mv dist/*.tar.gz ..; cd ..
cd python-package; python setup.py sdist; mv dist/*.tar.gz ..; cd ..
# Script to make a clean installable R package.
Rpack: clean_all
@@ -254,9 +122,9 @@ Rpack: clean_all
cp -r dmlc-core/include xgboost/src/dmlc-core/include
cp -r dmlc-core/src xgboost/src/dmlc-core/src
cp ./LICENSE xgboost
# Modify PKGROOT in Makevars.in
# Modify PKGROOT in Makevars.in
cat R-package/src/Makevars.in|sed '2s/.*/PKGROOT=./' > xgboost/src/Makevars.in
# Configure Makevars.win (Windows-specific Makevars, likely using MinGW)
# Configure Makevars.win (Windows-specific Makevars, likely using MinGW)
cp xgboost/src/Makevars.in xgboost/src/Makevars.win
cat xgboost/src/Makevars.in| sed '3s/.*/ENABLE_STD_THREAD=0/' > xgboost/src/Makevars.win
sed -i -e 's/@OPENMP_CXXFLAGS@/$$\(SHLIB_OPENMP_CXXFLAGS\)/g' xgboost/src/Makevars.win
@@ -266,14 +134,18 @@ Rpack: clean_all
sed -i -e 's/@OPENMP_LIB@//g' xgboost/src/Makevars.win
rm -f xgboost/src/Makevars.win-e # OSX sed create this extra file; remove it
bash R-package/remove_warning_suppression_pragma.sh
bash xgboost/remove_warning_suppression_pragma.sh
rm xgboost/remove_warning_suppression_pragma.sh
rm -rfv xgboost/tests/helper_scripts/
R ?= R
Rbuild: Rpack
R CMD build --no-build-vignettes xgboost
$(R) CMD build xgboost
rm -rf xgboost
Rcheck: Rbuild
R CMD check xgboost*.tar.gz
$(R) CMD check --as-cran xgboost*.tar.gz
-include build/*.d
-include build/*/*.d

NEWS.md

@@ -3,6 +3,695 @@ XGBoost Change Log
This file records the changes in xgboost library in reverse chronological order.
## v1.2.0 (2020.08.22)
### XGBoost4J-Spark now supports the GPU algorithm (#5171)
* Now XGBoost4J-Spark is able to leverage NVIDIA GPU hardware to speed up training.
* There is on-going work for accelerating the rest of the data pipeline with NVIDIA GPUs (#5950, #5972).
### XGBoost now supports CUDA 11 (#5808)
* It is now possible to build XGBoost with CUDA 11. Note that we do not yet distribute pre-built binaries built with CUDA 11; all current distributions use CUDA 10.0.
### Better guidance for persisting XGBoost models in an R environment (#5940, #5964)
* Users are strongly encouraged to use `xgb.save()` and `xgb.save.raw()` instead of `saveRDS()`. This is so that the persisted models can be accessed with future releases of XGBoost.
* The previous release (1.1.0) had problems loading models that were saved with `saveRDS()`. This release adds a compatibility layer to restore access to the old RDS files. Note that this is meant to be a temporary measure; users are advised to stop using `saveRDS()` and migrate to `xgb.save()` and `xgb.save.raw()`.
### New objectives and metrics
* The pseudo-Huber loss `reg:pseudohubererror` is added (#5647). The corresponding metric is `mphe`. Right now, the slope is hard-coded to 1. A minimal usage sketch follows this list.
* The Accelerated Failure Time objective for survival analysis (`survival:aft`) is now accelerated on GPUs (#5714, #5716). The survival metrics `aft-nloglik` and `interval-regression-accuracy` are also accelerated on GPUs.
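A minimal sketch of the new objective and metric, using only the parameter names quoted above (toy data, illustrative only):

```python
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
dtrain = xgb.DMatrix(rng.rand(100, 5), label=rng.rand(100))

params = {
    "objective": "reg:pseudohubererror",  # pseudo-Huber loss; slope fixed at 1
    "eval_metric": "mphe",                # the corresponding metric
}
bst = xgb.train(params, dtrain, num_boost_round=10, evals=[(dtrain, "train")])
```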
### Improved integration with scikit-learn
* Added `n_features_in_` attribute to the scikit-learn interface to store the number of features used (#5780). This is useful for integrating with some scikit-learn features such as `StackingClassifier`. See [this link](https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep010/proposal.html) for more details.
* `XGBoostError` now inherits `ValueError`, which conforms to scikit-learn's exception requirements (#5696).
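A small sketch of what the new exception hierarchy enables; the bad file name below is illustrative:

```python
import xgboost as xgb

try:
    xgb.DMatrix("no_such_file.libsvm")  # illustrative bad input
except ValueError as err:
    # XGBoostError inherits ValueError, so generic scikit-learn-style
    # ValueError handlers now catch XGBoost failures as well.
    print(f"caught {type(err).__name__}: {err}")
```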
### Improved integration with Dask
* The XGBoost Dask API now exposes an asynchronous interface (#5862). See [the document](https://xgboost.readthedocs.io/en/latest/tutorials/dask.html#working-with-asyncio) for details.
* Zero-copy ingestion of GPU arrays via `DaskDeviceQuantileDMatrix` (#5623, #5799, #5800, #5803, #5837, #5874, #5901): Previously, the Dask interface had to make two data copies: one for concatenating the Dask partition/block into a single block and another for internal representation. To save memory, we introduce `DaskDeviceQuantileDMatrix`. As long as Dask partitions are resident in the GPU memory, `DaskDeviceQuantileDMatrix` is able to ingest them directly without making copies. This matrix type wraps `DeviceQuantileDMatrix`. See the sketch after this list.
* The prediction function now returns GPU Series type if the input is from Dask-cuDF (#5710). This is to preserve the input data type.
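A hedged sketch of the zero-copy path, assuming a GPU machine with `dask_cuda` and `dask_cudf` installed; the file and column names are illustrative:

```python
import dask_cudf
import xgboost as xgb
from dask.distributed import Client
from dask_cuda import LocalCUDACluster

with Client(LocalCUDACluster()) as client:
    df = dask_cudf.read_csv("train.csv")   # partitions resident in GPU memory
    y = df["label"]                        # illustrative label column
    X = df.drop(columns=["label"])
    # Ingested directly from GPU memory, skipping the concatenation copy.
    dtrain = xgb.dask.DaskDeviceQuantileDMatrix(client, X, y)
    out = xgb.dask.train(client, {"tree_method": "gpu_hist"}, dtrain,
                         num_boost_round=10)
    booster = out["booster"]
```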
### Robust handling of external data types (#5689, #5893)
- As we support more and more external data types, the handling logic has proliferated all over the code base and become hard to keep track of. It also became unclear how missing values and threads are handled. We refactored the Python package code to collect all data handling logic in a central location, and now we have an explicit list of all supported data types.
### Improvements in GPU-side data matrix (`DeviceQuantileDMatrix`)
* The GPU-side data matrix now implements its own quantile sketching logic, so that data don't have to be transported back to the main memory (#5700, #5747, #5760, #5846, #5870, #5898). The GK sketching algorithm is also now better documented.
- Now we can load extremely sparse datasets such as URL, although performance is still sub-optimal.
* The GPU-side data matrix now exposes an iterative interface (#5783), so that users are able to construct a matrix from a data iterator. See the [Python demo](https://github.com/dmlc/xgboost/blob/release_1.2.0/demo/guide-python/data_iterator.py).
### New language binding: Swift (#5728)
* Visit https://github.com/kongzii/SwiftXGBoost for more details.
### Robust model serialization with JSON (#5772, #5804, #5831, #5857, #5934)
* We continue efforts from the 1.0.0 release to adopt JSON as the format to save and load models robustly.
* JSON model IO is significantly faster and produces smaller model files.
* Round-trip reproducibility is guaranteed, via the introduction of an efficient float-to-string conversion algorithm known as [the Ryū algorithm](https://dl.acm.org/doi/10.1145/3192366.3192369). The conversion is locale-independent, producing consistent numeric representation regardless of the locale setting of the user's machine.
* We fixed an issue in loading large JSON files to memory.
* It is now possible to load a JSON file from a remote source such as S3.
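For example, choosing a `.json` file suffix selects the JSON format for model IO (toy data shown):

```python
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
dtrain = xgb.DMatrix(rng.rand(100, 4), label=rng.rand(100))
bst = xgb.train({"max_depth": 2}, dtrain, num_boost_round=5)

bst.save_model("model.json")   # the .json suffix selects JSON serialization

bst2 = xgb.Booster()
bst2.load_model("model.json")  # round-trips reproducibly
```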
### Performance improvements
* CPU hist tree method optimization
- Skip missing lookup in hist row partitioning if data is dense. (#5644)
- Specialize training procedures for CPU hist tree method on distributed environment. (#5557)
- Add single point histogram for CPU hist. Previously the gradient histogram for CPU hist was hard-coded to be 64-bit; now users can specify the parameter `single_precision_histogram` to use a 32-bit histogram instead, for faster training performance. (#5624, #5811)
* GPU hist tree method optimization
- Removed some unnecessary synchronizations and better memory allocation pattern. (#5707)
- Optimize GPU Hist for wide datasets. Previously, for wide datasets the atomic operation was performed on global memory; now it can run on shared memory for faster histogram building. There is a known small regression on GeForce cards with dense data. (#5795, #5926, #5948, #5631)
### API additions
* Support passing fmap to importance plot (#5719). Now importance plot can show actual names of features instead of default ones. See the sketch after this list.
* Support 64bit seed. (#5643)
* A new C API `XGBoosterGetNumFeature` is added for getting number of features in booster (#5856).
* Feature names and feature types are now stored in C++ core and saved in binary DMatrix (#5858).
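A sketch of the fmap usage mentioned in the list above; `model.json` and `featmap.txt` are hypothetical files, the latter in the usual `index<TAB>name<TAB>type` feature-map format:

```python
import matplotlib.pyplot as plt
import xgboost as xgb

bst = xgb.Booster(model_file="model.json")    # hypothetical trained model
xgb.plot_importance(bst, fmap="featmap.txt")  # map feature indices to names
plt.show()
```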
### Breaking: The `predict()` method of `DaskXGBClassifier` now produces class predictions (#5986). Use `predict_proba()` to obtain probability predictions.
* Previously, `DaskXGBClassifier.predict()` produced probability predictions. This is inconsistent with the behavior of other scikit-learn classifiers, where `predict()` returns class predictions. We make a breaking change in the 1.2.0 release so that `DaskXGBClassifier.predict()` now correctly produces class predictions and thus behaves like other scikit-learn classifiers. Furthermore, we introduce the `predict_proba()` method for obtaining probability predictions, again to be in line with other scikit-learn classifiers.
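A runnable sketch of the new behaviour on a toy local cluster (all data below is synthetic):

```python
import dask.array as da
from dask.distributed import Client, LocalCluster
from xgboost.dask import DaskXGBClassifier

with Client(LocalCluster(n_workers=2)) as client:
    X = da.random.random((1000, 10), chunks=(250, 10))
    y = da.random.randint(0, 2, size=1000, chunks=250)
    clf = DaskXGBClassifier(n_estimators=10)
    clf.client = client
    clf.fit(X, y)
    labels = clf.predict(X).compute()        # class labels (new behaviour)
    proba = clf.predict_proba(X).compute()   # probabilities (new method)
```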
### Breaking: Custom evaluation metric now receives raw prediction (#5954)
* Previously, the custom evaluation metric received a transformed prediction result when used with a classifier. Now the custom metric will receive a raw (untransformed) prediction and will need to transform the prediction itself. See [demo/guide-python/custom\_softmax.py](https://github.com/dmlc/xgboost/blob/release_1.2.0/demo/guide-python/custom_softmax.py) for an example.
* This change is to make the custom metric behave consistently with the custom objective, which already receives raw prediction (#5564).
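A sketch of a custom multi-class error metric under the new contract: `predt` arrives as raw (untransformed) scores, and the metric applies the softmax transform itself (shapes assumed as in the linked demo):

```python
import numpy as np

def softmax(z):
    e = np.exp(z - z.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

def merror(predt, dtrain):
    # predt holds raw margin scores; reshape to (nrow, nclass) and transform.
    y = dtrain.get_label()
    prob = softmax(predt.reshape(y.shape[0], -1))
    return "merror", float(np.mean(prob.argmax(axis=1) != y))

# Pass to xgb.train() via the `feval` argument.
```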
### Breaking: XGBoost4J-Spark now requires Spark 3.0 and Scala 2.12 (#5836, #5890)
* Starting with version 3.0, Spark can manage GPU resources and allocate them among executors.
* Spark 3.0 dropped support for Scala 2.11 and now only supports Scala 2.12. Thus, XGBoost4J-Spark also only supports Scala 2.12.
### Breaking: XGBoost Python package now requires Python 3.6 and later (#5715)
* Python 3.6 has many useful features such as f-strings.
### Breaking: XGBoost now adopts the C++14 standard (#5664)
* Make sure to use a sufficiently modern C++ compiler that supports C++14, such as Visual Studio 2017, GCC 5.0+, and Clang 3.4+.
### Bug-fixes
* Fix a data race in the prediction function (#5853). As a byproduct, the prediction function now uses a thread-local data store and became thread-safe.
* Restore capability to run prediction when the test input has fewer features than the training data (#5955). This capability is necessary to support predicting with LIBSVM inputs. The previous release (1.1) had broken this capability, so we restore it in this version with better tests.
* Fix OpenMP build with CMake for R package, to support CMake 3.13 (#5895).
* Fix Windows 2016 build (#5902, #5918).
* Fix edge cases in scikit-learn interface with Pandas input by disabling feature validation. (#5953)
* [R] Enable weighted learning to rank (#5945)
* [R] Fix early stopping with custom objective (#5923)
* Fix NDK Build (#5886)
* Add missing explicit template specializations for greater portability (#5921)
* Handle empty rows in data iterators correctly (#5929). This bug affects file loader and JVM data frames.
* Fix `IsDense` (#5702)
* [jvm-packages] Fix wrong method name `setAllowZeroForMissingValue` (#5740)
* Fix shape inference for Dask predict (#5989)
### Usability Improvements, Documentation
* [Doc] Document that CUDA 10.0 is required (#5872)
* Refactored command line interface (CLI). Now CLI is able to handle user errors and output basic document. (#5574)
* Better error handling in Python: use `raise from` syntax to preserve full stacktrace (#5787).
* The JSON model dump now has a formal schema (#5660, #5818). The benefit is to prevent `dump_model()` function from breaking. See [this document](https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html#difference-between-saving-model-and-dumping-model) to understand the difference between saving and dumping models.
* Add a reference to the GPU external memory paper (#5684)
* Document more objective parameters in the R package (#5682)
* Document the existence of pre-built binary wheels for MacOS (#5711)
* Remove `max.depth` in the R gblinear example. (#5753)
* Added conda environment file for building docs (#5773)
* Mention dask blog post in the doc, which introduces using Dask with GPU and some internal workings. (#5789)
* Fix rendering of Markdown docs (#5821)
* Document new objectives and metrics available on GPUs (#5909)
* Better message when no GPU is found. (#5594)
* Remove the use of `silent` parameter from R demos. (#5675)
* Don't use masked array in array interface. (#5730)
* Update affiliation of @terrytangyuan: Ant Financial -> Ant Group (#5827)
* Move dask tutorial closer other distributed tutorials (#5613)
* Update XGBoost + Dask overview documentation (#5961)
* Show `n_estimators` in the docstring of the scikit-learn interface (#6041)
* Fix a typo in a docstring of the scikit-learn interface (#5980)
### Maintenance: testing, continuous integration, build system
* [CI] Remove CUDA 9.0 from CI (#5674, #5745)
* Require CUDA 10.0+ in CMake build (#5718)
* [R] Remove dependency on gendef for Visual Studio builds (fixes #5608) (#5764). This enables building XGBoost with GPU support with R 4.x.
* [R-package] Reduce duplication in configure.ac (#5693)
* Bump com.esotericsoftware to 4.0.2 (#5690)
* Migrate some tests from AppVeyor to GitHub Actions to speed up the tests. (#5911, #5917, #5919, #5922, #5928)
* Reduce cost of the Jenkins CI server (#5884, #5904, #5892). We now enforce a daily budget via an automated monitor. We also dramatically reduced the workload for the Windows platform, since the cloud VM cost is vastly greater for Windows.
* [R] Set up automated R linter (#5944)
* [R] replace uses of T and F with TRUE and FALSE (#5778)
* Update Docker container 'CPU' (#5956)
* Simplify CMake build with modern CMake techniques (#5871)
* Use `hypothesis` package for testing (#5759, #5835, #5849).
* Define `_CRT_SECURE_NO_WARNINGS` to remove unneeded warnings in MSVC (#5434)
* Run all Python demos in CI, to ensure that they don't break (#5651)
* Enhance nvtx support (#5636). Now we can use unified timer between CPU and GPU. Also CMake is able to find nvtx automatically.
* Speed up python test. (#5752)
* Add helper for generating batches of data. (#5756)
* Add c-api-demo to .gitignore (#5855)
* Add option to enable all compiler warnings in GCC/Clang (#5897)
* Make Python model compatibility test runnable locally (#5941)
* Add cupy to Windows CI (#5797)
* [CI] Fix cuDF install; merge 'gpu' and 'cudf' test suite (#5814)
* Update rabit submodule (#5680, #5876)
* Force colored output for Ninja build. (#5959)
* [CI] Assign larger /dev/shm to NCCL (#5966)
* Add missing Pytest marks to AsyncIO unit test (#5968)
* [CI] Use latest cuDF and dask-cudf (#6048)
* Add CMake flag to log C API invocations, to aid debugging (#5925)
* Fix a unit test on CLI, to handle RC versions (#6050)
* [CI] Use mgpu machine to run gpu hist unit tests (#6050)
* [CI] Build GPU-enabled JAR artifact and deploy to xgboost-maven-repo (#6050)
### Maintenance: Refactor code for legibility and maintainability
* Remove dead code in DMatrix initialization. (#5635)
* Catch dmlc error by ref. (#5678)
* Refactor the `gpu_hist` split evaluation in preparation for batched nodes enumeration. (#5610)
* Remove column major specialization. (#5755)
* Remove unused imports in Python (#5776)
* Avoid including `c_api.h` in header files. (#5782)
* Remove unweighted GK quantile, which is unused. (#5816)
* Add Python binding for rabit ops. (#5743)
* Implement `Empty` method for host device vector. (#5781)
* Remove print (#5867)
* Enforce tree order in JSON (#5974)
### Acknowledgement
**Contributors**: Nan Zhu (@CodingCat), @LionOrCatThatIsTheQuestion, Dmitry Mottl (@Mottl), Rory Mitchell (@RAMitchell), @ShvetsKS, Alex Wozniakowski (@a-wozniakowski), Alexander Gugel (@alexanderGugel), @anttisaukko, @boxdot, Andy Adinets (@canonizer), Ram Rachum (@cool-RR), Elliot Hershberg (@elliothershberg), Jason E. Aten, Ph.D. (@glycerine), Philip Hyunsu Cho (@hcho3), @jameskrach, James Lamb (@jameslamb), James Bourbeau (@jrbourbeau), Peter Jung (@kongzii), Lorenz Walthert (@lorenzwalthert), Oleksandr Kuvshynov (@okuvshynov), Rong Ou (@rongou), Shaochen Shi (@shishaochen), Yuan Tang (@terrytangyuan), Jiaming Yuan (@trivialfis), Bobby Wang (@wbo4958), Zhang Zhang (@zhangzhang10)
**Reviewers**: Nan Zhu (@CodingCat), @LionOrCatThatIsTheQuestion, Hao Yang (@QuantHao), Rory Mitchell (@RAMitchell), @ShvetsKS, Egor Smirnov (@SmirnovEgorRu), Alex Wozniakowski (@a-wozniakowski), Amit Kumar (@aktech), Avinash Barnwal (@avinashbarnwal), @boxdot, Andy Adinets (@canonizer), Chandra Shekhar Reddy (@chandrureddy), Ram Rachum (@cool-RR), Cristiano Goncalves (@cristianogoncalves), Elliot Hershberg (@elliothershberg), Jason E. Aten, Ph.D. (@glycerine), Philip Hyunsu Cho (@hcho3), Tong He (@hetong007), James Lamb (@jameslamb), James Bourbeau (@jrbourbeau), Lee Drake (@leedrake5), DougM (@mengdong), Oleksandr Kuvshynov (@okuvshynov), Rong Ou (@rongou), Shaochen Shi (@shishaochen), Xu Xiao (@sperlingxx), Yuan Tang (@terrytangyuan), Theodore Vasiloudis (@thvasilo), Jiaming Yuan (@trivialfis), Bobby Wang (@wbo4958), Zhang Zhang (@zhangzhang10)
## v1.1.1 (2020.06.06)
This patch release applies the following patches to 1.1.0 release:
* CPU performance improvement in the PyPI wheels (#5720)
* Fix loading old model (#5724)
* Install pkg-config file (#5744)
## v1.1.0 (2020.05.17)
### Better performance on multi-core CPUs (#5244, #5334, #5522)
* Poor performance scaling of the `hist` algorithm for multi-core CPUs has been under investigation (#3810). #5244 concludes the ongoing effort to improve performance scaling on multi-core CPUs, in particular Intel CPUs. Roadmap: #5104
* #5334 makes steps toward reducing memory consumption for the `hist` tree method on CPU.
* #5522 optimizes random number generation for data sampling.
### Deterministic GPU algorithm for regression and classification (#5361)
* GPU algorithm for regression and classification tasks is now deterministic.
* Roadmap: #5023. Currently only single-GPU training is deterministic. Distributed training with multiple GPUs is not yet deterministic.
### Improve external memory support on GPUs (#5093, #5365)
* Starting from 1.0.0 release, we added support for external memory on GPUs to enable training with larger datasets. Gradient-based sampling (#5093) speeds up the external memory algorithm by intelligently sampling a subset of the training data to copy into the GPU memory. [Learn more about out-of-core GPU gradient boosting.](https://arxiv.org/abs/2005.09148)
* GPU-side data sketching now works with data from external memory (#5365).
### Parameter validation: detection of unused or incorrect parameters (#5477, #5569, #5508)
* Mis-spelled training parameters are a common user mistake. In previous versions of XGBoost, mis-spelled parameters were silently ignored. Starting with the 1.0.0 release, XGBoost will produce a warning message if there are any unused training parameters. The 1.1.0 release makes parameter validation available to the scikit-learn interface (#5477) and the R binding (#5569).
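For instance, a misspelled parameter now triggers a warning rather than being silently dropped (`max_dept` below is a deliberate typo):

```python
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
dtrain = xgb.DMatrix(rng.rand(10, 3), label=rng.rand(10))

# Emits a warning that 'max_dept' was not used, instead of ignoring it.
xgb.train({"max_dept": 3}, dtrain, num_boost_round=1)
```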
### Thread-safe, in-place prediction method (#5389, #5512)
* Previously, the prediction method was not thread-safe (#5339). This release adds a new API function `inplace_predict()` that is thread-safe. It is now possible to serve concurrent requests for prediction using a shared model object.
* It is now possible to compute prediction in-place for selected data formats (`numpy.ndarray` / `scipy.sparse.csr_matrix` / `cupy.ndarray` / `cudf.DataFrame` / `pd.DataFrame`) without creating a `DMatrix` object.
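A minimal sketch of the thread-safe path (toy data):

```python
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
X = rng.rand(50, 4)
bst = xgb.train({}, xgb.DMatrix(X, label=rng.rand(50)), num_boost_round=5)

# Predict directly on the NumPy array: no DMatrix construction needed, and
# the shared booster can serve concurrent requests.
preds = bst.inplace_predict(X)
```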
### Addition of Accelerated Failure Time objective for survival analysis (#4763, #5473, #5486, #5552, #5553)
* Survival analysis (regression) models the time it takes for an event of interest to occur. The target label is potentially censored, i.e. the label is a range rather than a single number. We added a new objective `survival:aft` to support survival analysis. Also added is the new API to specify the ranged labels. Check out [the tutorial](https://xgboost.readthedocs.io/en/release_1.1.0/tutorials/aft_survival_analysis.html) and the [demos](https://github.com/dmlc/xgboost/tree/release_1.1.0/demo/aft_survival).
* GPU support is work in progress (#5714).
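A sketch of the ranged-label API covered in the linked tutorial (synthetic intervals; an infinite upper bound marks a right-censored row):

```python
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
X = rng.rand(100, 5)
y_lower = rng.rand(100)
y_upper = y_lower + rng.rand(100)   # use np.inf here for right-censored rows

dtrain = xgb.DMatrix(X)
dtrain.set_float_info("label_lower_bound", y_lower)
dtrain.set_float_info("label_upper_bound", y_upper)

params = {"objective": "survival:aft", "eval_metric": "aft-nloglik"}
bst = xgb.train(params, dtrain, num_boost_round=10)
```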
### Improved installation experience on Mac OSX (#5597, #5602, #5606, #5701)
* It only takes two commands to install the XGBoost Python package: `brew install libomp` followed by `pip install xgboost`. The installed XGBoost will use all CPU cores. Even better, starting with this release, we distribute pre-compiled binary wheels targeting Mac OSX. Now the install command `pip install xgboost` finishes instantly, as it no longer compiles the C++ source of XGBoost. The last three Mac versions (High Sierra, Mojave, Catalina) are supported.
* R package: the 1.1.0 release fixes the error `Initializing libomp.dylib, but found libomp.dylib already initialized` (#5701)
### Ranking metrics are now accelerated on GPUs (#5380, #5387, #5398)
### GPU-side data matrix to ingest data directly from other GPU libraries (#5420, #5465)
* Previously, data on GPU memory had to be copied back to the main memory before it could be used by XGBoost. Starting with the 1.1.0 release, XGBoost provides a dedicated interface (`DeviceQuantileDMatrix`) so that it can ingest data from GPU memory directly. The result is that XGBoost interoperates better with GPU-accelerated data science libraries, such as cuDF, cuPy, and PyTorch. A short sketch follows this list.
* Set device in device dmatrix. (#5596)
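A sketch assuming a CUDA-capable machine with cuPy installed; the data never leaves the GPU:

```python
import cupy as cp
import xgboost as xgb

X = cp.random.rand(1000, 10)
y = cp.random.rand(1000)

# Ingest directly from GPU memory; no copy back to host RAM.
dtrain = xgb.DeviceQuantileDMatrix(X, y)
bst = xgb.train({"tree_method": "gpu_hist"}, dtrain, num_boost_round=10)
```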
### Robust model serialization with JSON (#5123, #5217)
* We continue efforts from the 1.0.0 release to adopt JSON as the format to save and load models robustly. Refer to the release note for 1.0.0 to learn more.
* It is now possible to store internal configuration of the trained model (`Booster`) object in R as a JSON string (#5123, #5217).
### Improved integration with Dask
* Pass through `verbose` parameter for dask fit (#5413)
* Use `DMLC_TASK_ID`. (#5415)
* Order the prediction result. (#5416)
* Honor `nthreads` from dask worker. (#5414)
* Enable grid searching with scikit-learn. (#5417)
* Check non-equal when setting threads. (#5421)
* Accept other inputs for prediction. (#5428)
* Fix missing value for scikit-learn interface. (#5435)
### XGBoost4J-Spark: Check number of columns in the data iterator (#5202, #5303)
* Before, the native layer in XGBoost did not know the number of columns (features) ahead of time and had to guess the number of columns by counting the feature index when ingesting data. This method has a failure mode in the distributed setting: if the training data is highly sparse, some features may be completely missing in one or more worker partitions. Thus, one or more workers may deduce an incorrect data shape, leading to crashes or silently wrong models.
* Enforce correct data shape by passing the number of columns explicitly from the JVM layer into the native layer.
### Major refactoring of the `DMatrix` class
* Continued from 1.0.0 release.
* Remove update prediction cache from predictors. (#5312)
* Predict on Ellpack. (#5327)
* Partial rewrite EllpackPage (#5352)
* Use ellpack for prediction only when sparsepage doesn't exist. (#5504)
* RFC: #4354, Roadmap: #5143
### Breaking: XGBoost Python package now requires Pip 19.0 and higher (#5589)
* Your Linux machine may have an old version of Pip and may attempt to install a source package, leading to long installation time. This is because we are now using `manylinux2010` tag in the binary wheel release. Ensure you have Pip 19.0 or newer by running `python3 -m pip -V` to check the version. Upgrade Pip with command
```
python3 -m pip install --upgrade pip
```
Upgrading to the latest Pip allows us to depend on newer versions of system libraries. [TensorFlow](https://www.tensorflow.org/install/pip) also requires Pip 19.0+.
### Breaking: GPU algorithm now requires CUDA 10.0 and higher (#5649)
* CUDA 10.0 is necessary to make the GPU algorithm deterministic (#5361).
### Breaking: `silent` parameter is now removed (#5476)
* Please use `verbosity` instead.
### Breaking: Set `output_margin` to True for custom objectives (#5564)
* Now custom objectives in both the R and Python interfaces receive un-transformed (raw) prediction outputs.
### Breaking: `Makefile` is now removed. We use CMake exclusively to build XGBoost (#5513)
* Exception: the R package uses Autotools, as the CRAN ecosystem did not yet adopt CMake widely.
### Breaking: `distcol` updater is now removed (#5507)
* The `distcol` updater has long been broken, and we currently lack the resources to develop a working implementation from scratch.
### Deprecation notices
* **Python 3.5**. This release is the last release to support Python 3.5. The following release (1.2.0) will require Python 3.6.
* **Scala 2.11**. Currently XGBoost4J supports Scala 2.11. However, if a future release of XGBoost adopts Spark 3, it will not support Scala 2.11, as Spark 3 requires Scala 2.12+. We do not yet know which XGBoost release will adopt Spark 3.
### Known limitations
* (Python package) When early stopping is activated with `early_stopping_rounds` at training time, the prediction method (`xgb.predict()`) behaves in a surprising way. If XGBoost runs for M rounds and chooses iteration N (N < M) as the best iteration, then the prediction method will use M trees by default. To use the best iteration (N trees), users will need to manually take the best iteration field `bst.best_iteration` and pass it as the `ntree_limit` argument to `xgb.predict()`. See #5209 and #4052 for additional context. A sketch of this workaround follows this list.
* GPU ranking objective is currently not deterministic (#5561).
* When training parameter `reg_lambda` is set to zero, some leaf nodes may be assigned a NaN value. (See [discussion](https://discuss.xgboost.ai/t/still-getting-unexplained-nans-new-replication-code/1383/9).) For now, please set `reg_lambda` to a nonzero value.
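A self-contained sketch of the workaround from the first item above (random toy data; `best_ntree_limit` is the convenience field that already accounts for the off-by-one between iteration index and tree count):

```python
import numpy as np
import xgboost as xgb

rng = np.random.RandomState(0)
dtrain = xgb.DMatrix(rng.rand(200, 5), label=rng.randint(0, 2, 200))
dvalid = xgb.DMatrix(rng.rand(100, 5), label=rng.randint(0, 2, 100))

bst = xgb.train({"objective": "binary:logistic"}, dtrain,
                num_boost_round=1000, evals=[(dvalid, "valid")],
                early_stopping_rounds=10)

# predict() would otherwise use all boosted trees; limit it explicitly.
preds = bst.predict(dvalid, ntree_limit=bst.best_ntree_limit)
```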
### Community and Governance
* The XGBoost Project Management Committee (PMC) is pleased to announce a new committer: Egor Smirnov (@SmirnovEgorRu). He has led a major initiative to improve the performance of XGBoost on multi-core CPUs.
### Bug-fixes
* Improved compatibility with scikit-learn (#5255, #5505, #5538)
* Remove f-string, since it's not supported by Python 3.5 (#5330). Note that Python 3.5 support is deprecated and scheduled to be dropped in the upcoming release (1.2.0).
* Fix the pruner so that it doesn't prune the same branch twice (#5335)
* Enforce only major version in JSON model schema (#5336). Any major revision of the model schema would bump up the major version.
* Fix a small typo in sklearn.py that broke multiple eval metrics (#5341)
* Restore loading model from a memory buffer (#5360)
* Define lazy isinstance for Python compat (#5364)
* [R] fixed uses of `class()` (#5426)
* Force compressed buffer to be 4 bytes aligned, to keep cuda-memcheck happy (#5441)
* Remove warning for calling host function (`std::max`) on a GPU device (#5453)
* Fix uninitialized value bug in xgboost callback (#5463)
* Fix model dump in CLI (#5485)
* Fix out-of-bound array access in `WQSummary::SetPrune()` (#5493)
* Ensure that configured `dmlc/build_config.h` is picked up by Rabit and XGBoost, to fix build on Alpine (#5514)
* Fix a misspelled method, made in a git merge (#5509)
* Fix a bug in binary model serialization (#5532)
* Fix CLI model IO (#5535)
* Don't use `uint` for threads (#5542)
* Fix R interaction constraints to handle more than 100000 features (#5543)
* [jvm-packages] XGBoost Spark should deal with NaN when parsing evaluation output (#5546)
* GPU-side data sketching is now aware of query groups in learning-to-rank data (#5551)
* Fix DMatrix slicing for newly added fields (#5552)
* Fix configuration status with loading binary model (#5562)
* Fix build when OpenMP is disabled (#5566)
* R compatibility patches (#5577, #5600)
* gpu\_hist performance fixes (#5558)
* Don't set seed on CLI interface (#5563)
* [R] When serializing model, preserve model attributes related to early stopping (#5573)
* Avoid rabit calls in learner configuration (#5581)
* Hide C++ symbols in libxgboost.so when building Python wheel (#5590). This fixes apache/incubator-tvm#4953.
* Fix compilation on Mac OSX High Sierra (10.13) (#5597)
* Fix build on big endian CPUs (#5617)
* Resolve crash due to use of `vector<bool>::iterator` (#5642)
* Validation JSON model dump using JSON schema (#5660)
### Performance improvements
* Wide dataset quantile performance improvement (#5306)
* Reduce memory usage of GPU-side data sketching (#5407)
* Reduce span check overhead (#5464)
* Serialise booster after training to free up GPU memory (#5484)
* Use the maximum amount of GPU shared memory available to speed up the histogram kernel (#5491)
* Use non-synchronising scan in Thrust (#5560)
* Use `cudaDeviceGetAttribute()` instead of `cudaGetDeviceProperties()` for speed (#5570)
### API changes
* Support importing data from a Pandas SparseArray (#5431); see the sketch after this list
* `HostDeviceVector` (vector shared between CPU and GPU memory) now exposes `HostSpan` interface, to enable access on the CPU side with bound check (#5459)
* Accept other gradient types for `SplitEntry` (#5467)
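A minimal sketch of constructing a `DMatrix` from a DataFrame backed by Pandas `SparseArray` columns; the data is synthetic and the column names are hypothetical:

```python
import numpy as np
import pandas as pd
import xgboost as xgb

rng = np.random.RandomState(0)
# Mostly-zero columns stored as pandas SparseArray
df = pd.DataFrame({
    "f0": pd.arrays.SparseArray(rng.binomial(1, 0.05, size=500).astype(float)),
    "f1": pd.arrays.SparseArray(rng.binomial(1, 0.02, size=500).astype(float)),
})
dtrain = xgb.DMatrix(df, label=rng.randint(2, size=500))
```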
### Usability Improvements, Documentation
* Add `JVM_CHECK_CALL` to prevent C++ exceptions from leaking into the JVM layer (#5199)
* Updated Windows build docs (#5283)
* Update affiliation of @hcho3 (#5292)
* Display Sponsor button, link to OpenCollective (#5325)
* Update docs for GPU external memory (#5332)
* Add link to GPU documentation (#5437)
* Small updates to GPU documentation (#5483)
* Edits on tutorial for XGBoost job on Kubernetes (#5487)
* Add reference to GPU external memory (#5490)
* Fix typos (#5346, #5371, #5384, #5399, #5482, #5515)
* Update Python doc (#5517)
* Add Neptune and Optuna to list of examples (#5528)
* Raise error if the number of data weights doesn't match the number of data sets (#5540)
* Add a note about GPU ranking (#5572)
* Clarify meaning of `training` parameter in the C API function `XGBoosterPredict()` (#5604)
* Better error handling for situations where existing trees cannot be modified (#5406, #5418). This feature is enabled when `process_type` is set to `update`.
### Maintenance: testing, continuous integration, build system
* Add C++ test coverage for data sketching (#5251)
* Ignore gdb\_history (#5257)
* Rewrite setup.py. (#5271, #5280)
* Use `scikit-learn` in extra dependencies (#5310)
* Add CMake option to build static library (#5397)
* [R] changed FindLibR to take advantage of CMake cache (#5427)
* [R] fixed inconsistency in R -e calls in FindLibR.cmake (#5438)
* Refactor tests with data generator (#5439)
* Resolve failing Travis CI (#5445)
* Update dmlc-core. (#5466)
* [CI] Use clang-tidy 10 (#5469)
* De-duplicate code for checking maximum number of nodes (#5497)
* [CI] Use Ubuntu 18.04 LTS in JVM CI, because 19.04 is EOL (#5537)
* [jvm-packages] [CI] Create a Maven repository to host SNAPSHOT JARs (#5533)
* [jvm-packages] [CI] Publish XGBoost4J JARs with Scala 2.11 and 2.12 (#5539)
* [CI] Use Vault repository to re-gain access to devtoolset-4 (#5589)
### Maintenance: Refactor code for legibility and maintainability
* Move prediction cache to Learner (#5220, #5302)
* Remove SimpleCSRSource (#5315)
* Refactor SparsePageSource, delete cache files after use (#5321)
* Remove unnecessary DMatrix methods (#5324)
* Split up `LearnerImpl` (#5350)
* Move segment sorter to common (#5378)
* Move thread local entry into Learner (#5396)
* Split up test helpers header (#5455)
* Requires setting leaf stat when expanding tree (#5501)
* Purge device\_helpers.cuh (#5534)
* Use thrust functions instead of custom functions (#5544)
### Acknowledgement
**Contributors**: Nan Zhu (@CodingCat), Rory Mitchell (@RAMitchell), @ShvetsKS, Egor Smirnov (@SmirnovEgorRu), Andrew Kane (@ankane), Avinash Barnwal (@avinashbarnwal), Bart Broere (@bartbroere), Andy Adinets (@canonizer), Chen Qin (@chenqin), Daiki Katsuragawa (@daikikatsuragawa), David Díaz Vico (@daviddiazvico), Darius Kharazi (@dkharazi), Darby Payne (@dpayne), Jason E. Aten, Ph.D. (@glycerine), Philip Hyunsu Cho (@hcho3), James Lamb (@jameslamb), Jan Borchmann (@jborchma), Kamil A. Kaczmarek (@kamil-kaczmarek), Melissa Kohl (@mjkohl32), Nicolas Scozzaro (@nscozzaro), Paul Kaefer (@paulkaefer), Rong Ou (@rongou), Samrat Pandiri (@samratp), Sriram Chandramouli (@sriramch), Yuan Tang (@terrytangyuan), Jiaming Yuan (@trivialfis), Liang-Chi Hsieh (@viirya), Bobby Wang (@wbo4958), Zhang Zhang (@zhangzhang10),
**Reviewers**: Nan Zhu (@CodingCat), @LeZhengThu, Rory Mitchell (@RAMitchell), @ShvetsKS, Egor Smirnov (@SmirnovEgorRu), Steve Bronder (@SteveBronder), Nikita Titov (@StrikerRUS), Andrew Kane (@ankane), Avinash Barnwal (@avinashbarnwal), @brydag, Andy Adinets (@canonizer), Chandra Shekhar Reddy (@chandrureddy), Chen Qin (@chenqin), Codecov (@codecov-io), David Díaz Vico (@daviddiazvico), Darby Payne (@dpayne), Jason E. Aten, Ph.D. (@glycerine), Philip Hyunsu Cho (@hcho3), James Lamb (@jameslamb), @johnny-cat, Mu Li (@mli), Mate Soos (@msoos), @rnyak, Rong Ou (@rongou), Sriram Chandramouli (@sriramch), Toby Dylan Hocking (@tdhock), Yuan Tang (@terrytangyuan), Oleksandr Pryimak (@trams), Jiaming Yuan (@trivialfis), Liang-Chi Hsieh (@viirya), Bobby Wang (@wbo4958),
## v1.0.2 (2020.03.03)
This patch release applies the following patches to 1.0.0 release:
* Fix a small typo in sklearn.py that broke multiple eval metrics (#5341)
* Restore loading model from buffer (#5360)
* Use type name for data type check (#5364)
## v1.0.1 (2020.02.21)
This release is identical to the 1.0.0 release, except that it fixes a small bug that rendered 1.0.0 incompatible with Python 3.5. See #5328.
## v1.0.0 (2020.02.19)
This release marks a major milestone for the XGBoost project.
### Apache-style governance, contribution policy, and semantic versioning (#4646, #4659)
* Starting with the 1.0.0 release, the XGBoost Project is adopting Apache-style governance. The full community guideline is [available in the doc website](https://xgboost.readthedocs.io/en/release_1.0.0/contrib/community.html). Note that we now have a Project Management Committee (PMC) that will steward the project on a long-term basis. The PMC is also entrusted to run and fund the project's continuous integration (CI) infrastructure (https://xgboost-ci.net).
* We also adopt [semantic versioning](https://semver.org/). See [our release versioning policy](https://xgboost.readthedocs.io/en/release_1.0.0/contrib/release.html).
### Better performance scaling for multi-core CPUs (#4502, #4529, #4716, #4851, #5008, #5107, #5138, #5156)
* Poor performance scaling of the `hist` algorithm for multi-core CPUs has been under investigation (#3810). Previous effort #4529 was replaced with a series of pull requests (#5107, #5138, #5156) aimed at achieving the same performance benefits while keeping the C++ codebase legible. The latest performance benchmark results show [up to 5x speedup on Intel CPUs with many cores](https://github.com/dmlc/xgboost/pull/5156#issuecomment-580024413). Note: #5244, which concludes the effort, will become part of the upcoming release 1.1.0.
### Improved installation experience on Mac OSX (#4672, #5074, #5080, #5146, #5240)
* It used to be quite complicated to install XGBoost on Mac OSX. XGBoost uses OpenMP to distribute work among multiple CPU cores, and Mac's default C++ compiler (Apple Clang) does not come with OpenMP. The existing workaround (using another C++ compiler) was complex and prone to fail with cryptic diagnostics (#4933, #4949, #4969).
* Now it only takes two commands to install XGBoost: `brew install libomp` followed by `pip install xgboost`. The installed XGBoost will use all CPU cores.
* Even better, XGBoost is now available from Homebrew: `brew install xgboost`. See Homebrew/homebrew-core#50467.
* Previously, if you installed the XGBoost R package using the command `install.packages('xgboost')`, it could only use a single CPU core and you would experience slow training performance. With the 1.0.0 release, the R package will use all CPU cores out of the box.
### Distributed XGBoost now available on Kubernetes (#4621, #4939)
* Check out the [tutorial for setting up distributed XGBoost on a Kubernetes cluster](https://xgboost.readthedocs.io/en/release_1.0.0/tutorials/kubernetes.html).
### Ruby binding for XGBoost (#4856)
### New Native Dask interface for multi-GPU and multi-node scaling (#4473, #4507, #4617, #4819, #4907, #4914, #4941, #4942, #4951, #4973, #5048, #5077, #5144, #5270)
* XGBoost now integrates seamlessly with [Dask](https://dask.org/), a lightweight distributed framework for data processing. Together with the first-class support for cuDF data frames (see below), it is now easier than ever to create an end-to-end data pipeline running on one or more NVIDIA GPUs.
* Multi-GPU training with Dask is now up to 20% faster than the previous release (#4914, #4951).
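A minimal sketch of the native Dask interface, assuming a local Dask cluster and synthetic random data (all names are illustrative):

```python
from dask import array as da
from dask.distributed import Client
import xgboost as xgb

client = Client()  # a local cluster here; any Dask cluster works
X = da.random.random((100_000, 20), chunks=(10_000, 20))
y = da.random.randint(0, 2, size=100_000, chunks=10_000)

dtrain = xgb.dask.DaskDMatrix(client, X, y)
output = xgb.dask.train(client, {"objective": "binary:logistic"},
                        dtrain, num_boost_round=10)
booster = output["booster"]   # trained model
history = output["history"]   # per-iteration evaluation results
```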
### First-class support for cuDF data frames and cuPy arrays (#4737, #4745, #4794, #4850, #4891, #4902, #4918, #4927, #4928, #5053, #5189, #5194, #5206, #5219, #5225)
* [cuDF](https://github.com/rapidsai/cudf) is a data frame library for loading and processing tabular data on NVIDIA GPUs. It provides a Pandas-like API.
* [cuPy](https://github.com/cupy/cupy) implements a NumPy-compatible multi-dimensional array on NVIDIA GPUs.
* Now users can keep the data in GPU memory throughout the end-to-end data pipeline, obviating the need for copying data between the main memory and GPU memory.
* XGBoost can accept any data structure that exposes the `__array_interface__` signature, opening the way to support other columnar formats that are compatible with Apache Arrow.
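A minimal sketch of training directly on a cuPy array, assuming a CUDA-capable device and synthetic data:

```python
import cupy as cp
import xgboost as xgb

# The data is created on, and stays in, GPU memory.
X = cp.random.rand(1000, 10)
y = cp.random.randint(0, 2, size=1000)
dtrain = xgb.DMatrix(X, label=y)
bst = xgb.train({"tree_method": "gpu_hist",
                 "objective": "binary:logistic"}, dtrain, num_boost_round=10)
```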
### [Feature interaction constraint](https://xgboost.readthedocs.io/en/release_1.0.0/tutorials/feature_interaction_constraint.html) is now available with `approx` and `gpu_hist` algorithms (#4534, #4587, #4596, #5034).
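A minimal sketch of setting feature interaction constraints with one of the newly supported algorithms; the data and feature groups are hypothetical:

```python
import numpy as np
import xgboost as xgb

X, y = np.random.rand(300, 5), np.random.randint(2, size=300)
dtrain = xgb.DMatrix(X, label=y)
params = {
    "tree_method": "approx",   # "gpu_hist" also works on a CUDA device
    "objective": "binary:logistic",
    # Features 0-1 may interact with each other, and features 2-4 with
    # each other, but no branch may mix the two groups.
    "interaction_constraints": "[[0, 1], [2, 3, 4]]",
}
bst = xgb.train(params, dtrain, num_boost_round=10)
```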
### Learning to rank is now GPU accelerated (#4873, #5004, #5129)
* Supported ranking objectives: NDCG, MAP, Pairwise.
* [Up to 2x improved training performance on GPUs](https://devblogs.nvidia.com/learning-to-rank-with-xgboost-and-gpu/).
### Enable `gamma` parameter for GPU training (#4874, #4953)
* The `gamma` parameter specifies the minimum loss reduction required to add a new split in a tree. A larger value for `gamma` has the effect of pre-pruning the tree, by making it harder to add splits.
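A minimal sketch of GPU training with `gamma`, assuming a CUDA-capable device and synthetic regression data:

```python
import numpy as np
import xgboost as xgb

X, y = np.random.rand(500, 8), np.random.rand(500)
dtrain = xgb.DMatrix(X, label=y)
# A split is kept only if it reduces the loss by at least `gamma`.
params = {"tree_method": "gpu_hist",
          "objective": "reg:squarederror",
          "gamma": 1.0}
bst = xgb.train(params, dtrain, num_boost_round=20)
```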
### External memory for GPU training (#4486, #4526, #4747, #4833, #4879, #5014)
* It is now possible to use NVIDIA GPUs even when the size of training data exceeds the available GPU memory. Note that the external memory support for GPU is still experimental. #5093 will further improve performance and will become part of the upcoming release 1.1.0.
* RFC for enabling external memory with GPU algorithms: #4357
### Improve Scikit-Learn interface (#4558, #4842, #4929, #5049, #5151, #5130, #5227)
* Many users of XGBoost enjoy the convenience and breadth of Scikit-Learn ecosystem. In this release, we revise the Scikit-Learn API of XGBoost (`XGBRegressor`, `XGBClassifier`, and `XGBRanker`) to achieve feature parity with the traditional XGBoost interface (`xgboost.train()`).
* Insert check to validate data shapes.
* Produce an error message if `eval_set` is not a tuple. An error message is better than silently crashing.
* Allow using `numpy.RandomState` object.
* Add `n_jobs` as an alias of `nthread`; see the sketch after this list.
* Roadmap: #5152
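A minimal sketch of the revised Scikit-Learn interface, using synthetic data; it exercises the `n_jobs` alias and the newly accepted `numpy.RandomState` object:

```python
import numpy as np
from xgboost import XGBClassifier

X, y = np.random.rand(200, 4), np.random.randint(2, size=200)
clf = XGBClassifier(
    n_jobs=4,                                # alias of nthread
    random_state=np.random.RandomState(42),  # RandomState objects now accepted
)
clf.fit(X, y, eval_set=[(X, y)], eval_metric="logloss", verbose=False)
```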
### XGBoost4J-Spark: Redesigning checkpointing mechanism
* RFC is available at #4786
* Clean up checkpoint file after a successful training job (#4754): The current implementation in XGBoost4J-Spark does not clean up the checkpoint file after a successful training job. If the user runs another job with the same checkpointing directory, she will get a wrong model because the second job will re-use the checkpoint file left over from the first job. To prevent this scenario, we propose to always clean up the checkpoint file after every successful training job.
* Avoid Multiple Jobs for Checkpointing (#5082): The current method for checkpointing is to collect the booster produced at the last iteration of each checkpoint interval to the Driver and persist it in HDFS. The major issue with this approach is that it needs to re-perform the data preparation for training if the user did not choose to cache the training dataset. To avoid re-performing data prep, we build external-memory checkpointing in the XGBoost4J layer as well.
* Enable deterministic repartitioning when checkpoint is enabled (#4807): Distributed algorithms for gradient boosting assume a fixed partition of the training data between multiple iterations. In previous versions, there was no guarantee that the data partition would stay the same, especially when a worker went down and some data had to be recovered from a previous checkpoint. In this release, we make the data partition deterministic by using the hash value of each data row when computing the partition.
### XGBoost4J-Spark: handle errors thrown by the native code (#4560)
* All core logic of XGBoost is written in C++, so XGBoost4J-Spark internally uses the C++ code via the Java Native Interface (JNI). #4560 adds proper error handling for any errors or exceptions arising from the C++ code, so that the XGBoost Spark application can be torn down in an orderly fashion.
### XGBoost4J-Spark: Refine method to count the number of alive cores (#4858)
* The `SparkParallelismTracker` class ensures that a sufficient number of executor cores are alive. To that end, it is important to query the number of alive cores reliably.
### XGBoost4J: Add `BigDenseMatrix` to store more than `Integer.MAX_VALUE` elements (#4383)
### Robust model serialization with JSON (#4632, #4708, #4739, #4868, #4936, #4945, #4974, #5086, #5087, #5089, #5091, #5094, #5110, #5111, #5112, #5120, #5137, #5218, #5222, #5236, #5245, #5248, #5281)
* In this release, we introduce an experimental support of using [JSON](https://www.json.org/json-en.html) for serializing (saving/loading) XGBoost models and related hyperparameters for training. We would like to eventually replace the old binary format with JSON, since it is an open format and parsers are available in many programming languages and platforms. See [the documentation for model I/O using JSON](https://xgboost.readthedocs.io/en/release_1.0.0/tutorials/saving_model.html). #3980 explains why JSON was chosen over other alternatives.
* To maximize interoperability and compatibility of the serialized models, we now split serialization into two parts (#4855):
1. Model, e.g. decision trees and strictly related metadata like `num_features`.
2. Internal configuration, consisting of training parameters and other configurable parameters. For example, `max_delta_step`, `tree_method`, `objective`, `predictor`, `gpu_id`.
Previously, users often ran into issues where the model file produced by one machine could not be loaded or run on another machine. For example, models trained using a machine with an NVIDIA GPU could not run on another machine without a GPU (#5291, #5234). The reason is that the old binary format saved some internal configuration that was not universally applicable to all machines, e.g. `predictor='gpu_predictor'`.
Now, the model saving function (`Booster.save_model()` in Python) will save only the model, without internal configuration. This guarantees that your model file can be used anywhere; see the sketch at the end of this section. Internal configuration will be serialized in limited circumstances such as:
* Multiple nodes in a distributed system exchange model details over the network.
* Model checkpointing, to recover from possible crashes.
This work proved to be useful for parameter validation as well (see below).
* Starting with 1.0.0 release, we will use semantic versioning to indicate whether the model produced by one version of XGBoost would be compatible with another version of XGBoost. Any change in the major version indicates a breaking change in the serialization format.
* We now provide a robust method to save and load scikit-learn related attributes (#5245). Previously, we used Python pickle to save Python attributes related to `XGBClassifier`, `XGBRegressor`, and `XGBRanker` objects. The attributes are necessary to properly interact with scikit-learn. See #4639 for more details. The use of pickling hampered interoperability, as a pickle from one machine may not necessarily work on another machine. Starting with this release, we use an alternative method to serialize the scikit-learn related attributes. The use of Python pickle is now discouraged (#5236, #5281).
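A minimal sketch of saving and re-loading a model in the JSON format, using a small synthetic model for illustration:

```python
import numpy as np
import xgboost as xgb

X, y = np.random.rand(100, 4), np.random.randint(2, size=100)
bst = xgb.train({"objective": "binary:logistic"},
                xgb.DMatrix(X, label=y), num_boost_round=5)

# Save only the model (trees plus strictly related metadata);
# the ".json" extension selects the JSON format.
bst.save_model("model.json")

# The file can be loaded on any machine, with or without a GPU.
bst2 = xgb.Booster()
bst2.load_model("model.json")
```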
### Parameter validation: detection of unused or incorrect parameters (#4553, #4577, #4738, #4801, #4961, #5101, #5157, #5167, #5256)
* Mis-spelled training parameters are a common user mistake. In previous versions of XGBoost, mis-spelled parameters were silently ignored. Starting with the 1.0.0 release, XGBoost will produce a warning message if there are any unused training parameters. Currently, parameter validation is available to R users and Python XGBoost API users. We are working to extend its support to scikit-learn users.
* Configuration steps now have well-defined semantics (#4542, #4738), so we know exactly where and how the internal configurable parameters are changed.
* The user can now use the `save_config()` function to inspect all (used) training parameters. This is helpful for debugging model performance.
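A minimal sketch of inspecting the internal configuration with `save_config()`; the exact keys in the returned JSON are illustrative:

```python
import json
import numpy as np
import xgboost as xgb

X, y = np.random.rand(100, 4), np.random.randint(2, size=100)
bst = xgb.train({"max_depth": 3}, xgb.DMatrix(X, label=y), num_boost_round=2)

config = json.loads(bst.save_config())     # all internal parameters as JSON
print(json.dumps(config, indent=2)[:400])  # print the beginning for a peek
```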
### Allow individual workers to recover from faults (#4808, #4966)
* Status quo: if a worker fails, all workers are shut down and restarted, and learning resumes from the last checkpoint. This involves requesting resources from the scheduler (e.g. Spark) and shuffling all the data again from scratch. Both of these operations can be quite costly and block training for extended periods of time, especially if the training data is big and the number of worker nodes is in the hundreds.
* The proposed solution is to recover the single node that failed, instead of shutting down all workers. The rest of the cluster waits until the failed worker is bootstrapped and catches up with the rest.
* See roadmap at #4753. Note that this is work in progress. In particular, the feature is not yet available from XGBoost4J-Spark.
### Accurate prediction for DART models
* Use DART tree weights when computing SHAPs (#5050)
* Don't drop trees during DART prediction by default (#5115)
* Fix DART prediction in R (#5204)
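A minimal sketch of the new DART prediction behavior on synthetic data; the `training` flag shown here is assumed to be the Python-level counterpart of the new C API parameter:

```python
import numpy as np
import xgboost as xgb

X, y = np.random.rand(300, 6), np.random.randint(2, size=300)
dtrain = xgb.DMatrix(X, label=y)
bst = xgb.train({"booster": "dart", "rate_drop": 0.1,
                 "objective": "binary:logistic"}, dtrain, num_boost_round=20)

# Inference: by default no trees are dropped (training=False).
preds = bst.predict(xgb.DMatrix(X))
# Opt back into dropout, e.g. inside a custom training loop.
preds_dropout = bst.predict(xgb.DMatrix(X), training=True)
```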
### Make external memory more robust
* Fix issues with training with external memory on CPU (#4487)
* Fix crash with approx tree method on CPU (#4510)
* Fix external memory race in `exact` (#4980). Note: `dmlc::ThreadedIter` is not actually thread-safe. We would like to re-design it in the long term.
### Major refactoring of the `DMatrix` class (#4686, #4744, #4748, #5044, #5092, #5108, #5188, #5198)
* Goal 1: improve performance and reduce memory consumption. Right now, if the user trains a model with a NumPy array as training data, the array gets copied 2-3 times before training begins. We'd like to reduce duplication of the data matrix.
* Goal 2: Expose a common interface to external data, unify the way DMatrix objects are constructed and simplify the process of adding new external data sources. This work is essential for ingesting cuPy arrays.
* Goal 3: Handle missing values consistently.
* RFC: #4354, Roadmap: #5143
* This work is also relevant to external memory support on GPUs.
### Breaking: XGBoost Python package now requires Python 3.5 or newer (#5021, #5274)
* Python 3.4 has reached its end-of-life on March 16, 2019, so we now require Python 3.5 or newer.
### Breaking: GPU algorithm now requires CUDA 9.0 and higher (#4527, #4580)
### Breaking: `n_gpus` parameter removed; multi-GPU training now requires a distributed framework (#4579, #4749, #4773, #4810, #4867, #4908)
* #4531 proposed removing support for single-process multi-GPU training. Contributors would focus on multi-GPU support through distributed frameworks such as Dask and Spark, where the framework would be expected to assign a worker process for each GPU independently. By delegating GPU management and data movement to the distributed framework, we can greatly simplify the core XGBoost codebase, make multi-GPU training more robust, and reduce the burden of future development.
### Breaking: Some deprecated features have been removed
* ``gpu_exact`` training method (#4527, #4742, #4777). Use ``gpu_hist`` instead.
* ``learning_rates`` parameter in Python (#5155). Use the callback API instead.
* ``num_roots`` (#5059, #5165), since the current training code always uses a single root node.
* GPU-specific objectives (#4690), such as `gpu:reg:linear`. Use objectives without `gpu:` prefix; GPU will be used automatically if your machine has one.
### Breaking: the C API function `XGBoosterPredict()` now asks for an extra parameter `training`.
### Breaking: We now use CMake exclusively to build XGBoost. `Makefile` is being sunset.
* Exception: the R package uses Autotools, as the CRAN ecosystem has not yet adopted CMake widely.
### Performance improvements
* Smarter choice of histogram construction for distributed `gpu_hist` (#4519)
* Optimizations for quantization on device (#4572)
* Introduce caching memory allocator to avoid latency associated with GPU memory allocation (#4554, #4615)
* Optimize the initialization stage of the CPU `hist` algorithm for sparse datasets (#4625)
* Prevent unnecessary data copies from GPU memory to the host (#4795)
* Improve operation efficiency for single prediction (#5016)
* Group builder modified for incremental building, to speed up building large `DMatrix` (#5098)
### Bug-fixes
* Eliminate `FutureWarning: Series.base is deprecated` (#4337)
* Ensure pandas DataFrame column names are treated as strings in type error message (#4481)
* [jvm-packages] Add back `reg:linear` for scala, as it is only deprecated and not meant to be removed yet (#4490)
* Fix library loading for Cygwin users (#4499)
* Fix prediction from loaded pickle (#4516)
* Enforce exclusion between `pred_contribs=True` and `pred_interactions=True` (#4522)
* Do not return dangling reference to local `std::string` (#4543)
* Set the appropriate device before freeing device memory (#4566)
* Mark `SparsePageDmatrix` destructor default. (#4568)
* Choose the appropriate tree method only when the tree method is 'auto' (#4571)
* Fix `benchmark_tree.py` (#4593)
* [jvm-packages] Fix silly bug in feature scoring (#4604)
* Fix GPU predictor when the test data matrix has different number of features than the training data matrix used to train the model (#4613)
* Fix external memory for get column batches. (#4622)
* [R] Use built-in label when xgb.DMatrix is given to xgb.cv() (#4631)
* Fix early stopping in the Python package (#4638)
* Fix AUC error in distributed mode caused by imbalanced dataset (#4645, #4798)
* [jvm-packages] Expose `setMissing` method in `XGBoostClassificationModel` / `XGBoostRegressionModel` (#4643)
* Remove initializing stringstream reference. (#4788)
* [R] `xgb.get.handle` now checks all classes listed in `object` (#4800)
* Do not use `gpu_predictor` unless data comes from GPU (#4836)
* Fix data loading (#4862)
* Workaround `isnan` across different environments. (#4883)
* [jvm-packages] Handle Long-type parameter (#4885)
* Don't `set_params` at the end of `set_state` (#4947). Ensure that the model does not change after pickling and unpickling multiple times.
* C++ exceptions should not crash OpenMP loops (#4960)
* Fix `usegpu` flag in DART. (#4984)
* Run training with empty `DMatrix` (#4990, #5159)
* Ensure that no two processes can use the same GPU (#4990)
* Fix repeated split and 0 cover nodes (#5010)
* Reset histogram hit counter between multiple data batches (#5035)
* Fix `feature_name` created from Int64Index dataframe. (#5081)
* Don't use 0 for "fresh leaf" (#5084)
* Throw error when user attempts to use multi-GPU training and XGBoost has not been compiled with NCCL (#5170)
* Fix metric name loading (#5122)
* Quick fix for memory leak in CPU `hist` algorithm (#5153)
* Fix wrapping GPU ID and prevent data copying (#5160)
* Fix signature of Span constructor (#5166)
* Lazy initialization of device vector, so that XGBoost compiled with CUDA can run on a machine without any GPU (#5173)
* Model loading should not change system locale (#5314)
* Distributed training jobs would sometimes hang; revert Rabit to fix this regression (dmlc/rabit#132, #5237)
### API changes
* Add support for cross-validation using query ID (#4474)
* Enable feature importance property for DART model (#4525)
* Add `rmsle` metric and `reg:squaredlogerror` objective (#4541); see the sketch after this list
* All objective and evaluation metrics are now exposed to JVM packages (#4560)
* `dump_model()` and `get_dump()` now support exporting in GraphViz language (#4602)
* Support metrics `ndcg-` and `map-` (#4635)
* [jvm-packages] Allow chaining prediction (transform) in XGBoost4J-Spark (#4667)
* [jvm-packages] Add option to bypass missing value check in the Spark layer (#4805). Only use this option if you know what you are doing.
* [jvm-packages] Add public group getter (#4838)
* `XGDMatrixSetGroup` C API is now deprecated (#4864). Use `XGDMatrixSetUIntInfo` instead.
* [R] Added new `train_folds` parameter to `xgb.cv()` (#5114)
* Ingest meta information from Pandas DataFrame, such as data weights (#5216)
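A minimal sketch of the new objective and metric pair on synthetic non-negative regression targets:

```python
import numpy as np
import xgboost as xgb

X = np.random.rand(400, 5)
y = np.random.rand(400) * 100               # non-negative targets
dtrain = xgb.DMatrix(X, label=y)
params = {"objective": "reg:squaredlogerror", "eval_metric": "rmsle"}
bst = xgb.train(params, dtrain, num_boost_round=10, evals=[(dtrain, "train")])
```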
### Maintenance: Refactor code for legibility and maintainability
* De-duplicate GPU parameters (#4454)
* Simplify INI-style config reader using C++11 STL (#4478, #4521)
* Refactor histogram building code for `gpu_hist` (#4528)
* Overload device memory allocator, to enable instrumentation for compiling memory usage statistics (#4532)
* Refactor out row partitioning logic from `gpu_hist` (#4554)
* Remove an unused variable (#4588)
* Implement tree model dump with code generator, to de-duplicate code for generating dumps in 3 different formats (#4602)
* Remove `RowSet` class which is no longer being used (#4697)
* Remove some unused functions as reported by cppcheck (#4743)
* Mimic CUDA assert output in Span check (#4762)
* [jvm-packages] Refactor `XGBoost.scala` to put all params processing in one place (#4815)
* Add some comments for GPU row partitioner (#4832)
* Span: use `size_t` for index_type; add `front` and `back`. (#4935)
* Remove dead code in `exact` algorithm (#5034, #5105)
* Unify integer types used for row and column indices (#5034)
* Extract feature interaction constraint from `SplitEvaluator` class. (#5034)
* [Breaking] De-duplicate parameters and docstrings in the constructors of Scikit-Learn models (#5130)
* Remove benchmark code from GPU tests (#5141)
* Clean up Python 2 compatibility code. (#5161)
* Extensible binary serialization format for `DMatrix::MetaInfo` (#5187). This will be useful for implementing censored labels for survival analysis applications.
* Cleanup clang-tidy warnings. (#5247)
### Maintenance: testing, continuous integration, build system
* Use `yaml.safe_load` instead of `yaml.load`. (#4537)
* Ensure GCC is at least 5.x (#4538)
* Remove all mention of `reg:linear` from tests (#4544)
* [jvm-packages] Upgrade to Scala 2.12 (#4574)
* [jvm-packages] Update kryo dependency to 2.22 (#4575)
* [CI] Specify account ID when logging into ECR Docker registry (#4584)
* Use Sphinx 2.1+ to compile documentation (#4609)
* Make Pandas optional for running Python unit tests (#4620)
* Fix spark tests on machines with many cores (#4634)
* [jvm-packages] Update local dev build process (#4640)
* Add optional dependencies to setup.py (#4655)
* [jvm-packages] Fix maven warnings (#4664)
* Remove extraneous files from the R package, to comply with CRAN policy (#4699)
* Remove VC-2013 support, since it is not C++11 compliant (#4701)
* [CI] Fix broken installation of Pandas (#4704, #4722)
* [jvm-packages] Clean up temporary files after running tests (#4706)
* Specify version macro in CMake. (#4730)
* Include dmlc-tracker into XGBoost Python package (#4731)
* [CI] Use long key ID for Ubuntu repository fingerprints. (#4783)
* Remove plugin, cuda related code in automake & autoconf files (#4789)
* Skip related tests when scikit-learn is not installed. (#4791)
* Ignore vscode and clion files (#4866)
* Use bundled Google Test by default (#4900)
* [CI] Raise timeout threshold in Jenkins (#4938)
* Copy CMake parameter from dmlc-core. (#4948)
* Set correct file permission. (#4964)
* [CI] Update lint configuration to support latest pylint convention (#4971)
* [CI] Upload nightly builds to S3 (#4976, #4979)
* Add asan.so.5 to cmake script. (#4999)
* [CI] Fix Travis tests. (#5062)
* [CI] Locate vcomp140.dll from System32 directory (#5078)
* Implement training observer to dump internal states of objects (#5088). This will be useful for debugging.
* Fix visual studio output library directories (#5119)
* [jvm-packages] Comply with scala style convention + fix broken unit test (#5134)
* [CI] Repair download URL for Maven 3.6.1 (#5139)
* Don't use modernize-use-trailing-return-type in clang-tidy. (#5169)
* Explicitly use UTF-8 codepage when using MSVC (#5197)
* Add CMake option to run Undefined Behavior Sanitizer (UBSan) (#5211)
* Make some GPU tests deterministic (#5229)
* [R] Robust endian detection in CRAN xgboost build (#5232)
* Support FreeBSD (#5233)
* Make `pip install xgboost*.tar.gz` work by fixing build-python.sh (#5241)
* Fix compilation error due to 64-bit integer narrowing to `size_t` (#5250)
* Remove use of `std::cout` from R package, to comply with CRAN policy (#5261)
* Update DMLC-Core submodule (#4674, #4688, #4726, #4924)
* Update Rabit submodule (#4560, #4667, #4718, #4808, #4966, #5237)
### Usability Improvements, Documentation
* Add Random Forest API to Python API doc (#4500)
* Fix Python demo and doc. (#4545)
* Remove doc about not supporting cuda 10.1 (#4578)
* Address some sphinx warnings and errors, add doc for building doc. (#4589)
* Add instruction to run formatting checks locally (#4591)
* Fix docstring for `XGBModel.predict()` (#4592)
* Doc and demo for customized metric and objective (#4598, #4608)
* Add to documentation how to run tests locally (#4610)
* Empty evaluation list in early stopping should produce meaningful error message (#4633)
* Fixed year to 2019 in conf.py, helpers.h and LICENSE (#4661)
* Minor updates to links and grammar (#4673)
* Remove `silent` in doc (#4689)
* Remove old Python trouble shooting doc (#4729)
* Add `os.PathLike` support for file paths to DMatrix and Booster Python classes (#4757); see the sketch after this list
* Update XGBoost4J-Spark doc (#4804)
* Regular formatting for evaluation metrics (#4803)
* [jvm-packages] Refine documentation for handling missing values in XGBoost4J-Spark (#4805)
* Monitor for distributed environment (#4829). This is useful for identifying performance bottlenecks.
* Add check for length of weights and produce a good error message (#4872)
* Fix DMatrix doc (#4884)
* Export C++ headers in CMake installation (#4897)
* Update license year in README.md to 2019 (#4940)
* Fix incorrectly displayed Note in the doc (#4943)
* Follow PEP 257 Docstring Conventions (#4959)
* Document minimum version required for Google Test (#5001)
* Add better error message for invalid feature names (#5024)
* Some guidelines on device memory usage (#5038)
* [doc] Some notes for external memory. (#5065)
* Update document for `tree_method` (#5106)
* Update demo for ranking. (#5154)
* Add new lines for Spark XGBoost missing values section (#5180)
* Fix simple typo: utilty -> utility (#5182)
* Update R doc by roxygen2 (#5201)
* [R] Direct user to use `set.seed()` instead of setting `seed` parameter (#5125)
* Add Optuna badge to `README.md` (#5208)
* Fix compilation error in `c-api-demo.c` (#5215)
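A minimal sketch of passing an `os.PathLike` object where a file path is expected, using a small synthetic model:

```python
from pathlib import Path
import numpy as np
import xgboost as xgb

X, y = np.random.rand(100, 4), np.random.randint(2, size=100)
bst = xgb.train({"objective": "binary:logistic"},
                xgb.DMatrix(X, label=y), num_boost_round=2)

model_path = Path("model.json")          # a pathlib.Path instead of a str
bst.save_model(model_path)
bst2 = xgb.Booster(model_file=model_path)
```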
### Acknowledgement
**Contributors**: Nan Zhu (@CodingCat), Crissman Loomis (@Crissman), Cyprien Ricque (@Cyprien-Ricque), Evan Kepner (@EvanKepner), K.O. (@Hi-king), KaiJin Ji (@KerryJi), Peter Badida (@KeyWeeUsr), Kodi Arfer (@Kodiologist), Rory Mitchell (@RAMitchell), Egor Smirnov (@SmirnovEgorRu), Jacob Kim (@TheJacobKim), Vibhu Jawa (@VibhuJawa), Marcos (@astrowonk), Andy Adinets (@canonizer), Chen Qin (@chenqin), Christopher Cowden (@cowden), @cpfarrell, @david-cortes, Liangcai Li (@firestarman), @fuhaoda, Philip Hyunsu Cho (@hcho3), @here-nagini, Tong He (@hetong007), Michal Kurka (@michalkurka), Honza Sterba (@honzasterba), @iblumin, @koertkuipers, mattn (@mattn), Mingjie Tang (@merlintang), OrdoAbChao (@mglowacki100), Matthew Jones (@mt-jones), mitama (@nigimitama), Nathan Moore (@nmoorenz), Daniel Stahl (@phillyfan1138), Michaël Benesty (@pommedeterresautee), Rong Ou (@rongou), Sebastian (@sfahnens), Xu Xiao (@sperlingxx), @sriramch, Sean Owen (@srowen), Stephanie Yang (@stpyang), Yuan Tang (@terrytangyuan), Mathew Wicks (@thesuperzapper), Tim Gates (@timgates42), TinkleG (@tinkle1129), Oleksandr Pryimak (@trams), Jiaming Yuan (@trivialfis), Matvey Turkov (@turk0v), Bobby Wang (@wbo4958), yage (@yage99), @yellowdolphin
**Reviewers**: Nan Zhu (@CodingCat), Crissman Loomis (@Crissman), Cyprien Ricque (@Cyprien-Ricque), Evan Kepner (@EvanKepner), John Zedlewski (@JohnZed), KOLANICH (@KOLANICH), KaiJin Ji (@KerryJi), Kodi Arfer (@Kodiologist), Rory Mitchell (@RAMitchell), Egor Smirnov (@SmirnovEgorRu), Nikita Titov (@StrikerRUS), Jacob Kim (@TheJacobKim), Vibhu Jawa (@VibhuJawa), Andrew Kane (@ankane), Arno Candel (@arnocandel), Marcos (@astrowonk), Bryan Woods (@bryan-woods), Andy Adinets (@canonizer), Chen Qin (@chenqin), Thomas Franke (@coding-komek), Peter (@codingforfun), @cpfarrell, Joshua Patterson (@datametrician), @fuhaoda, Philip Hyunsu Cho (@hcho3), Tong He (@hetong007), Honza Sterba (@honzasterba), @iblumin, @jakirkham, Vadim Khotilovich (@khotilov), Keith Kraus (@kkraus14), @koertkuipers, @melonki, Mingjie Tang (@merlintang), OrdoAbChao (@mglowacki100), Daniel Mahler (@mhlr), Matthew Rocklin (@mrocklin), Matthew Jones (@mt-jones), Michaël Benesty (@pommedeterresautee), PSEUDOTENSOR / Jonathan McKinney (@pseudotensor), Rong Ou (@rongou), Vladimir (@sh1ng), Scott Lundberg (@slundberg), Xu Xiao (@sperlingxx), @sriramch, Pasha Stetsenko (@st-pasha), Stephanie Yang (@stpyang), Yuan Tang (@terrytangyuan), Mathew Wicks (@thesuperzapper), Theodore Vasiloudis (@thvasilo), TinkleG (@tinkle1129), Oleksandr Pryimak (@trams), Jiaming Yuan (@trivialfis), Bobby Wang (@wbo4958), yage (@yage99), @yellowdolphin, Yin Lou (@yinlou)
## v0.90 (2019.05.18)
### XGBoost Python package drops Python 2.x (#4379, #4381)


@@ -6,8 +6,11 @@ file(GLOB_RECURSE R_SOURCES
${CMAKE_CURRENT_LIST_DIR}/src/*.c)
# Use object library to expose symbols
add_library(xgboost-r OBJECT ${R_SOURCES})
set(R_DEFINITIONS
if (ENABLE_ALL_WARNINGS)
target_compile_options(xgboost-r PRIVATE -Wall -Wextra)
endif (ENABLE_ALL_WARNINGS)
target_compile_definitions(xgboost-r
PUBLIC
-DXGBOOST_STRICT_R_MODE=1
-DXGBOOST_CUSTOMIZE_GLOBAL_PRNG=1
-DDMLC_LOG_BEFORE_THROW=0
@@ -15,20 +18,27 @@ set(R_DEFINITIONS
-DDMLC_LOG_CUSTOMIZE=1
-DRABIT_CUSTOMIZE_MSG_
-DRABIT_STRICT_CXX98_)
target_compile_definitions(xgboost-r
PRIVATE ${R_DEFINITIONS})
target_include_directories(xgboost-r
PRIVATE
${LIBR_INCLUDE_DIRS}
${PROJECT_SOURCE_DIR}/include
${PROJECT_SOURCE_DIR}/dmlc-core/include
${PROJECT_SOURCE_DIR}/rabit/include)
target_link_libraries(xgboost-r PUBLIC ${LIBR_CORE_LIBRARY})
if (USE_OPENMP)
find_package(OpenMP REQUIRED)
target_link_libraries(xgboost-r PUBLIC OpenMP::OpenMP_CXX OpenMP::OpenMP_C)
endif (USE_OPENMP)
set_target_properties(
xgboost-r PROPERTIES
CXX_STANDARD 11
CXX_STANDARD 14
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON)
set(XGBOOST_DEFINITIONS "${XGBOOST_DEFINITIONS};${R_DEFINITIONS}" PARENT_SCOPE)
set(XGBOOST_OBJ_SOURCES $<TARGET_OBJECTS:xgboost-r> PARENT_SCOPE)
set(LINKED_LIBRARIES_PRIVATE ${LINKED_LIBRARIES_PRIVATE} ${LIBR_CORE_LIBRARY} PARENT_SCOPE)
# Get compilation and link flags of xgboost-r and propagate to objxgboost
target_link_libraries(objxgboost PUBLIC xgboost-r)
# Add all objects of xgboost-r to objxgboost
target_sources(objxgboost INTERFACE $<TARGET_OBJECTS:xgboost-r>)
set(LIBR_HOME "${LIBR_HOME}" PARENT_SCOPE)
set(LIBR_EXECUTABLE "${LIBR_EXECUTABLE}" PARENT_SCOPE)


@@ -1,8 +1,8 @@
Package: xgboost
Type: Package
Title: Extreme Gradient Boosting
Version: 1.0.0.1
Date: 2019-07-23
Version: 1.3.1.1
Date: 2020-08-28
Authors@R: c(
person("Tianqi", "Chen", role = c("aut"),
email = "tianqi.tchen@gmail.com"),
@@ -54,7 +54,9 @@ Suggests:
lintr,
igraph (>= 1.0.1),
jsonlite,
float
float,
crayon,
titanic
Depends:
R (>= 3.3.0)
Imports:
@@ -62,6 +64,5 @@ Imports:
methods,
data.table (>= 1.9.6),
magrittr (>= 1.5),
stringi (>= 0.5.2)
RoxygenNote: 7.0.2
SystemRequirements: GNU make, C++11
RoxygenNote: 7.1.1
SystemRequirements: GNU make, C++14


@@ -14,6 +14,7 @@ S3method(setinfo,xgb.DMatrix)
S3method(slice,xgb.DMatrix)
export("xgb.attr<-")
export("xgb.attributes<-")
export("xgb.config<-")
export("xgb.parameters<-")
export(cb.cv.predict)
export(cb.early.stop)
@@ -30,23 +31,29 @@ export(xgb.DMatrix)
export(xgb.DMatrix.save)
export(xgb.attr)
export(xgb.attributes)
export(xgb.config)
export(xgb.create.features)
export(xgb.cv)
export(xgb.dump)
export(xgb.gblinear.history)
export(xgb.ggplot.deepness)
export(xgb.ggplot.importance)
export(xgb.ggplot.shap.summary)
export(xgb.importance)
export(xgb.load)
export(xgb.load.raw)
export(xgb.model.dt.tree)
export(xgb.plot.deepness)
export(xgb.plot.importance)
export(xgb.plot.multi.trees)
export(xgb.plot.shap)
export(xgb.plot.shap.summary)
export(xgb.plot.tree)
export(xgb.save)
export(xgb.save.raw)
export(xgb.serialize)
export(xgb.train)
export(xgb.unserialize)
export(xgboost)
import(methods)
importClassesFrom(Matrix,dgCMatrix)
@@ -74,11 +81,6 @@ importFrom(graphics,title)
importFrom(magrittr,"%>%")
importFrom(stats,median)
importFrom(stats,predict)
importFrom(stringi,stri_detect_regex)
importFrom(stringi,stri_match_first_regex)
importFrom(stringi,stri_replace_all_regex)
importFrom(stringi,stri_replace_first_regex)
importFrom(stringi,stri_split_regex)
importFrom(utils,head)
importFrom(utils,object.size)
importFrom(utils,str)


@@ -62,11 +62,11 @@ cb.print.evaluation <- function(period = 1, showsd = TRUE) {
callback <- function(env = parent.frame()) {
if (length(env$bst_evaluation) == 0 ||
period == 0 ||
NVL(env$rank, 0) != 0 )
NVL(env$rank, 0) != 0)
return()
i <- env$iteration
if ((i-1) %% period == 0 ||
if ((i - 1) %% period == 0 ||
i == env$begin_iteration ||
i == env$end_iteration) {
stdev <- if (showsd) env$bst_evaluation_err else NULL
@@ -115,7 +115,7 @@ cb.evaluation.log <- function() {
stop("bst_evaluation must have non-empty names")
mnames <<- gsub('-', '_', names(env$bst_evaluation))
if(!is.null(env$bst_evaluation_err))
if (!is.null(env$bst_evaluation_err))
mnames <<- c(paste0(mnames, '_mean'), paste0(mnames, '_std'))
}
@@ -123,12 +123,12 @@ cb.evaluation.log <- function() {
env$evaluation_log <- as.data.table(t(simplify2array(env$evaluation_log)))
setnames(env$evaluation_log, c('iter', mnames))
if(!is.null(env$bst_evaluation_err)) {
if (!is.null(env$bst_evaluation_err)) {
# rearrange col order from _mean,_mean,...,_std,_std,...
# to be _mean,_std,_mean,_std,...
len <- length(mnames)
means <- mnames[seq_len(len/2)]
stds <- mnames[(len/2 + 1):len]
means <- mnames[seq_len(len / 2)]
stds <- mnames[(len / 2 + 1):len]
cnames <- numeric(len)
cnames[c(TRUE, FALSE)] <- means
cnames[c(FALSE, TRUE)] <- stds
@@ -144,7 +144,7 @@ cb.evaluation.log <- function() {
return(finalizer(env))
ev <- env$bst_evaluation
if(!is.null(env$bst_evaluation_err))
if (!is.null(env$bst_evaluation_err))
ev <- c(ev, env$bst_evaluation_err)
env$evaluation_log <- c(env$evaluation_log,
list(c(iter = env$iteration, ev)))
@@ -351,13 +351,19 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
finalizer <- function(env) {
if (!is.null(env$bst)) {
attr_best_score = as.numeric(xgb.attr(env$bst$handle, 'best_score'))
if (best_score != attr_best_score)
attr_best_score <- as.numeric(xgb.attr(env$bst$handle, 'best_score'))
if (best_score != attr_best_score) {
# If the difference is too big, throw an error
if (abs(best_score - attr_best_score) >= 1e-14) {
stop("Inconsistent 'best_score' values between the closure state: ", best_score,
" and the xgb.attr: ", attr_best_score)
env$bst$best_iteration = best_iteration
env$bst$best_ntreelimit = best_ntreelimit
env$bst$best_score = best_score
}
# If the difference is due to floating-point truncation, update best_score
best_score <- attr_best_score
}
env$bst$best_iteration <- best_iteration
env$bst$best_ntreelimit <- best_ntreelimit
env$bst$best_score <- best_score
} else {
env$basket$best_iteration <- best_iteration
env$basket$best_ntreelimit <- best_ntreelimit
@@ -372,9 +378,9 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
return(finalizer(env))
i <- env$iteration
score = env$bst_evaluation[metric_idx]
score <- env$bst_evaluation[metric_idx]
if (( maximize && score > best_score) ||
if ((maximize && score > best_score) ||
(!maximize && score < best_score)) {
best_msg <<- format.eval.string(i, env$bst_evaluation, env$bst_evaluation_err)
@@ -500,7 +506,7 @@ cb.cv.predict <- function(save_models = FALSE) {
for (fd in env$bst_folds) {
pr <- predict(fd$bst, fd$watchlist[[2]], ntreelimit = ntreelimit, reshape = TRUE)
if (is.matrix(pred)) {
pred[fd$index,] <- pr
pred[fd$index, ] <- pr
} else {
pred[fd$index] <- pr
}
@@ -613,9 +619,7 @@ cb.gblinear.history <- function(sparse=FALSE) {
init <- function(env) {
if (!is.null(env$bst)) { # xgb.train:
coef_path <- list()
} else if (!is.null(env$bst_folds)) { # xgb.cv:
coef_path <- rep(list(), length(env$bst_folds))
} else stop("Parent frame has neither 'bst' nor 'bst_folds'")
}
@@ -705,11 +709,11 @@ xgb.gblinear.history <- function(model, class_index = NULL) {
if (!is_cv) {
# extract num_class & num_feat from the internal model
dmp <- xgb.dump(model)
if(length(dmp) < 2 || dmp[2] != "bias:")
if (length(dmp) < 2 || dmp[2] != "bias:")
stop("It does not appear to be a gblinear model")
dmp <- dmp[-c(1,2)]
dmp <- dmp[-c(1, 2)]
n <- which(dmp == 'weight:')
if(length(n) != 1)
if (length(n) != 1)
stop("It does not appear to be a gblinear model")
num_class <- n - 1
num_feat <- (length(dmp) - 4) / num_class
@@ -732,9 +736,9 @@ xgb.gblinear.history <- function(model, class_index = NULL) {
if (!is.null(class_index) && num_class > 1) {
coef_path <- if (is.list(coef_path)) {
lapply(coef_path,
function(x) x[, seq(1 + class_index, by=num_class, length.out=num_feat)])
function(x) x[, seq(1 + class_index, by = num_class, length.out = num_feat)])
} else {
coef_path <- coef_path[, seq(1 + class_index, by=num_class, length.out=num_feat)]
coef_path <- coef_path[, seq(1 + class_index, by = num_class, length.out = num_feat)]
}
}
coef_path


@@ -20,6 +20,12 @@ NVL <- function(x, val) {
stop("typeof(x) == ", typeof(x), " is not supported by NVL")
}
# List of classification and ranking objectives
.CLASSIFICATION_OBJECTIVES <- function() {
return(c('binary:logistic', 'binary:logitraw', 'binary:hinge', 'multi:softmax',
'multi:softprob', 'rank:pairwise', 'rank:ndcg', 'rank:map'))
}
#
# Low-level functions for boosting --------------------------------------------
@@ -28,7 +34,7 @@ NVL <- function(x, val) {
# Merges booster params with whatever is provided in ...
# plus runs some checks
check.booster.params <- function(params, ...) {
if (typeof(params) != "list")
if (!identical(class(params), "list"))
stop("params must be a list")
# in R interface, allow for '.' instead of '_' in parameter names
@@ -69,23 +75,23 @@ check.booster.params <- function(params, ...) {
if (!is.null(params[['monotone_constraints']]) &&
typeof(params[['monotone_constraints']]) != "character") {
vec2str = paste(params[['monotone_constraints']], collapse = ',')
vec2str = paste0('(', vec2str, ')')
params[['monotone_constraints']] = vec2str
vec2str <- paste(params[['monotone_constraints']], collapse = ',')
vec2str <- paste0('(', vec2str, ')')
params[['monotone_constraints']] <- vec2str
}
# interaction constraints parser (convert from list of column indices to string)
if (!is.null(params[['interaction_constraints']]) &&
typeof(params[['interaction_constraints']]) != "character"){
# check input class
if (class(params[['interaction_constraints']]) != 'list') stop('interaction_constraints should be class list')
if (!all(unique(sapply(params[['interaction_constraints']], class)) %in% c('numeric','integer'))) {
if (!identical(class(params[['interaction_constraints']]), 'list')) stop('interaction_constraints should be class list')
if (!all(unique(sapply(params[['interaction_constraints']], class)) %in% c('numeric', 'integer'))) {
stop('interaction_constraints should be a list of numeric/integer vectors')
}
# recast parameter as string
interaction_constraints <- sapply(params[['interaction_constraints']], function(x) paste0('[', paste(x, collapse=','), ']'))
params[['interaction_constraints']] <- paste0('[', paste(interaction_constraints, collapse=','), ']')
interaction_constraints <- sapply(params[['interaction_constraints']], function(x) paste0('[', paste(x, collapse = ','), ']'))
params[['interaction_constraints']] <- paste0('[', paste(interaction_constraints, collapse = ','), ']')
}
return(params)
}
@@ -145,7 +151,8 @@ xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {
if (is.null(obj)) {
.Call(XGBoosterUpdateOneIter_R, booster_handle, as.integer(iter), dtrain)
} else {
pred <- predict(booster_handle, dtrain, training = TRUE)
pred <- predict(booster_handle, dtrain, outputmargin = TRUE, training = TRUE,
ntreelimit = 0)
gpair <- obj(pred, dtrain)
.Call(XGBoosterBoostOneIter_R, booster_handle, dtrain, gpair$grad, gpair$hess)
}
@@ -166,13 +173,12 @@ xgb.iter.eval <- function(booster_handle, watchlist, iter, feval = NULL) {
evnames <- names(watchlist)
if (is.null(feval)) {
msg <- .Call(XGBoosterEvalOneIter_R, booster_handle, as.integer(iter), watchlist, as.list(evnames))
msg <- stri_split_regex(msg, '(\\s+|:|\\s+)')[[1]][-1]
res <- as.numeric(msg[c(FALSE,TRUE)]) # even indices are the values
names(res) <- msg[c(TRUE,FALSE)] # odds are the names
mat <- matrix(strsplit(msg, '\\s+|:')[[1]][-1], nrow = 2)
res <- structure(as.numeric(mat[2, ]), names = mat[1, ])
} else {
res <- sapply(seq_along(watchlist), function(j) {
w <- watchlist[[j]]
preds <- predict(booster_handle, w) # predict using all trees
preds <- predict(booster_handle, w, outputmargin = TRUE, ntreelimit = 0) # predict using all trees
eval_res <- feval(preds, w)
out <- eval_res$value
names(out) <- paste0(evnames[j], "-", eval_res$metric)
@@ -187,13 +193,23 @@ xgb.iter.eval <- function(booster_handle, watchlist, iter, feval = NULL) {
# Helper functions for cross validation ---------------------------------------
#
# Possibly convert the labels into factors, depending on the objective.
# The labels are converted into factors only when the given objective refers to the classification
# or ranking tasks.
convert.labels <- function(labels, objective_name) {
if (objective_name %in% .CLASSIFICATION_OBJECTIVES()) {
return(as.factor(labels))
} else {
return(labels)
}
}
# Generates random (stratified if needed) CV folds
generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
# cannot do it for rank
if (exists('objective', where = params) &&
is.character(params$objective) &&
strtrim(params$objective, 5) == 'rank:') {
objective <- params$objective
if (is.character(objective) && strtrim(objective, 5) == 'rank:') {
stop("\n\tAutomatic generation of CV-folds is not implemented for ranking!\n",
"\tConsider providing pre-computed CV-folds through the 'folds=' parameter.\n")
}
@@ -206,20 +222,17 @@ generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
# - For classification, need to convert y labels to factor before making the folds,
# and then do stratification by factor levels.
# - For regression, leave y numeric and do stratification by quantiles.
if (exists('objective', where = params) &&
is.character(params$objective)) {
# If 'objective' provided in params, assume that y is a classification label
# unless objective is reg:squarederror
if (params$objective != 'reg:squarederror')
y <- factor(y)
if (is.character(objective)) {
y <- convert.labels(y, params$objective)
} else {
# If no 'objective' given in params, it means that user either wants to
# use the default 'reg:squarederror' objective or has provided a custom
# obj function. Here, assume classification setting when y has 5 or less
# unique values:
if (length(unique(y)) <= 5)
if (length(unique(y)) <= 5) {
y <- factor(y)
}
}
folds <- xgb.createFolds(y, nfold)
} else {
# make simple non-stratified folds
@@ -307,6 +320,68 @@ xgb.createFolds <- function(y, k = 10)
#' @name xgboost-deprecated
NULL
#' Do not use \code{\link[base]{saveRDS}} or \code{\link[base]{save}} for long-term archival of
#' models. Instead, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}}.
#'
#' It is a common practice to use the built-in \code{\link[base]{saveRDS}} function (or
#' \code{\link[base]{save}}) to persist R objects to the disk. While it is possible to persist
#' \code{xgb.Booster} objects using \code{\link[base]{saveRDS}}, it is not advisable to do so if
#' the model is to be accessed in the future. If you train a model with the current version of
#' XGBoost and persist it with \code{\link[base]{saveRDS}}, the model is not guaranteed to be
#' accessible in later releases of XGBoost. To ensure that your model can be accessed in future
#' releases of XGBoost, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}} instead.
#'
#' @details
#' Use \code{\link{xgb.save}} to save the XGBoost model as a stand-alone file. You may opt into
#' the JSON format by specifying the JSON extension. To read the model back, use
#' \code{\link{xgb.load}}.
#'
#' Use \code{\link{xgb.save.raw}} to save the XGBoost model as a sequence (vector) of raw bytes
#' in a future-proof manner. Future releases of XGBoost will be able to read the raw bytes and
#' re-construct the corresponding model. To read the model back, use \code{\link{xgb.load.raw}}.
#' The \code{\link{xgb.save.raw}} function is useful if you'd like to persist the XGBoost model
#' as part of another R object.
#'
#' Note: Do not use \code{\link{xgb.serialize}} to store models long-term. It persists not only the
#' model but also internal configurations and parameters, and its format is not stable across
#' multiple XGBoost versions. Use \code{\link{xgb.serialize}} only for checkpointing.
#'
#' For more details and explanation about model persistence and archival, consult the page
#' \url{https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html}.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#'
#' # Save as a stand-alone file; load it with xgb.load()
#' xgb.save(bst, 'xgb.model')
#' bst2 <- xgb.load('xgb.model')
#'
#' # Save as a stand-alone file (JSON); load it with xgb.load()
#' xgb.save(bst, 'xgb.model.json')
#' bst2 <- xgb.load('xgb.model.json')
#' if (file.exists('xgb.model.json')) file.remove('xgb.model.json')
#'
#' # Save as a raw byte vector; load it with xgb.load.raw()
#' xgb_bytes <- xgb.save.raw(bst)
#' bst2 <- xgb.load.raw(xgb_bytes)
#'
#' # Persist XGBoost model as part of another R object
#' obj <- list(xgb_model_bytes = xgb.save.raw(bst), description = "My first XGBoost model")
#' # Persist the R object. Here, saveRDS() is okay, since it doesn't persist
#' # xgb.Booster directly. What's being persisted is the future-proof byte representation
#' # as given by xgb.save.raw().
#' saveRDS(obj, 'my_object.rds')
#' # Read back the R object
#' obj2 <- readRDS('my_object.rds')
#' # Re-construct xgb.Booster object from the bytes
#' bst2 <- xgb.load.raw(obj2$xgb_model_bytes)
#' if (file.exists('my_object.rds')) file.remove('my_object.rds')
#'
#' @name a-compatibility-note-for-saveRDS-save
NULL
# Lookup table for the deprecated parameters bookkeeping
depr_par_lut <- matrix(c(
'print.every.n', 'print_every_n',
@@ -315,8 +390,8 @@ depr_par_lut <- matrix(c(
'with.stats', 'with_stats',
'numberOfClusters', 'n_clusters',
'features.keep', 'features_keep',
'plot.height','plot_height',
'plot.width','plot_width',
'plot.height', 'plot_height',
'plot.width', 'plot_width',
'n_first_tree', 'trees',
'dummy', 'DUMMY'
), ncol = 2, byrow = TRUE)
@@ -329,20 +404,20 @@ colnames(depr_par_lut) <- c('old', 'new')
check.deprecation <- function(..., env = parent.frame()) {
pars <- list(...)
# exact and partial matches
all_match <- pmatch(names(pars), depr_par_lut[,1])
all_match <- pmatch(names(pars), depr_par_lut[, 1])
# indices of matched pars' names
idx_pars <- which(!is.na(all_match))
if (length(idx_pars) == 0) return()
# indices of matched LUT rows
idx_lut <- all_match[idx_pars]
# which of idx_lut were the exact matches?
ex_match <- depr_par_lut[idx_lut,1] %in% names(pars)
ex_match <- depr_par_lut[idx_lut, 1] %in% names(pars)
for (i in seq_along(idx_pars)) {
pars_par <- names(pars)[idx_pars[i]]
old_par <- depr_par_lut[idx_lut[i], 1]
new_par <- depr_par_lut[idx_lut[i], 2]
if (!ex_match[i]) {
warning("'", pars_par, "' was partially matched to '", old_par,"'")
warning("'", pars_par, "' was partially matched to '", old_par, "'")
}
.Deprecated(new_par, old = old_par, package = 'xgboost')
if (new_par != 'NULL') {


@@ -1,24 +1,39 @@
# Construct an internal xgboost Booster and return a handle to it.
# internal utility function
xgb.Booster.handle <- function(params = list(), cachelist = list(), modelfile = NULL) {
xgb.Booster.handle <- function(params = list(), cachelist = list(),
modelfile = NULL) {
if (typeof(cachelist) != "list" ||
!all(vapply(cachelist, inherits, logical(1), what = 'xgb.DMatrix'))) {
stop("cachelist must be a list of xgb.DMatrix objects")
}
handle <- .Call(XGBoosterCreate_R, cachelist)
## Load existing model, dispatch for on disk model file and in memory buffer
if (!is.null(modelfile)) {
if (typeof(modelfile) == "character") {
## A filename
handle <- .Call(XGBoosterCreate_R, cachelist)
.Call(XGBoosterLoadModel_R, handle, modelfile[1])
class(handle) <- "xgb.Booster.handle"
if (length(params) > 0) {
xgb.parameters(handle) <- params
}
return(handle)
} else if (typeof(modelfile) == "raw") {
.Call(XGBoosterLoadModelFromRaw_R, handle, modelfile)
## A memory buffer
bst <- xgb.unserialize(modelfile)
xgb.parameters(bst) <- params
return (bst)
} else if (inherits(modelfile, "xgb.Booster")) {
## A booster object
bst <- xgb.Booster.complete(modelfile, saveraw = TRUE)
.Call(XGBoosterLoadModelFromRaw_R, handle, bst$raw)
bst <- xgb.unserialize(bst$raw)
xgb.parameters(bst) <- params
return (bst)
} else {
stop("modelfile must be either character filename, or raw booster dump, or xgb.Booster object")
}
}
## Create new model
handle <- .Call(XGBoosterCreate_R, cachelist)
class(handle) <- "xgb.Booster.handle"
if (length(params) > 0) {
xgb.parameters(handle) <- params
@@ -48,8 +63,8 @@ is.null.handle <- function(handle) {
return(FALSE)
}
# Return a verified to be valid handle out of either xgb.Booster.handle or xgb.Booster
# internal utility function
# Return a verified to be valid handle out of either xgb.Booster.handle or
# xgb.Booster internal utility function
xgb.get.handle <- function(object) {
if (inherits(object, "xgb.Booster")) {
handle <- object$handle
@@ -96,6 +111,8 @@ xgb.get.handle <- function(object) {
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' saveRDS(bst, "xgb.model.rds")
#'
#' # Warning: The resulting RDS file is only compatible with the current XGBoost version.
#' # Refer to the section titled "a-compatibility-note-for-saveRDS-save".
#' bst1 <- readRDS("xgb.model.rds")
#' if (file.exists("xgb.model.rds")) file.remove("xgb.model.rds")
#' # the handle is invalid:
@@ -113,9 +130,29 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
if (is.null.handle(object$handle)) {
object$handle <- xgb.Booster.handle(modelfile = object$raw)
} else {
if (is.null(object$raw) && saveraw)
object$raw <- xgb.save.raw(object$handle)
if (is.null(object$raw) && saveraw) {
object$raw <- xgb.serialize(object$handle)
}
}
attrs <- xgb.attributes(object)
if (!is.null(attrs$best_ntreelimit)) {
object$best_ntreelimit <- as.integer(attrs$best_ntreelimit)
}
if (!is.null(attrs$best_iteration)) {
## Convert from 0 based back to 1 based.
object$best_iteration <- as.integer(attrs$best_iteration) + 1
}
if (!is.null(attrs$best_score)) {
object$best_score <- as.numeric(attrs$best_score)
}
if (!is.null(attrs$best_msg)) {
object$best_msg <- attrs$best_msg
}
if (!is.null(attrs$niter)) {
object$niter <- as.integer(attrs$niter)
}
return(object)
}
@@ -139,6 +176,8 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
#' @param reshape whether to reshape the vector of predictions to a matrix form when there are several
#' prediction outputs per case. This option has no effect when either of predleaf, predcontrib,
#' or predinteraction flags is TRUE.
#' @param training whether is the prediction result used for training. For dart booster,
#' training predicting will perform dropout.
#' @param ... Parameters passed to \code{predict.xgb.Booster}
#'
#' @details
@@ -333,8 +372,8 @@ predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FA
matrix(ret, nrow = n_row, byrow = TRUE, dimnames = list(NULL, cnames))
} else {
arr <- array(ret, c(n_col1, n_group, n_row),
dimnames = list(cnames, NULL, NULL)) %>% aperm(c(2,3,1)) # [group, row, col]
lapply(seq_len(n_group), function(g) arr[g,,])
dimnames = list(cnames, NULL, NULL)) %>% aperm(c(2, 3, 1)) # [group, row, col]
lapply(seq_len(n_group), function(g) arr[g, , ])
}
} else if (predinteraction) {
n_col1 <- ncol(newdata) + 1
@@ -343,11 +382,11 @@ predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FA
ret <- if (n_ret == n_row) {
matrix(ret, ncol = 1, dimnames = list(NULL, cnames))
} else if (n_group == 1) {
array(ret, c(n_col1, n_col1, n_row), dimnames = list(cnames, cnames, NULL)) %>% aperm(c(3,1,2))
array(ret, c(n_col1, n_col1, n_row), dimnames = list(cnames, cnames, NULL)) %>% aperm(c(3, 1, 2))
} else {
arr <- array(ret, c(n_col1, n_col1, n_group, n_row),
dimnames = list(cnames, cnames, NULL, NULL)) %>% aperm(c(3,4,1,2)) # [group, row, col1, col2]
lapply(seq_len(n_group), function(g) arr[g,,,])
dimnames = list(cnames, cnames, NULL, NULL)) %>% aperm(c(3, 4, 1, 2)) # [group, row, col1, col2]
lapply(seq_len(n_group), function(g) arr[g, , , ])
}
} else if (reshape && npred_per_case > 1) {
ret <- matrix(ret, nrow = n_row, byrow = TRUE)
@@ -397,7 +436,7 @@ predict.xgb.Booster.handle <- function(object, ...) {
#' That would only matter if attributes need to be set many times.
#' Note, however, that when feeding a handle of an \code{xgb.Booster} object to the attribute setters,
#' the raw model cache of an \code{xgb.Booster} object would not be automatically updated,
#' and it would be user's responsibility to call \code{xgb.save.raw} to update it.
#' and it would be user's responsibility to call \code{xgb.serialize} to update it.
#'
#' The \code{xgb.attributes<-} setter either updates the existing or adds one or several attributes,
#' but it doesn't delete the other existing attributes.
@@ -456,7 +495,7 @@ xgb.attr <- function(object, name) {
}
.Call(XGBoosterSetAttr_R, handle, as.character(name[1]), value)
if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
object$raw <- xgb.save.raw(object$handle)
object$raw <- xgb.serialize(object$handle)
}
object
}
@@ -496,11 +535,41 @@ xgb.attributes <- function(object) {
.Call(XGBoosterSetAttr_R, handle, names(a[i]), a[[i]])
}
if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
object$raw <- xgb.save.raw(object$handle)
object$raw <- xgb.serialize(object$handle)
}
object
}
#' Accessors for model parameters as JSON string.
#'
#' @param object Object of class \code{xgb.Booster}
#' @param value A JSON string.
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' train <- agaricus.train
#'
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' config <- xgb.config(bst)
#'
#' @rdname xgb.config
#' @export
xgb.config <- function(object) {
handle <- xgb.get.handle(object)
.Call(XGBoosterSaveJsonConfig_R, handle)
}
#' @rdname xgb.config
#' @export
`xgb.config<-` <- function(object, value) {
handle <- xgb.get.handle(object)
.Call(XGBoosterLoadJsonConfig_R, handle, value)
object$raw <- NULL # force renew the raw buffer
object <- xgb.Booster.complete(object)
object
}
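A short sketch of the new accessors in use (illustrative only; `bst` is an assumed trained booster, and the jsonlite call is optional inspection, not required by the API):

cfg <- xgb.config(bst)                       # full learner configuration as JSON
str(jsonlite::fromJSON(cfg), max.level = 1)  # peek at the top-level structure
xgb.config(bst) <- cfg                       # setter reloads the config and renews $raw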
#' Accessors for model parameters.
#'
#' Only the setter for xgboost parameters is currently implemented.
@@ -537,7 +606,7 @@ xgb.attributes <- function(object) {
.Call(XGBoosterSetParam_R, handle, names(p[i]), p[[i]])
}
if (is(object, 'xgb.Booster') && !is.null(object$raw)) {
object$raw <- xgb.save.raw(object$handle)
object$raw <- xgb.serialize(object$handle)
}
object
}
@@ -590,7 +659,7 @@ print.xgb.Booster <- function(x, verbose = FALSE, ...) {
if (!is.null(x$params)) {
cat('params (as set within xgb.train):\n')
cat( ' ',
cat(' ',
paste(names(x$params),
paste0('"', unlist(x$params), '"'),
sep = ' = ', collapse = ', '), '\n', sep = '')
@@ -603,7 +672,7 @@ print.xgb.Booster <- function(x, verbose = FALSE, ...) {
if (length(attrs) > 0) {
cat('xgb.attributes:\n')
if (verbose) {
cat( paste(paste0(' ',names(attrs)),
cat(paste(paste0(' ', names(attrs)),
paste0('"', unlist(attrs), '"'),
sep = ' = ', collapse = '\n'), '\n', sep = '')
} else {
@@ -627,7 +696,7 @@ print.xgb.Booster <- function(x, verbose = FALSE, ...) {
#cat('ntree: ', xgb.ntree(x), '\n', sep='')
for (n in setdiff(names(x), c('handle', 'raw', 'call', 'params', 'callbacks',
'evaluation_log','niter','feature_names'))) {
'evaluation_log', 'niter', 'feature_names'))) {
if (is.atomic(x[[n]])) {
cat(n, ':', x[[n]], '\n', sep = ' ')
} else {


@@ -188,9 +188,10 @@ getinfo <- function(object, ...) UseMethod("getinfo")
getinfo.xgb.DMatrix <- function(object, name, ...) {
if (typeof(name) != "character" ||
length(name) != 1 ||
!name %in% c('label', 'weight', 'base_margin', 'nrow')) {
!name %in% c('label', 'weight', 'base_margin', 'nrow',
'label_lower_bound', 'label_upper_bound')) {
stop("getinfo: name must be one of the following\n",
" 'label', 'weight', 'base_margin', 'nrow'")
" 'label', 'weight', 'base_margin', 'nrow', 'label_lower_bound', 'label_upper_bound'")
}
if (name != "nrow"){
ret <- .Call(XGDMatrixGetInfo_R, object, name)
@@ -243,9 +244,19 @@ setinfo.xgb.DMatrix <- function(object, name, info, ...) {
.Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
return(TRUE)
}
if (name == "weight") {
if (name == "label_lower_bound") {
if (length(info) != nrow(object))
stop("The length of weights must equal to the number of rows in the input data")
stop("The length of lower-bound labels must equal to the number of rows in the input data")
.Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
return(TRUE)
}
if (name == "label_upper_bound") {
if (length(info) != nrow(object))
stop("The length of upper-bound labels must equal to the number of rows in the input data")
.Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
return(TRUE)
}
if (name == "weight") {
.Call(XGDMatrixSetInfo_R, object, name, as.numeric(info))
return(TRUE)
}
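A hedged sketch of the new survival fields with made-up interval bounds; an upper bound of +Inf encodes right-censoring, as consumed by the survival:aft objective:

x <- matrix(rnorm(100 * 4), nrow = 100)
dtrain <- xgb.DMatrix(x)
setinfo(dtrain, "label_lower_bound", rep(1, 100))                 # event at/after t = 1
setinfo(dtrain, "label_upper_bound", c(rep(2, 60), rep(Inf, 40))) # Inf = right-censored
head(getinfo(dtrain, "label_upper_bound"))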
@@ -309,7 +320,7 @@ slice.xgb.DMatrix <- function(object, idxset, ...) {
for (i in seq_along(ind)) {
obj_attr <- attr(object, nms[i])
if (NCOL(obj_attr) > 1) {
attr(ret, nms[i]) <- obj_attr[idxset,]
attr(ret, nms[i]) <- obj_attr[idxset, ]
} else {
attr(ret, nms[i]) <- obj_attr[idxset]
}
@@ -346,10 +357,10 @@ slice.xgb.DMatrix <- function(object, idxset, ...) {
#' @export
print.xgb.DMatrix <- function(x, verbose = FALSE, ...) {
cat('xgb.DMatrix dim:', nrow(x), 'x', ncol(x), ' info: ')
infos <- c()
if(length(getinfo(x, 'label')) > 0) infos <- 'label'
if(length(getinfo(x, 'weight')) > 0) infos <- c(infos, 'weight')
if(length(getinfo(x, 'base_margin')) > 0) infos <- c(infos, 'base_margin')
infos <- character(0)
if (length(getinfo(x, 'label')) > 0) infos <- 'label'
if (length(getinfo(x, 'weight')) > 0) infos <- c(infos, 'weight')
if (length(getinfo(x, 'base_margin')) > 0) infos <- c(infos, 'base_margin')
if (length(infos) == 0) infos <- 'NA'
cat(infos)
cnames <- colnames(x)


@@ -83,5 +83,5 @@ xgb.create.features <- function(model, data, ...){
check.deprecation(...)
pred_with_leaf <- predict(model, data, predleaf = TRUE)
cols <- lapply(as.data.frame(pred_with_leaf), factor)
cbind(data, sparse.model.matrix( ~ . -1, cols))
cbind(data, sparse.model.matrix(~ . -1, cols)) # nolint
}


@@ -2,12 +2,15 @@
#'
#' The cross validation function of xgboost
#'
#' @param params the list of parameters. Commonly used ones are:
#' @param params the list of parameters. The complete list of parameters is
#' available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below
#' is a shorter summary:
#' \itemize{
#' \item \code{objective} objective function, common ones are
#' \itemize{
#' \item \code{reg:squarederror} Regression with squared loss
#' \item \code{binary:logistic} logistic regression for classification
#' \item \code{reg:squarederror} Regression with squared loss.
#' \item \code{binary:logistic} logistic regression for classification.
#' \item See \code{\link[=xgb.train]{xgb.train}()} for complete list of objectives.
#' }
#' \item \code{eta} step size of each boosting step
#' \item \code{max_depth} maximum depth of the tree
@@ -33,6 +36,8 @@
#' \item \code{error} binary classification error rate
#' \item \code{rmse} Rooted mean square error
#' \item \code{logloss} negative log-likelihood function
#' \item \code{mae} Mean absolute error
#' \item \code{mape} Mean absolute percentage error
#' \item \code{auc} Area under curve
#' \item \code{aucpr} Area under PR curve
#' \item \code{merror} Exact matching error, used to evaluate multi-class classification
@@ -76,7 +81,7 @@
#'
#' All observations are used for both training and validation.
#'
#' Adapted from \url{http://en.wikipedia.org/wiki/Cross-validation_\%28statistics\%29#k-fold_cross-validation}
#' Adapted from \url{https://en.wikipedia.org/wiki/Cross-validation_\%28statistics\%29}
#'
#' @return
#' An object of class \code{xgb.cv.synchronous} with the following elements:
@@ -101,7 +106,7 @@
#' (only available with early stopping).
#' \item \code{pred} CV prediction values available when \code{prediction} is set.
#' It is either vector or matrix (see \code{\link{cb.cv.predict}}).
#' \item \code{models} a liost of the CV folds' models. It is only available with the explicit
#' \item \code{models} a list of the CV folds' models. It is only available with the explicit
#' setting of the \code{cb.cv.predict(save_models = TRUE)} callback.
#' }
#'
@@ -134,20 +139,20 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
# stop("Either 'eval_metric' or 'feval' must be provided for CV")
# Check the labels
if ( (inherits(data, 'xgb.DMatrix') && is.null(getinfo(data, 'label'))) ||
if ((inherits(data, 'xgb.DMatrix') && is.null(getinfo(data, 'label'))) ||
(!inherits(data, 'xgb.DMatrix') && is.null(label))) {
stop("Labels must be provided for CV either through xgb.DMatrix, or through 'label=' when 'data' is matrix")
} else if (inherits(data, 'xgb.DMatrix')) {
if (!is.null(label))
warning("xgb.cv: label will be ignored, since data is of type xgb.DMatrix")
cv_label = getinfo(data, 'label')
cv_label <- getinfo(data, 'label')
} else {
cv_label = label
cv_label <- label
}
# CV folds
if(!is.null(folds)) {
if(!is.list(folds) || length(folds) < 2)
if (!is.null(folds)) {
if (!is.list(folds) || length(folds) < 2)
stop("'folds' must be a list with 2 or more elements that are vectors of indices for each CV-fold")
nfold <- length(folds)
} else {
@@ -162,7 +167,7 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
# verbosity & evaluation printing callback:
params <- c(params, list(silent = 1))
print_every_n <- max( as.integer(print_every_n), 1L)
print_every_n <- max(as.integer(print_every_n), 1L)
if (!has.callbacks(callbacks, 'cb.print.evaluation') && verbose) {
callbacks <- add.cb(callbacks, cb.print.evaluation(print_every_n, showsd = showsd))
}
@@ -193,20 +198,20 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
bst_folds <- lapply(seq_along(folds), function(k) {
dtest <- slice(dall, folds[[k]])
# code originally contributed by @RolandASc on stackoverflow
if(is.null(train_folds))
if (is.null(train_folds))
dtrain <- slice(dall, unlist(folds[-k]))
else
dtrain <- slice(dall, train_folds[[k]])
handle <- xgb.Booster.handle(params, list(dtrain, dtest))
list(dtrain = dtrain, bst = handle, watchlist = list(train = dtrain, test=dtest), index = folds[[k]])
list(dtrain = dtrain, bst = handle, watchlist = list(train = dtrain, test = dtest), index = folds[[k]])
})
rm(dall)
# a "basket" to collect some results from callbacks
basket <- list()
# extract parameters that can affect the relationship b/w #trees and #iterations
num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1)
num_parallel_tree <- max(as.numeric(NVL(params[['num_parallel_tree']], 1)), 1)
num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1) # nolint
num_parallel_tree <- max(as.numeric(NVL(params[['num_parallel_tree']], 1)), 1) # nolint
# those are fixed for CV (no training continuation)
begin_iteration <- 1
@@ -223,7 +228,7 @@ xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing =
})
msg <- simplify2array(msg)
bst_evaluation <- rowMeans(msg)
bst_evaluation_err <- sqrt(rowMeans(msg^2) - bst_evaluation^2)
bst_evaluation_err <- sqrt(rowMeans(msg^2) - bst_evaluation^2) # nolint
for (f in cb$post_iter) f()
@@ -282,7 +287,7 @@ print.xgb.cv.synchronous <- function(x, verbose = FALSE, ...) {
}
if (!is.null(x$params)) {
cat('params (as set within xgb.cv):\n')
cat( ' ',
cat(' ',
paste(names(x$params),
paste0('"', unlist(x$params), '"'),
sep = ' = ', collapse = ', '), '\n', sep = '')


@@ -56,10 +56,10 @@ xgb.dump <- function(model, fname = NULL, fmap = "", with_stats=FALSE,
as.character(dump_format))
if (is.null(fname))
model_dump <- stri_replace_all_regex(model_dump, '\t', '')
model_dump <- gsub('\t', '', model_dump, fixed = TRUE)
if (dump_format == "text")
model_dump <- unlist(stri_split_regex(model_dump, '\n'))
model_dump <- unlist(strsplit(model_dump, '\n', fixed = TRUE))
model_dump <- grep('^\\s*$', model_dump, invert = TRUE, value = TRUE)


@@ -74,7 +74,7 @@ xgb.ggplot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med
p <-
ggplot2::ggplot(dt_depths[, max(Depth), Tree]) +
ggplot2::geom_jitter(ggplot2::aes(x = Tree, y = V1),
height = 0.15, alpha=0.4, size=3, stroke=0) +
height = 0.15, alpha = 0.4, size = 3, stroke = 0) +
ggplot2::xlab("tree #") +
ggplot2::ylab("Max tree leaf depth")
return(p)
@@ -83,7 +83,7 @@ xgb.ggplot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med
p <-
ggplot2::ggplot(dt_depths[, median(as.numeric(Depth)), Tree]) +
ggplot2::geom_jitter(ggplot2::aes(x = Tree, y = V1),
height = 0.15, alpha=0.4, size=3, stroke=0) +
height = 0.15, alpha = 0.4, size = 3, stroke = 0) +
ggplot2::xlab("tree #") +
ggplot2::ylab("Median tree leaf depth")
return(p)
@@ -92,20 +92,98 @@ xgb.ggplot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med
p <-
ggplot2::ggplot(dt_depths[, median(abs(Weight)), Tree]) +
ggplot2::geom_point(ggplot2::aes(x = Tree, y = V1),
alpha=0.4, size=3, stroke=0) +
alpha = 0.4, size = 3, stroke = 0) +
ggplot2::xlab("tree #") +
ggplot2::ylab("Median absolute leaf weight")
return(p)
}
}
#' @rdname xgb.plot.shap.summary
#' @export
xgb.ggplot.shap.summary <- function(data, shap_contrib = NULL, features = NULL, top_n = 10, model = NULL,
trees = NULL, target_class = NULL, approxcontrib = FALSE, subsample = NULL) {
data_list <- xgb.shap.data(
data = data,
shap_contrib = shap_contrib,
features = features,
top_n = top_n,
model = model,
trees = trees,
target_class = target_class,
approxcontrib = approxcontrib,
subsample = subsample,
max_observations = 10000 # 10,000 samples per feature.
)
p_data <- prepare.ggplot.shap.data(data_list, normalize = TRUE)
# Reverse factor levels so that the first level is at the top of the plot
p_data[, "feature" := factor(feature, rev(levels(feature)))]
p <- ggplot2::ggplot(p_data, ggplot2::aes(x = feature, y = p_data$shap_value, colour = p_data$feature_value)) +
ggplot2::geom_jitter(alpha = 0.5, width = 0.1) +
ggplot2::scale_colour_viridis_c(limits = c(-3, 3), option = "plasma", direction = -1) +
ggplot2::geom_abline(slope = 0, intercept = 0, colour = "darkgrey") +
ggplot2::coord_flip()
p
}
#' Combine and melt feature values and SHAP contributions for sample
#' observations.
#'
#' Conforms to data format required for ggplot functions.
#'
#' Internal utility function.
#'
#' @param data_list List containing 'data' and 'shap_contrib' returned by
#' \code{xgb.shap.data()}.
#' @param normalize Whether to standardize feature values to have mean 0 and
#' standard deviation 1 (useful for comparing multiple features on the same
#' plot). Default \code{FALSE}.
#'
#' @return A data.table containing the observation ID, the feature name, the
#' feature value (normalized if specified), and the SHAP contribution value.
prepare.ggplot.shap.data <- function(data_list, normalize = FALSE) {
data <- data_list[["data"]]
shap_contrib <- data_list[["shap_contrib"]]
data <- data.table::as.data.table(as.matrix(data))
if (normalize) {
data[, (names(data)) := lapply(.SD, normalize)]
}
data[, "id" := seq_len(nrow(data))]
data_m <- data.table::melt.data.table(data, id.vars = "id", variable.name = "feature", value.name = "feature_value")
shap_contrib <- data.table::as.data.table(as.matrix(shap_contrib))
shap_contrib[, "id" := seq_len(nrow(shap_contrib))]
shap_contrib_m <- data.table::melt.data.table(shap_contrib, id.vars = "id", variable.name = "feature", value.name = "shap_value")
p_data <- data.table::merge.data.table(data_m, shap_contrib_m, by = c("id", "feature"))
p_data
}
#' Scale feature value to have mean 0, standard deviation 1
#'
#' This is used to compare multiple features on the same plot.
#' Internal utility function
#'
#' @param x Numeric vector
#'
#' @return Numeric vector with mean 0 and sd 1.
normalize <- function(x) {
loc <- mean(x, na.rm = TRUE)
scale <- stats::sd(x, na.rm = TRUE)
(x - loc) / scale
}
# Plot multiple ggplot graph aligned by rows and columns.
# ... the plots
# cols number of columns
# internal utility function
multiplot <- function(..., cols = 1) {
plots <- list(...)
num_plots = length(plots)
num_plots <- length(plots)
layout <- matrix(seq(1, cols * ceiling(num_plots / cols)),
ncol = cols, nrow = ceiling(num_plots / cols))
@@ -131,5 +209,5 @@ multiplot <- function(..., cols = 1) {
globalVariables(c(
"Cluster", "ggplot", "aes", "geom_bar", "coord_flip", "xlab", "ylab", "ggtitle", "theme",
"element_blank", "element_text", "V1", "Weight"
"element_blank", "element_text", "V1", "Weight", "feature"
))


@@ -99,13 +99,13 @@ xgb.importance <- function(feature_names = NULL, model = NULL, trees = NULL,
model_text_dump <- xgb.dump(model = model, with_stats = TRUE)
# linear model
if(model_text_dump[2] == "bias:"){
if (model_text_dump[2] == "bias:"){
weights <- which(model_text_dump == "weight:") %>%
{model_text_dump[(. + 1):length(model_text_dump)]} %>%
as.numeric
num_class <- NVL(model$params$num_class, 1)
if(is.null(feature_names))
if (is.null(feature_names))
feature_names <- seq(to = length(weights) / num_class) - 1
if (length(feature_names) * num_class != length(weights))
stop("feature_names length does not match the number of features used in the model")
@@ -117,15 +117,14 @@ xgb.importance <- function(feature_names = NULL, model = NULL, trees = NULL,
Weight = weights,
Class = seq_len(num_class) - 1)[order(Class, -abs(Weight))]
}
} else {
# tree model
} else { # tree model
result <- xgb.model.dt.tree(feature_names = feature_names,
text = model_text_dump,
trees = trees)[
Feature != "Leaf", .(Gain = sum(Quality),
Cover = sum(Cover),
Frequency = .N), by = Feature][
,`:=`(Gain = Gain / sum(Gain),
, `:=`(Gain = Gain / sum(Gain),
Cover = Cover / sum(Cover),
Frequency = Frequency / sum(Frequency))][
order(Gain, decreasing = TRUE)]


@@ -0,0 +1,14 @@
#' Load serialised xgboost model from R's raw vector
#'
#' Users can generate a raw memory buffer by calling \code{xgb.save.raw}.
#'
#' @param buffer the buffer returned by xgb.save.raw
#'
#' @export
xgb.load.raw <- function(buffer) {
cachelist <- list()
handle <- .Call(XGBoosterCreate_R, cachelist)
.Call(XGBoosterLoadModelFromRaw_R, handle, buffer)
class(handle) <- "xgb.Booster.handle"
return (handle)
}
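For illustration, a round trip through the model-only raw format (assuming `bst` and the agaricus test data from the package examples):

raw <- xgb.save.raw(bst)      # model weights only, no training parameters
handle <- xgb.load.raw(raw)   # returns an xgb.Booster.handle
pred <- predict(handle, agaricus.test$data)  # handles support predict() directly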


@@ -87,11 +87,11 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
}
if (length(text) < 2 ||
sum(stri_detect_regex(text, 'yes=(\\d+),no=(\\d+)')) < 1) {
sum(grepl('yes=(\\d+),no=(\\d+)', text)) < 1) {
stop("Non-tree model detected! This function can only be used with tree models.")
}
position <- which(!is.na(stri_match_first_regex(text, "booster")))
position <- which(grepl("booster", text, fixed = TRUE))
add.tree.id <- function(node, tree) if (use_int_id) node else paste(tree, node, sep = "-")
@@ -108,9 +108,9 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
}
td <- td[Tree %in% trees & !grepl('^booster', t)]
td[, Node := stri_match_first_regex(t, "(\\d+):")[,2] %>% as.integer ]
td[, Node := as.integer(sub("^([0-9]+):.*", "\\1", t))]
if (!use_int_id) td[, ID := add.tree.id(Node, Tree)]
td[, isLeaf := !is.na(stri_match_first_regex(t, "leaf"))]
td[, isLeaf := grepl("leaf", t, fixed = TRUE)]
# parse branch lines
branch_rx <- paste0("f(\\d+)<(", anynumber_regex, ")\\] yes=(\\d+),no=(\\d+),missing=(\\d+),",
@@ -118,16 +118,17 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
branch_cols <- c("Feature", "Split", "Yes", "No", "Missing", "Quality", "Cover")
td[isLeaf == FALSE,
(branch_cols) := {
matches <- regmatches(t, regexec(branch_rx, t))
# skip some indices with spurious capture groups from anynumber_regex
xtr <- stri_match_first_regex(t, branch_rx)[, c(2,3,5,6,7,8,10), drop = FALSE]
xtr <- do.call(rbind, matches)[, c(2, 3, 5, 6, 7, 8, 10), drop = FALSE]
xtr[, 3:5] <- add.tree.id(xtr[, 3:5], Tree)
lapply(seq_len(ncol(xtr)), function(i) xtr[,i])
as.data.table(xtr)
}]
# assign feature_names when available
if (!is.null(feature_names)) {
if (length(feature_names) <= max(as.numeric(td$Feature), na.rm = TRUE))
stop("feature_names has less elements than there are features used in the model")
td[isLeaf == FALSE, Feature := feature_names[as.numeric(Feature) + 1] ]
td[isLeaf == FALSE, Feature := feature_names[as.numeric(Feature) + 1]]
}
# parse leaf lines
@@ -135,8 +136,9 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
leaf_cols <- c("Feature", "Quality", "Cover")
td[isLeaf == TRUE,
(leaf_cols) := {
xtr <- stri_match_first_regex(t, leaf_rx)[, c(2,4)]
c("Leaf", lapply(seq_len(ncol(xtr)), function(i) xtr[,i]))
matches <- regmatches(t, regexec(leaf_rx, t))
xtr <- do.call(rbind, matches)[, c(2, 4)]
c("Leaf", as.data.table(xtr))
}]
# convert some columns to numeric
@@ -156,4 +158,4 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
# Avoid error messages during CRAN check.
# The reason is that these variables are never declared
# They are mainly column names inferred by Data.table...
globalVariables(c("Tree", "Node", "ID", "Feature", "t", "isLeaf",".SD", ".SDcols"))
globalVariables(c("Tree", "Node", "ID", "Feature", "t", "isLeaf", ".SD", ".SDcols"))


@@ -89,9 +89,9 @@ xgb.plot.deepness <- function(model = NULL, which = c("2x1", "max.depth", "med.d
if (plot) {
if (which == "2x1") {
op <- par(no.readonly = TRUE)
par(mfrow = c(2,1),
oma = c(3,1,3,1) + 0.1,
mar = c(1,4,1,0) + 0.1)
par(mfrow = c(2, 1),
oma = c(3, 1, 3, 1) + 0.1,
mar = c(1, 4, 1, 0) + 0.1)
dt_summaries[, barplot(N, border = NA, ylab = 'Number of leaves', ...)]
@@ -130,7 +130,7 @@ get.leaf.depth <- function(dt_tree) {
dt_edges[is.na(Leaf), Leaf := FALSE]
dt_edges[, {
graph <- igraph::graph_from_data_frame(.SD[,.(ID, To)])
graph <- igraph::graph_from_data_frame(.SD[, .(ID, To)])
# min(ID) in a tree is a root node
paths_tmp <- igraph::shortest_paths(graph, from = min(ID), to = To[Leaf == TRUE])
# list of paths to each leaf in a tree


@@ -92,28 +92,27 @@ xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure
importance_matrix <- head(importance_matrix, top_n)
}
if (rel_to_first) {
importance_matrix[, Importance := Importance/max(abs(Importance))]
importance_matrix[, Importance := Importance / max(abs(Importance))]
}
if (is.null(cex)) {
cex <- 2.5/log2(1 + nrow(importance_matrix))
cex <- 2.5 / log2(1 + nrow(importance_matrix))
}
if (plot) {
op <- par(no.readonly = TRUE)
mar <- op$mar
original_mar <- par()$mar
# reset margins so this function doesn't have side effects
on.exit({par(mar = original_mar)})
mar <- original_mar
if (!is.null(left_margin))
mar[2] <- left_margin
par(mar = mar)
# reverse the order of rows to have the highest ranked at the top
importance_matrix[nrow(importance_matrix):1,
importance_matrix[rev(seq_len(nrow(importance_matrix))),
barplot(Importance, horiz = TRUE, border = NA, cex.names = cex,
names.arg = Feature, las = 1, ...)]
grid(NULL, NA)
# redraw over the grid
importance_matrix[nrow(importance_matrix):1,
barplot(Importance, horiz = TRUE, border = NA, add = TRUE)]
par(op)
}
invisible(importance_matrix)


@@ -67,12 +67,12 @@ xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5,
# first number of the path represents the tree, then the following numbers are related to the path to follow
# root init
root.nodes <- tree.matrix[stri_detect_regex(ID, "\\d+-0"), ID]
root.nodes <- tree.matrix[Node == 0, ID]
tree.matrix[ID %in% root.nodes, abs.node.position := root.nodes]
precedent.nodes <- root.nodes
while(tree.matrix[,sum(is.na(abs.node.position))] > 0) {
while (tree.matrix[, sum(is.na(abs.node.position))] > 0) {
yes.row.nodes <- tree.matrix[abs.node.position %in% precedent.nodes & !is.na(Yes)]
no.row.nodes <- tree.matrix[abs.node.position %in% precedent.nodes & !is.na(No)]
yes.nodes.abs.pos <- yes.row.nodes[, abs.node.position] %>% paste0("_0")
@@ -86,37 +86,34 @@ xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5,
tree.matrix[!is.na(Yes), Yes := paste0(abs.node.position, "_0")]
tree.matrix[!is.na(No), No := paste0(abs.node.position, "_1")]
remove.tree <- . %>% stri_replace_first_regex(pattern = "^\\d+-", replacement = "")
tree.matrix[,`:=`(abs.node.position = remove.tree(abs.node.position),
Yes = remove.tree(Yes),
No = remove.tree(No))]
for (nm in c("abs.node.position", "Yes", "No"))
data.table::set(tree.matrix, j = nm, value = sub("^\\d+-", "", tree.matrix[[nm]]))
nodes.dt <- tree.matrix[
, .(Quality = sum(Quality))
, by = .(abs.node.position, Feature)
][, .(Text = paste0(Feature[1:min(length(Feature), features_keep)],
" (",
format(Quality[1:min(length(Quality), features_keep)], digits=5),
format(Quality[1:min(length(Quality), features_keep)], digits = 5),
")") %>%
paste0(collapse = "\n"))
, by = abs.node.position]
edges.dt <- tree.matrix[Feature != "Leaf", .(abs.node.position, Yes)] %>%
list(tree.matrix[Feature != "Leaf",.(abs.node.position, No)]) %>%
list(tree.matrix[Feature != "Leaf", .(abs.node.position, No)]) %>%
rbindlist() %>%
setnames(c("From", "To")) %>%
.[, .N, .(From, To)] %>%
.[, N:=NULL]
.[, N := NULL]
nodes <- DiagrammeR::create_node_df(
n = nrow(nodes.dt),
label = nodes.dt[,Text]
label = nodes.dt[, Text]
)
edges <- DiagrammeR::create_edge_df(
from = match(edges.dt[,From], nodes.dt[,abs.node.position]),
to = match(edges.dt[,To], nodes.dt[,abs.node.position]),
from = match(edges.dt[, From], nodes.dt[, abs.node.position]),
to = match(edges.dt[, To], nodes.dt[, abs.node.position]),
rel = "leading_to")
graph <- DiagrammeR::create_graph(


@@ -81,6 +81,7 @@
#' xgb.plot.shap(agaricus.test$data, model = bst, features = "odor=none")
#' contr <- predict(bst, agaricus.test$data, predcontrib = TRUE)
#' xgb.plot.shap(agaricus.test$data, contr, model = bst, top_n = 12, n_col = 3)
#' xgb.ggplot.shap.summary(agaricus.test$data, contr, model = bst, top_n = 12) # Summary plot
#'
#' # multiclass example - plots for each class separately:
#' nclass <- 3
@@ -99,6 +100,7 @@
#' n_col = 2, col = col, pch = 16, pch_NA = 17)
#' xgb.plot.shap(x, model = mbst, trees = trees0 + 2, target_class = 2, top_n = 4,
#' n_col = 2, col = col, pch = 16, pch_NA = 17)
#' xgb.ggplot.shap.summary(x, model = mbst, target_class = 0, top_n = 4) # Summary plot
#'
#' @rdname xgb.plot.shap
#' @export
@@ -109,69 +111,33 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
plot_NA = TRUE, col_NA = rgb(0.7, 0, 1, 0.6), pch_NA = '.', pos_NA = 1.07,
plot_loess = TRUE, col_loess = 2, span_loess = 0.5,
which = c("1d", "2d"), plot = TRUE, ...) {
if (!is.matrix(data) && !inherits(data, "dgCMatrix"))
stop("data: must be either matrix or dgCMatrix")
if (is.null(shap_contrib) && (is.null(model) || !inherits(model, "xgb.Booster")))
stop("when shap_contrib is not provided, one must provide an xgb.Booster model")
if (is.null(features) && (is.null(model) || !inherits(model, "xgb.Booster")))
stop("when features are not provided, one must provide an xgb.Booster model to rank the features")
if (!is.null(shap_contrib) &&
(!is.matrix(shap_contrib) || nrow(shap_contrib) != nrow(data) || ncol(shap_contrib) != ncol(data) + 1))
stop("shap_contrib is not compatible with the provided data")
nsample <- if (is.null(subsample)) min(100000, nrow(data)) else as.integer(subsample * nrow(data))
idx <- sample(1:nrow(data), nsample)
data <- data[idx,]
if (is.null(shap_contrib)) {
shap_contrib <- predict(model, data, predcontrib = TRUE, approxcontrib = approxcontrib)
} else {
shap_contrib <- shap_contrib[idx,]
}
data_list <- xgb.shap.data(
data = data,
shap_contrib = shap_contrib,
features = features,
top_n = top_n,
model = model,
trees = trees,
target_class = target_class,
approxcontrib = approxcontrib,
subsample = subsample,
max_observations = 100000
)
data <- data_list[["data"]]
shap_contrib <- data_list[["shap_contrib"]]
features <- colnames(data)
which <- match.arg(which)
if (which == "2d")
stop("2D plots are not implemented yet")
if (is.null(features)) {
imp <- xgb.importance(model = model, trees = trees)
top_n <- as.integer(top_n[1])
if (top_n < 1 && top_n > 100)
stop("top_n: must be an integer within [1, 100]")
features <- imp$Feature[1:min(top_n, NROW(imp))]
}
if (is.character(features)) {
if (is.null(colnames(data)))
stop("Either provide `data` with column names or provide `features` as column indices")
features <- match(features, colnames(data))
}
if (n_col > length(features)) n_col <- length(features)
if (is.list(shap_contrib)) { # multiclass: either choose a class or merge
shap_contrib <- if (!is.null(target_class)) shap_contrib[[target_class + 1]]
else Reduce("+", lapply(shap_contrib, abs))
}
shap_contrib <- shap_contrib[, features, drop = FALSE]
data <- data[, features, drop = FALSE]
cols <- colnames(data)
if (is.null(cols)) cols <- colnames(shap_contrib)
if (is.null(cols)) cols <- paste0('X', 1:ncol(data))
colnames(data) <- cols
colnames(shap_contrib) <- cols
if (plot && which == "1d") {
op <- par(mfrow = c(ceiling(length(features) / n_col), n_col),
oma = c(0,0,0,0) + 0.2,
mar = c(3.5,3.5,0,0) + 0.1,
oma = c(0, 0, 0, 0) + 0.2,
mar = c(3.5, 3.5, 0, 0) + 0.1,
mgp = c(1.7, 0.6, 0))
for (f in cols) {
for (f in features) {
ord <- order(data[, f])
x <- data[, f][ord]
y <- shap_contrib[, f][ord]
@@ -192,7 +158,7 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
grid()
if (plot_loess) {
# compress x to 3 digits, and mean-aggregate y
zz <- data.table(x = signif(x, 3), y)[, .(.N, y=mean(y)), x]
zz <- data.table(x = signif(x, 3), y)[, .(.N, y = mean(y)), x]
if (nrow(zz) <= 5) {
lines(zz$x, zz$y, col = col_loess)
} else {
@@ -216,3 +182,108 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
}
invisible(list(data = data, shap_contrib = shap_contrib))
}
#' SHAP contribution dependency summary plot
#'
#' Compare SHAP contributions of different features.
#'
#' A point plot (each point representing one sample from \code{data}) is
#' produced for each feature, with the points plotted on the SHAP value axis.
#' Each point (observation) is coloured based on its feature value. The plot
#' hence allows us to see which features have a negative / positive contribution
#' on the model prediction, and whether the contribution is different for larger
#' or smaller values of the feature. We effectively try to replicate the
#' \code{summary_plot} function from https://github.com/slundberg/shap.
#'
#' @inheritParams xgb.plot.shap
#'
#' @return A \code{ggplot2} object.
#' @export
#'
#' @examples # See \code{\link{xgb.plot.shap}}.
#' @seealso \code{\link{xgb.plot.shap}}, \code{\link{xgb.ggplot.shap.summary}},
#' \url{https://github.com/slundberg/shap}
xgb.plot.shap.summary <- function(data, shap_contrib = NULL, features = NULL, top_n = 10, model = NULL,
trees = NULL, target_class = NULL, approxcontrib = FALSE, subsample = NULL) {
# Only ggplot implementation is available.
xgb.ggplot.shap.summary(data, shap_contrib, features, top_n, model, trees, target_class, approxcontrib, subsample)
}
#' Prepare data for SHAP plots. To be used in xgb.plot.shap, xgb.plot.shap.summary, etc.
#' Internal utility function.
#'
#' @inheritParams xgb.plot.shap
#' @keywords internal
#'
#' @return A list containing: 'data', a matrix containing sample observations
#' and their feature values; 'shap_contrib', a matrix containing the SHAP contribution
#' values for these observations.
xgb.shap.data <- function(data, shap_contrib = NULL, features = NULL, top_n = 1, model = NULL,
trees = NULL, target_class = NULL, approxcontrib = FALSE,
subsample = NULL, max_observations = 100000) {
if (!is.matrix(data) && !inherits(data, "dgCMatrix"))
stop("data: must be either matrix or dgCMatrix")
if (is.null(shap_contrib) && (is.null(model) || !inherits(model, "xgb.Booster")))
stop("when shap_contrib is not provided, one must provide an xgb.Booster model")
if (is.null(features) && (is.null(model) || !inherits(model, "xgb.Booster")))
stop("when features are not provided, one must provide an xgb.Booster model to rank the features")
if (!is.null(shap_contrib) &&
(!is.matrix(shap_contrib) || nrow(shap_contrib) != nrow(data) || ncol(shap_contrib) != ncol(data) + 1))
stop("shap_contrib is not compatible with the provided data")
if (is.character(features) && is.null(colnames(data)))
stop("either provide `data` with column names or provide `features` as column indices")
if (is.null(model$feature_names) && model$nfeatures != ncol(data))
stop("if model has no feature_names, columns in `data` must match features in model")
if (!is.null(subsample)) {
idx <- sample(x = seq_len(nrow(data)), size = as.integer(subsample * nrow(data)), replace = FALSE)
} else {
idx <- seq_len(min(nrow(data), max_observations))
}
data <- data[idx, ]
if (is.null(colnames(data))) {
colnames(data) <- paste0("X", seq_len(ncol(data)))
}
if (!is.null(shap_contrib)) {
if (is.list(shap_contrib)) { # multiclass: either choose a class or merge
shap_contrib <- if (!is.null(target_class)) shap_contrib[[target_class + 1]] else Reduce("+", lapply(shap_contrib, abs))
}
shap_contrib <- shap_contrib[idx, ]
if (is.null(colnames(shap_contrib))) {
colnames(shap_contrib) <- paste0("X", seq_len(ncol(data)))
}
} else {
shap_contrib <- predict(model, newdata = data, predcontrib = TRUE, approxcontrib = approxcontrib)
if (is.list(shap_contrib)) { # multiclass: either choose a class or merge
shap_contrib <- if (!is.null(target_class)) shap_contrib[[target_class + 1]] else Reduce("+", lapply(shap_contrib, abs))
}
}
if (is.null(features)) {
if (!is.null(model$feature_names)) {
imp <- xgb.importance(model = model, trees = trees)
} else {
imp <- xgb.importance(model = model, trees = trees, feature_names = colnames(data))
}
top_n <- top_n[1]
if (top_n < 1 | top_n > 100) stop("top_n: must be an integer within [1, 100]")
features <- imp$Feature[1:min(top_n, NROW(imp))]
}
if (is.character(features)) {
features <- match(features, colnames(data))
}
shap_contrib <- shap_contrib[, features, drop = FALSE]
data <- data[, features, drop = FALSE]
list(
data = data,
shap_contrib = shap_contrib
)
}
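Although internal, the helper's contract can be sketched as follows (assuming a trained booster `bst` and a feature matrix `x` with column names):

dl <- xgb.shap.data(data = x, model = bst, top_n = 3)
dim(dl$data)          # subsampled rows x the top_n selected features
dim(dl$shap_contrib)  # SHAP values aligned row/column-wise with dl$data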


@@ -80,12 +80,12 @@ xgb.plot.tree <- function(feature_names = NULL, model = NULL, trees = NULL, plot
dt <- xgb.model.dt.tree(feature_names = feature_names, model = model, trees = trees)
dt[, label:= paste0(Feature, "\nCover: ", Cover, ifelse(Feature == "Leaf", "\nValue: ", "\nGain: "), Quality)]
dt[, label := paste0(Feature, "\nCover: ", Cover, ifelse(Feature == "Leaf", "\nValue: ", "\nGain: "), Quality)]
if (show_node_id)
dt[, label := paste0(ID, ": ", label)]
dt[Node == 0, label := paste0("Tree ", Tree, "\n", label)]
dt[, shape:= "rectangle"][Feature == "Leaf", shape:= "oval"]
dt[, filledcolor:= "Beige"][Feature == "Leaf", filledcolor:= "Khaki"]
dt[, shape := "rectangle"][Feature == "Leaf", shape := "oval"]
dt[, filledcolor := "Beige"][Feature == "Leaf", filledcolor := "Khaki"]
# in order to draw the first tree on top:
dt <- dt[order(-Tree)]


@@ -13,7 +13,11 @@
#'
#' Note: a model can also be saved as an R-object (e.g., by using \code{\link[base]{readRDS}}
#' or \code{\link[base]{save}}). However, it would then only be compatible with R, and
#' corresponding R-methods would need to be used to load it.
#' corresponding R-methods would need to be used to load it. Moreover, persisting the model with
#' \code{\link[base]{saveRDS}} or \code{\link[base]{save}} will cause compatibility problems in
#' future versions of XGBoost. Consult \code{\link{a-compatibility-note-for-saveRDS-save}} to learn
#' how to persist models in a future-proof way, i.e. to make the model accessible in future
#' releases of XGBoost.
#'
#' @seealso
#' \code{\link{xgb.load}}, \code{\link{xgb.Booster.complete}}.


@@ -1,5 +1,5 @@
#' Save xgboost model to R's raw vector,
#' user can call xgb.load to load the model back from raw vector
#' user can call xgb.load.raw to load the model back from raw vector
#'
#' Save xgboost model from xgboost or xgb.train
#'
@@ -13,11 +13,11 @@
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' raw <- xgb.save.raw(bst)
#' bst <- xgb.load(raw)
#' bst <- xgb.load.raw(raw)
#' pred <- predict(bst, test$data)
#'
#' @export
xgb.save.raw <- function(model) {
model <- xgb.get.handle(model)
.Call(XGBoosterModelToRaw_R, model)
handle <- xgb.get.handle(model)
.Call(XGBoosterModelToRaw_R, handle)
}


@@ -0,0 +1,21 @@
#' Serialize the booster instance into R's raw vector. The serialization method differs
#' from \code{\link{xgb.save.raw}}, as the latter saves only the model but not its
#' parameters. This serialization format is not stable across different xgboost versions.
#'
#' @param booster the booster instance
#'
#' @examples
#' data(agaricus.train, package='xgboost')
#' data(agaricus.test, package='xgboost')
#' train <- agaricus.train
#' test <- agaricus.test
#' bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
#' eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
#' raw <- xgb.serialize(bst)
#' bst <- xgb.unserialize(raw)
#'
#' @export
xgb.serialize <- function(booster) {
handle <- xgb.get.handle(booster)
.Call(XGBoosterSerializeToBuffer_R, handle)
}
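A hedged sketch contrasting the two raw formats (`bst` is an assumed trained booster): xgb.save.raw() yields a stable, model-only buffer, while xgb.serialize() also carries the internal parameters at the cost of a version-specific format.

model_only <- xgb.save.raw(bst)   # portable across xgboost versions
full_state <- xgb.serialize(bst)  # model + parameters, same-version only
handle <- xgb.unserialize(full_state)
xgb.parameters(handle) <- list(nthread = 1)  # the restored handle is fully usable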


@@ -3,9 +3,9 @@
#' \code{xgb.train} is an advanced interface for training an xgboost model.
#' The \code{xgboost} function is a simpler wrapper for \code{xgb.train}.
#'
#' @param params the list of parameters.
#' The complete list of parameters is available at \url{http://xgboost.readthedocs.io/en/latest/parameter.html}.
#' Below is a shorter summary:
#' @param params the list of parameters. The complete list of parameters is
#' available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below
#' is a shorter summary:
#'
#' 1. General Parameters
#'
@@ -43,13 +43,23 @@
#' \item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
#' \itemize{
#' \item \code{reg:squarederror} Regression with squared loss (Default).
#' \item \code{reg:squaredlogerror}: regression with squared log loss \eqn{1/2 * (log(pred + 1) - log(label + 1))^2}. All inputs are required to be greater than -1. Also, see metric rmsle for possible issue with this objective.
#' \item \code{reg:logistic} logistic regression.
#' \item \code{reg:pseudohubererror}: regression with Pseudo Huber loss, a twice differentiable alternative to absolute loss.
#' \item \code{binary:logistic} logistic regression for binary classification. Output probability.
#' \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
#' \item \code{num_class} set the number of classes. To use only with multiclass objectives.
#' \item \code{binary:hinge}: hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities.
#' \item \code{count:poisson}: poisson regression for count data, output mean of poisson distribution. \code{max_delta_step} is set to 0.7 by default in poisson regression (used to safeguard optimization).
#' \item \code{survival:cox}: Cox regression for right censored survival time data (negative values are considered right censored). Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional hazard function \code{h(t) = h0(t) * HR)}.
#' \item \code{survival:aft}: Accelerated failure time model for censored survival time data. See \href{https://xgboost.readthedocs.io/en/latest/tutorials/aft_survival_analysis.html}{Survival Analysis with Accelerated Failure Time} for details.
#' \item \code{aft_loss_distribution}: Probability Density Function used by \code{survival:aft} and \code{aft-nloglik} metric.
#' \item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}.
#' \item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
#' \item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss.
#' \item \code{rank:ndcg}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Discounted_cumulative_gain}{Normalized Discounted Cumulative Gain (NDCG)} is maximized.
#' \item \code{rank:map}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision}{Mean Average Precision (MAP)} is maximized.
#' \item \code{reg:gamma}: gamma regression with log-link. Output is a mean of gamma distribution. It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Gamma_distribution#Applications}{gamma-distributed}.
#' \item \code{reg:tweedie}: Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Tweedie_distribution#Applications}{Tweedie-distributed}.
#' }
#' \item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
#' \item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: metric will be assigned according to objective(rmse for regression, and error for classification, mean average precision for ranking). List is provided in detail section.
@@ -120,16 +130,18 @@
#' Note that when using a customized metric, only this single metric can be used.
#' The following is the list of built-in metrics for which Xgboost provides optimized implementation:
#' \itemize{
#' \item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
#' \item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
#' \item \code{mlogloss} multiclass logloss. \url{http://wiki.fast.ai/index.php/Log_Loss}
#' \item \code{rmse} root mean square error. \url{https://en.wikipedia.org/wiki/Root_mean_square_error}
#' \item \code{logloss} negative log-likelihood. \url{https://en.wikipedia.org/wiki/Log-likelihood}
#' \item \code{mlogloss} multiclass logloss. \url{https://scikit-learn.org/stable/modules/generated/sklearn.metrics.log_loss.html}
#' \item \code{error} Binary classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
#' By default, it uses the 0.5 threshold for predicted values to define negative and positive instances.
#' Different threshold (e.g., 0.) could be specified as "error@0."
#' \item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
#' \item \code{auc} Area under the curve. \url{http://en.wikipedia.org/wiki/Receiver_operating_characteristic#'Area_under_curve} for ranking evaluation.
#' \item \code{mae} Mean absolute error
#' \item \code{mape} Mean absolute percentage error
#' \item \code{auc} Area under the curve. \url{https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_curve} for ranking evaluation.
#' \item \code{aucpr} Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation.
#' \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{http://en.wikipedia.org/wiki/NDCG}
#' \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{https://en.wikipedia.org/wiki/NDCG}
#' }
#'
#' The following callbacks are automatically created when certain parameters are set:
@@ -267,8 +279,8 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
}
# evaluation printing callback
params <- c(params, list(silent = ifelse(verbose > 1, 0, 1)))
print_every_n <- max( as.integer(print_every_n), 1L)
params <- c(params)
print_every_n <- max(as.integer(print_every_n), 1L)
if (!has.callbacks(callbacks, 'cb.print.evaluation') &&
verbose) {
callbacks <- add.cb(callbacks, cb.print.evaluation(print_every_n))
@@ -291,8 +303,10 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
callbacks <- add.cb(callbacks, cb.early.stop(early_stopping_rounds,
maximize = maximize, verbose = verbose))
}
# Sort the callbacks into categories
cb <- categorize.callbacks(callbacks)
params['validate_parameters'] <- TRUE
if (!is.null(params[['seed']])) {
warning("xgb.train: `seed` is ignored in R package. Use `set.seed()` instead.")
}
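Because a `seed` entry in params is ignored by the R package (see the warning above), a reproducibility sketch with an assumed `dtrain` would seed R's RNG instead:

set.seed(42)  # replaces params = list(seed = 42), which only triggers the warning
bst <- xgb.train(params = list(objective = "binary:logistic", nthread = 2),
                 data = dtrain, nrounds = 2)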
@@ -316,12 +330,9 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
niter_init <- xgb.ntree(bst) %/% (num_parallel_tree * num_class)
}
}
if(is_update && nrounds > niter_init)
if (is_update && nrounds > niter_init)
stop("nrounds cannot be larger than ", niter_init, " (nrounds of xgb_model)")
# TODO: distributed code
rank <- 0
niter_skip <- ifelse(is_update, 0, niter_init)
begin_iteration <- niter_skip + 1
end_iteration <- niter_skip + nrounds
@@ -333,7 +344,6 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
xgb.iter.update(bst$handle, dtrain, iteration - 1, obj)
bst_evaluation <- numeric(0)
if (length(watchlist) > 0)
bst_evaluation <- xgb.iter.eval(bst$handle, watchlist, iteration - 1, feval)
@@ -348,7 +358,7 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
bst <- xgb.Booster.complete(bst, saveraw = TRUE)
# store the total number of boosting iterations
bst$niter = end_iteration
bst$niter <- end_iteration
# store the evaluation results
if (length(evaluation_log) > 0 &&


@@ -0,0 +1,31 @@
#' Load the instance back from \code{\link{xgb.serialize}}
#'
#' @param buffer the buffer containing booster instance saved by \code{\link{xgb.serialize}}
#'
#' @export
xgb.unserialize <- function(buffer) {
cachelist <- list()
handle <- .Call(XGBoosterCreate_R, cachelist)
tryCatch(
.Call(XGBoosterUnserializeFromBuffer_R, handle, buffer),
error = function(e) {
error_msg <- conditionMessage(e)
m <- regexec("(src[\\\\/]learner.cc:[0-9]+): Check failed: (header == serialisation_header_)",
error_msg, perl = TRUE)
groups <- regmatches(error_msg, m)[[1]]
if (length(groups) == 3) {
warning(paste("The model had been generated by XGBoost version 1.0.0 or earlier and was ",
"loaded from a RDS file. We strongly ADVISE AGAINST using saveRDS() ",
"function, to ensure that your model can be read in current and upcoming ",
"XGBoost releases. Please use xgb.save() instead to preserve models for the ",
"long term. For more details and explanation, see ",
"https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html",
sep = ""))
.Call(XGBoosterLoadModelFromRaw_R, handle, buffer)
} else {
stop(e)
}
})
class(handle) <- "xgb.Booster.handle"
return (handle)
}


@@ -91,11 +91,6 @@ NULL
#' @importFrom data.table setkeyv
#' @importFrom data.table setnames
#' @importFrom magrittr %>%
#' @importFrom stringi stri_detect_regex
#' @importFrom stringi stri_match_first_regex
#' @importFrom stringi stri_replace_first_regex
#' @importFrom stringi stri_replace_all_regex
#' @importFrom stringi stri_split_regex
#' @importFrom utils object.size str tail
#' @importFrom stats predict
#' @importFrom stats median

R-package/configure

@@ -613,6 +613,7 @@ infodir
docdir
oldincludedir
includedir
runstatedir
localstatedir
sharedstatedir
sysconfdir
@@ -682,6 +683,7 @@ datadir='${datarootdir}'
sysconfdir='${prefix}/etc'
sharedstatedir='${prefix}/com'
localstatedir='${prefix}/var'
runstatedir='${localstatedir}/run'
includedir='${prefix}/include'
oldincludedir='/usr/include'
docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
@@ -934,6 +936,15 @@ do
| -silent | --silent | --silen | --sile | --sil)
silent=yes ;;
-runstatedir | --runstatedir | --runstatedi | --runstated \
| --runstate | --runstat | --runsta | --runst | --runs \
| --run | --ru | --r)
ac_prev=runstatedir ;;
-runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
| --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
| --run=* | --ru=* | --r=*)
runstatedir=$ac_optarg ;;
-sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
ac_prev=sbindir ;;
-sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
@@ -1071,7 +1082,7 @@ fi
for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
datadir sysconfdir sharedstatedir localstatedir includedir \
oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
libdir localedir mandir
libdir localedir mandir runstatedir
do
eval ac_val=\$$ac_var
# Remove trailing slashes.
@@ -1224,6 +1235,7 @@ Fine tuning of the installation directories:
--sysconfdir=DIR read-only single-machine data [PREFIX/etc]
--sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
--localstatedir=DIR modifiable single-machine data [PREFIX/var]
--runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run]
--libdir=DIR object code libraries [EPREFIX/lib]
--includedir=DIR C header files [PREFIX/include]
--oldincludedir=DIR C header files for non-gcc [/usr/include]
@@ -2698,7 +2710,7 @@ fi
if test `uname -s` = "Darwin"
then
OPENMP_CXXFLAGS='-Xclang -fopenmp'
OPENMP_LIB='/usr/local/lib/libomp.dylib'
OPENMP_LIB='-lomp'
ac_pkg_openmp=no
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether OpenMP will work in a package" >&5
$as_echo_n "checking whether OpenMP will work in a package... " >&6; }
@@ -2713,14 +2725,14 @@ main ()
return 0;
}
_ACEOF
${CC} -o conftest conftest.c /usr/local/lib/libomp.dylib -Xclang -fopenmp 2>/dev/null && ./conftest && ac_pkg_openmp=yes
${CC} -o conftest conftest.c ${OPENMP_LIB} ${OPENMP_CXXFLAGS} 2>/dev/null && ./conftest && ac_pkg_openmp=yes
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${ac_pkg_openmp}" >&5
$as_echo "${ac_pkg_openmp}" >&6; }
if test "${ac_pkg_openmp}" = no; then
OPENMP_CXXFLAGS=''
OPENMP_LIB=''
echo '*****************************************************************************************'
echo 'WARNING: OpenMP is unavailable on this Mac OSX system. Training speed may be suboptimal.'
echo ' OpenMP is unavailable on this Mac OSX system. Training speed may be suboptimal.'
echo ' To use all CPU cores for training jobs, you should install OpenMP by running\n'
echo ' brew install libomp'
echo '*****************************************************************************************'


@@ -1,6 +1,6 @@
### configure.ac -*- Autoconf -*-
AC_PREREQ(2.62)
AC_PREREQ(2.69)
AC_INIT([xgboost],[0.6-3],[],[xgboost],[])
@@ -29,17 +29,17 @@ fi
if test `uname -s` = "Darwin"
then
OPENMP_CXXFLAGS='-Xclang -fopenmp'
OPENMP_LIB='/usr/local/lib/libomp.dylib'
OPENMP_LIB='-lomp'
ac_pkg_openmp=no
AC_MSG_CHECKING([whether OpenMP will work in a package])
AC_LANG_CONFTEST([AC_LANG_PROGRAM([[#include <omp.h>]], [[ return (omp_get_max_threads() <= 1); ]])])
${CC} -o conftest conftest.c /usr/local/lib/libomp.dylib -Xclang -fopenmp 2>/dev/null && ./conftest && ac_pkg_openmp=yes
${CC} -o conftest conftest.c ${OPENMP_LIB} ${OPENMP_CXXFLAGS} 2>/dev/null && ./conftest && ac_pkg_openmp=yes
AC_MSG_RESULT([${ac_pkg_openmp}])
if test "${ac_pkg_openmp}" = no; then
OPENMP_CXXFLAGS=''
OPENMP_LIB=''
echo '*****************************************************************************************'
echo 'WARNING: OpenMP is unavailable on this Mac OSX system. Training speed may be suboptimal.'
echo ' OpenMP is unavailable on this Mac OSX system. Training speed may be suboptimal.'
echo ' To use all CPU cores for training jobs, you should install OpenMP by running\n'
echo ' brew install libomp'
echo '*****************************************************************************************'
@@ -52,4 +52,3 @@ AC_SUBST(ENDIAN_FLAG)
AC_SUBST(BACKTRACE_LIB)
AC_CONFIG_FILES([src/Makevars])
AC_OUTPUT


@@ -17,4 +17,4 @@ Benchmarks
Notes
====
* Contribution of examples, benchmarks is more than welcomed!
* If you like to share how you use xgboost to solve your problem, send a pull request:)
* If you like to share how you use xgboost to solve your problem, send a pull request :)


@@ -3,8 +3,8 @@ require(methods)
# we load in the agaricus dataset
# In this example, we are aiming to predict whether a mushroom is edible
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
train <- agaricus.train
test <- agaricus.test
# the loaded data is stored in sparseMatrix, and label is a numeric vector in {0,1}
@@ -58,31 +58,31 @@ xgb.save(bst, "xgboost.model")
bst2 <- xgb.load("xgboost.model")
pred2 <- predict(bst2, test$data)
# pred2 should be identical to pred
print(paste("sum(abs(pred2-pred))=", sum(abs(pred2-pred))))
print(paste("sum(abs(pred2-pred))=", sum(abs(pred2 - pred))))
# save model to R's raw vector
raw = xgb.save.raw(bst)
raw <- xgb.save.raw(bst)
# load binary model to R
bst3 <- xgb.load(raw)
pred3 <- predict(bst3, test$data)
# pred3 should be identical to pred
print(paste("sum(abs(pred3-pred))=", sum(abs(pred3-pred))))
print(paste("sum(abs(pred3-pred))=", sum(abs(pred3 - pred))))
#----------------Advanced features --------------
# to use advanced features, we need to put data in xgb.DMatrix
dtrain <- xgb.DMatrix(data = train$data, label=train$label)
dtest <- xgb.DMatrix(data = test$data, label=test$label)
dtrain <- xgb.DMatrix(data = train$data, label = train$label)
dtest <- xgb.DMatrix(data = test$data, label = test$label)
#---------------Using watchlist----------------
# watchlist is a list of xgb.DMatrix, each of them is tagged with name
watchlist <- list(train=dtrain, test=dtest)
watchlist <- list(train = dtrain, test = dtest)
# to train with watchlist, use xgb.train, which contains more advanced features
# watchlist allows us to monitor the evaluation result on all data in the list
print("Train xgboost using xgb.train with watchlist")
bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nrounds=2, watchlist=watchlist,
bst <- xgb.train(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, watchlist = watchlist,
nthread = 2, objective = "binary:logistic")
# we can change evaluation metrics, or use multiple evaluation metrics
print("train xgboost using xgb.train with watchlist, watch logloss and error")
bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nrounds=2, watchlist=watchlist,
bst <- xgb.train(data = dtrain, max_depth = 2, eta = 1, nrounds = 2, watchlist = watchlist,
eval_metric = "error", eval_metric = "logloss",
nthread = 2, objective = "binary:logistic")
@@ -90,17 +90,17 @@ bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nrounds=2, watchlist=watchlist
xgb.DMatrix.save(dtrain, "dtrain.buffer")
# to load it in, simply call xgb.DMatrix
dtrain2 <- xgb.DMatrix("dtrain.buffer")
bst <- xgb.train(data=dtrain2, max_depth=2, eta=1, nrounds=2, watchlist=watchlist,
bst <- xgb.train(data = dtrain2, max_depth = 2, eta = 1, nrounds = 2, watchlist = watchlist,
nthread = 2, objective = "binary:logistic")
# information can be extracted from xgb.DMatrix using getinfo
label = getinfo(dtest, "label")
label <- getinfo(dtest, "label")
pred <- predict(bst, dtest)
err <- as.numeric(sum(as.integer(pred > 0.5) != label))/length(label)
err <- as.numeric(sum(as.integer(pred > 0.5) != label)) / length(label)
print(paste("test-error=", err))
# You can dump the tree you learned using xgb.dump into a text file
dump_path = file.path(tempdir(), 'dump.raw.txt')
xgb.dump(bst, dump_path, with_stats = T)
dump_path <- file.path(tempdir(), 'dump.raw.txt')
xgb.dump(bst, dump_path, with_stats = TRUE)
# Finally, you can check which features are the most important.
print("Most important features (look at column Gain):")


@@ -1,7 +1,7 @@
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
@@ -11,12 +11,12 @@ watchlist <- list(eval = dtest, train = dtrain)
#
print('start running example to start from an initial prediction')
# train xgboost for 1 round
param <- list(max_depth=2, eta=1, nthread = 2, silent=1, objective='binary:logistic')
param <- list(max_depth = 2, eta = 1, nthread = 2, objective = 'binary:logistic')
bst <- xgb.train(param, dtrain, 1, watchlist)
# Note: we need the margin value instead of transformed prediction in set_base_margin
# predicting with output_margin=TRUE always gives the margin values before the logistic transformation
ptrain <- predict(bst, dtrain, outputmargin=TRUE)
ptest <- predict(bst, dtest, outputmargin=TRUE)
ptrain <- predict(bst, dtrain, outputmargin = TRUE)
ptest <- predict(bst, dtest, outputmargin = TRUE)
# set the base_margin property of dtrain and dtest
# base margin is the base prediction we will boost from
setinfo(dtrain, "base_margin", ptrain)

View File

@@ -9,17 +9,17 @@ require(e1071)
# Load Arthritis dataset in memory.
data(Arthritis)
# Create a copy of the dataset with the data.table package (data.table is 100% compliant with R data frames, but its syntax is a lot more consistent and its performance is really good).
df <- data.table(Arthritis, keep.rownames = F)
df <- data.table(Arthritis, keep.rownames = FALSE)
# Let's add some new categorical features to see if it helps. Of course these features are highly correlated with the Age feature. Usually that's not a good thing in ML, but tree algorithms (including boosted trees) are able to select the best features, even in the presence of highly correlated ones.
# For the first feature we create groups of age by rounding the real age. Note that we transform it to a factor (categorical data) so the algorithm treats the groups as independent values.
df[,AgeDiscret:= as.factor(round(Age/10,0))]
df[, AgeDiscret := as.factor(round(Age / 10, 0))]
# Here is an even stronger simplification of the real age with an arbitrary split at 30 years old. I chose this value arbitrarily. We will see later if simplifying the information based on arbitrary values is a good strategy (I am sure you already have an idea of how well it will work!).
df[,AgeCat:= as.factor(ifelse(Age > 30, "Old", "Young"))]
df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
# We remove ID as there is nothing to learn from this feature (it will just add some noise as the dataset is small).
df[,ID:=NULL]
df[, ID := NULL]
#-------------Basic Training using XGBoost in caret Library-----------------
# Set up control parameters for caret::train

View File

@@ -19,7 +19,7 @@ if (!require(vcd)) {
data(Arthritis)
# create a copy of the dataset with the data.table package (data.table is 100% compliant with R data frames, but its syntax is a lot more consistent and its performance is really good).
df <- data.table(Arthritis, keep.rownames = F)
df <- data.table(Arthritis, keep.rownames = FALSE)
# Let's have a look to the data.table
cat("Print the dataset\n")
@@ -32,17 +32,17 @@ str(df)
# Let's add some new categorical features to see if it helps. Of course these features are highly correlated with the Age feature. Usually that's not a good thing in ML, but tree algorithms (including boosted trees) are able to select the best features, even in the presence of highly correlated ones.
# For the first feature we create groups of age by rounding the real age. Note that we transform it to a factor (categorical data) so the algorithm treats the groups as independent values.
df[,AgeDiscret:= as.factor(round(Age/10,0))]
df[, AgeDiscret := as.factor(round(Age / 10, 0))]
# Here is an even stronger simplification of the real age with an arbitrary split at 30 years old. I chose this value arbitrarily. We will see later if simplifying the information based on arbitrary values is a good strategy (I am sure you already have an idea of how well it will work!).
df[,AgeCat:= as.factor(ifelse(Age > 30, "Old", "Young"))]
df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
# We remove ID as there is nothing to learn from this feature (it will just add some noise as the dataset is small).
df[,ID:=NULL]
df[, ID := NULL]
# List the different values for the column Treatment: Placebo, Treated.
cat("Values of the categorical feature Treatment\n")
print(levels(df[,Treatment]))
print(levels(df[, Treatment]))
# Next step, we will transform the categorical data to dummy variables.
# This method is also called one hot encoding.
@@ -52,7 +52,7 @@ print(levels(df[,Treatment]))
#
# The formula Improved ~ . - 1 used below means: transform all categorical features except column Improved to binary values.
# Column Improved is excluded because it will be our output column, the one we want to predict.
sparse_matrix = sparse.model.matrix(Improved~.-1, data = df)
sparse_matrix <- sparse.model.matrix(Improved ~ . - 1, data = df)
cat("Encoding of the sparse Matrix\n")
print(sparse_matrix)
@@ -61,7 +61,7 @@ print(sparse_matrix)
# 1. Set, for all rows, field in Y column to 0;
# 2. set Y to 1 when Improved == Marked;
# 3. Return Y column
output_vector = df[,Y:=0][Improved == "Marked",Y:=1][,Y]
output_vector <- df[, Y := 0][Improved == "Marked", Y := 1][, Y]
# Following is the same process as other demo
cat("Learning...\n")

View File

@@ -1,25 +1,25 @@
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
nrounds <- 2
param <- list(max_depth=2, eta=1, silent=1, nthread=2, objective='binary:logistic')
param <- list(max_depth = 2, eta = 1, nthread = 2, objective = 'binary:logistic')
cat('running cross validation\n')
# do cross validation, this will print result out as
# [iteration] metric_name:mean_value+std_value
# std_value is standard deviation of the metric
xgb.cv(param, dtrain, nrounds, nfold=5, metrics={'error'})
xgb.cv(param, dtrain, nrounds, nfold = 5, metrics = {'error'})
cat('running cross validation, disable standard deviation display\n')
# do cross validation, this will print result out as
# [iteration] metric_name:mean_value+std_value
# std_value is standard deviation of the metric
xgb.cv(param, dtrain, nrounds, nfold=5,
metrics='error', showsd = FALSE)
xgb.cv(param, dtrain, nrounds, nfold = 5,
metrics = 'error', showsd = FALSE)
###
# you can also do cross validation with a customized loss function
@@ -29,18 +29,18 @@ print ('running cross validation, with customized loss function')
logregobj <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
preds <- 1/(1 + exp(-preds))
preds <- 1 / (1 + exp(-preds))
grad <- preds - labels
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
}
evalerror <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
err <- as.numeric(sum(labels != (preds > 0))) / length(labels)
return(list(metric = "error", value = err))
}
param <- list(max_depth=2, eta=1, silent=1,
param <- list(max_depth = 2, eta = 1,
objective = logregobj, eval_metric = evalerror)
# train with customized objective
xgb.cv(params = param, data = dtrain, nrounds = nrounds, nfold = 5)
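As an aside (not in the demo), the analytic gradient returned by logregobj can be sanity-checked against a finite-difference estimate of the logistic loss; a small self-contained sketch:

# logistic loss at margin m with label y
loss <- function(m, y) {
  p <- 1 / (1 + exp(-m))
  -(y * log(p) + (1 - y) * log(1 - p))
}
m <- 0.3; y <- 1; eps <- 1e-6
p <- 1 / (1 + exp(-m))
# the forward difference should match grad = p - y as returned by logregobj above
all.equal((loss(m + eps, y) - loss(m, y)) / eps, p - y, tolerance = 1e-4)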

View File

@@ -1,7 +1,7 @@
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
@@ -15,7 +15,7 @@ num_round <- 2
# this is loglikelihood loss
logregobj <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
preds <- 1/(1 + exp(-preds))
preds <- 1 / (1 + exp(-preds))
grad <- preds - labels
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
@@ -29,12 +29,12 @@ logregobj <- function(preds, dtrain) {
# Keep this in mind when you use the customization, as you may need to write a customized evaluation function
evalerror <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
err <- as.numeric(sum(labels != (preds > 0))) / length(labels)
return(list(metric = "error", value = err))
}
param <- list(max_depth=2, eta=1, nthread = 2, verbosity=0,
objective=logregobj, eval_metric=evalerror)
param <- list(max_depth = 2, eta = 1, nthread = 2, verbosity = 0,
objective = logregobj, eval_metric = evalerror)
print ('start training with user customized objective')
# training with customized objective, we can also do step by step training
# simply look at xgboost.py's implementation of train
@@ -52,13 +52,13 @@ attr(dtrain, 'label') <- getinfo(dtrain, 'label')
logregobjattr <- function(preds, dtrain) {
# now you can access the attribute in customized function
labels <- attr(dtrain, 'label')
preds <- 1/(1 + exp(-preds))
preds <- 1 / (1 + exp(-preds))
grad <- preds - labels
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
}
param <- list(max_depth=2, eta=1, nthread = 2, verbosity=0,
objective=logregobjattr, eval_metric=evalerror)
param <- list(max_depth = 2, eta = 1, nthread = 2, verbosity = 0,
objective = logregobjattr, eval_metric = evalerror)
print ('start training with user customized objective, with additional attributes in DMatrix')
# training with customized objective, we can also do step by step training
# simply look at xgboost.py's implementation of train

View File

@@ -1,20 +1,20 @@
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
# note: for customized objective function, we leave objective as default
# note: what we are getting is margin value in prediction
# you must know what you are doing
param <- list(max_depth=2, eta=1, nthread=2, verbosity=0)
param <- list(max_depth = 2, eta = 1, nthread = 2, verbosity = 0)
watchlist <- list(eval = dtest)
num_round <- 20
# user define objective function, given prediction, return gradient and second order gradient
# this is loglikelihood loss
logregobj <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
preds <- 1/(1 + exp(-preds))
preds <- 1 / (1 + exp(-preds))
grad <- preds - labels
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
@@ -27,7 +27,7 @@ logregobj <- function(preds, dtrain) {
# Keep this in mind when you use the customization, as you may need to write a customized evaluation function
evalerror <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
err <- as.numeric(sum(labels != (preds > 0)))/length(labels)
err <- as.numeric(sum(labels != (preds > 0))) / length(labels)
return(list(metric = "error", value = err))
}
print ('start training with early Stopping setting')

View File

@@ -1,7 +1,7 @@
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
##
@@ -30,5 +30,4 @@ num_round <- 2
bst <- xgb.train(param, dtrain, num_round, watchlist)
ypred <- predict(bst, dtest)
labels <- getinfo(dtest, 'label')
cat('error of preds=', mean(as.numeric(ypred>0.5)!=labels),'\n')
cat('error of preds=', mean(as.numeric(ypred > 0.5) != labels), '\n')

View File

@@ -21,8 +21,8 @@ m <- X[, sel] %*% betas - 1 + rnorm(N)
y <- rbinom(N, 1, plogis(m))
tr <- sample.int(N, N * 0.75)
dtrain <- xgb.DMatrix(X[tr,], label = y[tr])
dtest <- xgb.DMatrix(X[-tr,], label = y[-tr])
dtrain <- xgb.DMatrix(X[tr, ], label = y[tr])
dtest <- xgb.DMatrix(X[-tr, ], label = y[-tr])
wl <- list(train = dtrain, test = dtest)
# An example of running 'gpu_hist' algorithm
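The hunk stops before the training call; a sketch of how 'gpu_hist' is typically enabled on this data (requires a CUDA-enabled build; the parameter values are illustrative, not taken from the demo):

param <- list(objective = 'binary:logistic', tree_method = 'gpu_hist')
bst <- xgb.train(param, dtrain, nrounds = 100, watchlist = wl)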

View File

@@ -4,34 +4,39 @@ library(data.table)
set.seed(1024)
# Function to obtain a list of interactions fitted in trees, requires input of maximum depth
treeInteractions <- function(input_tree, input_max_depth){
trees <- copy(input_tree) # copy tree input to prevent overwriting
treeInteractions <- function(input_tree, input_max_depth) {
ID_merge <- i.id <- i.feature <- NULL # Suppress warning "no visible binding for global variable"
trees <- data.table::copy(input_tree) # copy tree input to prevent overwriting
if (input_max_depth < 2) return(list()) # no interactions if max depth < 2
if (nrow(input_tree) == 1) return(list())
# Attach parent nodes
for (i in 2:input_max_depth){
if (i == 2) trees[, ID_merge:=ID] else trees[, ID_merge:=get(paste0('parent_',i-2))]
parents_left <- trees[!is.na(Split), list(i.id=ID, i.feature=Feature, ID_merge=Yes)]
parents_right <- trees[!is.na(Split), list(i.id=ID, i.feature=Feature, ID_merge=No)]
for (i in 2:input_max_depth) {
if (i == 2) trees[, ID_merge := ID] else trees[, ID_merge := get(paste0('parent_', i - 2))]
parents_left <- trees[!is.na(Split), list(i.id = ID, i.feature = Feature, ID_merge = Yes)]
parents_right <- trees[!is.na(Split), list(i.id = ID, i.feature = Feature, ID_merge = No)]
setorderv(trees, 'ID_merge')
setorderv(parents_left, 'ID_merge')
setorderv(parents_right, 'ID_merge')
data.table::setorderv(trees, 'ID_merge')
data.table::setorderv(parents_left, 'ID_merge')
data.table::setorderv(parents_right, 'ID_merge')
trees <- merge(trees, parents_left, by='ID_merge', all.x=T)
trees[!is.na(i.id), c(paste0('parent_', i-1), paste0('parent_feat_', i-1)):=list(i.id, i.feature)]
trees[, c('i.id','i.feature'):=NULL]
trees <- merge(trees, parents_left, by = 'ID_merge', all.x = TRUE)
trees[!is.na(i.id), c(paste0('parent_', i - 1), paste0('parent_feat_', i - 1))
:= list(i.id, i.feature)]
trees[, c('i.id', 'i.feature') := NULL]
trees <- merge(trees, parents_right, by='ID_merge', all.x=T)
trees[!is.na(i.id), c(paste0('parent_', i-1), paste0('parent_feat_', i-1)):=list(i.id, i.feature)]
trees[, c('i.id','i.feature'):=NULL]
trees <- merge(trees, parents_right, by = 'ID_merge', all.x = TRUE)
trees[!is.na(i.id), c(paste0('parent_', i - 1), paste0('parent_feat_', i - 1))
:= list(i.id, i.feature)]
trees[, c('i.id', 'i.feature') := NULL]
}
# Extract nodes with interactions
interaction_trees <- trees[!is.na(Split) & !is.na(parent_1),
c('Feature',paste0('parent_feat_',1:(input_max_depth-1))), with=F]
interaction_trees_split <- split(interaction_trees, 1:nrow(interaction_trees))
c('Feature', paste0('parent_feat_', 1:(input_max_depth - 1))),
with = FALSE]
interaction_trees_split <- split(interaction_trees, seq_len(nrow(interaction_trees)))
interaction_list <- lapply(interaction_trees_split, as.character)
# Remove NAs (no parent interaction)
@@ -47,56 +52,59 @@ treeInteractions <- function(input_tree, input_max_depth){
# Generate sample data
x <- list()
for (i in 1:10){
x[[i]] = i*rnorm(1000, 10)
for (i in 1:10) {
x[[i]] <- i * rnorm(1000, 10)
}
x <- as.data.table(x)
y = -1*x[, rowSums(.SD)] + x[['V1']]*x[['V2']] + x[['V3']]*x[['V4']]*x[['V5']] + rnorm(1000, 0.001) + 3*sin(x[['V7']])
y <- -1 * x[, rowSums(.SD)] + x[['V1']] * x[['V2']] + x[['V3']] * x[['V4']] * x[['V5']] +
  rnorm(1000, 0.001) + 3 * sin(x[['V7']])
train = as.matrix(x)
train <- as.matrix(x)
# Interaction constraint list (column names form)
interaction_list <- list(c('V1','V2'),c('V3','V4','V5'))
interaction_list <- list(c('V1', 'V2'), c('V3', 'V4', 'V5'))
# Convert interaction constraint list into feature index form
cols2ids <- function(object, col_names) {
LUT <- seq_along(col_names) - 1
names(LUT) <- col_names
rapply(object, function(x) LUT[x], classes="character", how="replace")
rapply(object, function(x) LUT[x], classes = "character", how = "replace")
}
interaction_list_fid = cols2ids(interaction_list, colnames(train))
interaction_list_fid <- cols2ids(interaction_list, colnames(train))
# Fit model with interaction constraints
bst = xgboost(data = train, label = y, max_depth = 4,
bst <- xgboost(data = train, label = y, max_depth = 4,
eta = 0.1, nthread = 2, nrounds = 1000,
interaction_constraints = interaction_list_fid)
bst_tree <- xgb.model.dt.tree(colnames(train), bst)
bst_interactions <- treeInteractions(bst_tree, 4) # interactions constrained to combinations of V1*V2 and V3*V4*V5
bst_interactions <- treeInteractions(bst_tree, 4)
# interactions constrained to combinations of V1*V2 and V3*V4*V5
# Fit model without interaction constraints
bst2 = xgboost(data = train, label = y, max_depth = 4,
bst2 <- xgboost(data = train, label = y, max_depth = 4,
eta = 0.1, nthread = 2, nrounds = 1000)
bst2_tree <- xgb.model.dt.tree(colnames(train), bst2)
bst2_interactions <- treeInteractions(bst2_tree, 4) # much more interactions
# Fit model with both interaction and monotonicity constraints
bst3 = xgboost(data = train, label = y, max_depth = 4,
bst3 <- xgboost(data = train, label = y, max_depth = 4,
eta = 0.1, nthread = 2, nrounds = 1000,
interaction_constraints = interaction_list_fid,
monotone_constraints = c(-1,0,0,0,0,0,0,0,0,0))
monotone_constraints = c(-1, 0, 0, 0, 0, 0, 0, 0, 0, 0))
bst3_tree <- xgb.model.dt.tree(colnames(train), bst3)
bst3_interactions <- treeInteractions(bst3_tree, 4) # interactions still constrained to combinations of V1*V2 and V3*V4*V5
bst3_interactions <- treeInteractions(bst3_tree, 4)
# interactions still constrained to combinations of V1*V2 and V3*V4*V5
# Show monotonic constraints still apply by checking scores after incrementing V1
x1 <- sort(unique(x[['V1']]))
for (i in 1:length(x1)){
testdata <- copy(x[, -c('V1')])
for (i in seq_along(x1)) {
testdata <- copy(x[, -c('V1')])
testdata[['V1']] <- x1[i]
testdata <- testdata[, paste0('V',1:10), with=F]
testdata <- testdata[, paste0('V', 1:10), with = FALSE]
pred <- predict(bst3, as.matrix(testdata))
# Should not print out anything due to monotonic constraints
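The loop body is truncated above; a compact, self-contained restatement of the check it performs (a sketch reusing x, x1 and bst3 from earlier in the demo; the demo's actual check may differ in detail):

scores <- numeric(length(x1))
for (i in seq_along(x1)) {
  testdata <- copy(x[, -c('V1')])  # drop V1, then pin it to the i-th sorted value
  testdata[['V1']] <- x1[i]
  testdata <- testdata[, paste0('V', 1:10), with = FALSE]
  scores[i] <- mean(predict(bst3, as.matrix(testdata)))
}
# monotone_constraints is -1 on V1, so the mean score must be non-increasing in V1
stopifnot(all(diff(scores) <= 1e-8))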

View File

@@ -1,7 +1,6 @@
data(mtcars)
head(mtcars)
bst = xgboost(data=as.matrix(mtcars[,-11]),label=mtcars[,11],
objective='count:poisson',nrounds=5)
pred = predict(bst,as.matrix(mtcars[,-11]))
sqrt(mean((pred-mtcars[,11])^2))
bst <- xgboost(data = as.matrix(mtcars[, -11]), label = mtcars[, 11],
objective = 'count:poisson', nrounds = 5)
pred <- predict(bst, as.matrix(mtcars[, -11]))
sqrt(mean((pred - mtcars[, 11]) ^ 2))

View File

@@ -1,23 +1,23 @@
require(xgboost)
# load in the agaricus dataset
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic')
param <- list(max_depth = 2, eta = 1, objective = 'binary:logistic')
watchlist <- list(eval = dtest, train = dtrain)
nrounds = 2
nrounds <- 2
# training the model for two rounds
bst = xgb.train(param, dtrain, nrounds, nthread = 2, watchlist)
bst <- xgb.train(param, dtrain, nrounds, nthread = 2, watchlist)
cat('start testing prediction from first n trees\n')
labels <- getinfo(dtest,'label')
labels <- getinfo(dtest, 'label')
### predict using first 1 tree
ypred1 = predict(bst, dtest, ntreelimit=1)
ypred1 <- predict(bst, dtest, ntreelimit = 1)
# by default, we predict using all the trees
ypred2 = predict(bst, dtest)
ypred2 <- predict(bst, dtest)
cat('error of ypred1=', mean(as.numeric(ypred1>0.5)!=labels),'\n')
cat('error of ypred2=', mean(as.numeric(ypred2>0.5)!=labels),'\n')
cat('error of ypred1=', mean(as.numeric(ypred1 > 0.5) != labels), '\n')
cat('error of ypred2=', mean(as.numeric(ypred2 > 0.5) != labels), '\n')

View File

@@ -5,34 +5,34 @@ require(Matrix)
set.seed(1982)
# load in the agaricus dataset
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(data = agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(data = agaricus.test$data, label = agaricus.test$label)
param <- list(max_depth=2, eta=1, silent=1, objective='binary:logistic')
nrounds = 4
param <- list(max_depth = 2, eta = 1, objective = 'binary:logistic')
nrounds <- 4
# training the model for two rounds
bst = xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)
bst <- xgb.train(params = param, data = dtrain, nrounds = nrounds, nthread = 2)
# Model accuracy without new features
accuracy.before <- sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)
accuracy.before <- (sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.test$label)
/ length(agaricus.test$label))
# by default, we predict using all the trees
pred_with_leaf = predict(bst, dtest, predleaf = TRUE)
pred_with_leaf <- predict(bst, dtest, predleaf = TRUE)
head(pred_with_leaf)
create.new.tree.features <- function(model, original.features){
pred_with_leaf <- predict(model, original.features, predleaf = TRUE)
cols <- list()
for(i in 1:model$niter){
for (i in 1:model$niter) {
# max is not the real max, but it's not important for the purpose of adding features
leaf.id <- sort(unique(pred_with_leaf[,i]))
cols[[i]] <- factor(x = pred_with_leaf[,i], level = leaf.id)
leaf.id <- sort(unique(pred_with_leaf[, i]))
cols[[i]] <- factor(x = pred_with_leaf[, i], level = leaf.id)
}
cbind(original.features, sparse.model.matrix( ~ . -1, as.data.frame(cols)))
cbind(original.features, sparse.model.matrix(~ . - 1, as.data.frame(cols)))
}
# Convert previous features to one hot encoding
@@ -47,7 +47,9 @@ watchlist <- list(train = new.dtrain)
bst <- xgb.train(params = param, data = new.dtrain, nrounds = nrounds, nthread = 2)
# Model accuracy with new features
accuracy.after <- sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label) / length(agaricus.test$label)
accuracy.after <- (sum((predict(bst, new.dtest) >= 0.5) == agaricus.test$label)
/ length(agaricus.test$label))
# Here the accuracy was already good and is now perfect.
cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now", accuracy.after, "!\n"))
cat(paste("The accuracy was", accuracy.before, "before adding leaf features and it is now",
accuracy.after, "!\n"))

View File

@@ -1,14 +1,14 @@
# running all scripts in demo folder
demo(basic_walkthrough)
demo(custom_objective)
demo(boost_from_prediction)
demo(predict_first_ntree)
demo(generalized_linear_model)
demo(cross_validation)
demo(create_sparse_matrix)
demo(predict_leaf_indices)
demo(early_stopping)
demo(poisson_regression)
demo(caret_wrapper)
demo(tweedie_regression)
#demo(gpu_accelerated) # can only run when built with GPU support
demo(basic_walkthrough, package = 'xgboost')
demo(custom_objective, package = 'xgboost')
demo(boost_from_prediction, package = 'xgboost')
demo(predict_first_ntree, package = 'xgboost')
demo(generalized_linear_model, package = 'xgboost')
demo(cross_validation, package = 'xgboost')
demo(create_sparse_matrix, package = 'xgboost')
demo(predict_leaf_indices, package = 'xgboost')
demo(early_stopping, package = 'xgboost')
demo(poisson_regression, package = 'xgboost')
demo(caret_wrapper, package = 'xgboost')
demo(tweedie_regression, package = 'xgboost')
#demo(gpu_accelerated, package = 'xgboost') # can only run when built with GPU support

R-package/demo/tweedie_regression.R Executable file → Normal file
View File

@@ -13,7 +13,7 @@ exclude <- c('POLICYNO', 'PLCYDATE', 'CLM_FREQ5', 'CLM_AMT5', 'CLM_FLAG', 'IN_Y
# retains the missing values
# NOTE: this dataset comes ready out of the box
options(na.action = 'na.pass')
x <- sparse.model.matrix(~ . - 1, data = dt[, -exclude, with = F])
x <- sparse.model.matrix(~ . - 1, data = dt[, -exclude, with = FALSE])
options(na.action = 'na.omit')
# response
@@ -46,4 +46,4 @@ var_imp <- xgb.importance(attr(x, 'Dimnames')[[2]], model = bst)
preds <- predict(bst, d_train)
rmse <- sqrt(sum(mean((y - preds)^2)))
rmse <- sqrt(sum(mean((y - preds) ^ 2)))

View File

@@ -0,0 +1,96 @@
# [description]
# Create a definition file (.def) from a .dll file, using objdump. This
# is used by FindLibR.cmake when building the R package with MSVC.
#
# [usage]
#
# Rscript make-r-def.R something.dll something.def
#
# [references]
# * https://www.cs.colorado.edu/~main/cs1300/doc/mingwfaq.html
args <- commandArgs(trailingOnly = TRUE)
IN_DLL_FILE <- args[[1L]]
OUT_DEF_FILE <- args[[2L]]
DLL_BASE_NAME <- basename(IN_DLL_FILE)
message(sprintf("Creating '%s' from '%s'", OUT_DEF_FILE, IN_DLL_FILE))
# system() will not raise an R exception if the process called
# fails. Wrapping it here to get that behavior.
#
# system() introduces a lot of overhead, at least on Windows,
# so trying processx if it is available
.pipe_shell_command_to_stdout <- function(command, args, out_file) {
has_processx <- suppressMessages({
suppressWarnings({
require("processx") # nolint
})
})
if (has_processx) {
p <- processx::process$new(
command = command
, args = args
, stdout = out_file
, windows_verbatim_args = FALSE
)
invisible(p$wait())
} else {
message(paste0(
"Using system2() to run shell commands. Installing "
, "'processx' with install.packages('processx') might "
, "make this faster."
))
exit_code <- system2(
command = command
, args = shQuote(args)
, stdout = out_file
)
if (exit_code != 0L) {
stop(paste0("Command failed with exit code: ", exit_code))
}
}
return(invisible(NULL))
}
# use objdump to dump all the symbols
OBJDUMP_FILE <- "objdump-out.txt"
.pipe_shell_command_to_stdout(
command = "objdump"
, args = c("-p", IN_DLL_FILE)
, out_file = OBJDUMP_FILE
)
objdump_results <- readLines(OBJDUMP_FILE)
result <- file.remove(OBJDUMP_FILE)
# Only one table in the objdump results matters for our purposes,
# see https://www.cs.colorado.edu/~main/cs1300/doc/mingwfaq.html
start_index <- which(
grepl(
pattern = "[Ordinal/Name Pointer] Table"
, x = objdump_results
, fixed = TRUE
)
)
empty_lines <- which(objdump_results == "")
end_of_table <- empty_lines[empty_lines > start_index][1L]
# Read the contents of the table
exported_symbols <- objdump_results[(start_index + 1L):end_of_table]
exported_symbols <- gsub("\t", "", exported_symbols)
exported_symbols <- gsub(".*\\] ", "", exported_symbols)
exported_symbols <- gsub(" ", "", exported_symbols)
# Write R.def file
writeLines(
text = c(
paste0("LIBRARY \"", DLL_BASE_NAME, "\"")
, "EXPORTS"
, exported_symbols
)
, con = OUT_DEF_FILE
, sep = "\n"
)
message(sprintf("Successfully created '%s'", OUT_DEF_FILE))

View File

@@ -0,0 +1,64 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{a-compatibility-note-for-saveRDS-save}
\alias{a-compatibility-note-for-saveRDS-save}
\title{Do not use \code{\link[base]{saveRDS}} or \code{\link[base]{save}} for long-term archival of
models. Instead, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}}.}
\description{
It is a common practice to use the built-in \code{\link[base]{saveRDS}} function (or
\code{\link[base]{save}}) to persist R objects to the disk. While it is possible to persist
\code{xgb.Booster} objects using \code{\link[base]{saveRDS}}, it is not advisable to do so if
the model is to be accessed in the future. If you train a model with the current version of
XGBoost and persist it with \code{\link[base]{saveRDS}}, the model is not guaranteed to be
accessible in later releases of XGBoost. To ensure that your model can be accessed in future
releases of XGBoost, use \code{\link{xgb.save}} or \code{\link{xgb.save.raw}} instead.
}
\details{
Use \code{\link{xgb.save}} to save the XGBoost model as a stand-alone file. You may opt into
the JSON format by specifying the JSON extension. To read the model back, use
\code{\link{xgb.load}}.
Use \code{\link{xgb.save.raw}} to save the XGBoost model as a sequence (vector) of raw bytes
in a future-proof manner. Future releases of XGBoost will be able to read the raw bytes and
re-construct the corresponding model. To read the model back, use \code{\link{xgb.load.raw}}.
The \code{\link{xgb.save.raw}} function is useful if you'd like to persist the XGBoost model
as part of another R object.
Note: Do not use \code{\link{xgb.serialize}} to store models long-term. It persists not only the
model but also internal configurations and parameters, and its format is not stable across
multiple XGBoost versions. Use \code{\link{xgb.serialize}} only for checkpointing.
For more details and explanation about model persistence and archival, consult the page
\url{https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html}.
}
\examples{
data(agaricus.train, package='xgboost')
bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
# Save as a stand-alone file; load it with xgb.load()
xgb.save(bst, 'xgb.model')
bst2 <- xgb.load('xgb.model')
# Save as a stand-alone file (JSON); load it with xgb.load()
xgb.save(bst, 'xgb.model.json')
bst2 <- xgb.load('xgb.model.json')
if (file.exists('xgb.model.json')) file.remove('xgb.model.json')
# Save as a raw byte vector; load it with xgb.load.raw()
xgb_bytes <- xgb.save.raw(bst)
bst2 <- xgb.load.raw(xgb_bytes)
# Persist XGBoost model as part of another R object
obj <- list(xgb_model_bytes = xgb.save.raw(bst), description = "My first XGBoost model")
# Persist the R object. Here, saveRDS() is okay, since it doesn't persist
# xgb.Booster directly. What's being persisted is the future-proof byte representation
# as given by xgb.save.raw().
saveRDS(obj, 'my_object.rds')
# Read back the R object
obj2 <- readRDS('my_object.rds')
# Re-construct xgb.Booster object from the bytes
bst2 <- xgb.load.raw(obj2$xgb_model_bytes)
if (file.exists('my_object.rds')) file.remove('my_object.rds')
}

View File

@@ -4,8 +4,10 @@
\name{agaricus.test}
\alias{agaricus.test}
\title{Test part from Mushroom Data Set}
\format{A list containing a label vector, and a dgCMatrix object with 1611
rows and 126 variables}
\format{
A list containing a label vector, and a dgCMatrix object with 1611
rows and 126 variables
}
\usage{
data(agaricus.test)
}

View File

@@ -4,8 +4,10 @@
\name{agaricus.train}
\alias{agaricus.train}
\title{Training part from Mushroom Data Set}
\format{A list containing a label vector, and a dgCMatrix object with 6513
rows and 127 variables}
\format{
A list containing a label vector, and a dgCMatrix object with 6513
rows and 127 variables
}
\usage{
data(agaricus.train)
}

View File

@@ -0,0 +1,18 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.ggplot.R
\name{normalize}
\alias{normalize}
\title{Scale feature value to have mean 0, standard deviation 1}
\usage{
normalize(x)
}
\arguments{
\item{x}{Numeric vector}
}
\value{
Numeric vector with mean 0 and sd 1.
}
\description{
This is used to compare multiple features on the same plot.
Internal utility function
}
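The documented transform corresponds to ordinary standardization; a minimal sketch of the behavior (the package-internal implementation may differ in details such as NA handling):

normalize_sketch <- function(x) (x - mean(x)) / sd(x)
normalize_sketch(c(1, 2, 3))  # -1 0 1: mean 0, sd 1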

View File

@@ -49,6 +49,9 @@ It will use all the trees by default (\code{NULL} value).}
prediction outputs per case. This option has no effect when either of predleaf, predcontrib,
or predinteraction flags is TRUE.}
\item{training}{whether the prediction result is used for training. For the dart booster,
predicting during training will perform dropout.}
\item{...}{Parameters passed to \code{predict.xgb.Booster}}
}
\value{

View File

@@ -0,0 +1,27 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.ggplot.R
\name{prepare.ggplot.shap.data}
\alias{prepare.ggplot.shap.data}
\title{Combine and melt feature values and SHAP contributions for sample
observations.}
\usage{
prepare.ggplot.shap.data(data_list, normalize = FALSE)
}
\arguments{
\item{data_list}{List containing 'data' and 'shap_contrib' returned by
\code{xgb.shap.data()}.}
\item{normalize}{Whether to standardize feature values to have mean 0 and
standard deviation 1 (useful for comparing multiple features on the same
plot). Default \code{FALSE}.}
}
\value{
A data.table containing the observation ID, the feature name, the
feature value (normalized if specified), and the SHAP contribution value.
}
\description{
Conforms to data format required for ggplot functions.
}
\details{
Internal utility function.
}

View File

@@ -38,6 +38,8 @@ bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label, max_dep
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
saveRDS(bst, "xgb.model.rds")
# Warning: The resulting RDS file is only compatible with the current XGBoost version.
# Refer to the section titled "a-compatibility-note-for-saveRDS-save".
bst1 <- readRDS("xgb.model.rds")
if (file.exists("xgb.model.rds")) file.remove("xgb.model.rds")
# the handle is invalid:

View File

@@ -55,7 +55,7 @@ than for \code{xgb.Booster}, since only just a handle (pointer) would need to be
That would only matter if attributes need to be set many times.
Note, however, that when feeding a handle of an \code{xgb.Booster} object to the attribute setters,
the raw model cache of an \code{xgb.Booster} object would not be automatically updated,
and it would be user's responsibility to call \code{xgb.save.raw} to update it.
and it would be user's responsibility to call \code{xgb.serialize} to update it.
The \code{xgb.attributes<-} setter either updates the existing or adds one or several attributes,
but it doesn't delete the other existing attributes.

View File

@@ -0,0 +1,28 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.Booster.R
\name{xgb.config}
\alias{xgb.config}
\alias{xgb.config<-}
\title{Accessors for model parameters as JSON string.}
\usage{
xgb.config(object)
xgb.config(object) <- value
}
\arguments{
\item{object}{Object of class \code{xgb.Booster}}
\item{value}{A JSON string.}
}
\description{
Accessors for model parameters as JSON string.
}
\examples{
data(agaricus.train, package='xgboost')
train <- agaricus.train
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
config <- xgb.config(bst)
}
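A hedged sketch of the setter form documented in the usage above, restoring the saved parameters onto a booster:

# round trip: read the JSON configuration, then write it back via the replacement form
xgb.config(bst) <- config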

View File

@@ -28,12 +28,15 @@ xgb.cv(
)
}
\arguments{
\item{params}{the list of parameters. Commonly used ones are:
\item{params}{the list of parameters. The complete list of parameters is
available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below
is a shorter summary:
\itemize{
\item \code{objective} objective function, common ones are
\itemize{
\item \code{reg:squarederror} Regression with squared loss
\item \code{binary:logistic} logistic regression for classification
\item \code{reg:squarederror} Regression with squared loss.
\item \code{binary:logistic} logistic regression for classification.
\item See \code{\link[=xgb.train]{xgb.train}()} for complete list of objectives.
}
\item \code{eta} step size of each boosting step
\item \code{max_depth} maximum depth of the tree
@@ -67,6 +70,8 @@ from each CV model. This parameter engages the \code{\link{cb.cv.predict}} callb
\item \code{error} binary classification error rate
\item \code{rmse} Root mean square error
\item \code{logloss} negative log-likelihood function
\item \code{mae} Mean absolute error
\item \code{mape} Mean absolute percentage error
\item \code{auc} Area under curve
\item \code{aucpr} Area under PR curve
\item \code{merror} Exact matching error, used to evaluate multi-class classification
@@ -135,7 +140,7 @@ An object of class \code{xgb.cv.synchronous} with the following elements:
(only available with early stopping).
\item \code{pred} CV prediction values available when \code{prediction} is set.
It is either vector or matrix (see \code{\link{cb.cv.predict}}).
\item \code{models} a liost of the CV folds' models. It is only available with the explicit
\item \code{models} a list of the CV folds' models. It is only available with the explicit
setting of the \code{cb.cv.predict(save_models = TRUE)} callback.
}
}
@@ -151,7 +156,7 @@ The cross-validation process is then repeated \code{nrounds} times, with each of
All observations are used for both training and validation.
Adapted from \url{http://en.wikipedia.org/wiki/Cross-validation_\%28statistics\%29#k-fold_cross-validation}
Adapted from \url{https://en.wikipedia.org/wiki/Cross-validation_\%28statistics\%29}
}
\examples{
data(agaricus.train, package='xgboost')

View File

@@ -0,0 +1,14 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.load.raw.R
\name{xgb.load.raw}
\alias{xgb.load.raw}
\title{Load serialised xgboost model from R's raw vector}
\usage{
xgb.load.raw(buffer)
}
\arguments{
\item{buffer}{the buffer returned by xgb.save.raw}
}
\description{
Users can generate a raw memory buffer by calling xgb.save.raw
}
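A short sketch pairing this loader with xgb.save.raw (assumes a trained xgb.Booster named bst):

raw_bytes <- xgb.save.raw(bst)
bst2 <- xgb.load.raw(raw_bytes)  # reconstruct the booster from the raw vector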

View File

@@ -131,6 +131,7 @@ bst <- xgboost(agaricus.train$data, agaricus.train$label, nrounds = 50,
xgb.plot.shap(agaricus.test$data, model = bst, features = "odor=none")
contr <- predict(bst, agaricus.test$data, predcontrib = TRUE)
xgb.plot.shap(agaricus.test$data, contr, model = bst, top_n = 12, n_col = 3)
xgb.ggplot.shap.summary(agaricus.test$data, contr, model = bst, top_n = 12) # Summary plot
# multiclass example - plots for each class separately:
nclass <- 3
@@ -149,6 +150,7 @@ xgb.plot.shap(x, model = mbst, trees = trees0 + 1, target_class = 1, top_n = 4,
n_col = 2, col = col, pch = 16, pch_NA = 17)
xgb.plot.shap(x, model = mbst, trees = trees0 + 2, target_class = 2, top_n = 4,
n_col = 2, col = col, pch = 16, pch_NA = 17)
xgb.ggplot.shap.summary(x, model = mbst, target_class = 0, top_n = 4) # Summary plot
}
\references{

View File

@@ -0,0 +1,78 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.ggplot.R, R/xgb.plot.shap.R
\name{xgb.ggplot.shap.summary}
\alias{xgb.ggplot.shap.summary}
\alias{xgb.plot.shap.summary}
\title{SHAP contribution dependency summary plot}
\usage{
xgb.ggplot.shap.summary(
data,
shap_contrib = NULL,
features = NULL,
top_n = 10,
model = NULL,
trees = NULL,
target_class = NULL,
approxcontrib = FALSE,
subsample = NULL
)
xgb.plot.shap.summary(
data,
shap_contrib = NULL,
features = NULL,
top_n = 10,
model = NULL,
trees = NULL,
target_class = NULL,
approxcontrib = FALSE,
subsample = NULL
)
}
\arguments{
\item{data}{data as a \code{matrix} or \code{dgCMatrix}.}
\item{shap_contrib}{a matrix of SHAP contributions that was computed earlier for the above
\code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.}
\item{features}{a vector of either column indices or of feature names to plot. When it is NULL,
feature importance is calculated, and \code{top_n} high ranked features are taken.}
\item{top_n}{when \code{features} is NULL, top_n [1, 100] most important features in a model are taken.}
\item{model}{an \code{xgb.Booster} model. It has to be provided when either \code{shap_contrib}
or \code{features} is missing.}
\item{trees}{passed to \code{\link{xgb.importance}} when \code{features = NULL}.}
\item{target_class}{is only relevant for multiclass models. When it is set to a 0-based class index,
only SHAP contributions for that specific class are used.
If it is not set, SHAP importances are averaged over all classes.}
\item{approxcontrib}{passed to \code{\link{predict.xgb.Booster}} when \code{shap_contrib = NULL}.}
\item{subsample}{a random fraction of data points to use for plotting. When it is NULL,
it is set so that up to 100K data points are used.}
}
\value{
A \code{ggplot2} object.
}
\description{
Compare SHAP contributions of different features.
}
\details{
A point plot (each point representing one sample from \code{data}) is
produced for each feature, with the points plotted on the SHAP value axis.
Each point (observation) is coloured based on its feature value. The plot
hence allows us to see which features have a negative / positive contribution
on the model prediction, and whether the contribution is different for larger
or smaller values of the feature. We effectively try to replicate the
\code{summary_plot} function from https://github.com/slundberg/shap.
}
\examples{
# See \code{\link{xgb.plot.shap}}.
}
\seealso{
\code{\link{xgb.plot.shap}}, \code{\link{xgb.ggplot.shap.summary}},
\url{https://github.com/slundberg/shap}
}
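A minimal usage sketch, assuming bst and the agaricus test data are set up as in the xgb.plot.shap examples:

contr <- predict(bst, agaricus.test$data, predcontrib = TRUE)
xgb.ggplot.shap.summary(agaricus.test$data, contr, model = bst, top_n = 5)  # returns a ggplot2 object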

View File

@@ -22,7 +22,11 @@ of \code{\link{xgb.train}}.
Note: a model can also be saved as an R-object (e.g., by using \code{\link[base]{saveRDS}}
or \code{\link[base]{save}}). However, it would then only be compatible with R, and
corresponding R-methods would need to be used to load it.
corresponding R-methods would need to be used to load it. Moreover, persisting the model with
\code{\link[base]{saveRDS}} or \code{\link[base]{save}} will cause compatibility problems in
future versions of XGBoost. Consult \code{\link{a-compatibility-note-for-saveRDS-save}} to learn
how to persist models in a future-proof way, i.e. to make the model accessible in future
releases of XGBoost.
}
\examples{
data(agaricus.train, package='xgboost')

View File

@@ -3,7 +3,7 @@
\name{xgb.save.raw}
\alias{xgb.save.raw}
\title{Save xgboost model to R's raw vector,
user can call xgb.load to load the model back from raw vector}
user can call xgb.load.raw to load the model back from raw vector}
\usage{
xgb.save.raw(model)
}
@@ -21,7 +21,7 @@ test <- agaricus.test
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
raw <- xgb.save.raw(bst)
bst <- xgb.load(raw)
bst <- xgb.load.raw(raw)
pred <- predict(bst, test$data)
}

View File

@@ -0,0 +1,29 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.serialize.R
\name{xgb.serialize}
\alias{xgb.serialize}
\title{Serialize the booster instance into R's raw vector. The serialization method differs
from \code{\link{xgb.save.raw}} as the latter one saves only the model but not
parameters. This serialization format is not stable across different xgboost versions.}
\usage{
xgb.serialize(booster)
}
\arguments{
\item{booster}{the booster instance}
}
\description{
Serialize the booster instance into R's raw vector. The serialization method differs
from \code{\link{xgb.save.raw}} as the latter one saves only the model but not
parameters. This serialization format is not stable across different xgboost versions.
}
\examples{
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2,objective = "binary:logistic")
raw <- xgb.serialize(bst)
bst <- xgb.unserialize(raw)
}

View File

@@ -0,0 +1,55 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.plot.shap.R
\name{xgb.shap.data}
\alias{xgb.shap.data}
\title{Prepare data for SHAP plots. To be used in xgb.plot.shap, xgb.plot.shap.summary, etc.
Internal utility function.}
\usage{
xgb.shap.data(
data,
shap_contrib = NULL,
features = NULL,
top_n = 1,
model = NULL,
trees = NULL,
target_class = NULL,
approxcontrib = FALSE,
subsample = NULL,
max_observations = 1e+05
)
}
\arguments{
\item{data}{data as a \code{matrix} or \code{dgCMatrix}.}
\item{shap_contrib}{a matrix of SHAP contributions that was computed earlier for the above
\code{data}. When it is NULL, it is computed internally using \code{model} and \code{data}.}
\item{features}{a vector of either column indices or of feature names to plot. When it is NULL,
feature importance is calculated, and \code{top_n} high ranked features are taken.}
\item{top_n}{when \code{features} is NULL, top_n [1, 100] most important features in a model are taken.}
\item{model}{an \code{xgb.Booster} model. It has to be provided when either \code{shap_contrib}
or \code{features} is missing.}
\item{trees}{passed to \code{\link{xgb.importance}} when \code{features = NULL}.}
\item{target_class}{is only relevant for multiclass models. When it is set to a 0-based class index,
only SHAP contributions for that specific class are used.
If it is not set, SHAP importances are averaged over all classes.}
\item{approxcontrib}{passed to \code{\link{predict.xgb.Booster}} when \code{shap_contrib = NULL}.}
\item{subsample}{a random fraction of data points to use for plotting. When it is NULL,
it is set so that up to 100K data points are used.}
}
\value{
A list containing: 'data', a matrix containing sample observations
and their feature values; 'shap_contrib', a matrix containing the SHAP contribution
values for these observations.
}
\description{
Prepare data for SHAP plots. To be used in xgb.plot.shap, xgb.plot.shap.summary, etc.
Internal utility function.
}
\keyword{internal}

View File

@@ -42,9 +42,9 @@ xgboost(
)
}
\arguments{
\item{params}{the list of parameters.
The complete list of parameters is available at \url{http://xgboost.readthedocs.io/en/latest/parameter.html}.
Below is a shorter summary:
\item{params}{the list of parameters. The complete list of parameters is
available in the \href{http://xgboost.readthedocs.io/en/latest/parameter.html}{online documentation}. Below
is a shorter summary:
1. General Parameters
@@ -82,13 +82,23 @@ xgboost(
\item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
\itemize{
\item \code{reg:squarederror} Regression with squared loss (Default).
\item \code{reg:squaredlogerror}: regression with squared log loss \eqn{1/2 * (log(pred + 1) - log(label + 1))^2}. All inputs are required to be greater than -1. Also, see metric rmsle for possible issue with this objective.
\item \code{reg:logistic} logistic regression.
\item \code{reg:pseudohubererror}: regression with Pseudo Huber loss, a twice differentiable alternative to absolute loss.
\item \code{binary:logistic} logistic regression for binary classification. Output probability.
\item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
\item \code{num_class} set the number of classes. To use only with multiclass objectives.
\item \code{binary:hinge}: hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities.
\item \code{count:poisson}: poisson regression for count data, output mean of poisson distribution. \code{max_delta_step} is set to 0.7 by default in poisson regression (used to safeguard optimization).
\item \code{survival:cox}: Cox regression for right censored survival time data (negative values are considered right censored). Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional hazard function \code{h(t) = h0(t) * HR}).
\item \code{survival:aft}: Accelerated failure time model for censored survival time data. See \href{https://xgboost.readthedocs.io/en/latest/tutorials/aft_survival_analysis.html}{Survival Analysis with Accelerated Failure Time} for details.
\item \code{aft_loss_distribution}: Probability Density Function used by \code{survival:aft} and the \code{aft-nloglik} metric.
\item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}.
\item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
\item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss.
\item \code{rank:ndcg}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Discounted_cumulative_gain}{Normalized Discounted Cumulative Gain (NDCG)} is maximized.
\item \code{rank:map}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision}{Mean Average Precision (MAP)} is maximized.
\item \code{reg:gamma}: gamma regression with log-link. Output is a mean of gamma distribution. It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Gamma_distribution#Applications}{gamma-distributed}.
\item \code{reg:tweedie}: Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Tweedie_distribution#Applications}{Tweedie-distributed}.
}
\item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
\item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: the metric will be assigned according to the objective (rmse for regression, error for classification, and mean average precision for ranking). The list is provided in the details section.
@@ -205,16 +215,18 @@ User may set one or several \code{eval_metric} parameters.
Note that when using a customized metric, only this single metric can be used.
The following is the list of built-in metrics for which Xgboost provides optimized implementation:
\itemize{
\item \code{rmse} root mean square error. \url{http://en.wikipedia.org/wiki/Root_mean_square_error}
\item \code{logloss} negative log-likelihood. \url{http://en.wikipedia.org/wiki/Log-likelihood}
\item \code{mlogloss} multiclass logloss. \url{http://wiki.fast.ai/index.php/Log_Loss}
\item \code{rmse} root mean square error. \url{https://en.wikipedia.org/wiki/Root_mean_square_error}
\item \code{logloss} negative log-likelihood. \url{https://en.wikipedia.org/wiki/Log-likelihood}
\item \code{mlogloss} multiclass logloss. \url{https://scikit-learn.org/stable/modules/generated/sklearn.metrics.log_loss.html}
\item \code{error} Binary classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
By default, it uses the 0.5 threshold for predicted values to define negative and positive instances.
Different threshold (e.g., 0.) could be specified as "error@0."
\item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
\item \code{auc} Area under the curve. \url{http://en.wikipedia.org/wiki/Receiver_operating_characteristic#'Area_under_curve} for ranking evaluation.
\item \code{mae} Mean absolute error
\item \code{mape} Mean absolute percentage error
\item \code{auc} Area under the curve. \url{https://en.wikipedia.org/wiki/Receiver_operating_characteristic#'Area_under_curve} for ranking evaluation.
\item \code{aucpr} Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation.
\item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{http://en.wikipedia.org/wiki/NDCG}
\item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{https://en.wikipedia.org/wiki/NDCG}
}
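A short sketch tying the objective and metric lists above together; eval_metric may be repeated to track several metrics (data objects are the package's agaricus examples):

bst <- xgboost(data = agaricus.train$data, label = agaricus.train$label,
               max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
               objective = "binary:logistic",
               eval_metric = "auc", eval_metric = "logloss")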
The following callbacks are automatically created when certain parameters are set:

View File

@@ -0,0 +1,14 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.unserialize.R
\name{xgb.unserialize}
\alias{xgb.unserialize}
\title{Load the instance back from \code{\link{xgb.serialize}}}
\usage{
xgb.unserialize(buffer)
}
\arguments{
\item{buffer}{the buffer containing booster instance saved by \code{\link{xgb.serialize}}}
}
\description{
Load the instance back from \code{\link{xgb.serialize}}
}

View File

@@ -3,12 +3,12 @@ PKGROOT=../../
ENABLE_STD_THREAD=1
# _*_ mode: Makefile; _*_
CXX_STD = CXX11
CXX_STD = CXX14
XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\
-DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) -DDMLC_DISABLE_STDIN=1\
-DDMLC_LOG_CUSTOMIZE=1 -DXGBOOST_CUSTOMIZE_LOGGER=1\
-DRABIT_CUSTOMIZE_MSG_ -DRABIT_STRICT_CXX98_
-DRABIT_CUSTOMIZE_MSG_
# disable the use of thread_local for 32 bit windows:
ifeq ($(R_OSTYPE)$(WIN),windows)
@@ -19,6 +19,7 @@ $(foreach v, $(XGB_RFLAGS), $(warning $(v)))
PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
PKG_CXXFLAGS= @OPENMP_CXXFLAGS@ @ENDIAN_FLAG@ -pthread
PKG_LIBS = @OPENMP_CXXFLAGS@ @OPENMP_LIB@ @ENDIAN_FLAG@ @BACKTRACE_LIB@ -pthread
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o\
$(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\
$(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o \
$(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o \
$(PKGROOT)/rabit/src/engine.o $(PKGROOT)/rabit/src/c_api.o \
$(PKGROOT)/rabit/src/allreduce_base.o

View File

@@ -15,12 +15,12 @@ xgblib:
cp -r ../../include .
cp -r ../../amalgamation .
CXX_STD = CXX11
CXX_STD = CXX14
XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\
-DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) -DDMLC_DISABLE_STDIN=1\
-DDMLC_LOG_CUSTOMIZE=1 -DXGBOOST_CUSTOMIZE_LOGGER=1\
-DRABIT_CUSTOMIZE_MSG_ -DRABIT_STRICT_CXX98_
-DRABIT_CUSTOMIZE_MSG_
# disable the use of thread_local for 32 bit windows:
ifeq ($(R_OSTYPE)$(WIN),windows)
@@ -31,8 +31,9 @@ $(foreach v, $(XGB_RFLAGS), $(warning $(v)))
PKG_CPPFLAGS= -I$(PKGROOT)/include -I$(PKGROOT)/dmlc-core/include -I$(PKGROOT)/rabit/include -I$(PKGROOT) $(XGB_RFLAGS)
PKG_CXXFLAGS= $(SHLIB_OPENMP_CXXFLAGS) $(SHLIB_PTHREAD_FLAGS)
PKG_LIBS = $(SHLIB_OPENMP_CXXFLAGS) $(SHLIB_PTHREAD_FLAGS)
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o\
$(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o\
$(PKGROOT)/rabit/src/engine_empty.o $(PKGROOT)/rabit/src/c_api.o
OBJECTS= ./xgboost_R.o ./xgboost_custom.o ./xgboost_assert.o ./init.o \
$(PKGROOT)/amalgamation/xgboost-all0.o $(PKGROOT)/amalgamation/dmlc-minimum0.o \
$(PKGROOT)/rabit/src/engine.o $(PKGROOT)/rabit/src/c_api.o \
$(PKGROOT)/rabit/src/allreduce_base.o
$(OBJECTS) : xgblib

View File

@@ -23,6 +23,10 @@ extern SEXP XGBoosterGetAttrNames_R(SEXP);
extern SEXP XGBoosterGetAttr_R(SEXP, SEXP);
extern SEXP XGBoosterLoadModelFromRaw_R(SEXP, SEXP);
extern SEXP XGBoosterLoadModel_R(SEXP, SEXP);
extern SEXP XGBoosterSaveJsonConfig_R(SEXP handle);
extern SEXP XGBoosterLoadJsonConfig_R(SEXP handle, SEXP value);
extern SEXP XGBoosterSerializeToBuffer_R(SEXP handle);
extern SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw);
extern SEXP XGBoosterModelToRaw_R(SEXP);
extern SEXP XGBoosterPredict_R(SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterSaveModel_R(SEXP, SEXP);
@@ -49,6 +53,10 @@ static const R_CallMethodDef CallEntries[] = {
{"XGBoosterGetAttr_R", (DL_FUNC) &XGBoosterGetAttr_R, 2},
{"XGBoosterLoadModelFromRaw_R", (DL_FUNC) &XGBoosterLoadModelFromRaw_R, 2},
{"XGBoosterLoadModel_R", (DL_FUNC) &XGBoosterLoadModel_R, 2},
{"XGBoosterSaveJsonConfig_R", (DL_FUNC) &XGBoosterSaveJsonConfig_R, 1},
{"XGBoosterLoadJsonConfig_R", (DL_FUNC) &XGBoosterLoadJsonConfig_R, 2},
{"XGBoosterSerializeToBuffer_R", (DL_FUNC) &XGBoosterSerializeToBuffer_R, 1},
{"XGBoosterUnserializeFromBuffer_R", (DL_FUNC) &XGBoosterUnserializeFromBuffer_R, 2},
{"XGBoosterModelToRaw_R", (DL_FUNC) &XGBoosterModelToRaw_R, 1},
{"XGBoosterPredict_R", (DL_FUNC) &XGBoosterPredict_R, 5},
{"XGBoosterSaveModel_R", (DL_FUNC) &XGBoosterSaveModel_R, 2},

View File

@@ -338,15 +338,6 @@ SEXP XGBoosterSaveModel_R(SEXP handle, SEXP fname) {
return R_NilValue;
}
SEXP XGBoosterLoadModelFromRaw_R(SEXP handle, SEXP raw) {
R_API_BEGIN();
CHECK_CALL(XGBoosterLoadModelFromBuffer(R_ExternalPtrAddr(handle),
RAW(raw),
length(raw)));
R_API_END();
return R_NilValue;
}
SEXP XGBoosterModelToRaw_R(SEXP handle) {
SEXP ret;
R_API_BEGIN();
@@ -362,6 +353,57 @@ SEXP XGBoosterModelToRaw_R(SEXP handle) {
return ret;
}
SEXP XGBoosterLoadModelFromRaw_R(SEXP handle, SEXP raw) {
R_API_BEGIN();
CHECK_CALL(XGBoosterLoadModelFromBuffer(R_ExternalPtrAddr(handle),
RAW(raw),
length(raw)));
R_API_END();
return R_NilValue;
}
SEXP XGBoosterSaveJsonConfig_R(SEXP handle) {
const char* ret;
R_API_BEGIN();
bst_ulong len {0};
CHECK_CALL(XGBoosterSaveJsonConfig(R_ExternalPtrAddr(handle),
&len,
&ret));
R_API_END();
return mkString(ret);
}
SEXP XGBoosterLoadJsonConfig_R(SEXP handle, SEXP value) {
R_API_BEGIN();
CHECK_CALL(XGBoosterLoadJsonConfig(R_ExternalPtrAddr(handle), CHAR(asChar(value))));
R_API_END();
return R_NilValue;
}
SEXP XGBoosterSerializeToBuffer_R(SEXP handle) {
SEXP ret;
R_API_BEGIN();
bst_ulong out_len;
const char *raw;
CHECK_CALL(XGBoosterSerializeToBuffer(R_ExternalPtrAddr(handle), &out_len, &raw));
ret = PROTECT(allocVector(RAWSXP, out_len));
if (out_len != 0) {
memcpy(RAW(ret), raw, out_len);
}
R_API_END();
UNPROTECT(1);
return ret;
}
SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw) {
R_API_BEGIN();
CHECK_CALL(XGBoosterUnserializeFromBuffer(R_ExternalPtrAddr(handle),
RAW(raw),
length(raw)));
R_API_END();
return R_NilValue;
}
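The pair of snapshot functions above backs the R-level xgb.serialize()/xgb.unserialize() API tested later in this changeset. A minimal round-trip sketch, assuming the agaricus data shipped with the package:
library(xgboost)
data(agaricus.train, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
booster <- xgb.train(list(objective = 'binary:logistic'), dtrain, nrounds = 2)
raw <- xgb.serialize(booster)     # full memory snapshot as an R raw vector
loaded <- xgb.unserialize(raw)    # reconstructs a booster from the snapshot
stopifnot(identical(raw, xgb.serialize(loaded)))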
SEXP XGBoosterDumpModel_R(SEXP handle, SEXP fmap, SEXP with_stats, SEXP dump_format) {
SEXP out;
R_API_BEGIN();


@@ -182,6 +182,36 @@ XGB_DLL SEXP XGBoosterLoadModelFromRaw_R(SEXP handle, SEXP raw);
*/
XGB_DLL SEXP XGBoosterModelToRaw_R(SEXP handle);
/*!
* \brief Save internal parameters as a JSON string
* \param handle handle
* \return JSON string
*/
XGB_DLL SEXP XGBoosterSaveJsonConfig_R(SEXP handle);
/*!
* \brief Load the JSON string returned by XGBoosterSaveJsonConfig_R
* \param handle handle
* \param value JSON string
* \return R_NilValue
*/
XGB_DLL SEXP XGBoosterLoadJsonConfig_R(SEXP handle, SEXP value);
/*!
* \brief Memory snapshot based serialization method. Saves all internal states
* into a buffer.
* \param handle handle to booster
* \return raw byte array
*/
XGB_DLL SEXP XGBoosterSerializeToBuffer_R(SEXP handle);
/*!
* \brief Memory snapshot based serialization method. Loads the buffer returned
* from `XGBoosterSerializeToBuffer_R'.
* \param handle handle to booster
* \param raw raw byte array returned by `XGBoosterSerializeToBuffer_R'
* \return R_NilValue
*/
XGB_DLL SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw);
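At the R level, the two JSON-config entry points surface as the xgb.config() accessor pair exercised in the test suite below; a short sketch of the intended round trip (a trained booster bst is assumed):
config <- xgb.config(bst)    # JSON string of the booster's internal parameters
xgb.config(bst) <- config    # load the same parameters back onto the handle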
/*!
* \brief dump model into a string
* \param handle handle


@@ -13,23 +13,6 @@ void CustomLogMessage::Log(const std::string& msg) {
}
} // namespace dmlc
// implements rabit error handling.
extern "C" {
void XGBoostAssert_R(int exp, const char *fmt, ...);
void XGBoostCheck_R(int exp, const char *fmt, ...);
}
namespace rabit {
namespace utils {
extern "C" {
void (*Printf)(const char *fmt, ...) = Rprintf;
void (*Assert)(int exp, const char *fmt, ...) = XGBoostAssert_R;
void (*Check)(int exp, const char *fmt, ...) = XGBoostCheck_R;
void (*Error)(const char *fmt, ...) = error;
}
}
}
namespace xgboost {
ConsoleLogger::~ConsoleLogger() {
if (cur_verbosity_ == LogVerbosity::kIgnore ||


@@ -0,0 +1,105 @@
# Script to generate reference models. The reference models are used to test backward compatibility
# of saved model files from XGBoost version 0.90 and 1.0.x.
library(xgboost)
library(Matrix)
set.seed(0)
metadata <- list(
kRounds = 2,
kRows = 1000,
kCols = 4,
kForests = 2,
kMaxDepth = 2,
kClasses = 3
)
X <- Matrix(data = rnorm(metadata$kRows * metadata$kCols), nrow = metadata$kRows,
ncol = metadata$kCols, sparse = TRUE)
w <- runif(metadata$kRows)
version <- packageVersion('xgboost')
target_dir <- 'models'
save_booster <- function (booster, model_name) {
booster_bin <- function (model_name) {
return (file.path(target_dir, paste('xgboost-', version, '.', model_name, '.bin', sep = '')))
}
booster_json <- function (model_name) {
return (file.path(target_dir, paste('xgboost-', version, '.', model_name, '.json', sep = '')))
}
booster_rds <- function (model_name) {
return (file.path(target_dir, paste('xgboost-', version, '.', model_name, '.rds', sep = '')))
}
xgb.save(booster, booster_bin(model_name))
saveRDS(booster, booster_rds(model_name))
if (version >= '1.0.0') {
xgb.save(booster, booster_json(model_name))
}
}
generate_regression_model <- function () {
print('Regression')
y <- rnorm(metadata$kRows)
data <- xgb.DMatrix(X, label = y)
params <- list(tree_method = 'hist', num_parallel_tree = metadata$kForests,
max_depth = metadata$kMaxDepth)
booster <- xgb.train(params, data, nrounds = metadata$kRounds)
save_booster(booster, 'reg')
}
generate_logistic_model <- function () {
print('Binary classification with logistic loss')
y <- sample(0:1, size = metadata$kRows, replace = TRUE)
stopifnot(max(y) == 1, min(y) == 0)
objective <- c('binary:logistic', 'binary:logitraw')
name <- c('logit', 'logitraw')
for (i in seq_len(length(objective))) {
data <- xgb.DMatrix(X, label = y, weight = w)
params <- list(tree_method = 'hist', num_parallel_tree = metadata$kForests,
max_depth = metadata$kMaxDepth, objective = objective[i])
booster <- xgb.train(params, data, nrounds = metadata$kRounds)
save_booster(booster, name[i])
}
}
generate_classification_model <- function () {
print('Multi-class classification')
y <- sample(0:(metadata$kClasses - 1), size = metadata$kRows, replace = TRUE)
stopifnot(max(y) == metadata$kClasses - 1, min(y) == 0)
data <- xgb.DMatrix(X, label = y, weight = w)
params <- list(num_class = metadata$kClasses, tree_method = 'hist',
num_parallel_tree = metadata$kForests, max_depth = metadata$kMaxDepth,
objective = 'multi:softmax')
booster <- xgb.train(params, data, nrounds = metadata$kRounds)
save_booster(booster, 'cls')
}
generate_ranking_model <- function () {
print('Learning to rank')
y <- sample(0:4, size = metadata$kRows, replace = TRUE)
stopifnot(max(y) == 4, min(y) == 0)
kGroups <- 20
w <- runif(kGroups)
g <- rep(50, times = kGroups)
data <- xgb.DMatrix(X, label = y, group = g)
# setinfo(data, 'weight', w)
# ^^^ does not work in version <= 1.1.0; see https://github.com/dmlc/xgboost/issues/5942
# So call low-level function XGDMatrixSetInfo_R directly. Since this function is not an exported
# symbol, use the triple-colon operator.
.Call(xgboost:::XGDMatrixSetInfo_R, data, 'weight', as.numeric(w))
params <- list(objective = 'rank:ndcg', num_parallel_tree = metadata$kForests,
tree_method = 'hist', max_depth = metadata$kMaxDepth)
booster <- xgb.train(params, data, nrounds = metadata$kRounds)
save_booster(booster, 'ltr')
}
dir.create(target_dir)
invisible(generate_regression_model())
invisible(generate_logistic_model())
invisible(generate_classification_model())
invisible(generate_ranking_model())
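For context, the consumer side of these artifacts lives in the compatibility test suite; a hedged sketch of loading one generated file back and scoring it (file name follows the scheme in save_booster above):
bst <- xgb.load(file.path(target_dir, paste0('xgboost-', version, '.reg.bin')))
pred <- predict(bst, X)    # X is the sparse feature matrix defined above
stopifnot(length(pred) == metadata$kRows)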


@@ -0,0 +1,71 @@
library(lintr)
library(crayon)
my_linters <- list(
absolute_path_linter = lintr::absolute_path_linter,
assignment_linter = lintr::assignment_linter,
closed_curly_linter = lintr::closed_curly_linter,
commas_linter = lintr::commas_linter,
equals_na = lintr::equals_na_linter,
infix_spaces_linter = lintr::infix_spaces_linter,
line_length_linter = lintr::line_length_linter,
no_tab_linter = lintr::no_tab_linter,
object_usage_linter = lintr::object_usage_linter,
object_length_linter = lintr::object_length_linter,
open_curly_linter = lintr::open_curly_linter,
semicolon = lintr::semicolon_terminator_linter,
seq = lintr::seq_linter,
spaces_inside_linter = lintr::spaces_inside_linter,
spaces_left_parentheses_linter = lintr::spaces_left_parentheses_linter,
trailing_blank_lines_linter = lintr::trailing_blank_lines_linter,
trailing_whitespace_linter = lintr::trailing_whitespace_linter,
true_false = lintr::T_and_F_symbol_linter,
unneeded_concatenation = lintr::unneeded_concatenation_linter
)
results <- lapply(
list.files(path = '.', pattern = '\\.[Rr]$', recursive = TRUE),
function (r_file) {
cat(sprintf("Processing %s ...\n", r_file))
list(r_file = r_file,
output = lintr::lint(filename = r_file, linters = my_linters))
})
num_issue <- Reduce(sum, lapply(results, function (e) length(e$output)))
lint2str <- function(lint_entry) {
color <- function(type) {
switch(type,
"warning" = crayon::magenta,
"error" = crayon::red,
"style" = crayon::blue,
crayon::bold
)
}
paste0(
lapply(lint_entry$output,
function (lint_line) {
paste0(
crayon::bold(lint_entry$r_file, ":",
as.character(lint_line$line_number), ":",
as.character(lint_line$column_number), ": ", sep = ""),
color(lint_line$type)(lint_line$type, ": ", sep = ""),
crayon::bold(lint_line$message), "\n",
lint_line$line, "\n",
lintr:::highlight_string(lint_line$message, lint_line$column_number, lint_line$ranges),
"\n",
collapse = "")
}),
collapse = "")
}
if (num_issue > 0) {
cat(sprintf('R linters found %d issues:\n', num_issue))
for (entry in results) {
if (length(entry$output)) {
cat(paste0('**** ', crayon::bold(entry$r_file), '\n'))
cat(paste0(lint2str(entry), collapse = ''))
}
}
quit(save = 'no', status = 1) # Signal error to parent shell
}
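To see what a single linter from the list reports, here is an illustrative snippet (not part of the script) that feeds one offending line through the same machinery:
tmp <- tempfile(fileext = '.R')
writeLines('x = 1', tmp)    # `=` used for assignment, which assignment_linter flags
print(lintr::lint(filename = tmp, linters = list(assignment = lintr::assignment_linter)))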


@@ -1,4 +1,4 @@
library(testthat)
library(xgboost)
test_check("xgboost")
test_check("xgboost", reporter = ProgressReporter)


@@ -2,22 +2,23 @@ require(xgboost)
context("basic functions")
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
train <- agaricus.train
test <- agaricus.test
set.seed(1994)
# disable some tests for Win32
windows_flag = .Platform$OS.type == "windows" &&
windows_flag <- .Platform$OS.type == "windows" &&
.Machine$sizeof.pointer != 8
solaris_flag = (Sys.info()['sysname'] == "SunOS")
solaris_flag <- (Sys.info()['sysname'] == "SunOS")
test_that("train and predict binary classification", {
nrounds = 2
nrounds <- 2
expect_output(
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = nrounds, objective = "binary:logistic")
eta = 1, nthread = 2, nrounds = nrounds, objective = "binary:logistic",
eval_metric = "error")
, "train-error")
expect_equal(class(bst), "xgb.Booster")
expect_equal(bst$niter, nrounds)
@@ -30,21 +31,55 @@ test_that("train and predict binary classification", {
pred1 <- predict(bst, train$data, ntreelimit = 1)
expect_length(pred1, 6513)
err_pred1 <- sum((pred1 > 0.5) != train$label)/length(train$label)
err_pred1 <- sum((pred1 > 0.5) != train$label) / length(train$label)
err_log <- bst$evaluation_log[1, train_error]
expect_lt(abs(err_pred1 - err_log), 10e-6)
})
test_that("parameter validation works", {
p <- list(foo = "bar")
nrounds <- 1
set.seed(1994)
d <- cbind(
x1 = rnorm(10),
x2 = rnorm(10),
x3 = rnorm(10))
y <- d[, "x1"] + d[, "x2"]^2 +
ifelse(d[, "x3"] > .5, d[, "x3"]^2, 2^d[, "x3"]) +
rnorm(10)
dtrain <- xgb.DMatrix(data = d, info = list(label = y))
correct <- function() {
params <- list(max_depth = 2, booster = "dart",
rate_drop = 0.5, one_drop = TRUE,
objective = "reg:squarederror")
xgb.train(params = params, data = dtrain, nrounds = nrounds)
}
expect_silent(correct())
incorrect <- function() {
params <- list(max_depth = 2, booster = "dart",
rate_drop = 0.5, one_drop = TRUE,
objective = "reg:squarederror",
foo = "bar", bar = "foo")
output <- capture.output(
xgb.train(params = params, data = dtrain, nrounds = nrounds))
print(output)
}
expect_output(incorrect(), "bar, foo")
})
test_that("dart prediction works", {
nrounds = 32
nrounds <- 32
set.seed(1994)
d <- cbind(
x1 = rnorm(100),
x2 = rnorm(100),
x3 = rnorm(100))
y <- d[,"x1"] + d[,"x2"]^2 +
ifelse(d[,"x3"] > .5, d[,"x3"]^2, 2^d[,"x3"]) +
y <- d[, "x1"] + d[, "x2"]^2 +
ifelse(d[, "x3"] > .5, d[, "x3"]^2, 2^d[, "x3"]) +
rnorm(100)
set.seed(1994)
@@ -53,22 +88,21 @@ test_that("dart prediction works", {
eta = 1, nthread = 2, nrounds = nrounds, objective = "reg:squarederror")
pred_by_xgboost_0 <- predict(booster_by_xgboost, newdata = d, ntreelimit = 0)
pred_by_xgboost_1 <- predict(booster_by_xgboost, newdata = d, ntreelimit = nrounds)
expect_true(all(matrix(pred_by_xgboost_0, byrow=TRUE) == matrix(pred_by_xgboost_1, byrow=TRUE)))
expect_true(all(matrix(pred_by_xgboost_0, byrow = TRUE) == matrix(pred_by_xgboost_1, byrow = TRUE)))
pred_by_xgboost_2 <- predict(booster_by_xgboost, newdata = d, training = TRUE)
expect_false(all(matrix(pred_by_xgboost_0, byrow=TRUE) == matrix(pred_by_xgboost_2, byrow=TRUE)))
expect_false(all(matrix(pred_by_xgboost_0, byrow = TRUE) == matrix(pred_by_xgboost_2, byrow = TRUE)))
set.seed(1994)
dtrain <- xgb.DMatrix(data=d, info = list(label=y))
booster_by_train <- xgb.train( params = list(
dtrain <- xgb.DMatrix(data = d, info = list(label = y))
booster_by_train <- xgb.train(params = list(
booster = "dart",
max_depth = 2,
eta = 1,
rate_drop = 0.5,
one_drop = TRUE,
nthread = 1,
tree_method= "exact",
verbosity = 3,
tree_method = "exact",
objective = "reg:squarederror"
),
data = dtrain,
@@ -78,9 +112,9 @@ test_that("dart prediction works", {
pred_by_train_1 <- predict(booster_by_train, newdata = dtrain, ntreelimit = nrounds)
pred_by_train_2 <- predict(booster_by_train, newdata = dtrain, training = TRUE)
expect_true(all(matrix(pred_by_train_0, byrow=TRUE) == matrix(pred_by_xgboost_0, byrow=TRUE)))
expect_true(all(matrix(pred_by_train_1, byrow=TRUE) == matrix(pred_by_xgboost_1, byrow=TRUE)))
expect_true(all(matrix(pred_by_train_2, byrow=TRUE) == matrix(pred_by_xgboost_2, byrow=TRUE)))
expect_true(all(matrix(pred_by_train_0, byrow = TRUE) == matrix(pred_by_xgboost_0, byrow = TRUE)))
expect_true(all(matrix(pred_by_train_1, byrow = TRUE) == matrix(pred_by_xgboost_1, byrow = TRUE)))
expect_true(all(matrix(pred_by_train_2, byrow = TRUE) == matrix(pred_by_xgboost_2, byrow = TRUE)))
})
test_that("train and predict softprob", {
@@ -89,7 +123,7 @@ test_that("train and predict softprob", {
expect_output(
bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
max_depth = 3, eta = 0.5, nthread = 2, nrounds = 5,
objective = "multi:softprob", num_class=3)
objective = "multi:softprob", num_class = 3, eval_metric = "merror")
, "train-merror")
expect_false(is.null(bst$evaluation_log))
expect_lt(bst$evaluation_log[, min(train_merror)], 0.025)
@@ -97,17 +131,17 @@ test_that("train and predict softprob", {
pred <- predict(bst, as.matrix(iris[, -5]))
expect_length(pred, nrow(iris) * 3)
# row sums add up to total probability of 1:
expect_equal(rowSums(matrix(pred, ncol=3, byrow=TRUE)), rep(1, nrow(iris)), tolerance = 1e-7)
expect_equal(rowSums(matrix(pred, ncol = 3, byrow = TRUE)), rep(1, nrow(iris)), tolerance = 1e-7)
# manually calculate error at the last iteration:
mpred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE)
expect_equal(as.numeric(t(mpred)), pred)
pred_labels <- max.col(mpred) - 1
err <- sum(pred_labels != lb)/length(lb)
err <- sum(pred_labels != lb) / length(lb)
expect_equal(bst$evaluation_log[5, train_merror], err, tolerance = 5e-6)
# manually calculate error at the 1st iteration:
mpred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE, ntreelimit = 1)
pred_labels <- max.col(mpred) - 1
err <- sum(pred_labels != lb)/length(lb)
err <- sum(pred_labels != lb) / length(lb)
expect_equal(bst$evaluation_log[1, train_merror], err, tolerance = 5e-6)
})
@@ -117,7 +151,7 @@ test_that("train and predict softmax", {
expect_output(
bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
max_depth = 3, eta = 0.5, nthread = 2, nrounds = 5,
objective = "multi:softmax", num_class=3)
objective = "multi:softmax", num_class = 3, eval_metric = "merror")
, "train-merror")
expect_false(is.null(bst$evaluation_log))
expect_lt(bst$evaluation_log[, min(train_merror)], 0.025)
@@ -125,7 +159,7 @@ test_that("train and predict softmax", {
pred <- predict(bst, as.matrix(iris[, -5]))
expect_length(pred, nrow(iris))
err <- sum(pred != lb)/length(lb)
err <- sum(pred != lb) / length(lb)
expect_equal(bst$evaluation_log[5, train_merror], err, tolerance = 5e-6)
})
@@ -134,18 +168,18 @@ test_that("train and predict RF", {
lb <- train$label
# single iteration
bst <- xgboost(data = train$data, label = lb, max_depth = 5,
nthread = 2, nrounds = 1, objective = "binary:logistic",
nthread = 2, nrounds = 1, objective = "binary:logistic", eval_metric = "error",
num_parallel_tree = 20, subsample = 0.6, colsample_bytree = 0.1)
expect_equal(bst$niter, 1)
expect_equal(xgb.ntree(bst), 20)
pred <- predict(bst, train$data)
pred_err <- sum((pred > 0.5) != lb)/length(lb)
pred_err <- sum((pred > 0.5) != lb) / length(lb)
expect_lt(abs(bst$evaluation_log[1, train_error] - pred_err), 10e-6)
#expect_lt(pred_err, 0.03)
pred <- predict(bst, train$data, ntreelimit = 20)
pred_err_20 <- sum((pred > 0.5) != lb)/length(lb)
pred_err_20 <- sum((pred > 0.5) != lb) / length(lb)
expect_equal(pred_err_20, pred_err)
#pred <- predict(bst, train$data, ntreelimit = 1)
@@ -160,19 +194,20 @@ test_that("train and predict RF with softprob", {
set.seed(11)
bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
max_depth = 3, eta = 0.9, nthread = 2, nrounds = nrounds,
objective = "multi:softprob", num_class=3, verbose = 0,
objective = "multi:softprob", eval_metric = "merror",
num_class = 3, verbose = 0,
num_parallel_tree = 4, subsample = 0.5, colsample_bytree = 0.5)
expect_equal(bst$niter, 15)
expect_equal(xgb.ntree(bst), 15*3*4)
expect_equal(xgb.ntree(bst), 15 * 3 * 4)
# predict for all iterations:
pred <- predict(bst, as.matrix(iris[, -5]), reshape=TRUE)
pred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE)
expect_equal(dim(pred), c(nrow(iris), 3))
pred_labels <- max.col(pred) - 1
err <- sum(pred_labels != lb)/length(lb)
err <- sum(pred_labels != lb) / length(lb)
expect_equal(bst$evaluation_log[nrounds, train_merror], err, tolerance = 5e-6)
# predict for 7 iterations and adjust for 4 parallel trees per iteration
pred <- predict(bst, as.matrix(iris[, -5]), reshape=TRUE, ntreelimit = 7 * 4)
err <- sum((max.col(pred) - 1) != lb)/length(lb)
pred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE, ntreelimit = 7 * 4)
err <- sum((max.col(pred) - 1) != lb) / length(lb)
expect_equal(bst$evaluation_log[7, train_merror], err, tolerance = 5e-6)
})
@@ -190,7 +225,7 @@ test_that("use of multiple eval metrics works", {
test_that("training continuation works", {
dtrain <- xgb.DMatrix(train$data, label = train$label)
watchlist = list(train=dtrain)
watchlist <- list(train = dtrain)
param <- list(objective = "binary:logistic", max_depth = 2, eta = 1, nthread = 2)
# for the reference, use 4 iterations at once:
@@ -212,20 +247,36 @@ test_that("training continuation works", {
expect_equal(bst$raw, bst2$raw)
expect_equal(dim(bst2$evaluation_log), c(2, 2))
# test continuing from a model in file
xgb.save(bst1, "xgboost.model")
bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, xgb_model = "xgboost.model")
xgb.save(bst1, "xgboost.json")
bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, verbose = 0, xgb_model = "xgboost.json")
if (!windows_flag && !solaris_flag)
expect_equal(bst$raw, bst2$raw)
expect_equal(dim(bst2$evaluation_log), c(2, 2))
file.remove("xgboost.json")
})
test_that("model serialization works", {
out_path <- "model_serialization"
dtrain <- xgb.DMatrix(train$data, label = train$label)
watchlist <- list(train = dtrain)
param <- list(objective = "binary:logistic")
booster <- xgb.train(param, dtrain, nrounds = 4, watchlist)
raw <- xgb.serialize(booster)
saveRDS(raw, out_path)
raw <- readRDS(out_path)
loaded <- xgb.unserialize(raw)
raw_from_loaded <- xgb.serialize(loaded)
expect_equal(raw, raw_from_loaded)
file.remove(out_path)
})
test_that("xgb.cv works", {
set.seed(11)
expect_output(
cv <- xgb.cv(data = train$data, label = train$label, max_depth = 2, nfold = 5,
eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic",
verbose=TRUE)
eval_metric = "error", verbose = TRUE)
, "train-error:")
expect_is(cv, 'xgb.cv.synchronous')
expect_false(is.null(cv$evaluation_log))
@@ -244,13 +295,13 @@ test_that("xgb.cv works with stratified folds", {
set.seed(314159)
cv <- xgb.cv(data = dtrain, max_depth = 2, nfold = 5,
eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic",
verbose=TRUE, stratified = FALSE)
verbose = TRUE, stratified = FALSE)
set.seed(314159)
cv2 <- xgb.cv(data = dtrain, max_depth = 2, nfold = 5,
eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic",
verbose=TRUE, stratified = TRUE)
verbose = TRUE, stratified = TRUE)
# Stratified folds should result in different evaluation logs
expect_true(all(cv$evaluation_log[, test_error_mean] != cv2$evaluation_log[, test_error_mean]))
expect_true(all(cv$evaluation_log[, test_logloss_mean] != cv2$evaluation_log[, test_logloss_mean]))
})
test_that("train and predict with non-strict classes", {
@@ -271,7 +322,7 @@ test_that("train and predict with non-strict classes", {
expect_equal(pr0, pr)
# dense matrix-like input of non-matrix class with some inheritance
class(train_dense) <- c('pphmatrix','shmatrix')
class(train_dense) <- c('pphmatrix', 'shmatrix')
expect_true(is.matrix(train_dense))
expect_error(
bst <- xgboost(data = train_dense, label = train$label, max_depth = 2,
@@ -289,15 +340,15 @@ test_that("train and predict with non-strict classes", {
test_that("max_delta_step works", {
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
watchlist <- list(train = dtrain)
param <- list(objective = "binary:logistic", eval_metric="logloss", max_depth = 2, nthread = 2, eta = 0.5)
nrounds = 5
param <- list(objective = "binary:logistic", eval_metric = "logloss", max_depth = 2, nthread = 2, eta = 0.5)
nrounds <- 5
# model with no restriction on max_delta_step
bst1 <- xgb.train(param, dtrain, nrounds, watchlist, verbose = 1)
# model with restricted max_delta_step
bst2 <- xgb.train(param, dtrain, nrounds, watchlist, verbose = 1, max_delta_step = 1)
# the no-restriction model is expected to have consistently lower loss during the initial iterations
expect_true(all(bst1$evaluation_log$train_logloss < bst2$evaluation_log$train_logloss))
expect_lt(mean(bst1$evaluation_log$train_logloss)/mean(bst2$evaluation_log$train_logloss), 0.8)
expect_lt(mean(bst1$evaluation_log$train_logloss) / mean(bst2$evaluation_log$train_logloss), 0.8)
})
test_that("colsample_bytree works", {
@@ -312,9 +363,9 @@ test_that("colsample_bytree works", {
dtrain <- xgb.DMatrix(train_x, label = train_y)
dtest <- xgb.DMatrix(test_x, label = test_y)
watchlist <- list(train = dtrain, eval = dtest)
# Use colsample_bytree = 0.01, so that roughly one out of 100 features is
# chosen for each tree
param <- list(max_depth = 2, eta = 0, silent = 1, nthread = 2,
## Use colsample_bytree = 0.01, so that roughly one out of 100 features is chosen for
## each tree
param <- list(max_depth = 2, eta = 0, nthread = 2,
colsample_bytree = 0.01, objective = "binary:logistic",
eval_metric = "auc")
set.seed(2)
@@ -324,3 +375,13 @@ test_that("colsample_bytree works", {
# in the 100 trees
expect_gte(nrow(xgb.importance(model = bst)), 30)
})
test_that("Configuration works", {
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic",
eval_metric = 'error', eval_metric = 'auc', eval_metric = "logloss")
config <- xgb.config(bst)
xgb.config(bst) <- config
reloaded_config <- xgb.config(bst)
expect_equal(config, reloaded_config)
})


@@ -2,11 +2,12 @@
require(xgboost)
require(data.table)
require(titanic)
context("callbacks")
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
train <- agaricus.train
test <- agaricus.test
@@ -21,24 +22,25 @@ ltrain <- add.noise(train$label, 0.2)
ltest <- add.noise(test$label, 0.2)
dtrain <- xgb.DMatrix(train$data, label = ltrain)
dtest <- xgb.DMatrix(test$data, label = ltest)
watchlist = list(train=dtrain, test=dtest)
watchlist <- list(train = dtrain, test = dtest)
err <- function(label, pr) sum((pr > 0.5) != label)/length(label)
err <- function(label, pr) sum((pr > 0.5) != label) / length(label)
param <- list(objective = "binary:logistic", max_depth = 2, nthread = 2)
param <- list(objective = "binary:logistic", eval_metric = "error",
max_depth = 2, nthread = 2)
test_that("cb.print.evaluation works as expected", {
bst_evaluation <- c('train-auc'=0.9, 'test-auc'=0.8)
bst_evaluation <- c('train-auc' = 0.9, 'test-auc' = 0.8)
bst_evaluation_err <- NULL
begin_iteration <- 1
end_iteration <- 7
f0 <- cb.print.evaluation(period=0)
f1 <- cb.print.evaluation(period=1)
f5 <- cb.print.evaluation(period=5)
f0 <- cb.print.evaluation(period = 0)
f1 <- cb.print.evaluation(period = 1)
f5 <- cb.print.evaluation(period = 5)
expect_false(is.null(attr(f1, 'call')))
expect_equal(attr(f1, 'name'), 'cb.print.evaluation')
@@ -57,13 +59,13 @@ test_that("cb.print.evaluation works as expected", {
expect_output(f1(), "\\[7\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
expect_output(f5(), "\\[7\\]\ttrain-auc:0.900000\ttest-auc:0.800000")
bst_evaluation_err <- c('train-auc'=0.1, 'test-auc'=0.2)
bst_evaluation_err <- c('train-auc' = 0.1, 'test-auc' = 0.2)
expect_output(f1(), "\\[7\\]\ttrain-auc:0.900000\\+0.100000\ttest-auc:0.800000\\+0.200000")
})
test_that("cb.evaluation.log works as expected", {
bst_evaluation <- c('train-auc'=0.9, 'test-auc'=0.8)
bst_evaluation <- c('train-auc' = 0.9, 'test-auc' = 0.8)
bst_evaluation_err <- NULL
evaluation_log <- list()
@@ -75,37 +77,38 @@ test_that("cb.evaluation.log works as expected", {
iteration <- 1
expect_silent(f())
expect_equal(evaluation_log,
list(c(iter=1, bst_evaluation)))
list(c(iter = 1, bst_evaluation)))
iteration <- 2
expect_silent(f())
expect_equal(evaluation_log,
list(c(iter=1, bst_evaluation), c(iter=2, bst_evaluation)))
list(c(iter = 1, bst_evaluation), c(iter = 2, bst_evaluation)))
expect_silent(f(finalize = TRUE))
expect_equal(evaluation_log,
data.table(iter=1:2, train_auc=c(0.9,0.9), test_auc=c(0.8,0.8)))
data.table(iter = 1:2, train_auc = c(0.9, 0.9), test_auc = c(0.8, 0.8)))
bst_evaluation_err <- c('train-auc'=0.1, 'test-auc'=0.2)
bst_evaluation_err <- c('train-auc' = 0.1, 'test-auc' = 0.2)
evaluation_log <- list()
f <- cb.evaluation.log()
iteration <- 1
expect_silent(f())
expect_equal(evaluation_log,
list(c(iter=1, c(bst_evaluation, bst_evaluation_err))))
list(c(iter = 1, c(bst_evaluation, bst_evaluation_err))))
iteration <- 2
expect_silent(f())
expect_equal(evaluation_log,
list(c(iter=1, c(bst_evaluation, bst_evaluation_err)),
c(iter=2, c(bst_evaluation, bst_evaluation_err))))
list(c(iter = 1, c(bst_evaluation, bst_evaluation_err)),
c(iter = 2, c(bst_evaluation, bst_evaluation_err))))
expect_silent(f(finalize = TRUE))
expect_equal(evaluation_log,
data.table(iter=1:2,
train_auc_mean=c(0.9,0.9), train_auc_std=c(0.1,0.1),
test_auc_mean=c(0.8,0.8), test_auc_std=c(0.2,0.2)))
data.table(iter = 1:2,
train_auc_mean = c(0.9, 0.9), train_auc_std = c(0.1, 0.1),
test_auc_mean = c(0.8, 0.8), test_auc_std = c(0.2, 0.2)))
})
param <- list(objective = "binary:logistic", max_depth = 4, nthread = 2)
param <- list(objective = "binary:logistic", eval_metric = "error",
max_depth = 4, nthread = 2)
test_that("can store evaluation_log without printing", {
expect_silent(
@@ -173,24 +176,28 @@ test_that("cb.reset.parameters works as expected", {
})
test_that("cb.save.model works as expected", {
files <- c('xgboost_01.model', 'xgboost_02.model', 'xgboost.model')
files <- c('xgboost_01.json', 'xgboost_02.json', 'xgboost.json')
for (f in files) if (file.exists(f)) file.remove(f)
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 1, verbose = 0,
save_period = 1, save_name = "xgboost_%02d.model")
expect_true(file.exists('xgboost_01.model'))
expect_true(file.exists('xgboost_02.model'))
b1 <- xgb.load('xgboost_01.model')
save_period = 1, save_name = "xgboost_%02d.json")
expect_true(file.exists('xgboost_01.json'))
expect_true(file.exists('xgboost_02.json'))
b1 <- xgb.load('xgboost_01.json')
expect_equal(xgb.ntree(b1), 1)
b2 <- xgb.load('xgboost_02.model')
b2 <- xgb.load('xgboost_02.json')
expect_equal(xgb.ntree(b2), 2)
xgb.config(b2) <- xgb.config(bst)
expect_equal(xgb.config(bst), xgb.config(b2))
expect_equal(bst$raw, b2$raw)
# save_period = 0 saves the last iteration's model
bst <- xgb.train(param, dtrain, nrounds = 2, watchlist, eta = 1, verbose = 0,
save_period = 0)
expect_true(file.exists('xgboost.model'))
b2 <- xgb.load('xgboost.model')
save_period = 0, save_name = 'xgboost.json')
expect_true(file.exists('xgboost.json'))
b2 <- xgb.load('xgboost.json')
xgb.config(b2) <- xgb.config(bst)
expect_equal(bst$raw, b2$raw)
for (f in files) if (file.exists(f)) file.remove(f)
@@ -218,13 +225,22 @@ test_that("early stopping xgb.train works", {
early_stopping_rounds = 3, maximize = FALSE, verbose = 0)
)
expect_equal(bst$evaluation_log, bst0$evaluation_log)
xgb.save(bst, "model.bin")
loaded <- xgb.load("model.bin")
expect_false(is.null(loaded$best_iteration))
expect_equal(loaded$best_iteration, bst$best_ntreelimit)
expect_equal(loaded$best_ntreelimit, bst$best_ntreelimit)
file.remove("model.bin")
})
test_that("early stopping using a specific metric works", {
set.seed(11)
expect_output(
bst <- xgb.train(param, dtrain, nrounds = 20, watchlist, eta = 0.6,
eval_metric="logloss", eval_metric="auc",
bst <- xgb.train(param[-2], dtrain, nrounds = 20, watchlist, eta = 0.6,
eval_metric = "logloss", eval_metric = "auc",
callbacks = list(cb.early.stop(stopping_rounds = 3, maximize = FALSE,
metric_name = 'test_logloss')))
, "Stopping. Best iteration")
@@ -239,6 +255,26 @@ test_that("early stopping using a specific metric works", {
expect_equal(logloss_log, logloss_pred, tolerance = 1e-5)
})
test_that("early stopping works with titanic", {
# This test was inspired by https://github.com/dmlc/xgboost/issues/5935
# It catches possible issues on noLD R
titanic <- titanic::titanic_train
titanic$Pclass <- as.factor(titanic$Pclass)
dtx <- model.matrix(~ 0 + ., data = titanic[, c("Pclass", "Sex")])
dty <- titanic$Survived
xgboost::xgboost(
data = dtx,
label = dty,
objective = "binary:logistic",
eval_metric = "auc",
nrounds = 100,
early_stopping_rounds = 3
)
expect_true(TRUE) # should not crash
})
test_that("early stopping xgb.cv works", {
set.seed(11)
expect_output(
@@ -254,12 +290,12 @@ test_that("early stopping xgb.cv works", {
test_that("prediction in xgb.cv works", {
set.seed(11)
nrounds = 4
nrounds <- 4
cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.5, nrounds = nrounds, prediction = TRUE, verbose = 0)
expect_false(is.null(cv$evaluation_log))
expect_false(is.null(cv$pred))
expect_length(cv$pred, nrow(train$data))
err_pred <- mean( sapply(cv$folds, function(f) mean(err(ltrain[f], cv$pred[f]))) )
err_pred <- mean(sapply(cv$folds, function(f) mean(err(ltrain[f], cv$pred[f]))))
err_log <- cv$evaluation_log[nrounds, test_error_mean]
expect_equal(err_pred, err_log, tolerance = 1e-6)
@@ -295,7 +331,7 @@ test_that("prediction in early-stopping xgb.cv works", {
expect_false(is.null(cv$pred))
expect_length(cv$pred, nrow(train$data))
err_pred <- mean( sapply(cv$folds, function(f) mean(err(ltrain[f], cv$pred[f]))) )
err_pred <- mean(sapply(cv$folds, function(f) mean(err(ltrain[f], cv$pred[f]))))
err_log <- cv$evaluation_log[cv$best_iteration, test_error_mean]
expect_equal(err_pred, err_log, tolerance = 1e-6)
err_log_last <- cv$evaluation_log[cv$niter, test_error_mean]


@@ -4,8 +4,8 @@ require(xgboost)
set.seed(1994)
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
watchlist <- list(eval = dtest, train = dtrain)
@@ -20,30 +20,36 @@ logregobj <- function(preds, dtrain) {
evalerror <- function(preds, dtrain) {
labels <- getinfo(dtrain, "label")
err <- as.numeric(sum(labels != (preds > 0))) / length(labels)
err <- as.numeric(sum(labels != (preds > 0.5))) / length(labels)
return(list(metric = "error", value = err))
}
param <- list(max_depth=2, eta=1, nthread = 2,
objective=logregobj, eval_metric=evalerror)
param <- list(max_depth = 2, eta = 1, nthread = 2,
objective = logregobj, eval_metric = evalerror)
num_round <- 2
test_that("custom objective works", {
bst <- xgb.train(param, dtrain, num_round, watchlist)
expect_equal(class(bst), "xgb.Booster")
expect_equal(length(bst$raw), 1100)
expect_false(is.null(bst$evaluation_log))
expect_false(is.null(bst$evaluation_log$eval_error))
expect_lt(bst$evaluation_log[num_round, eval_error], 0.03)
})
test_that("custom objective in CV works", {
cv <- xgb.cv(param, dtrain, num_round, nfold=10, verbose=FALSE)
cv <- xgb.cv(param, dtrain, num_round, nfold = 10, verbose = FALSE)
expect_false(is.null(cv$evaluation_log))
expect_equal(dim(cv$evaluation_log), c(2, 5))
expect_lt(cv$evaluation_log[num_round, test_error_mean], 0.03)
})
test_that("custom objective with early stop works", {
bst <- xgb.train(param, dtrain, 10, watchlist)
expect_equal(class(bst), "xgb.Booster")
train_log <- bst$evaluation_log$train_error
expect_true(all(diff(train_log) <= 0))
})
test_that("custom objective using DMatrix attr works", {
attr(dtrain, 'label') <- getinfo(dtrain, 'label')
@@ -55,8 +61,28 @@ test_that("custom objective using DMatrix attr works", {
hess <- preds * (1 - preds)
return(list(grad = grad, hess = hess))
}
param$objective = logregobjattr
param$objective <- logregobjattr
bst <- xgb.train(param, dtrain, num_round, watchlist)
expect_equal(class(bst), "xgb.Booster")
expect_equal(length(bst$raw), 1100)
})
test_that("custom objective with multi-class works", {
data <- as.matrix(iris[, -5])
label <- as.numeric(iris$Species) - 1
dtrain <- xgb.DMatrix(data = data, label = label)
nclasses <- 3
fake_softprob <- function(preds, dtrain) {
expect_true(all(matrix(preds) == 0.5))
grad <- rnorm(dim(as.matrix(preds))[1])
expect_equal(dim(data)[1] * nclasses, dim(as.matrix(preds))[1])
hess <- rnorm(dim(as.matrix(preds))[1])
return (list(grad = grad, hess = hess))
}
fake_merror <- function(preds, dtrain) {
expect_equal(dim(data)[1] * nclasses, dim(as.matrix(preds))[1])
}
param$objective <- fake_softprob
param$eval_metric <- fake_merror
bst <- xgb.train(param, dtrain, 1, num_class = nclasses)
})


@@ -3,29 +3,29 @@ require(Matrix)
context("testing xgb.DMatrix functionality")
data(agaricus.test, package='xgboost')
test_data <- agaricus.test$data[1:100,]
data(agaricus.test, package = 'xgboost')
test_data <- agaricus.test$data[1:100, ]
test_label <- agaricus.test$label[1:100]
test_that("xgb.DMatrix: basic construction", {
# from sparse matrix
dtest1 <- xgb.DMatrix(test_data, label=test_label)
dtest1 <- xgb.DMatrix(test_data, label = test_label)
# from dense matrix
dtest2 <- xgb.DMatrix(as.matrix(test_data), label=test_label)
dtest2 <- xgb.DMatrix(as.matrix(test_data), label = test_label)
expect_equal(getinfo(dtest1, 'label'), getinfo(dtest2, 'label'))
expect_equal(dim(dtest1), dim(dtest2))
#from dense integer matrix
int_data <- as.matrix(test_data)
storage.mode(int_data) <- "integer"
dtest3 <- xgb.DMatrix(int_data, label=test_label)
dtest3 <- xgb.DMatrix(int_data, label = test_label)
expect_equal(dim(dtest1), dim(dtest3))
})
test_that("xgb.DMatrix: saving, loading", {
# save to a local file
dtest1 <- xgb.DMatrix(test_data, label=test_label)
dtest1 <- xgb.DMatrix(test_data, label = test_label)
tmp_file <- tempfile('xgb.DMatrix_')
expect_true(xgb.DMatrix.save(dtest1, tmp_file))
# read from a local file
@@ -35,12 +35,12 @@ test_that("xgb.DMatrix: saving, loading", {
expect_equal(getinfo(dtest1, 'label'), getinfo(dtest3, 'label'))
# from a libsvm text file
tmp <- c("0 1:1 2:1","1 3:1","0 1:1")
tmp <- c("0 1:1 2:1", "1 3:1", "0 1:1")
tmp_file <- 'tmp.libsvm'
writeLines(tmp, tmp_file)
dtest4 <- xgb.DMatrix(tmp_file, silent = TRUE)
expect_equal(dim(dtest4), c(3, 4))
expect_equal(getinfo(dtest4, 'label'), c(0,1,0))
expect_equal(getinfo(dtest4, 'label'), c(0, 1, 0))
unlink(tmp_file)
})
@@ -50,51 +50,57 @@ test_that("xgb.DMatrix: getinfo & setinfo", {
labels <- getinfo(dtest, 'label')
expect_equal(test_label, getinfo(dtest, 'label'))
expect_true(setinfo(dtest, 'label_lower_bound', test_label))
expect_equal(test_label, getinfo(dtest, 'label_lower_bound'))
expect_true(setinfo(dtest, 'label_upper_bound', test_label))
expect_equal(test_label, getinfo(dtest, 'label_upper_bound'))
expect_true(length(getinfo(dtest, 'weight')) == 0)
expect_true(length(getinfo(dtest, 'base_margin')) == 0)
expect_true(setinfo(dtest, 'weight', test_label))
expect_true(setinfo(dtest, 'base_margin', test_label))
expect_true(setinfo(dtest, 'group', c(50,50)))
expect_true(setinfo(dtest, 'group', c(50, 50)))
expect_error(setinfo(dtest, 'group', test_label))
# providing character values will give a warning
expect_warning( setinfo(dtest, 'weight', rep('a', nrow(test_data))) )
# providing character values will give an error
expect_error(setinfo(dtest, 'weight', rep('a', nrow(test_data))))
# any other label should error
expect_error(setinfo(dtest, 'asdf', test_label))
})
test_that("xgb.DMatrix: slice, dim", {
dtest <- xgb.DMatrix(test_data, label=test_label)
dtest <- xgb.DMatrix(test_data, label = test_label)
expect_equal(dim(dtest), dim(test_data))
dsub1 <- slice(dtest, 1:42)
expect_equal(nrow(dsub1), 42)
expect_equal(ncol(dsub1), ncol(test_data))
dsub2 <- dtest[1:42,]
dsub2 <- dtest[1:42, ]
expect_equal(dim(dtest), dim(test_data))
expect_equal(getinfo(dsub1, 'label'), getinfo(dsub2, 'label'))
})
test_that("xgb.DMatrix: slice, trailing empty rows", {
data(agaricus.train, package='xgboost')
data(agaricus.train, package = 'xgboost')
train_data <- agaricus.train$data
train_label <- agaricus.train$label
dtrain <- xgb.DMatrix(data=train_data, label=train_label)
dtrain <- xgb.DMatrix(data = train_data, label = train_label)
slice(dtrain, 6513L)
train_data[6513, ] <- 0
dtrain <- xgb.DMatrix(data=train_data, label=train_label)
dtrain <- xgb.DMatrix(data = train_data, label = train_label)
slice(dtrain, 6513L)
expect_equal(nrow(dtrain), 6513)
})
test_that("xgb.DMatrix: colnames", {
dtest <- xgb.DMatrix(test_data, label=test_label)
dtest <- xgb.DMatrix(test_data, label = test_label)
expect_equal(colnames(dtest), colnames(test_data))
expect_error( colnames(dtest) <- 'asdf')
new_names <- make.names(1:ncol(test_data))
expect_silent( colnames(dtest) <- new_names)
expect_error(colnames(dtest) <- 'asdf')
new_names <- make.names(seq_len(ncol(test_data)))
expect_silent(colnames(dtest) <- new_names)
expect_equal(colnames(dtest), new_names)
expect_silent(colnames(dtest) <- NULL)
expect_null(colnames(dtest))
@@ -103,7 +109,7 @@ test_that("xgb.DMatrix: colnames", {
test_that("xgb.DMatrix: nrow is correct for a very sparse matrix", {
set.seed(123)
nr <- 1000
x <- rsparsematrix(nr, 100, density=0.0005)
x <- rsparsematrix(nr, 100, density = 0.0005)
# we want it very sparse, so that last rows are empty
expect_lt(max(x@i), nr)
dtest <- xgb.DMatrix(x)


@@ -3,8 +3,8 @@ require(xgboost)
context("Garbage Collection Safety Check")
test_that("train and prediction when gctorture is on", {
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
train <- agaricus.train
test <- agaricus.test
gctorture(TRUE)
@@ -12,4 +12,5 @@ test_that("train and prediction when gctorture is on", {
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
pred <- predict(bst, test$data)
gctorture(FALSE)
expect_length(pred, length(test$label))
})


@@ -3,12 +3,12 @@ context('Test generalized linear models')
require(xgboost)
test_that("gblinear works", {
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
param <- list(objective = "binary:logistic", booster = "gblinear",
param <- list(objective = "binary:logistic", eval_metric = "error", booster = "gblinear",
nthread = 2, eta = 0.8, alpha = 0.0001, lambda = 0.0001)
watchlist <- list(eval = dtest, train = dtrain)
@@ -16,7 +16,7 @@ test_that("gblinear works", {
ERR_UL <- 0.005 # upper limit for the test set error
VERB <- 0 # chatterbox switch
param$updater = 'shotgun'
param$updater <- 'shotgun'
bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'shuffle')
ypred <- predict(bst, dtest)
expect_equal(length(getinfo(dtest, 'label')), 1611)
@@ -29,7 +29,7 @@ test_that("gblinear works", {
expect_equal(dim(h), c(n, ncol(dtrain) + 1))
expect_is(h, "matrix")
param$updater = 'coord_descent'
param$updater <- 'coord_descent'
bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'cyclic')
expect_lt(bst$evaluation_log$eval_error[n], ERR_UL)
@@ -40,7 +40,7 @@ test_that("gblinear works", {
expect_lt(bst$evaluation_log$eval_error[2], ERR_UL)
bst <- xgb.train(param, dtrain, n, watchlist, verbose = VERB, feature_selector = 'thrifty',
top_n = 50, callbacks = list(cb.gblinear.history(sparse = TRUE)))
top_k = 50, callbacks = list(cb.gblinear.history(sparse = TRUE)))
expect_lt(bst$evaluation_log$eval_error[n], ERR_UL)
h <- xgb.gblinear.history(bst)
expect_equal(dim(h), c(n, ncol(dtrain) + 1))


@@ -5,18 +5,18 @@ require(data.table)
require(Matrix)
require(vcd, quietly = TRUE)
float_tolerance = 5e-6
float_tolerance <- 5e-6
# disable some tests for Win32
win32_flag = .Platform$OS.type == "windows" && .Machine$sizeof.pointer != 8
# disable some tests for 32-bit environment
flag_32bit <- .Machine$sizeof.pointer != 8
set.seed(1982)
data(Arthritis)
df <- data.table(Arthritis, keep.rownames = F)
df[,AgeDiscret := as.factor(round(Age / 10,0))]
df[,AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
df[,ID := NULL]
sparse_matrix <- sparse.model.matrix(Improved~.-1, data = df)
df <- data.table(Arthritis, keep.rownames = FALSE)
df[, AgeDiscret := as.factor(round(Age / 10, 0))]
df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
df[, ID := NULL]
sparse_matrix <- sparse.model.matrix(Improved~.-1, data = df) # nolint
label <- df[, ifelse(Improved == "Marked", 1, 0)]
# binary
@@ -44,17 +44,17 @@ mbst.GLM <- xgboost(data = as.matrix(iris[, -5]), label = mlabel, verbose = 0,
test_that("xgb.dump works", {
if (!win32_flag)
if (!flag_32bit)
expect_length(xgb.dump(bst.Tree), 200)
dump_file = file.path(tempdir(), 'xgb.model.dump')
expect_true(xgb.dump(bst.Tree, dump_file, with_stats = T))
dump_file <- file.path(tempdir(), 'xgb.model.dump')
expect_true(xgb.dump(bst.Tree, dump_file, with_stats = TRUE))
expect_true(file.exists(dump_file))
expect_gt(file.size(dump_file), 8000)
# JSON format
dmp <- xgb.dump(bst.Tree, dump_format = "json")
expect_length(dmp, 1)
if (!win32_flag)
if (!flag_32bit)
expect_length(grep('nodeid', strsplit(dmp, '\n')[[1]]), 188)
})
@@ -63,7 +63,7 @@ test_that("xgb.dump works for gblinear", {
# also make sure that it works properly for a sparse model where some coefficients
# are 0 from setting large L1 regularization:
bst.GLM.sp <- xgboost(data = sparse_matrix, label = label, eta = 1, nthread = 2, nrounds = 1,
alpha=2, objective = "binary:logistic", booster = "gblinear")
alpha = 2, objective = "binary:logistic", booster = "gblinear")
d.sp <- xgb.dump(bst.GLM.sp)
expect_length(d.sp, 14)
expect_gt(sum(d.sp == "0"), 0)
@@ -110,9 +110,9 @@ test_that("predict feature contributions works", {
pred <- predict(bst.GLM, sparse_matrix, outputmargin = TRUE)
expect_lt(max(abs(rowSums(pred_contr) - pred)), 1e-5)
# manual calculation of linear terms
coefs <- xgb.dump(bst.GLM)[-c(1,2,4)] %>% as.numeric
coefs <- xgb.dump(bst.GLM)[-c(1, 2, 4)] %>% as.numeric
coefs <- c(coefs[-1], coefs[1]) # intercept must be the last
pred_contr_manual <- sweep(cbind(sparse_matrix, 1), 2, coefs, FUN="*")
pred_contr_manual <- sweep(cbind(sparse_matrix, 1), 2, coefs, FUN = "*")
expect_equal(as.numeric(pred_contr), as.numeric(pred_contr_manual),
tolerance = float_tolerance)
@@ -130,13 +130,13 @@ test_that("predict feature contributions works", {
pred <- predict(mbst.GLM, as.matrix(iris[, -5]), outputmargin = TRUE, reshape = TRUE)
pred_contr <- predict(mbst.GLM, as.matrix(iris[, -5]), predcontrib = TRUE)
expect_length(pred_contr, 3)
coefs_all <- xgb.dump(mbst.GLM)[-c(1,2,6)] %>% as.numeric %>% matrix(ncol = 3, byrow = TRUE)
coefs_all <- xgb.dump(mbst.GLM)[-c(1, 2, 6)] %>% as.numeric %>% matrix(ncol = 3, byrow = TRUE)
for (g in seq_along(pred_contr)) {
expect_equal(colnames(pred_contr[[g]]), c(colnames(iris[, -5]), "BIAS"))
expect_lt(max(abs(rowSums(pred_contr[[g]]) - pred[, g])), float_tolerance)
# manual calculation of linear terms
coefs <- c(coefs_all[-1, g], coefs_all[1, g]) # intercept needs to be the last
pred_contr_manual <- sweep(as.matrix(cbind(iris[,-5], 1)), 2, coefs, FUN="*")
pred_contr_manual <- sweep(as.matrix(cbind(iris[, -5], 1)), 2, coefs, FUN = "*")
expect_equal(as.numeric(pred_contr[[g]]), as.numeric(pred_contr_manual),
tolerance = float_tolerance)
}
@@ -147,8 +147,8 @@ test_that("SHAPs sum to predictions, with or without DART", {
x1 = rnorm(100),
x2 = rnorm(100),
x3 = rnorm(100))
y <- d[,"x1"] + d[,"x2"]^2 +
ifelse(d[,"x3"] > .5, d[,"x3"]^2, 2^d[,"x3"]) +
y <- d[, "x1"] + d[, "x2"]^2 +
ifelse(d[, "x3"] > .5, d[, "x3"]^2, 2^d[, "x3"]) +
rnorm(100)
nrounds <- 30
@@ -160,7 +160,7 @@ test_that("SHAPs sum to predictions, with or without DART", {
objective = "reg:squarederror",
eval_metric = "rmse"),
if (booster == "dart")
list(rate_drop = .01, one_drop = T)),
list(rate_drop = .01, one_drop = TRUE)),
data = d,
label = y,
nrounds = nrounds)
@@ -168,21 +168,21 @@ test_that("SHAPs sum to predictions, with or without DART", {
pr <- function(...)
predict(fit, newdata = d, ...)
pred <- pr()
shap <- pr(predcontrib = T)
shapi <- pr(predinteraction = T)
tol = 1e-5
shap <- pr(predcontrib = TRUE)
shapi <- pr(predinteraction = TRUE)
tol <- 1e-5
expect_equal(rowSums(shap), pred, tol = tol)
expect_equal(apply(shapi, 1, sum), pred, tol = tol)
for (i in 1 : nrow(d))
for (i in seq_len(nrow(d)))
for (f in list(rowSums, colSums))
expect_equal(f(shapi[i,,]), shap[i,], tol = tol)
expect_equal(f(shapi[i, , ]), shap[i, ], tol = tol)
}
})
test_that("xgb-attribute functionality", {
val <- "my attribute value"
list.val <- list(my_attr=val, a=123, b='ok')
list.val <- list(my_attr = val, a = 123, b = 'ok')
list.ch <- list.val[order(names(list.val))]
list.ch <- lapply(list.ch, as.character)
# note: iter is 0-index in xgb attributes
@@ -208,9 +208,9 @@ test_that("xgb-attribute functionality", {
xgb.attr(bst, "my_attr") <- NULL
expect_null(xgb.attr(bst, "my_attr"))
expect_equal(xgb.attributes(bst), list.ch[c("a", "b", "niter")])
xgb.attributes(bst) <- list(a=NULL, b=NULL)
xgb.attributes(bst) <- list(a = NULL, b = NULL)
expect_equal(xgb.attributes(bst), list.default)
xgb.attributes(bst) <- list(niter=NULL)
xgb.attributes(bst) <- list(niter = NULL)
expect_null(xgb.attributes(bst))
})
@@ -256,7 +256,7 @@ test_that("xgb.model.dt.tree works with and without feature names", {
names.dt.trees <- c("Tree", "Node", "ID", "Feature", "Split", "Yes", "No", "Missing", "Quality", "Cover")
dt.tree <- xgb.model.dt.tree(feature_names = feature.names, model = bst.Tree)
expect_equal(names.dt.trees, names(dt.tree))
if (!win32_flag)
if (!flag_32bit)
expect_equal(dim(dt.tree), c(188, 10))
expect_output(str(dt.tree), 'Feature.*\\"Age\\"')
@@ -268,7 +268,7 @@ test_that("xgb.model.dt.tree works with and without feature names", {
bst.Tree.x$feature_names <- NULL
dt.tree.x <- xgb.model.dt.tree(model = bst.Tree.x)
expect_output(str(dt.tree.x), 'Feature.*\\"3\\"')
expect_equal(dt.tree[, -4, with=FALSE], dt.tree.x[, -4, with=FALSE])
expect_equal(dt.tree[, -4, with = FALSE], dt.tree.x[, -4, with = FALSE])
# using integer node ID instead of character
dt.tree.int <- xgb.model.dt.tree(model = bst.Tree, use_int_id = TRUE)
@@ -283,7 +283,7 @@ test_that("xgb.model.dt.tree throws error for gblinear", {
test_that("xgb.importance works with and without feature names", {
importance.Tree <- xgb.importance(feature_names = feature.names, model = bst.Tree)
if (!win32_flag)
if (!flag_32bit)
expect_equal(dim(importance.Tree), c(7, 4))
expect_equal(colnames(importance.Tree), c("Feature", "Gain", "Cover", "Frequency"))
expect_output(str(importance.Tree), 'Feature.*\\"Age\\"')
@@ -295,7 +295,7 @@ test_that("xgb.importance works with and without feature names", {
bst.Tree.x <- bst.Tree
bst.Tree.x$feature_names <- NULL
importance.Tree.x <- xgb.importance(model = bst.Tree)
expect_equal(importance.Tree[, -1, with=FALSE], importance.Tree.x[, -1, with=FALSE],
expect_equal(importance.Tree[, -1, with = FALSE], importance.Tree.x[, -1, with = FALSE],
tolerance = float_tolerance)
imp2plot <- xgb.plot.importance(importance_matrix = importance.Tree)
@@ -305,7 +305,7 @@ test_that("xgb.importance works with and without feature names", {
# for multiclass
imp.Tree <- xgb.importance(model = mbst.Tree)
expect_equal(dim(imp.Tree), c(4, 4))
xgb.importance(model = mbst.Tree, trees = seq(from=0, by=nclass, length.out=nrounds))
xgb.importance(model = mbst.Tree, trees = seq(from = 0, by = nclass, length.out = nrounds))
})
test_that("xgb.importance works with GLM model", {
@@ -320,7 +320,7 @@ test_that("xgb.importance works with GLM model", {
# for multiclass
imp.GLM <- xgb.importance(model = mbst.GLM)
expect_equal(dim(imp.GLM), c(12, 3))
expect_equal(imp.GLM$Class, rep(0:2, each=4))
expect_equal(imp.GLM$Class, rep(0:2, each = 4))
})
test_that("xgb.model.dt.tree and xgb.importance work with a single split model", {
@@ -335,8 +335,8 @@ test_that("xgb.model.dt.tree and xgb.importance work with a single split model",
})
test_that("xgb.plot.tree works with and without feature names", {
xgb.plot.tree(feature_names = feature.names, model = bst.Tree)
xgb.plot.tree(model = bst.Tree)
expect_silent(xgb.plot.tree(feature_names = feature.names, model = bst.Tree))
expect_silent(xgb.plot.tree(model = bst.Tree))
})
test_that("xgb.plot.multi.trees works with and without feature names", {
@@ -351,11 +351,47 @@ test_that("xgb.plot.deepness works", {
xgb.ggplot.deepness(model = bst.Tree)
})
test_that("xgb.shap.data works when top_n is provided", {
data_list <- xgb.shap.data(data = sparse_matrix, model = bst.Tree, top_n = 2)
expect_equal(names(data_list), c("data", "shap_contrib"))
expect_equal(NCOL(data_list$data), 2)
expect_equal(NCOL(data_list$shap_contrib), 2)
expect_equal(NROW(data_list$data), NROW(data_list$shap_contrib))
expect_gt(length(colnames(data_list$data)), 0)
expect_gt(length(colnames(data_list$shap_contrib)), 0)
# for multiclass without target class provided
data_list <- xgb.shap.data(data = as.matrix(iris[, -5]), model = mbst.Tree, top_n = 2)
expect_equal(dim(data_list$shap_contrib), c(nrow(iris), 2))
# for multiclass with target class provided
data_list <- xgb.shap.data(data = as.matrix(iris[, -5]), model = mbst.Tree, top_n = 2, target_class = 0)
expect_equal(dim(data_list$shap_contrib), c(nrow(iris), 2))
})
test_that("xgb.shap.data works with subsampling", {
data_list <- xgb.shap.data(data = sparse_matrix, model = bst.Tree, top_n = 2, subsample = 0.8)
expect_equal(NROW(data_list$data), as.integer(0.8 * nrow(sparse_matrix)))
expect_equal(NROW(data_list$data), NROW(data_list$shap_contrib))
})
test_that("prepare.ggplot.shap.data works", {
data_list <- xgb.shap.data(data = sparse_matrix, model = bst.Tree, top_n = 2)
plot_data <- prepare.ggplot.shap.data(data_list, normalize = TRUE)
expect_s3_class(plot_data, "data.frame")
expect_equal(names(plot_data), c("id", "feature", "feature_value", "shap_value"))
expect_s3_class(plot_data$feature, "factor")
# Each observation should have 1 row for each feature
expect_equal(nrow(plot_data), nrow(sparse_matrix) * 2)
})
test_that("xgb.plot.shap works", {
sh <- xgb.plot.shap(data = sparse_matrix, model = bst.Tree, top_n = 2, col = 4)
expect_equal(names(sh), c("data", "shap_contrib"))
expect_equal(NCOL(sh$data), 2)
expect_equal(NCOL(sh$shap_contrib), 2)
})
test_that("xgb.plot.shap.summary works", {
expect_silent(xgb.plot.shap.summary(data = sparse_matrix, model = bst.Tree, top_n = 2))
expect_silent(xgb.ggplot.shap.summary(data = sparse_matrix, model = bst.Tree, top_n = 2))
})
test_that("check.deprecation works", {
@@ -374,3 +410,26 @@ test_that("check.deprecation works", {
, "\'dumm\' was partially matched to \'dummy\'")
expect_equal(res, list(a = 1, DUMMY = 22))
})
test_that('convert.labels works', {
y <- c(0, 1, 0, 0, 1)
for (objective in c('binary:logistic', 'binary:logitraw', 'binary:hinge')) {
res <- xgboost:::convert.labels(y, objective_name = objective)
expect_s3_class(res, 'factor')
expect_equal(res, factor(res))
}
y <- c(0, 1, 3, 2, 1, 4)
for (objective in c('multi:softmax', 'multi:softprob', 'rank:pairwise', 'rank:ndcg',
'rank:map')) {
res <- xgboost:::convert.labels(y, objective_name = objective)
expect_s3_class(res, 'factor')
expect_equal(res, factor(res))
}
y <- c(1.2, 3.0, -1.0, 10.0)
for (objective in c('reg:squarederror', 'reg:squaredlogerror', 'reg:logistic',
'reg:pseudohubererror', 'count:poisson', 'survival:cox', 'survival:aft',
'reg:gamma', 'reg:tweedie')) {
res <- xgboost:::convert.labels(y, objective_name = objective)
expect_equal(class(res), 'numeric')
}
})


@@ -5,20 +5,20 @@ context("interaction constraints")
set.seed(1024)
x1 <- rnorm(1000, 1)
x2 <- rnorm(1000, 1)
x3 <- sample(c(1,2,3), size=1000, replace=TRUE)
y <- x1 + x2 + x3 + x1*x2*x3 + rnorm(1000, 0.001) + 3*sin(x1)
train <- matrix(c(x1,x2,x3), ncol = 3)
x3 <- sample(c(1, 2, 3), size = 1000, replace = TRUE)
y <- x1 + x2 + x3 + x1 * x2 * x3 + rnorm(1000, 0.001) + 3 * sin(x1)
train <- matrix(c(x1, x2, x3), ncol = 3)
test_that("interaction constraints for regression", {
# Fit a model that only allows interaction between x1 and x2
bst <- xgboost(data = train, label = y, max_depth = 3,
eta = 0.1, nthread = 2, nrounds = 100, verbose = 0,
interaction_constraints = list(c(0,1)))
interaction_constraints = list(c(0, 1)))
# Set all observations to have the same x3 values then increment
# by the same amount
preds <- lapply(c(1,2,3), function(x){
tmat <- matrix(c(x1,x2,rep(x,1000)), ncol=3)
preds <- lapply(c(1, 2, 3), function(x){
tmat <- matrix(c(x1, x2, rep(x, 1000)), ncol = 3)
return(predict(bst, tmat))
})
@@ -34,5 +34,22 @@ test_that("interaction constraints for regression", {
expect_true({
test1 & test2
}, "Interaction Contraint Satisfied")
})
test_that("interaction constraints scientific representation", {
rows <- 10
## When number exceeds 1e5, R paste function uses scientific representation.
## See: https://github.com/dmlc/xgboost/issues/5179
cols <- 1e5 + 10
d <- matrix(rexp(rows, rate = .1), nrow = rows, ncol = cols)
y <- rnorm(rows)
dtrain <- xgb.DMatrix(data = d, info = list(label = y))
inc <- list(c(seq.int(from = 0, to = cols, by = 1)))
with_inc <- xgb.train(data = dtrain, tree_method = 'hist',
interaction_constraints = inc, nrounds = 10)
without_inc <- xgb.train(data = dtrain, tree_method = 'hist', nrounds = 10)
expect_equal(xgb.save.raw(with_inc), xgb.save.raw(without_inc))
})
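The quirk this test guards against: base R renders a large double in scientific notation whenever that form is shorter, so feature indices of 1e5 and above used to reach the constraint parser as strings like "1e+05" (issue #5179 linked above). A minimal illustration:
paste(100000)         # "1e+05"  -- the scientific form that broke the parser
paste(100000 + 10)    # "100010" -- fixed notation once it is the shorter form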


@@ -9,9 +9,9 @@ test_that("predict feature interactions works", {
# simulate some binary data and a linear outcome with an interaction term
N <- 1000
P <- 5
X <- matrix(rbinom(N * P, 1, 0.5), ncol=P, dimnames = list(NULL, letters[1:P]))
X <- matrix(rbinom(N * P, 1, 0.5), ncol = P, dimnames = list(NULL, letters[1:P]))
# center the data (as contributions are computed WRT feature means)
X <- scale(X, scale=FALSE)
X <- scale(X, scale = FALSE)
# outcome without any interactions, without any noise:
f <- function(x) 2 * x[, 1] - 3 * x[, 2]
@@ -23,14 +23,14 @@ test_that("predict feature interactions works", {
y <- f_int(X)
dm <- xgb.DMatrix(X, label = y)
param <- list(eta=0.1, max_depth=4, base_score=mean(y), lambda=0, nthread=2)
param <- list(eta = 0.1, max_depth = 4, base_score = mean(y), lambda = 0, nthread = 2)
b <- xgb.train(param, dm, 100)
pred = predict(b, dm, outputmargin=TRUE)
pred <- predict(b, dm, outputmargin = TRUE)
# SHAP contributions:
cont <- predict(b, dm, predcontrib=TRUE)
expect_equal(dim(cont), c(N, P+1))
cont <- predict(b, dm, predcontrib = TRUE)
expect_equal(dim(cont), c(N, P + 1))
# make sure for each row they add up to marginal predictions
max(abs(rowSums(cont) - pred)) %>% expect_lt(0.001)
# Hand-construct the 'ground truth' feature contributions:
@@ -39,43 +39,43 @@ test_that("predict feature interactions works", {
-3. * X[, 2] + 1. * X[, 2] * X[, 3], # attribute a HALF of the interaction term to feature #2
1. * X[, 2] * X[, 3] # and another HALF of the interaction term to feature #3
)
gt_cont <- cbind(gt_cont, matrix(0, nrow=N, ncol=P + 1 - 3))
gt_cont <- cbind(gt_cont, matrix(0, nrow = N, ncol = P + 1 - 3))
# These should be relatively close:
expect_lt(max(abs(cont - gt_cont)), 0.05)
# SHAP interaction contributions:
intr <- predict(b, dm, predinteraction=TRUE)
expect_equal(dim(intr), c(N, P+1, P+1))
intr <- predict(b, dm, predinteraction = TRUE)
expect_equal(dim(intr), c(N, P + 1, P + 1))
# check assigned colnames
cn <- c(letters[1:P], "BIAS")
expect_equal(dimnames(intr), list(NULL, cn, cn))
# check the symmetry
max(abs(aperm(intr, c(1,3,2)) - intr)) %>% expect_lt(0.00001)
max(abs(aperm(intr, c(1, 3, 2)) - intr)) %>% expect_lt(0.00001)
# sums WRT columns must be close to feature contributions
max(abs(apply(intr, c(1,2), sum) - cont)) %>% expect_lt(0.00001)
max(abs(apply(intr, c(1, 2), sum) - cont)) %>% expect_lt(0.00001)
# diagonal terms for features 3,4,5 must be close to zero
Reduce(max, sapply(3:P, function(i) max(abs(intr[, i, i])))) %>% expect_lt(0.05)
# BIAS must have no interactions
max(abs(intr[, 1:P, P+1])) %>% expect_lt(0.00001)
max(abs(intr[, 1:P, P + 1])) %>% expect_lt(0.00001)
# interactions other than 2 x 3 must be close to zero
intr23 <- intr
intr23[,2,3] <- 0
Reduce(max, sapply(1:P, function(i) max(abs(intr23[, i, (i+1):(P+1)])))) %>% expect_lt(0.05)
intr23[, 2, 3] <- 0
Reduce(max, sapply(1:P, function(i) max(abs(intr23[, i, (i + 1):(P + 1)])))) %>% expect_lt(0.05)
# Construct the 'ground truth' contributions of interactions directly from the linear terms:
-gt_intr <- array(0, c(N, P+1, P+1))
-gt_intr[,2,3] <- 1. * X[, 2] * X[, 3] # attribute a HALF of the interaction term to each symmetric element
-gt_intr[,3,2] <- gt_intr[, 2, 3]
+gt_intr <- array(0, c(N, P + 1, P + 1))
+gt_intr[, 2, 3] <- 1. * X[, 2] * X[, 3] # attribute a HALF of the interaction term to each symmetric element
+gt_intr[, 3, 2] <- gt_intr[, 2, 3]
# merge-in the diagonal based on 'ground truth' feature contributions
-intr_diag = gt_cont - apply(gt_intr, c(1,2), sum)
-for(j in seq_len(P)) {
-gt_intr[,j,j] = intr_diag[,j]
+intr_diag <- gt_cont - apply(gt_intr, c(1, 2), sum)
+for (j in seq_len(P)) {
+gt_intr[, j, j] <- intr_diag[, j]
}
# These should be relatively close:
expect_lt(max(abs(intr - gt_intr)), 0.1)
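
The half-and-half split hand-coded above reflects the symmetry of SHAP interaction values: a pure product term c * x2 * x3 is credited equally to the two features involved. With the interaction term implied by the ground truth here (c = 2), the arithmetic is simply:

# each feature's share: (c / 2) * x2 * x3 = 1 * X[, 2] * X[, 3]
# so intr[, 2, 3] and intr[, 3, 2] should each carry roughly that amount,
# which is exactly what gt_intr encodes before the diagonal is merged in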
@@ -107,7 +107,7 @@ test_that("SHAP contribution values are not NAN", {
shaps <- as.data.frame(predict(fit,
newdata = as.matrix(subset(d, fold == 1)[, ivs]),
-predcontrib = T))
+predcontrib = TRUE))
result <- cbind(shaps, sum = rowSums(shaps), pred = predict(fit,
newdata = as.matrix(subset(d, fold == 1)[, ivs])))
@@ -116,26 +116,26 @@ test_that("SHAP contribution values are not NAN", {
test_that("multiclass feature interactions work", {
-dm <- xgb.DMatrix(as.matrix(iris[,-5]), label=as.numeric(iris$Species)-1)
-param <- list(eta=0.1, max_depth=4, objective='multi:softprob', num_class=3)
+dm <- xgb.DMatrix(as.matrix(iris[, -5]), label = as.numeric(iris$Species) - 1)
+param <- list(eta = 0.1, max_depth = 4, objective = 'multi:softprob', num_class = 3)
b <- xgb.train(param, dm, 40)
-pred = predict(b, dm, outputmargin=TRUE) %>% array(c(3, 150)) %>% t
+pred <- predict(b, dm, outputmargin = TRUE) %>% array(c(3, 150)) %>% t
# SHAP contributions:
-cont <- predict(b, dm, predcontrib=TRUE)
+cont <- predict(b, dm, predcontrib = TRUE)
expect_length(cont, 3)
# rewrap them as a 3d array
cont <- unlist(cont) %>% array(c(150, 5, 3))
# make sure for each row they add up to marginal predictions
-max(abs(apply(cont, c(1,3), sum) - pred)) %>% expect_lt(0.001)
+max(abs(apply(cont, c(1, 3), sum) - pred)) %>% expect_lt(0.001)
# SHAP interaction contributions:
-intr <- predict(b, dm, predinteraction=TRUE)
+intr <- predict(b, dm, predinteraction = TRUE)
expect_length(intr, 3)
# rewrap them as a 4d array
intr <- unlist(intr) %>% array(c(150, 5, 5, 3)) %>% aperm(c(4, 1, 2, 3)) # [grp, row, col, col]
# check the symmetry
-max(abs(aperm(intr, c(1,2,4,3)) - intr)) %>% expect_lt(0.00001)
+max(abs(aperm(intr, c(1, 2, 4, 3)) - intr)) %>% expect_lt(0.00001)
# sums WRT columns must be close to feature contributions
-max(abs(apply(intr, c(1,2,3), sum) - aperm(cont, c(3,1,2)))) %>% expect_lt(0.00001)
+max(abs(apply(intr, c(1, 2, 3), sum) - aperm(cont, c(3, 1, 2)))) %>% expect_lt(0.00001)
})
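
The reshaping above leans on aperm(), which generalizes t() to arrays: the result's dimensions are the input's dimensions reordered by the permutation, so c(4, 1, 2, 3) moves the class axis to the front. A standalone example matching the shapes in this test:

a <- array(seq_len(150 * 5 * 5 * 3), c(150, 5, 5, 3))
dim(aperm(a, c(4, 1, 2, 3)))  # 3 150 5 5 -- [grp, row, col, col]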


@@ -1,27 +0,0 @@
context("Code is of high quality and lint free")
test_that("Code Lint", {
skip_on_cran()
skip_on_travis()
skip_if_not_installed("lintr")
my_linters <- list(
absolute_paths_linter=lintr::absolute_paths_linter,
assignment_linter=lintr::assignment_linter,
closed_curly_linter=lintr::closed_curly_linter,
commas_linter=lintr::commas_linter,
# commented_code_linter=lintr::commented_code_linter,
infix_spaces_linter=lintr::infix_spaces_linter,
line_length_linter=lintr::line_length_linter,
no_tab_linter=lintr::no_tab_linter,
object_usage_linter=lintr::object_usage_linter,
# snake_case_linter=lintr::snake_case_linter,
# multiple_dots_linter=lintr::multiple_dots_linter,
object_length_linter=lintr::object_length_linter,
open_curly_linter=lintr::open_curly_linter,
# single_quotes_linter=lintr::single_quotes_linter,
spaces_inside_linter=lintr::spaces_inside_linter,
spaces_left_parentheses_linter=lintr::spaces_left_parentheses_linter,
trailing_blank_lines_linter=lintr::trailing_blank_lines_linter,
trailing_whitespace_linter=lintr::trailing_whitespace_linter
)
# lintr::expect_lint_free(linters=my_linters) # uncomment this if you want to check code quality
})


@@ -0,0 +1,109 @@
require(xgboost)
require(jsonlite)
context("Models from previous versions of XGBoost can be loaded")
metadata <- list(
kRounds = 2,
kRows = 1000,
kCols = 4,
kForests = 2,
kMaxDepth = 2,
kClasses = 3
)
run_model_param_check <- function (config) {
testthat::expect_equal(config$learner$learner_model_param$num_feature, '4')
testthat::expect_equal(config$learner$learner_train_param$booster, 'gbtree')
}
get_num_tree <- function (booster) {
dump <- xgb.dump(booster)
m <- regexec('booster\\[[0-9]+\\]', dump, perl = TRUE)
m <- regmatches(dump, m)
num_tree <- Reduce('+', lapply(m, length))
return (num_tree)
}
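# For context (illustrative, not part of the original file): xgb.dump()
# emits one 'booster[i]' header line per tree, e.g.
#   booster[0]
#   0:[f2<0.5] yes=1,no=2,missing=1,...
# so counting regex matches of 'booster\\[[0-9]+\\]' yields the total tree
# count, checked below against forests x rounds (x classes for multiclass).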
run_booster_check <- function (booster, name) {
# If given a handle, we need to call xgb.Booster.complete() prior to using xgb.config().
if (inherits(booster, "xgb.Booster") && xgboost:::is.null.handle(booster$handle)) {
booster <- xgb.Booster.complete(booster)
}
config <- jsonlite::fromJSON(xgb.config(booster))
run_model_param_check(config)
if (name == 'cls') {
testthat::expect_equal(get_num_tree(booster),
metadata$kForests * metadata$kRounds * metadata$kClasses)
testthat::expect_equal(as.numeric(config$learner$learner_model_param$base_score), 0.5)
testthat::expect_equal(config$learner$learner_train_param$objective, 'multi:softmax')
testthat::expect_equal(as.numeric(config$learner$learner_model_param$num_class),
metadata$kClasses)
} else if (name == 'logitraw') {
testthat::expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
testthat::expect_equal(as.numeric(config$learner$learner_model_param$num_class), 0)
testthat::expect_equal(config$learner$learner_train_param$objective, 'binary:logitraw')
} else if (name == 'logit') {
testthat::expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
testthat::expect_equal(as.numeric(config$learner$learner_model_param$num_class), 0)
testthat::expect_equal(config$learner$learner_train_param$objective, 'binary:logistic')
} else if (name == 'ltr') {
testthat::expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
testthat::expect_equal(config$learner$learner_train_param$objective, 'rank:ndcg')
} else {
testthat::expect_equal(name, 'reg')
testthat::expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
testthat::expect_equal(as.numeric(config$learner$learner_model_param$base_score), 0.5)
testthat::expect_equal(config$learner$learner_train_param$objective, 'reg:squarederror')
}
}
test_that("Models from previous versions of XGBoost can be loaded", {
bucket <- 'xgboost-ci-jenkins-artifacts'
region <- 'us-west-2'
file_name <- 'xgboost_r_model_compatibility_test.zip'
zipfile <- file.path(getwd(), file_name)
model_dir <- file.path(getwd(), 'models')
download.file(paste('https://', bucket, '.s3-', region, '.amazonaws.com/', file_name, sep = ''),
destfile = zipfile, mode = 'wb', quiet = TRUE)
unzip(zipfile, overwrite = TRUE)
pred_data <- xgb.DMatrix(matrix(c(0, 0, 0, 0), nrow = 1, ncol = 4))
lapply(list.files(model_dir), function (x) {
model_file <- file.path(model_dir, x)
m <- regexec("xgboost-([0-9\\.]+)\\.([a-z]+)\\.[a-z]+", model_file, perl = TRUE)
m <- regmatches(model_file, m)[[1]]
model_xgb_ver <- m[2]
name <- m[3]
is_rds <- endsWith(model_file, '.rds')
cpp_warning <- capture.output({
# Expect an R warning when a model is loaded from RDS and it was generated by version < 1.1.x
if (is_rds && compareVersion(model_xgb_ver, '1.1.1.1') < 0) {
booster <- readRDS(model_file)
expect_warning(predict(booster, newdata = pred_data))
expect_warning(run_booster_check(booster, name))
} else {
if (is_rds) {
booster <- readRDS(model_file)
} else {
booster <- xgb.load(model_file)
}
predict(booster, newdata = pred_data)
run_booster_check(booster, name)
}
})
if (compareVersion(model_xgb_ver, '1.0.0.0') < 0) {
# Expect a C++ warning when a model was generated in version < 1.0.x
m <- grepl(paste0('.*Loading model from XGBoost < 1\\.0\\.0, consider saving it again for ',
'improved compatibility.*'), cpp_warning, perl = TRUE)
expect_true(length(m) > 0 && all(m))
} else if (is_rds && model_xgb_ver == '1.1.1.1') {
# Expect a C++ warning when a model is loaded from RDS and it was generated by version 1.1.x
m <- grepl(paste0('.*Attempted to load internal configuration for a model file that was ',
'generated by a previous version of XGBoost.*'), cpp_warning, perl = TRUE)
expect_true(length(m) > 0 && all(m))
}
})
})
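
The branching above leans on base R's utils::compareVersion(), which returns -1, 0, or 1. A quick sanity check of the two gates, with illustrative version strings (the real ones are parsed from the model file names):

compareVersion('0.90.0', '1.0.0.0')   # -1: pre-1.0 model, C++ compatibility warning expected
compareVersion('1.1.1.1', '1.1.1.1')  #  0: the RDS-specific 1.1.x warning branch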


@@ -3,22 +3,21 @@ require(xgboost)
context("monotone constraints")
set.seed(1024)
-x = rnorm(1000, 10)
-y = -1*x + rnorm(1000, 0.001) + 3*sin(x)
-train = matrix(x, ncol = 1)
+x <- rnorm(1000, 10)
+y <- -1 * x + rnorm(1000, 0.001) + 3 * sin(x)
+train <- matrix(x, ncol = 1)
test_that("monotone constraints for regression", {
-bst = xgboost(data = train, label = y, max_depth = 2,
+bst <- xgboost(data = train, label = y, max_depth = 2,
eta = 0.1, nthread = 2, nrounds = 100, verbose = 0,
monotone_constraints = -1)
-pred = predict(bst, train)
+pred <- predict(bst, train)
-ind = order(train[,1])
-pred.ord = pred[ind]
+ind <- order(train[, 1])
+pred.ord <- pred[ind]
expect_true({
!any(diff(pred.ord) > 0)
}, "Monotone Contraint Satisfied")
})
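
A complementary sketch, not part of this diff (same data assumed): flipping the constraint to +1 should force non-decreasing predictions, checkable the same way:

bst_inc <- xgboost(data = train, label = y, max_depth = 2,
                   eta = 0.1, nthread = 2, nrounds = 100, verbose = 0,
                   monotone_constraints = 1)
pred_inc <- predict(bst_inc, train)
expect_true(!any(diff(pred_inc[order(train[, 1])]) < 0))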


@@ -2,8 +2,8 @@ context('Test model params and call are exposed to R')
require(xgboost)
-data(agaricus.train, package='xgboost')
-data(agaricus.test, package='xgboost')
+data(agaricus.train, package = 'xgboost')
+data(agaricus.test, package = 'xgboost')
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)


@@ -5,10 +5,10 @@ set.seed(1994)
test_that("poisson regression works", {
data(mtcars)
-bst <- xgboost(data = as.matrix(mtcars[,-11]), label = mtcars[,11],
-objective = 'count:poisson', nrounds=10, verbose=0)
+bst <- xgboost(data = as.matrix(mtcars[, -11]), label = mtcars[, 11],
+objective = 'count:poisson', nrounds = 10, verbose = 0)
expect_equal(class(bst), "xgb.Booster")
pred <- predict(bst, as.matrix(mtcars[, -11]))
expect_equal(length(pred), 32)
-expect_lt(sqrt(mean( (pred - mtcars[,11])^2 )), 1.2)
+expect_lt(sqrt(mean((pred - mtcars[, 11])^2)), 1.2)
})
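
One property worth noting: count:poisson fits with a log link, so predict() returns expected counts on the positive mean scale. A small follow-on assertion consistent with the test above (a sketch, not in the diff):

expect_true(all(pred > 0))  # inverse log link guarantees positive predicted means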


@@ -0,0 +1,51 @@
require(xgboost)
require(Matrix)
context('Learning to rank')
test_that('Test ranking with unweighted data', {
X <- sparseMatrix(i = c(2, 3, 7, 9, 12, 15, 17, 18),
j = c(1, 1, 2, 2, 3, 3, 4, 4),
x = rep(1.0, 8), dims = c(20, 4))
y <- c(0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0)
group <- c(5, 5, 5, 5)
dtrain <- xgb.DMatrix(X, label = y, group = group)
params <- list(eta = 1, tree_method = 'exact', objective = 'rank:pairwise', max_depth = 1,
eval_metric = 'auc', eval_metric = 'aucpr')
bst <- xgb.train(params, dtrain, nrounds = 10, watchlist = list(train = dtrain))
# Check if the metric is monotone increasing
expect_true(all(diff(bst$evaluation_log$train_auc) >= 0))
expect_true(all(diff(bst$evaluation_log$train_aucpr) >= 0))
})
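# Note (illustrative, not part of the original file): R lists may repeat
# names, so specifying eval_metric twice above is intentional -- xgb.train
# receives both 'auc' and 'aucpr', and both appear in bst$evaluation_log
# as train_auc and train_aucpr, which is what the assertions rely on.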
test_that('Test ranking with weighted data', {
X <- sparseMatrix(i = c(2, 3, 7, 9, 12, 15, 17, 18),
j = c(1, 1, 2, 2, 3, 3, 4, 4),
x = rep(1.0, 8), dims = c(20, 4))
y <- c(0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0)
group <- c(5, 5, 5, 5)
weight <- c(1.0, 2.0, 3.0, 4.0)
dtrain <- xgb.DMatrix(X, label = y, group = group, weight = weight)
params <- list(eta = 1, tree_method = 'exact', objective = 'rank:pairwise', max_depth = 1,
eval_metric = 'auc', eval_metric = 'aucpr')
bst <- xgb.train(params, dtrain, nrounds = 10, watchlist = list(train = dtrain))
# Check if the metric is monotone increasing
expect_true(all(diff(bst$evaluation_log$train_auc) >= 0))
expect_true(all(diff(bst$evaluation_log$train_aucpr) >= 0))
for (i in 1:10) {
pred <- predict(bst, newdata = dtrain, ntreelimit = i)
# is_sorted[i]: is i-th group correctly sorted by the ranking predictor?
is_sorted <- lapply(seq(1, 20, by = 5),
function (k) {
ind <- order(-pred[k:(k + 4)])
z <- y[ind + (k - 1)]
all(diff(z) <= 0) # Check if z is monotone decreasing
})
# Since we give weights 1, 2, 3, 4 to the four query groups,
# the ranking predictor will first try to correctly sort the last query group
# before correctly sorting other groups.
expect_true(all(diff(as.numeric(is_sorted)) >= 0))
}
})


@@ -9,10 +9,10 @@ dtest <- xgb.DMatrix(agaricus.test$data, label = agaricus.test$label)
# Disable flaky tests for 32-bit Windows.
# See https://github.com/dmlc/xgboost/issues/3720
-win32_flag = .Platform$OS.type == "windows" && .Machine$sizeof.pointer != 8
+win32_flag <- .Platform$OS.type == "windows" && .Machine$sizeof.pointer != 8
test_that("updating the model works", {
-watchlist = list(train = dtrain, test = dtest)
+watchlist <- list(train = dtrain, test = dtest)
# no-subsampling
p1 <- list(objective = "binary:logistic", max_depth = 2, eta = 0.05, nthread = 2)
@@ -95,7 +95,7 @@ test_that("updating works for multiclass & multitree", {
tr0 <- xgb.model.dt.tree(model = bst0)
# run update process for an original model with subsampling
-p0u <- modifyList(p0, list(process_type='update', updater='refresh', refresh_leaf=FALSE))
+p0u <- modifyList(p0, list(process_type = 'update', updater = 'refresh', refresh_leaf = FALSE))
bst0u <- xgb.train(p0u, dtr, nrounds = bst0$niter, watchlist, xgb_model = bst0, verbose = 0)
tr0u <- xgb.model.dt.tree(model = bst0u)
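
For readers unfamiliar with the update path: process_type = 'update' re-runs the existing trees over the data instead of growing new ones, and refresh_leaf = FALSE refreshes internal node statistics while leaving leaf values alone. Under that reading, a hedged follow-up check (Quality holds the leaf value for rows where Feature == 'Leaf' in xgb.model.dt.tree output):

expect_equal(tr0[Feature == 'Leaf', Quality], tr0u[Feature == 'Leaf', Quality])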

Some files were not shown because too many files have changed in this diff.