Compare commits


22 Commits

Author SHA1 Message Date
Jiaming Yuan
62ed8b5fef Bump release version to 1.7.2. (#8569) 2022-12-08 21:46:26 +08:00
Jiaming Yuan
a980e10744 Properly await async method client.wait_for_workers (#8558) (#8567)
* Properly await async method client.wait_for_workers

* ignore mypy error.

Co-authored-by: jiamingy <jm.yuan@outlook.com>

Co-authored-by: Matthew Rocklin <mrocklin@gmail.com>
2022-12-07 23:25:05 +08:00
Jiaming Yuan
59c54e361b [pyspark] Make QDM optional based on cuDF check (#8471) (#8556)
Co-authored-by: WeichenXu <weichen.xu@databricks.com>
2022-12-07 03:19:35 +08:00
Jiaming Yuan
60a8c8ebba [pyspark] sort qid for SparkRanker (#8497) (#8555)
* [pyspark] sort qid for SparkRanker

* resolve comments

Co-authored-by: Bobby Wang <wbo4958@gmail.com>
2022-12-07 02:07:37 +08:00
Jiaming Yuan
58bc225657 [backport] [CI] Fix github action mismatched glibcxx. (#8551) (#8552)
Split up the Linux test to use the toolchain from conda forge.
2022-12-06 21:35:26 +08:00
Jiaming Yuan
850b53100f [backport] [doc] Fix outdated document [skip ci] (#8527) (#8553)
* [doc] Fix document around categorical parameters. [skip ci]

* note on validate parameter [skip ci]

* Fix dask doc as well [skip ci]
2022-12-06 18:21:14 +08:00
Philip Hyunsu Cho
67b657dad0 SO_DOMAIN is not supported on IBM i; use getsockname instead (#8437) (#8500) 2022-11-30 11:47:59 -08:00
Philip Hyunsu Cho
db14e3feb7 Support null value in CUDA array interface. (#8486) (#8499) 2022-11-30 11:44:54 -08:00
Robert Maynard
9372370dda Work with newer thrust and libcudacxx (#8432)
* Thrust 1.17 removes the experimental/pinned_allocator.

When xgboost is brought into a large project, it can be compiled against Thrust 1.17+, which no longer offers this experimental allocator.

To ensure that xgboost keeps working in all environments going forward, we provide an xgboost-namespaced version of the pinned_allocator that was previously in Thrust.

* Update gputreeshap to work with libcudacxx 1.9
2022-11-11 01:15:25 +08:00
Jiaming Yuan
1136a7e0c3 Fix CRAN note on cleanup. (#8447) 2022-11-09 14:22:54 +08:00
Jiaming Yuan
a347cd512b [backport] [R] Fix CRAN test notes. (#8428) (#8440)
- Limit the number of used CPU cores in examples.
- Add a note for the constraint.
- Bring back the cleanup script.
2022-11-09 07:12:46 +08:00
Jiaming Yuan
9ff0c0832a Fix 1.7.1 version file. (#8427) 2022-11-06 03:19:54 +08:00
Philip Hyunsu Cho
534c940a7e Release 1.7.1 (#8413)
* Release 1.7.1

* Review comment
2022-11-03 15:37:54 -07:00
Philip Hyunsu Cho
5b76acccff Add back xgboost.rabit for backwards compatibility (#8408) (#8411) 2022-11-02 07:56:55 -07:00
Hyunsu Cho
4bc59ef7c3 Release 1.7 2022-10-31 10:53:07 -07:00
Jiaming Yuan
e43cd60c0e [backport] Type fix for WebAssembly. (#8369) (#8394)
Co-authored-by: Yizhi Liu <liuyizhi@apache.org>
2022-10-26 20:47:16 +08:00
Jiaming Yuan
3f92970a39 [backport] Fix CUDA async stream. (#8380) (#8392) 2022-10-26 20:46:38 +08:00
Jiaming Yuan
e17f7010bf [backport][doc] Cleanup outdated documents for GPU. [skip ci] (#8378) (#8393) 2022-10-26 19:49:00 +08:00
Jiaming Yuan
aa30ce10da [backport][pyspark] Improve tutorial on enabling GPU support. (#8385) [skip ci] (#8391)
- Quote the databricks doc on how to manage dependencies.
- Some wording changes.

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2022-10-26 19:31:34 +08:00
Philip Hyunsu Cho
153d995b58 Fix building XGBoost with libomp 15 (#8384) (#8387) 2022-10-26 00:43:10 -07:00
Jiaming Yuan
463313d9be Remove cleanup script in R package. (#8370) 2022-10-20 14:22:13 +08:00
Jiaming Yuan
7cf58a2c65 Make 1.7.0rc1. (#8365) 2022-10-20 12:01:18 +08:00
875 changed files with 27839 additions and 44939 deletions


@@ -1,4 +1,4 @@
Checks: 'modernize-*,-modernize-use-nodiscard,-modernize-concat-nested-namespaces,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,-modernize-avoid-c-arrays,-modernize-use-trailing-return-type,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
Checks: 'modernize-*,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,-modernize-avoid-c-arrays,-modernize-use-trailing-return-type,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
CheckOptions:
- { key: readability-identifier-naming.ClassCase, value: CamelCase }
- { key: readability-identifier-naming.StructCase, value: CamelCase }

.gitattributes (18 changed lines)

@@ -1,18 +0,0 @@
* text=auto
*.c text eol=lf
*.h text eol=lf
*.cc text eol=lf
*.cuh text eol=lf
*.cu text eol=lf
*.py text eol=lf
*.txt text eol=lf
*.R text eol=lf
*.scala text eol=lf
*.java text eol=lf
*.sh text eol=lf
*.rst text eol=lf
*.md text eol=lf
*.csv text eol=lf


@@ -1,31 +0,0 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
- package-ecosystem: "maven"
directory: "/jvm-packages"
schedule:
interval: "daily"
- package-ecosystem: "maven"
directory: "/jvm-packages/xgboost4j"
schedule:
interval: "daily"
- package-ecosystem: "maven"
directory: "/jvm-packages/xgboost4j-gpu"
schedule:
interval: "daily"
- package-ecosystem: "maven"
directory: "/jvm-packages/xgboost4j-example"
schedule:
interval: "daily"
- package-ecosystem: "maven"
directory: "/jvm-packages/xgboost4j-spark"
schedule:
interval: "daily"
- package-ecosystem: "maven"
directory: "/jvm-packages/xgboost4j-spark-gpu"
schedule:
interval: "daily"


@@ -15,16 +15,16 @@ jobs:
os: [windows-latest, ubuntu-latest, macos-11]
steps:
- uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v2.5.0
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: actions/setup-python@7f80679172b057fc5e90d70d197929d454754a5a # v4.3.0
- uses: actions/setup-python@v2
with:
python-version: '3.8'
architecture: 'x64'
- uses: actions/setup-java@d202f5dbf7256730fb690ec59f6381650114feb2 # v3.6.0
- uses: actions/setup-java@v1
with:
java-version: 1.8
@@ -34,13 +34,13 @@ jobs:
python -m pip install awscli
- name: Cache Maven packages
uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812 # v3.2.5
uses: actions/cache@v2
with:
path: ~/.m2
key: ${{ runner.os }}-m2-${{ hashFiles('./jvm-packages/pom.xml') }}
restore-keys: ${{ runner.os }}-m2-${{ hashFiles('./jvm-packages/pom.xml') }}
restore-keys: ${{ runner.os }}-m2
- name: Test XGBoost4J (Core)
- name: Test XGBoost4J
run: |
cd jvm-packages
mvn test -B -pl :xgboost4j_2.12
@@ -67,7 +67,7 @@ jobs:
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY_IAM_S3_UPLOADER }}
- name: Test XGBoost4J (Core, Spark, Examples)
- name: Test XGBoost4J-Spark
run: |
rm -rfv build/
cd jvm-packages
@@ -75,13 +75,3 @@ jobs:
if: matrix.os == 'ubuntu-latest' # Distributed training doesn't work on Windows
env:
RABIT_MOCK: ON
- name: Build and Test XGBoost4J with scala 2.13
run: |
rm -rfv build/
cd jvm-packages
mvn -B clean install test -Pdefault,scala-2.13
if: matrix.os == 'ubuntu-latest' # Distributed training doesn't work on Windows
env:
RABIT_MOCK: ON


@@ -19,7 +19,7 @@ jobs:
matrix:
os: [macos-11]
steps:
- uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v2.5.0
- uses: actions/checkout@v2
with:
submodules: 'true'
- name: Install system packages
@@ -45,7 +45,7 @@ jobs:
matrix:
os: [ubuntu-latest]
steps:
- uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v2.5.0
- uses: actions/checkout@v2
with:
submodules: 'true'
- name: Install system packages
@@ -66,16 +66,13 @@ jobs:
c-api-demo:
name: Test installing XGBoost lib + building the C API demo
runs-on: ${{ matrix.os }}
defaults:
run:
shell: bash -l {0}
strategy:
fail-fast: false
matrix:
os: ["ubuntu-latest"]
python-version: ["3.8"]
steps:
- uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v2.5.0
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: mamba-org/provision-with-micromamba@f347426e5745fe3dfc13ec5baf20496990d0281f # v14
@@ -85,11 +82,13 @@ jobs:
environment-name: cpp_test
environment-file: tests/ci_build/conda_env/cpp_test.yml
- name: Display Conda env
shell: bash -l {0}
run: |
conda info
conda list
- name: Build and install XGBoost static library
shell: bash -l {0}
run: |
mkdir build
cd build
@@ -97,6 +96,7 @@ jobs:
ninja -v install
cd -
- name: Build and run C API demo with static
shell: bash -l {0}
run: |
pushd .
cd demo/c-api/
@@ -110,12 +110,14 @@ jobs:
popd
- name: Build and install XGBoost shared library
shell: bash -l {0}
run: |
cd build
cmake .. -DBUILD_STATIC_LIB=OFF -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX -GNinja
ninja -v install
cd -
- name: Build and run C API demo with shared
shell: bash -l {0}
run: |
pushd .
cd demo/c-api/
@@ -128,14 +130,14 @@ jobs:
./tests/ci_build/verify_link.sh ./demo/c-api/build/basic/api-demo
./tests/ci_build/verify_link.sh ./demo/c-api/build/external-memory/external-memory-demo
cpp-lint:
lint:
runs-on: ubuntu-latest
name: Code linting for C++
steps:
- uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v2.5.0
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: actions/setup-python@7f80679172b057fc5e90d70d197929d454754a5a # v4.3.0
- uses: actions/setup-python@v2
with:
python-version: "3.8"
architecture: 'x64'
@@ -144,15 +146,68 @@ jobs:
python -m pip install wheel setuptools cpplint pylint
- name: Run lint
run: |
python3 dmlc-core/scripts/lint.py xgboost cpp R-package/src
LINT_LANG=cpp make lint
python3 dmlc-core/scripts/lint.py --exclude_path \
python-package/xgboost/dmlc-core \
python-package/xgboost/include \
python-package/xgboost/lib \
python-package/xgboost/rabit \
python-package/xgboost/src \
--pylint-rc python-package/.pylintrc \
xgboost \
cpp \
include src python-package
doxygen:
runs-on: ubuntu-latest
name: Generate C/C++ API doc using Doxygen
steps:
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: actions/setup-python@v2
with:
python-version: "3.8"
architecture: 'x64'
- name: Install system packages
run: |
sudo apt-get install -y --no-install-recommends doxygen graphviz ninja-build
python -m pip install wheel setuptools
python -m pip install awscli
- name: Run Doxygen
run: |
mkdir build
cd build
cmake .. -DBUILD_C_DOC=ON -GNinja
ninja -v doc_doxygen
- name: Extract branch name
shell: bash
run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
id: extract_branch
if: github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')
- name: Publish
run: |
cd build/
tar cvjf ${{ steps.extract_branch.outputs.branch }}.tar.bz2 doc_doxygen/
python -m awscli s3 cp ./${{ steps.extract_branch.outputs.branch }}.tar.bz2 s3://xgboost-docs/doxygen/ --acl public-read
if: github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID_IAM_S3_UPLOADER }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY_IAM_S3_UPLOADER }}
sphinx:
runs-on: ubuntu-latest
name: Build docs using Sphinx
steps:
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: actions/setup-python@v2
with:
python-version: "3.8"
architecture: 'x64'
- name: Install system packages
run: |
sudo apt-get install -y --no-install-recommends graphviz
python -m pip install wheel setuptools
python -m pip install -r doc/requirements.txt
- name: Extract branch name
shell: bash
run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
id: extract_branch
if: github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')
- name: Run Sphinx
run: |
make -C doc html
env:
SPHINX_GIT_BRANCH: ${{ steps.extract_branch.outputs.branch }}


@@ -5,10 +5,6 @@ on: [push, pull_request]
permissions:
contents: read # to fetch code (actions/checkout)
defaults:
run:
shell: bash -l {0}
jobs:
python-mypy-lint:
runs-on: ubuntu-latest
@@ -16,27 +12,32 @@ jobs:
strategy:
matrix:
os: [ubuntu-latest]
python-version: ["3.8"]
steps:
- uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v2.5.0
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: mamba-org/provision-with-micromamba@f347426e5745fe3dfc13ec5baf20496990d0281f # v14
- uses: conda-incubator/setup-miniconda@v2
with:
cache-downloads: true
cache-env: true
environment-name: python_lint
auto-update-conda: true
python-version: ${{ matrix.python-version }}
activate-environment: python_lint
environment-file: tests/ci_build/conda_env/python_lint.yml
- name: Display Conda env
shell: bash -l {0}
run: |
conda info
conda list
- name: Run mypy
shell: bash -l {0}
run: |
python tests/ci_build/lint_python.py --format=0 --type-check=1 --pylint=0
- name: Run formatter
shell: bash -l {0}
run: |
python tests/ci_build/lint_python.py --format=1 --type-check=0 --pylint=0
- name: Run pylint
shell: bash -l {0}
run: |
python tests/ci_build/lint_python.py --format=0 --type-check=0 --pylint=1
@@ -54,19 +55,21 @@ jobs:
- uses: mamba-org/provision-with-micromamba@f347426e5745fe3dfc13ec5baf20496990d0281f # v14
with:
cache-downloads: true
cache-env: true
cache-env: false
environment-name: sdist_test
environment-file: tests/ci_build/conda_env/sdist_test.yml
- name: Display Conda env
shell: bash -l {0}
run: |
conda info
conda list
- name: Build and install XGBoost
shell: bash -l {0}
run: |
cd python-package
python --version
python -m build --sdist
pip install -v ./dist/xgboost-*.tar.gz --config-settings use_openmp=False
python setup.py sdist
pip install -v ./dist/xgboost-*.tar.gz
cd ..
python -c 'import xgboost'
@@ -80,7 +83,7 @@ jobs:
os: [macos-11, windows-latest]
python-version: ["3.8"]
steps:
- uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v2.5.0
- uses: actions/checkout@v2
with:
submodules: 'true'
- name: Install osx system dependencies
@@ -92,18 +95,17 @@ jobs:
auto-update-conda: true
python-version: ${{ matrix.python-version }}
activate-environment: test
- name: Install build
run: |
conda install -c conda-forge python-build
- name: Display Conda env
shell: bash -l {0}
run: |
conda info
conda list
- name: Build and install XGBoost
shell: bash -l {0}
run: |
cd python-package
python --version
python -m build --sdist
python setup.py sdist
pip install -v ./dist/xgboost-*.tar.gz
cd ..
python -c 'import xgboost'
@@ -125,16 +127,18 @@ jobs:
- uses: mamba-org/provision-with-micromamba@f347426e5745fe3dfc13ec5baf20496990d0281f # v14
with:
cache-downloads: true
cache-env: true
cache-env: false
environment-name: macos_test
environment-file: tests/ci_build/conda_env/macos_cpu_test.yml
- name: Display Conda env
shell: bash -l {0}
run: |
conda info
conda list
- name: Build XGBoost on macos
shell: bash -l {0}
run: |
brew install ninja
@@ -147,34 +151,31 @@ jobs:
ninja
- name: Install Python package
shell: bash -l {0}
run: |
cd python-package
python --version
pip install -v .
python setup.py install
- name: Test Python package
shell: bash -l {0}
run: |
pytest -s -v -rxXs --durations=0 ./tests/python
- name: Test Dask Interface
run: |
pytest -s -v -rxXs --durations=0 ./tests/test_distributed/test_with_dask
python-tests-on-win:
name: Test XGBoost Python package on ${{ matrix.config.os }}
runs-on: ${{ matrix.config.os }}
timeout-minutes: 60
strategy:
matrix:
config:
- {os: windows-latest, python-version: '3.8'}
steps:
- uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v2.5.0
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: conda-incubator/setup-miniconda@35d1405e78aa3f784fe3ce9a2eb378d5eeb62169 # v2.1.1
- uses: conda-incubator/setup-miniconda@v2
with:
auto-update-conda: true
python-version: ${{ matrix.config.python-version }}
@@ -182,11 +183,13 @@ jobs:
environment-file: tests/ci_build/conda_env/win64_cpu_test.yml
- name: Display Conda env
shell: bash -l {0}
run: |
conda info
conda list
- name: Build XGBoost on Windows
shell: bash -l {0}
run: |
mkdir build_msvc
cd build_msvc
@@ -194,105 +197,14 @@ jobs:
cmake --build . --config Release --parallel $(nproc)
- name: Install Python package
shell: bash -l {0}
run: |
cd python-package
python --version
pip wheel -v . --wheel-dir dist/
python setup.py bdist_wheel --universal
pip install ./dist/*.whl
- name: Test Python package
run: |
pytest -s -v -rxXs --durations=0 ./tests/python
python-tests-on-ubuntu:
name: Test XGBoost Python package on ${{ matrix.config.os }}
runs-on: ${{ matrix.config.os }}
timeout-minutes: 90
strategy:
matrix:
config:
- {os: ubuntu-latest, python-version: "3.8"}
steps:
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: mamba-org/provision-with-micromamba@f347426e5745fe3dfc13ec5baf20496990d0281f # v14
with:
cache-downloads: true
cache-env: true
environment-name: linux_cpu_test
environment-file: tests/ci_build/conda_env/linux_cpu_test.yml
- name: Display Conda env
run: |
conda info
conda list
- name: Build XGBoost on Ubuntu
run: |
mkdir build
cd build
cmake .. -GNinja -DCMAKE_PREFIX_PATH=$CONDA_PREFIX
ninja
- name: Install Python package
run: |
cd python-package
python --version
pip install -v .
- name: Test Python package
run: |
pytest -s -v -rxXs --durations=0 ./tests/python
- name: Test Dask Interface
run: |
pytest -s -v -rxXs --durations=0 ./tests/test_distributed/test_with_dask
- name: Test PySpark Interface
shell: bash -l {0}
run: |
pytest -s -v -rxXs --durations=0 ./tests/test_distributed/test_with_spark
python-system-installation-on-ubuntu:
name: Test XGBoost Python package System Installation on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v2
with:
submodules: 'true'
- name: Set up Python 3.8
uses: actions/setup-python@v4
with:
python-version: 3.8
- name: Install ninja
run: |
sudo apt-get update && sudo apt-get install -y ninja-build
- name: Build XGBoost on Ubuntu
run: |
mkdir build
cd build
cmake .. -GNinja
ninja
- name: Copy lib to system lib
run: |
cp lib/* "$(python -c 'import sys; print(sys.base_prefix)')/lib"
- name: Install XGBoost in Virtual Environment
run: |
cd python-package
pip install virtualenv
virtualenv venv
source venv/bin/activate && \
pip install -v . --config-settings use_system_libxgboost=True && \
python -c 'import xgboost'
pytest -s -v -rxXs --durations=0 ./tests/python


@@ -17,11 +17,11 @@ jobs:
- os: macos-latest
platform_id: macosx_arm64
steps:
- uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v2.5.0
- uses: actions/checkout@v2
with:
submodules: 'true'
- name: Setup Python
uses: actions/setup-python@7f80679172b057fc5e90d70d197929d454754a5a # v4.3.0
uses: actions/setup-python@v2
with:
python-version: "3.8"
- name: Build wheels


@@ -1,4 +1,4 @@
# Run expensive R tests with the help of rhub. Only triggered by a pull request review
# Run R tests with noLD R. Only triggered by a pull request review
# See discussion at https://github.com/dmlc/xgboost/pull/6378
name: XGBoost-R-noLD
@@ -7,6 +7,9 @@ on:
pull_request_review_comment:
types: [created]
env:
R_PACKAGES: c('XML', 'igraph', 'data.table', 'ggplot2', 'DiagrammeR', 'Ckmeans.1d.dp', 'vcd', 'testthat', 'lintr', 'knitr', 'rmarkdown', 'e1071', 'cplm', 'devtools', 'float', 'titanic')
permissions:
contents: read # to fetch code (actions/checkout)
@@ -15,22 +18,26 @@ jobs:
if: github.event.comment.body == '/gha run r-nold-test' && contains('OWNER,MEMBER,COLLABORATOR', github.event.comment.author_association)
timeout-minutes: 120
runs-on: ubuntu-latest
container:
image: rhub/debian-gcc-devel-nold
container: rhub/debian-gcc-devel-nold
steps:
- name: Install git and system packages
shell: bash
run: |
apt update && apt install libcurl4-openssl-dev libssl-dev libssh2-1-dev libgit2-dev libglpk-dev libxml2-dev libharfbuzz-dev libfribidi-dev git -y
apt-get update && apt-get install -y git libcurl4-openssl-dev libssl-dev libssh2-1-dev libgit2-dev libxml2-dev
- uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v2.5.0
- uses: actions/checkout@v2
with:
submodules: 'true'
- name: Install dependencies
shell: bash -l {0}
shell: bash
run: |
/tmp/R-devel/bin/Rscript -e "source('./R-package/tests/helper_scripts/install_deps.R')"
cat > install_libs.R <<EOT
install.packages(${{ env.R_PACKAGES }},
repos = 'http://cloud.r-project.org',
dependencies = c('Depends', 'Imports', 'LinkingTo'))
EOT
/tmp/R-devel/bin/Rscript install_libs.R
- name: Run R tests
shell: bash


@@ -3,7 +3,9 @@ name: XGBoost-R-Tests
on: [push, pull_request]
env:
R_PACKAGES: c('XML', 'data.table', 'ggplot2', 'DiagrammeR', 'Ckmeans.1d.dp', 'vcd', 'testthat', 'lintr', 'knitr', 'rmarkdown', 'e1071', 'cplm', 'devtools', 'float', 'titanic')
GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }}
_R_CHECK_EXAMPLE_TIMING_CPU_TO_ELAPSED_THRESHOLD_: 2.5
permissions:
contents: read # to fetch code (actions/checkout)
@@ -21,32 +23,41 @@ jobs:
RSPM: ${{ matrix.config.rspm }}
steps:
- uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v2.5.0
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: r-lib/actions/setup-r@50d1eae9b8da0bb3f8582c59a5b82225fa2fe7f2 # v2.3.1
- uses: r-lib/actions/setup-r@v2
with:
r-version: ${{ matrix.config.r }}
- name: Cache R packages
uses: actions/cache@937d24475381cd9c75ae6db12cb4e79714b926ed # v3.0.11
uses: actions/cache@v2
with:
path: ${{ env.R_LIBS_USER }}
key: ${{ runner.os }}-r-${{ matrix.config.r }}-6-${{ hashFiles('R-package/DESCRIPTION') }}
restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-6-${{ hashFiles('R-package/DESCRIPTION') }}
key: ${{ runner.os }}-r-${{ matrix.config.r }}-5-${{ hashFiles('R-package/DESCRIPTION') }}
restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-5-${{ hashFiles('R-package/DESCRIPTION') }}
- name: Install dependencies
shell: Rscript {0}
run: |
source("./R-package/tests/helper_scripts/install_deps.R")
install.packages(${{ env.R_PACKAGES }},
repos = 'http://cloud.r-project.org',
dependencies = c('Depends', 'Imports', 'LinkingTo'))
- name: Install igraph on Windows
shell: Rscript {0}
if: matrix.config.os == 'windows-latest'
run: |
install.packages('igraph', type='binary')
- name: Run lintr
run: |
MAKEFLAGS="-j$(nproc)" R CMD INSTALL R-package/
Rscript tests/ci_build/lint_r.R $(pwd)
cd R-package
R CMD INSTALL .
# Disable lintr errors for now: https://github.com/dmlc/xgboost/issues/8012
Rscript tests/helper_scripts/run_lint.R || true
test-R-on-Windows:
test-with-R:
runs-on: ${{ matrix.config.os }}
name: Test R on OS ${{ matrix.config.os }}, R ${{ matrix.config.r }}, Compiler ${{ matrix.config.compiler }}, Build ${{ matrix.config.build }}
strategy:
@@ -54,82 +65,100 @@ jobs:
matrix:
config:
- {os: windows-latest, r: 'release', compiler: 'mingw', build: 'autotools'}
- {os: windows-latest, r: '4.2.0', compiler: 'msvc', build: 'cmake'}
- {os: windows-latest, r: 'release', compiler: 'msvc', build: 'cmake'}
- {os: windows-latest, r: 'release', compiler: 'mingw', build: 'cmake'}
env:
R_REMOTES_NO_ERRORS_FROM_WARNINGS: true
_R_CHECK_EXAMPLE_TIMING_CPU_TO_ELAPSED_THRESHOLD_: 2.5
RSPM: ${{ matrix.config.rspm }}
steps:
- uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v2.5.0
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: r-lib/actions/setup-r@50d1eae9b8da0bb3f8582c59a5b82225fa2fe7f2 # v2.3.1
- uses: r-lib/actions/setup-r@v2
with:
r-version: ${{ matrix.config.r }}
- name: Cache R packages
uses: actions/cache@937d24475381cd9c75ae6db12cb4e79714b926ed # v3.0.11
uses: actions/cache@v2
with:
path: ${{ env.R_LIBS_USER }}
key: ${{ runner.os }}-r-${{ matrix.config.r }}-6-${{ hashFiles('R-package/DESCRIPTION') }}
restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-6-${{ hashFiles('R-package/DESCRIPTION') }}
key: ${{ runner.os }}-r-${{ matrix.config.r }}-5-${{ hashFiles('R-package/DESCRIPTION') }}
restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-5-${{ hashFiles('R-package/DESCRIPTION') }}
- uses: actions/setup-python@7f80679172b057fc5e90d70d197929d454754a5a # v4.3.0
- name: Install dependencies
shell: Rscript {0}
if: matrix.config.os != 'windows-latest'
run: |
install.packages(${{ env.R_PACKAGES }},
repos = 'http://cloud.r-project.org',
dependencies = c('Depends', 'Imports', 'LinkingTo'))
- name: Install binary dependencies
shell: Rscript {0}
if: matrix.config.os == 'windows-latest'
run: |
install.packages(${{ env.R_PACKAGES }},
type = 'binary',
repos = 'http://cloud.r-project.org',
dependencies = c('Depends', 'Imports', 'LinkingTo'))
- uses: actions/setup-python@v2
with:
python-version: "3.8"
architecture: 'x64'
- name: Test R
run: |
python tests/ci_build/test_r_package.py --compiler='${{ matrix.config.compiler }}' --build-tool='${{ matrix.config.build }}'
test-R-CRAN:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
config:
- {r: 'release'}
env:
_R_CHECK_EXAMPLE_TIMING_CPU_TO_ELAPSED_THRESHOLD_: 2.5
MAKE: "make -j$(nproc)"
steps:
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: r-lib/actions/setup-r@v2
with:
r-version: ${{ matrix.config.r }}
- uses: r-lib/actions/setup-tinytex@v2
- name: Install system packages
run: |
sudo apt-get update && sudo apt-get install libcurl4-openssl-dev libssl-dev libssh2-1-dev libgit2-dev pandoc pandoc-citeproc libglpk-dev
- name: Cache R packages
uses: actions/cache@v2
with:
path: ${{ env.R_LIBS_USER }}
key: ${{ runner.os }}-r-${{ matrix.config.r }}-5-${{ hashFiles('R-package/DESCRIPTION') }}
restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-5-${{ hashFiles('R-package/DESCRIPTION') }}
- name: Install dependencies
shell: Rscript {0}
run: |
source("./R-package/tests/helper_scripts/install_deps.R")
install.packages(${{ env.R_PACKAGES }},
repos = 'http://cloud.r-project.org',
dependencies = c('Depends', 'Imports', 'LinkingTo'))
install.packages('igraph', repos = 'http://cloud.r-project.org', dependencies = c('Depends', 'Imports', 'LinkingTo'))
- name: Test R
- name: Check R Package
run: |
python tests/ci_build/test_r_package.py --compiler='${{ matrix.config.compiler }}' --build-tool="${{ matrix.config.build }}" --task=check
test-R-on-Debian:
name: Test R package on Debian
runs-on: ubuntu-latest
container:
image: rhub/debian-gcc-devel
steps:
- name: Install system dependencies
run: |
# Must run before checkout to have the latest git installed.
# No need to add pandoc, the container has it figured out.
apt update && apt install libcurl4-openssl-dev libssl-dev libssh2-1-dev libgit2-dev libglpk-dev libxml2-dev libharfbuzz-dev libfribidi-dev git -y
- name: Trust git cloning project sources
run: |
git config --global --add safe.directory "${GITHUB_WORKSPACE}"
- uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791 # v2.5.0
with:
submodules: 'true'
- name: Install dependencies
shell: bash -l {0}
run: |
/tmp/R-devel/bin/Rscript -e "source('./R-package/tests/helper_scripts/install_deps.R')"
- name: Test R
shell: bash -l {0}
run: |
python3 tests/ci_build/test_r_package.py --r=/tmp/R-devel/bin/R --build-tool=autotools --task=check
- uses: dorny/paths-filter@v2
id: changes
with:
filters: |
r_package:
- 'R-package/**'
- name: Run document check
if: steps.changes.outputs.r_package == 'true'
run: |
python3 tests/ci_build/test_r_package.py --r=/tmp/R-devel/bin/R --task=doc
# Print stacktrace upon success of failure
make Rcheck || tests/ci_build/print_r_stacktrace.sh fail
tests/ci_build/print_r_stacktrace.sh success


@@ -27,7 +27,7 @@ jobs:
persist-credentials: false
- name: "Run analysis"
uses: ossf/scorecard-action@99c53751e09b9529366343771cc321ec74e9bd3d # tag=v2.0.6
uses: ossf/scorecard-action@865b4092859256271290c77adbd10a43f4779972 # tag=v2.0.3
with:
results_file: results.sarif
results_format: sarif


@@ -1,44 +0,0 @@
name: update-rapids
on:
workflow_dispatch:
schedule:
- cron: "0 20 * * *" # Run once daily
permissions:
pull-requests: write
contents: write
defaults:
run:
shell: bash -l {0}
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # To use GitHub CLI
jobs:
update-rapids:
name: Check latest RAPIDS
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
submodules: 'true'
- name: Check latest RAPIDS and update conftest.sh
run: |
bash tests/buildkite/update-rapids.sh
- name: Create Pull Request
uses: peter-evans/create-pull-request@v5
if: github.ref == 'refs/heads/master'
with:
add-paths: |
tests/buildkite
branch: create-pull-request/update-rapids
base: master
title: "[CI] Update RAPIDS to latest stable"
commit-message: "[CI] Update RAPIDS to latest stable"

.gitignore (12 changed lines)

@@ -137,15 +137,5 @@ credentials.csv
.metals
.bloop
# python tests
demo/**/*.txt
*.dmatrix
# hypothesis python tests
.hypothesis
__MACOSX/
model*.json
# R tests
*.libsvm
*.rds
Rplots.pdf
*.zip

.gitmodules (3 changed lines)

@@ -2,6 +2,9 @@
path = dmlc-core
url = https://github.com/dmlc/dmlc-core
branch = main
[submodule "cub"]
path = cub
url = https://github.com/NVlabs/cub
[submodule "gputreeshap"]
path = gputreeshap
url = https://github.com/rapidsai/gputreeshap.git

.travis.yml (53 changed lines)

@@ -0,0 +1,53 @@
sudo: required
dist: bionic
env:
global:
- secure: "lqkL5SCM/CBwgVb1GWoOngpojsa0zCSGcvF0O3/45rBT1EpNYtQ4LRJ1+XcHi126vdfGoim/8i7AQhn5eOgmZI8yAPBeoUZ5zSrejD3RUpXr2rXocsvRRP25Z4mIuAGHD9VAHtvTdhBZRVV818W02pYduSzAeaY61q/lU3xmWsE="
- secure: "mzms6X8uvdhRWxkPBMwx+mDl3d+V1kUpZa7UgjT+dr4rvZMzvKtjKp/O0JZZVogdgZjUZf444B98/7AvWdSkGdkfz2QdmhWmXzNPfNuHtmfCYMdijsgFIGLuD3GviFL/rBiM2vgn32T3QqFiEJiC5StparnnXimPTc9TpXQRq5c="
jobs:
include:
- os: linux
arch: s390x
env: TASK=s390x_test
# dependent brew packages
# the dependencies from homebrew is installed manually from setup script due to outdated image from travis.
addons:
homebrew:
update: false
apt:
packages:
- unzip
before_install:
- source tests/travis/travis_setup_env.sh
install:
- source tests/travis/setup.sh
script:
- tests/travis/run_test.sh
cache:
directories:
- ${HOME}/.cache/usr
- ${HOME}/.cache/pip
before_cache:
- tests/travis/travis_before_cache.sh
after_failure:
- tests/travis/travis_after_failure.sh
after_success:
- tree build
- bash <(curl -s https://codecov.io/bash) -a '-o src/ src/*.c'
notifications:
email:
on_success: change
on_failure: always


@@ -1,5 +1,5 @@
cmake_minimum_required(VERSION 3.18 FATAL_ERROR)
project(xgboost LANGUAGES CXX C VERSION 2.0.0)
cmake_minimum_required(VERSION 3.14 FATAL_ERROR)
project(xgboost LANGUAGES CXX C VERSION 1.7.2)
include(cmake/Utils.cmake)
list(APPEND CMAKE_MODULE_PATH "${xgboost_SOURCE_DIR}/cmake/modules")
cmake_policy(SET CMP0022 NEW)
@@ -47,11 +47,11 @@ option(USE_NVTX "Build with cuda profiling annotations. Developers only." OFF)
set(NVTX_HEADER_DIR "" CACHE PATH "Path to the stand-alone nvtx header")
option(RABIT_MOCK "Build rabit with mock" OFF)
option(HIDE_CXX_SYMBOLS "Build shared library and hide all C++ symbols" OFF)
option(KEEP_BUILD_ARTIFACTS_IN_BINARY_DIR "Output build artifacts in CMake binary dir" OFF)
## CUDA
option(USE_CUDA "Build with GPU acceleration" OFF)
option(USE_NCCL "Build with NCCL to enable distributed GPU support." OFF)
option(BUILD_WITH_SHARED_NCCL "Build with shared NCCL library." OFF)
option(BUILD_WITH_CUDA_CUB "Build with cub in CUDA installation" OFF)
set(GPU_COMPUTE_VER "" CACHE STRING
"Semicolon separated list of compute versions to be built against, e.g. '35;61'")
## Copied From dmlc
@@ -115,6 +115,9 @@ endif (ENABLE_ALL_WARNINGS)
if (BUILD_STATIC_LIB AND (R_LIB OR JVM_BINDINGS))
message(SEND_ERROR "Cannot build a static library libxgboost.a when R or JVM packages are enabled.")
endif (BUILD_STATIC_LIB AND (R_LIB OR JVM_BINDINGS))
if (PLUGIN_RMM AND (NOT BUILD_WITH_CUDA_CUB))
message(SEND_ERROR "Cannot build with RMM using cub submodule.")
endif (PLUGIN_RMM AND (NOT BUILD_WITH_CUDA_CUB))
if (PLUGIN_FEDERATED)
if (CMAKE_CROSSCOMPILING)
message(SEND_ERROR "Cannot cross compile with federated learning support")
@@ -150,7 +153,9 @@ if (USE_CUDA)
format_gencode_flags("${GPU_COMPUTE_VER}" GEN_CODE)
add_subdirectory(${PROJECT_SOURCE_DIR}/gputreeshap)
find_package(CUDAToolkit REQUIRED)
if ((${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 11.4) AND (NOT BUILD_WITH_CUDA_CUB))
set(BUILD_WITH_CUDA_CUB ON)
endif ()
endif (USE_CUDA)
if (FORCE_COLORED_OUTPUT AND (CMAKE_GENERATOR STREQUAL "Ninja") AND
@@ -163,6 +168,9 @@ find_package(Threads REQUIRED)
if (USE_OPENMP)
if (APPLE)
# Require CMake 3.16+ on Mac OSX, as previous versions of CMake had trouble locating
# OpenMP on Mac. See https://github.com/dmlc/xgboost/pull/5146#issuecomment-568312706
cmake_minimum_required(VERSION 3.16)
find_package(OpenMP)
if (NOT OpenMP_FOUND)
# Try again with extra path info; required for libomp 15+ from Homebrew
@@ -271,13 +279,8 @@ if (JVM_BINDINGS)
xgboost_target_defs(xgboost4j)
endif (JVM_BINDINGS)
if (KEEP_BUILD_ARTIFACTS_IN_BINARY_DIR)
set_output_directory(runxgboost ${xgboost_BINARY_DIR})
set_output_directory(xgboost ${xgboost_BINARY_DIR}/lib)
else ()
set_output_directory(runxgboost ${xgboost_SOURCE_DIR})
set_output_directory(xgboost ${xgboost_SOURCE_DIR}/lib)
endif ()
set_output_directory(runxgboost ${xgboost_SOURCE_DIR})
set_output_directory(xgboost ${xgboost_SOURCE_DIR}/lib)
# Ensure these two targets do not build simultaneously, as they produce outputs with conflicting names
add_dependencies(xgboost runxgboost)

Makefile (145 changed lines)

@@ -0,0 +1,145 @@
ifndef DMLC_CORE
DMLC_CORE = dmlc-core
endif
ifndef RABIT
RABIT = rabit
endif
ROOTDIR = $(CURDIR)
# workarounds for some buggy old make & msys2 versions seen in windows
ifeq (NA, $(shell test ! -d "$(ROOTDIR)" && echo NA ))
$(warning Attempting to fix non-existing ROOTDIR [$(ROOTDIR)])
ROOTDIR := $(shell pwd)
$(warning New ROOTDIR [$(ROOTDIR)] $(shell test -d "$(ROOTDIR)" && echo " is OK" ))
endif
MAKE_OK := $(shell "$(MAKE)" -v 2> /dev/null)
ifndef MAKE_OK
$(warning Attempting to recover non-functional MAKE [$(MAKE)])
MAKE := $(shell which make 2> /dev/null)
MAKE_OK := $(shell "$(MAKE)" -v 2> /dev/null)
endif
$(warning MAKE [$(MAKE)] - $(if $(MAKE_OK),checked OK,PROBLEM))
include $(DMLC_CORE)/make/dmlc.mk
# set compiler defaults for OSX versus *nix
# let people override either
OS := $(shell uname)
ifeq ($(OS), Darwin)
ifndef CC
export CC = $(if $(shell which clang), clang, gcc)
endif
ifndef CXX
export CXX = $(if $(shell which clang++), clang++, g++)
endif
else
# linux defaults
ifndef CC
export CC = gcc
endif
ifndef CXX
export CXX = g++
endif
endif
export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++14 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS)
CFLAGS += -I$(DMLC_CORE)/include -I$(RABIT)/include -I$(GTEST_PATH)/include
ifeq ($(TEST_COVER), 1)
CFLAGS += -g -O0 -fprofile-arcs -ftest-coverage
else
CFLAGS += -O3 -funroll-loops
endif
ifndef LINT_LANG
LINT_LANG= "all"
endif
# specify tensor path
.PHONY: clean all lint clean_all doxygen rcpplint pypack Rpack Rbuild Rcheck
build/%.o: src/%.cc
@mkdir -p $(@D)
$(CXX) $(CFLAGS) -MM -MT build/$*.o $< >build/$*.d
$(CXX) -c $(CFLAGS) $< -o $@
# The should be equivalent to $(ALL_OBJ) except for build/cli_main.o
amalgamation/xgboost-all0.o: amalgamation/xgboost-all0.cc
$(CXX) -c $(CFLAGS) $< -o $@
rcpplint:
python3 dmlc-core/scripts/lint.py xgboost ${LINT_LANG} R-package/src
lint: rcpplint
python3 dmlc-core/scripts/lint.py --exclude_path python-package/xgboost/dmlc-core \
python-package/xgboost/include python-package/xgboost/lib \
python-package/xgboost/make python-package/xgboost/rabit \
python-package/xgboost/src --pylint-rc ${PWD}/python-package/.pylintrc xgboost \
${LINT_LANG} include src python-package
ifeq ($(TEST_COVER), 1)
cover: check
@- $(foreach COV_OBJ, $(COVER_OBJ), \
gcov -pbcul -o $(shell dirname $(COV_OBJ)) $(COV_OBJ) > gcov.log || cat gcov.log; \
)
endif
clean:
$(RM) -rf build lib bin *~ */*~ */*/*~ */*/*/*~ */*.o */*/*.o */*/*/*.o #xgboost
$(RM) -rf build_tests *.gcov tests/cpp/xgboost_test
if [ -d "R-package/src" ]; then \
cd R-package/src; \
$(RM) -rf rabit src include dmlc-core amalgamation *.so *.dll; \
cd $(ROOTDIR); \
fi
clean_all: clean
cd $(DMLC_CORE); "$(MAKE)" clean; cd $(ROOTDIR)
cd $(RABIT); "$(MAKE)" clean; cd $(ROOTDIR)
# create pip source dist (sdist) pack for PyPI
pippack: clean_all
cd python-package; python setup.py sdist; mv dist/*.tar.gz ..; cd ..
# Script to make a clean installable R package.
Rpack: clean_all
rm -rf xgboost xgboost*.tar.gz
cp -r R-package xgboost
rm -rf xgboost/src/*.o xgboost/src/*.so xgboost/src/*.dll
rm -rf xgboost/src/*/*.o
rm -rf xgboost/demo/*.model xgboost/demo/*.buffer xgboost/demo/*.txt
rm -rf xgboost/demo/runall.R
cp -r src xgboost/src/src
cp -r include xgboost/src/include
cp -r amalgamation xgboost/src/amalgamation
mkdir -p xgboost/src/rabit
cp -r rabit/include xgboost/src/rabit/include
cp -r rabit/src xgboost/src/rabit/src
rm -rf xgboost/src/rabit/src/*.o
mkdir -p xgboost/src/dmlc-core
cp -r dmlc-core/include xgboost/src/dmlc-core/include
cp -r dmlc-core/src xgboost/src/dmlc-core/src
cp ./LICENSE xgboost
cat R-package/src/Makevars.in|sed '2s/.*/PKGROOT=./' > xgboost/src/Makevars.in
cat R-package/src/Makevars.win|sed '2s/.*/PKGROOT=./' > xgboost/src/Makevars.win
rm -f xgboost/src/Makevars.win-e # OSX sed create this extra file; remove it
bash R-package/remove_warning_suppression_pragma.sh
bash xgboost/remove_warning_suppression_pragma.sh
rm xgboost/remove_warning_suppression_pragma.sh
rm xgboost/CMakeLists.txt
rm -rfv xgboost/tests/helper_scripts/
R ?= R
Rbuild: Rpack
$(R) CMD build xgboost
rm -rf xgboost
Rcheck: Rbuild
$(R) CMD check --as-cran xgboost*.tar.gz
-include build/*.d
-include build/*/*.d

NEWS.md (219 changed lines)

@@ -3,225 +3,6 @@ XGBoost Change Log
This file records the changes in the xgboost library in reverse chronological order.
## 1.7.6 (2023 Jun 16)
This is a patch release for bug fixes. The CRAN package for the R binding is kept at 1.7.5.
### Bug Fixes
* Fix distributed training with mixed dense and sparse partitions. (#9272)
* Fix monotone constraints on CPU with large trees. (#9122)
* [spark] Make the spark model have the same UID as its estimator (#9022)
* Optimize prediction with `QuantileDMatrix`. (#9096)
### Document
* Improve doxygen (#8959)
* Update the cuDF pip index URL. (#9106)
### Maintenance
* Fix tests with pandas 2.0. (#9014)
## 1.7.5 (2023 Mar 30)
This is a patch release for bug fixes.
* C++ requirement is updated to C++17, along with which CUDA 11.8 is used as the default CTK. (#8860, #8855, #8853)
* Fix import for pyspark ranker. (#8692)
* Fix Windows binary wheel to be compatible with Poetry (#8991)
* Fix GPU hist with column sampling. (#8850)
* Make sure iterative DMatrix is properly initialized. (#8997)
* [R] Update link in document. (#8998)
## 1.7.4 (2023 Feb 16)
This is a patch release for bug fixes.
* [R] Fix OpenMP detection on macOS. (#8684)
* [Python] Make sure input numpy array is aligned. (#8690)
* Fix feature interaction with column sampling in gpu_hist evaluator. (#8754)
* Fix GPU L1 error. (#8749)
* [PySpark] Fix feature types param (#8772)
* Fix ranking with quantile dmatrix and group weight. (#8762)
## 1.7.3 (2023 Jan 6)
This is a patch release for bug fixes.
* [Breaking] XGBoost Sklearn estimator method `get_params` no longer returns internally configured values. (#8634)
* Fix linalg iterator, which may crash the L1 error. (#8603)
* Fix loading pickled GPU model with a CPU-only XGBoost build. (#8632)
* Fix inference with unseen categories with categorical features. (#8591, #8602)
* CI fixes. (#8620, #8631, #8579)
## v1.7.2 (2022 Dec 8)
This is a patch release for bug fixes.
* Work with newer thrust and libcudacxx (#8432)
* Support null value in the CUDA array interface. (#8486)
* Use `getsockname` instead of `SO_DOMAIN` on AIX. (#8437)
* [pyspark] Make QDM optional based on a cuDF check (#8471)
* [pyspark] sort qid for SparkRanker. (#8497)
* [dask] Properly await the async method `client.wait_for_workers`; a short sketch follows this list. (#8558)
* [R] Fix CRAN test notes. (#8428)
* [doc] Fix outdated document [skip ci]. (#8527)
* [CI] Fix github action mismatched glibcxx. (#8551)
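For context, a minimal sketch of the pattern the dask fix above concerns: on an asynchronous Dask client, `wait_for_workers` returns a coroutine and must be awaited (the worker count below is illustrative).

```python
import asyncio
from dask.distributed import Client


async def main() -> None:
    # Asynchronous client; with no address given this spins up a local cluster.
    async with Client(asynchronous=True) as client:
        # On an async client this call returns a coroutine, so it must be
        # awaited; otherwise the wait silently never happens.
        await client.wait_for_workers(n_workers=1)


asyncio.run(main())
```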
## v1.7.1 (2022 Nov 3)
This is a patch release to incorporate the following hotfix:
* Add back xgboost.rabit for backwards compatibility (#8411)
## v1.7.0 (2022 Oct 20)
We are excited to announce the feature-packed XGBoost 1.7 release. The release note walks through some of the major new features first, then summarizes other improvements and language-binding-specific changes.
### PySpark
XGBoost 1.7 features initial support for PySpark integration. The new interface is adapted from the existing PySpark XGBoost interface developed by Databricks, with additional features like `QuantileDMatrix` and the rapidsai plugin (GPU pipeline) support. The new Spark XGBoost Python estimators not only benefit from PySpark ML facilities for powerful distributed computing but also enjoy the rest of the Python ecosystem. Users can define custom objectives, callbacks, and metrics in Python and use them with this interface on distributed clusters. The support is labeled as experimental, with more features to come in future releases. For a brief introduction, please visit the tutorial on XGBoost's [document page](https://xgboost.readthedocs.io/en/latest/tutorials/spark_estimator.html). (#8355, #8344, #8335, #8284, #8271, #8283, #8250, #8231, #8219, #8245, #8217, #8200, #8173, #8172, #8145, #8117, #8131, #8088, #8082, #8085, #8066, #8068, #8067, #8020, #8385)
Due to its initial support status, the new interface has some limitations; categorical features and multi-output models are not yet supported.
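A minimal sketch of the new estimator API follows; the local Spark session, toy data, and parameter values are illustrative only.

```python
from pyspark.ml.linalg import Vectors
from pyspark.sql import SparkSession
from xgboost.spark import SparkXGBClassifier

spark = SparkSession.builder.master("local[2]").getOrCreate()

# Tiny illustrative dataset: a vector feature column and a binary label.
train_df = spark.createDataFrame(
    [
        (Vectors.dense(1.0, 2.0), 0),
        (Vectors.dense(2.0, 1.0), 1),
        (Vectors.dense(3.0, 0.5), 0),
        (Vectors.dense(0.5, 3.0), 1),
    ],
    ["features", "label"],
)

clf = SparkXGBClassifier(
    features_col="features",
    label_col="label",
    num_workers=1,      # number of distributed training tasks
    max_depth=3,
    n_estimators=10,
)
model = clf.fit(train_df)
pred_df = model.transform(train_df)  # appends prediction columns
```

`SparkXGBRegressor` and `SparkXGBRanker` follow the same pattern.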
### Development of categorical data support
More progress on the experimental support for categorical features. In 1.7, XGBoost can handle missing values in categorical features and gains a new parameter, `max_cat_threshold`, which limits the number of categories that can be used in the split evaluation. The parameter is enabled when the partitioning algorithm is used and helps prevent over-fitting. Also, the sklearn interface can now accept the `feature_types` parameter, so categorical features can be specified even when the input is not a dataframe. (#8280, #7821, #8285, #8080, #7948, #7858, #7853, #8212, #7957, #7937, #7934)
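A minimal sketch of the categorical workflow through the sklearn interface; the toy data and parameter values below are illustrative.

```python
import pandas as pd
import xgboost as xgb

# Toy data: one numeric column and one pandas categorical column.
X = pd.DataFrame({
    "age": [23, 41, 35, 52, 19, 44],
    "city": pd.Categorical(["a", "b", "a", "c", "b", "c"]),
})
y = [0, 1, 0, 1, 0, 1]

clf = xgb.XGBClassifier(
    tree_method="hist",
    enable_categorical=True,   # opt in to the experimental categorical support
    max_cat_threshold=32,      # cap categories considered in a single split
)
clf.fit(X, y)

# For inputs other than dataframes, feature types can be given explicitly,
# marking categorical columns with "c" and numeric columns with "q".
reg = xgb.XGBRegressor(tree_method="hist", enable_categorical=True,
                       feature_types=["q", "c"])
```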
### Experimental support for federated learning and new communication collective
An exciting addition to XGBoost is the experimental federated learning support. Federated learning is implemented with a gRPC federated server that aggregates allreduce calls and federated clients that train on local data using existing tree methods (approx, hist, gpu_hist). Currently, this only supports horizontal federated learning (samples are split across participants, and each participant has all the features and labels). Future plans include vertical federated learning (features split across participants) and stronger privacy guarantees with homomorphic encryption and differential privacy. See the [demo with NVFlare integration](demo/nvflare/README.md) for example usage with NVFlare.
As part of the work, XGBoost 1.7 has replaced the old rabit module with the new collective module as the network communication interface, with added support for runtime backend selection. In previous versions, the backend was defined at compile time and could not be changed once built. In this new release, users can choose between `rabit` and `federated`. (#8029, #8351, #8350, #8342, #8340, #8325, #8279, #8181, #8027, #7958, #7831, #7879, #8257, #8316, #8242, #8057, #8203, #8038, #7965, #7930, #7911)
The feature is available in the public PyPI binary package for testing.
### Quantile DMatrix
Before 1.7, XGBoost had an internal data structure called `DeviceQuantileDMatrix` (and its distributed version). We have now extended its support to CPU and renamed it to `QuantileDMatrix`. This data structure is used for optimizing memory usage for the `hist` and `gpu_hist` tree methods. The new feature helps reduce CPU memory usage significantly, especially for dense data. The new `QuantileDMatrix` can be initialized from both CPU and GPU data, and regardless of where the data comes from, the constructed instance can be used by both the CPU algorithm and the GPU algorithm, including training and prediction (with some conversion overhead if the data and the training algorithm are not on the same device). Also, a new parameter `ref` is added to `QuantileDMatrix`, which can be used to construct validation/test datasets. Lastly, it's set as the default in the scikit-learn interface when a supported tree method is specified by users. (#7889, #7923, #8136, #8215, #8284, #8268, #8220, #8346, #8327, #8130, #8116, #8103, #8094, #8086, #7898, #8060, #8019, #8045, #7901, #7912, #7922)
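A minimal sketch of constructing `QuantileDMatrix` for training and validation data; the synthetic data and parameter values are illustrative.

```python
import numpy as np
import xgboost as xgb

rng = np.random.default_rng(0)
X_train, y_train = rng.normal(size=(1024, 16)), rng.normal(size=1024)
X_valid, y_valid = rng.normal(size=(256, 16)), rng.normal(size=256)

# QuantileDMatrix pre-bins the input for hist/gpu_hist, lowering memory usage.
dtrain = xgb.QuantileDMatrix(X_train, label=y_train)
# `ref` makes the validation data reuse the quantile cuts from the training data.
dvalid = xgb.QuantileDMatrix(X_valid, label=y_valid, ref=dtrain)

booster = xgb.train(
    {"tree_method": "hist", "objective": "reg:squarederror"},
    dtrain,
    num_boost_round=10,
    evals=[(dvalid, "valid")],
)
```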
### Mean absolute error
The mean absolute error is a new member of the collection of objectives in XGBoost. It's noteworthy since MAE has a zero Hessian, which is unusual for XGBoost as XGBoost relies on Newton optimization. Without valid Hessian values, the convergence speed can be slow. As part of the support for MAE, we added line searches to the XGBoost training algorithm to overcome the difficulty of training without valid Hessian values. In the future, we will extend the line search to other objectives where it's appropriate, for faster convergence. (#8343, #8107, #7812, #8380)
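A minimal sketch of training with the new objective, using synthetic data.

```python
import numpy as np
import xgboost as xgb

rng = np.random.default_rng(0)
X, y = rng.normal(size=(512, 8)), rng.normal(size=512)
dtrain = xgb.DMatrix(X, label=y)

booster = xgb.train(
    {"objective": "reg:absoluteerror", "tree_method": "hist"},
    dtrain,
    num_boost_round=50,
    evals=[(dtrain, "train")],  # the default metric for this objective is MAE
)
```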
### XGBoost on Browser
With the help of the [pyodide](https://github.com/pyodide/pyodide) project, you can now run XGBoost in the browser. (#7954, #8369)
### Experimental IPv6 Support for Dask
With the growing adoption of the new internet protocol, XGBoost joined the club. In the latest release, the Dask interface can be used on IPv6 clusters; see XGBoost's Dask tutorial for details. (#8225, #8234)
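A minimal sketch of connecting the Dask interface to a cluster reachable over IPv6; the scheduler address and data below are assumptions for illustration.

```python
import dask.array as da
from dask.distributed import Client
from xgboost import dask as dxgb

# Assumed scheduler listening on an IPv6 address (here the loopback address).
client = Client("tcp://[::1]:8786")

X = da.random.random((1000, 10), chunks=(100, 10))
y = da.random.random(1000, chunks=100)

dtrain = dxgb.DaskDMatrix(client, X, y)
out = dxgb.train(client, {"tree_method": "hist"}, dtrain, num_boost_round=10)
booster = out["booster"]  # `out` also carries an eval "history" dict
```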
### Optimizations
We have new optimizations for both the `hist` and `gpu_hist` tree methods to make XGBoost's training even more efficient.
* Hist
Hist now supports an optional by-column histogram build, which is automatically configured based on various conditions of the input data. This helps the XGBoost CPU hist algorithm scale better with different shapes of training datasets. (#8233, #8259) Also, the histogram build kernel can now make better use of CPU registers. (#8218)
* GPU Hist
GPU hist performance is significantly improved for wide datasets. GPU hist now supports batched node build, which reduces kernel latency and increases throughput. The improvement is particularly significant when growing deep trees with the default ``depthwise`` policy. (#7919, #8073, #8051, #8118, #7867, #7964, #8026)
### Breaking Changes
Breaking changes made in the 1.7 release are summarized below.
- The `grow_local_histmaker` updater is removed. This updater is rarely used in practice and has no test. We decided to remove it and have XGBoost focus on other, more efficient algorithms. (#7992, #8091)
- Single precision histogram is removed due to its lack of accuracy caused by significant floating point error. In some cases the error can be difficult to detect due to log-scale operations, which makes the parameter dangerous to use. (#7892, #7828)
- Deprecated CUDA architectures are no longer supported in the release binaries. (#7774)
- As part of the federated learning development, the `rabit` module is replaced with the new `collective` module. It's a drop-in replacement with added runtime backend selection; see the federated learning section for more details. (#8257)
### General new features and improvements
Before diving into package-specific changes, some general new features other than those listed at the beginning are summarized here.
* Users of `DMatrix` and `QuantileDMatrix` can get the data back from XGBoost. In previous versions, only getters for meta info like labels were available. The new method is available in Python (`DMatrix::get_data`) and C; see the sketch after this list. (#8269, #8323)
* In previous versions, the GPU histogram tree method could generate phantom gradients for missing values due to floating point error. We fixed such errors in this release, and XGBoost is now much better equipped to handle floating point errors when training on GPU. (#8274, #8246)
* Parameter validation is no longer experimental. (#8206)
* C pointer parameters and JSON parameters are rigorously checked. (#8254, #8254)
* Improved handling of JSON model input. (#7953, #7918)
* Support IBM i OS (#7920, #8178)
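A minimal sketch of the new data getter in Python, with synthetic data.

```python
import numpy as np
import xgboost as xgb

X = np.random.default_rng(0).normal(size=(8, 3))
dtrain = xgb.DMatrix(X, label=np.arange(8))

# get_data() returns the stored feature matrix as a SciPy CSR matrix.
csr = dtrain.get_data()
print(csr.shape)  # (8, 3)
```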
### Fixes
Some noteworthy bug fixes that are not related to specific language binding are listed in this section.
* Rename misspelled config parameter for pseudo-Huber (#7904)
* Fix feature weights with nested column sampling. (#8100)
* Fix loading DMatrix binary in distributed env. (#8149)
* Force auc.cc to be statically linked for unusual compiler platforms. (#8039)
* New logic for detecting libomp on macOS (#8384).
### Python Package
* Python 3.8 is now the minimum required Python version. (#8071)
* More progress on type hint support. Except for the new PySpark interface, the XGBoost module is fully typed. (#7742, #7945, #8302, #7914, #8052)
* XGBoost now validates the feature names in `inplace_predict`, which also affects the predict function in scikit-learn estimators as it uses `inplace_predict` internally. (#8359)
* Users can now get the data from `DMatrix` using `DMatrix::get_data` or `QuantileDMatrix::get_data`.
* Show `libxgboost.so` path in build info. (#7893)
* Raise import error when using the sklearn module while scikit-learn is missing. (#8049)
* Use `config_context` in the sklearn interface. (#8141)
* Validate features for inplace prediction. (#8359)
* Pandas dataframe handling is refactored to reduce data fragmentation. (#7843)
* Support more pandas nullable types (#8262)
* Remove pyarrow workaround. (#7884)
* Binary wheel size
We aim to enable as many features as possible in XGBoost's default binary distribution on PyPI (package installed with pip), but there's an upper limit on the size of the binary wheel. In 1.7, XGBoost reduces the size of the wheel by pruning unused CUDA architectures. (#8179, #8152, #8150)
* Fixes
Some noteworthy fixes are listed here:
- Fix the Dask interface with the latest cupy. (#8210)
- Check cuDF lazily to avoid potential errors with cuda-python. (#8084)
* Fix potential error in DMatrix constructor on 32-bit platform. (#8369)
* Maintenance work
- Linter script is moved from dmlc-core to XGBoost with added support for formatting, mypy, and parallel run, along with some fixes (#7967, #8101, #8216)
- We now require the use of `isort` and `black` for selected files. (#8137, #8096)
- Code cleanups. (#7827)
- Deprecate `use_label_encoder` in XGBClassifier. The label encoder has already been deprecated and removed in the previous version. These changes only affect the indicator parameter (#7822)
- Remove the use of distutils. (#7770)
- Refactor and fixes for tests (#8077, #8064, #8078, #8076, #8013, #8010, #8244, #7833)
* Documents
- [dask] Fix potential error in demo. (#8079)
- Improved documentation for the ranker. (#8356, #8347)
- Indicate lack of py-xgboost-gpu on Windows (#8127)
- Clarification for feature importance. (#8151)
- Simplify Python getting started example (#8153)
### R Package
We summarize improvements for the R package briefly here:
* Feature info including names and types are now passed to DMatrix in preparation for categorical feature support. (#804)
* XGBoost 1.7 can now gracefully load old R models from RDS for better compatibility with third-party tuning libraries (#7864)
* The R package now can be built with parallel compilation, along with fixes for warnings in CRAN tests. (#8330)
* Emit error early if DiagrammeR is missing (#8037)
* Fix R package Windows build. (#8065)
### JVM Packages
The consistency between JVM packages and other language bindings is greatly improved in 1.7, improvements range from model serialization format to the default value of hyper-parameters.
* Java package now supports feature names and feature types for DMatrix in preparation for categorical feature support. (#7966)
* Models trained by the JVM packages can now be safely used with other language bindings. (#7896, #7907)
* Users can specify the model format when saving models with a stream. (#7940, #7955)
* The default value for training parameters is now sourced from XGBoost directly, which helps JVM packages be consistent with other packages. (#7938)
* Set the correct objective if the user doesn't explicitly set it (#7781)
* Auto-detection of MUSL is replaced by system properties (#7921)
* Improved error message for launching tracker. (#7952, #7968)
* Fix a race condition in parameter configuration. (#8025)
* [Breaking] `timeoutRequestWorkers` is now removed. With the support for barrier mode, this parameter is no longer needed. (#7839)
* Dependencies updates. (#7791, #8157, #7801, #8240)
### Documents
- Document for the C interface is greatly improved and is now displayed at the [sphinx document page](https://xgboost.readthedocs.io/en/latest/c.html). Thanks to the breathe project, you can view the C API just like the Python API. (#8300)
- We now avoid using XGBoost's internal text parser in demos and recommend that users use dedicated libraries for loading data whenever it's feasible. (#7753)
- Python survival training demos are now displayed at [sphinx gallery](https://xgboost.readthedocs.io/en/latest/python/survival-examples/index.html). (#8328)
- Some typos, links, format, and grammar fixes. (#7800, #7832, #7861, #8099, #8163, #8166, #8229, #8028, #8214, #7777, #7905, #8270, #8309, d70e59fef, #7806)
- Updated winning solution under readme.md (#7862)
- New security policy. (#8360)
- GPU document is overhauled as we consider CUDA support to be feature-complete. (#8378)
### Maintenance
* Code refactoring and cleanups. (#7850, #7826, #7910, #8332, #8204)
* Reduce compiler warnings. (#7768, #7916, #8046, #8059, #7974, #8031, #8022)
* Compiler workarounds. (#8211, #8314, #8226, #8093)
* Dependencies update. (#8001, #7876, #7973, #8298, #7816)
* Remove warnings emitted in previous versions. (#7815)
* Small fixes occurred during development. (#8008)
### CI and Tests
* We overhauled the CI infrastructure to reduce the CI cost and ease the maintenance burden. Jenkins is replaced with Buildkite for better automation, with which finer control of test runs is implemented to reduce the overall cost. Also, we refactored some of the existing tests to reduce their runtime, reduced the size of docker images, and removed multi-GPU C++ tests. Lastly, `pytest-timeout` is added as an optional dependency for running Python tests to keep the test time in check. (#7772, #8291, #8286, #8276, #8306, #8287, #8243, #8313, #8235, #8288, #8303, #8142, #8092, #8333, #8312, #8348)
* New documents for how to reproduce the CI environment (#7971, #8297)
* Improved automation for JVM release. (#7882)
* GitHub Action security-related updates. (#8263, #8267, #8360)
* Other fixes and maintenance work. (#8154, #7848, #8069, #7943)
* Small updates and fixes to GitHub action pipelines. (#8364, #8321, #8241, #7950, #8011)
## v1.6.1 (2022 May 9)
This is a patch release for bug fixes and Spark barrier mode support. The R package is unchanged.


@@ -16,6 +16,7 @@ target_compile_definitions(xgboost-r
-DDMLC_LOG_BEFORE_THROW=0
-DDMLC_DISABLE_STDIN=1
-DDMLC_LOG_CUSTOMIZE=1
-DRABIT_CUSTOMIZE_MSG_
-DRABIT_STRICT_CXX98_)
target_include_directories(xgboost-r
PRIVATE
@@ -30,7 +31,7 @@ if (USE_OPENMP)
endif (USE_OPENMP)
set_target_properties(
xgboost-r PROPERTIES
CXX_STANDARD 17
CXX_STANDARD 14
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON)


@@ -1,8 +1,8 @@
Package: xgboost
Type: Package
Title: Extreme Gradient Boosting
Version: 2.0.0.1
Date: 2022-10-18
Version: 1.7.2.1
Date: 2022-12-08
Authors@R: c(
person("Tianqi", "Chen", role = c("aut"),
email = "tianqi.tchen@gmail.com"),
@@ -54,8 +54,10 @@ Suggests:
Ckmeans.1d.dp (>= 3.3.1),
vcd (>= 1.3),
testthat,
lintr,
igraph (>= 1.0.1),
float,
crayon,
titanic
Depends:
R (>= 3.3.0)
@@ -64,6 +66,5 @@ Imports:
methods,
data.table (>= 1.9.6),
jsonlite (>= 1.0),
RoxygenNote: 7.2.3
Encoding: UTF-8
SystemRequirements: GNU make, C++17
RoxygenNote: 7.2.1
SystemRequirements: GNU make, C++14


@@ -1,4 +1,4 @@
Copyright (c) 2014-2023, Tianqi Chen and XBGoost Contributors
Copyright (c) 2014 by Tianqi Chen and Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -114,7 +114,7 @@ cb.evaluation.log <- function() {
if (is.null(mnames) || any(mnames == ""))
stop("bst_evaluation must have non-empty names")
mnames <<- gsub('-', '_', names(env$bst_evaluation), fixed = TRUE)
mnames <<- gsub('-', '_', names(env$bst_evaluation))
if (!is.null(env$bst_evaluation_err))
mnames <<- c(paste0(mnames, '_mean'), paste0(mnames, '_std'))
}
@@ -185,7 +185,7 @@ cb.reset.parameters <- function(new_params) {
if (typeof(new_params) != "list")
stop("'new_params' must be a list")
pnames <- gsub(".", "_", names(new_params), fixed = TRUE)
pnames <- gsub("\\.", "_", names(new_params))
nrounds <- NULL
# run some checks in the beginning
@@ -300,9 +300,9 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
if (length(env$bst_evaluation) == 0)
stop("For early stopping, watchlist must have at least one element")
eval_names <- gsub('-', '_', names(env$bst_evaluation), fixed = TRUE)
eval_names <- gsub('-', '_', names(env$bst_evaluation))
if (!is.null(metric_name)) {
metric_idx <<- which(gsub('-', '_', metric_name, fixed = TRUE) == eval_names)
metric_idx <<- which(gsub('-', '_', metric_name) == eval_names)
if (length(metric_idx) == 0)
stop("'metric_name' for early stopping is not one of the following:\n",
paste(eval_names, collapse = ' '), '\n')
@@ -319,7 +319,7 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
# maximize is usually NULL when not set in xgb.train and built-in metrics
if (is.null(maximize))
maximize <<- grepl('(_auc|_map|_ndcg|_pre)', metric_name)
maximize <<- grepl('(_auc|_map|_ndcg)', metric_name)
if (verbose && NVL(env$rank, 0) == 0)
cat("Will train until ", metric_name, " hasn't improved in ",
@@ -592,12 +592,12 @@ cb.cv.predict <- function(save_models = FALSE) {
#'
#' #### Multiclass classification:
#' #
#' dtrain <- xgb.DMatrix(scale(x), label = as.numeric(iris$Species) - 1, nthread = 1)
#' dtrain <- xgb.DMatrix(scale(x), label = as.numeric(iris$Species) - 1, nthread = 2)
#' param <- list(booster = "gblinear", objective = "multi:softprob", num_class = 3,
#' lambda = 0.0003, alpha = 0.0003, nthread = 1)
#' lambda = 0.0003, alpha = 0.0003, nthread = 2)
#' # For the default linear updater 'shotgun' it sometimes is helpful
#' # to use smaller eta to reduce instability
#' bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 50, eta = 0.5,
#' bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 70, eta = 0.5,
#' callbacks = list(cb.gblinear.history()))
#' # Will plot the coefficient paths separately for each class:
#' matplot(xgb.gblinear.history(bst, class_index = 0), type = 'l')
@@ -611,15 +611,13 @@ cb.cv.predict <- function(save_models = FALSE) {
#' matplot(xgb.gblinear.history(bst, class_index = 0)[[1]], type = 'l')
#'
#' @export
cb.gblinear.history <- function(sparse = FALSE) {
cb.gblinear.history <- function(sparse=FALSE) {
coefs <- NULL
init <- function(env) {
# xgb.train(): bst will be present
# xgb.cv(): bst_folds will be present
if (is.null(env$bst) && is.null(env$bst_folds)) {
stop("Parent frame has neither 'bst' nor 'bst_folds'")
}
if (!is.null(env$bst)) { # xgb.train:
} else if (!is.null(env$bst_folds)) { # xgb.cv:
} else stop("Parent frame has neither 'bst' nor 'bst_folds'")
}
# convert from list to (sparse) matrix

View File

@@ -38,11 +38,11 @@ check.booster.params <- function(params, ...) {
stop("params must be a list")
# in R interface, allow for '.' instead of '_' in parameter names
names(params) <- gsub(".", "_", names(params), fixed = TRUE)
names(params) <- gsub("\\.", "_", names(params))
# merge parameters from the params and the dots-expansion
dot_params <- list(...)
names(dot_params) <- gsub(".", "_", names(dot_params), fixed = TRUE)
names(dot_params) <- gsub("\\.", "_", names(dot_params))
if (length(intersect(names(params),
names(dot_params))) > 0)
stop("Same parameters in 'params' and in the call are not allowed. Please check your 'params' list.")
@@ -82,7 +82,7 @@ check.booster.params <- function(params, ...) {
# interaction constraints parser (convert from list of column indices to string)
if (!is.null(params[['interaction_constraints']]) &&
typeof(params[['interaction_constraints']]) != "character") {
typeof(params[['interaction_constraints']]) != "character"){
# check input class
if (!identical(class(params[['interaction_constraints']]), 'list')) stop('interaction_constraints should be class list')
if (!all(unique(sapply(params[['interaction_constraints']], class)) %in% c('numeric', 'integer'))) {
@@ -251,7 +251,8 @@ generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
# Creates CV folds stratified by the values of y.
# It was borrowed from caret::createFolds and simplified
# by always returning an unnamed list of fold indices.
xgb.createFolds <- function(y, k = 10) {
xgb.createFolds <- function(y, k = 10)
{
if (is.numeric(y)) {
## Group the numeric data based on their magnitudes
## and sample within those groups.
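
Aside (not part of the diff): a minimal R sketch of the dot-to-underscore conversion touched in this hunk. Both spellings replace a literal dot, but `fixed = TRUE` turns off regex interpretation, while the plain-pattern form has to escape the dot (`"\\."`) so it does not match every character. The `params` list here is only an illustration.

```r
# Illustration only: both forms map 'max.depth' to 'max_depth'.
params <- list(max.depth = 3, eta = 0.1)
names(params) <- gsub(".", "_", names(params), fixed = TRUE)  # literal dot, no regex
# names(params) <- gsub("\\.", "_", names(params))            # escaped regex, same result
print(names(params))  # "max_depth" "eta"
```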

View File

@@ -214,10 +214,6 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
#' Since it quadratically depends on the number of features, it is recommended to perform selection
#' of the most important features first. See below about the format of the returned results.
#'
#' The \code{predict()} method uses as many threads as defined in \code{xgb.Booster} object (all by default).
#' If you want to change their number, then assign a new number to \code{nthread} using \code{\link{xgb.parameters<-}}.
#' Note also that converting a matrix to \code{\link{xgb.DMatrix}} uses multiple threads too.
#'
#' @return
#' The return type is different depending whether \code{strict_shape} is set to \code{TRUE}. By default,
#' for regression or binary classification, it returns a vector of length \code{nrows(newdata)}.
@@ -332,9 +328,8 @@ predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FA
predleaf = FALSE, predcontrib = FALSE, approxcontrib = FALSE, predinteraction = FALSE,
reshape = FALSE, training = FALSE, iterationrange = NULL, strict_shape = FALSE, ...) {
object <- xgb.Booster.complete(object, saveraw = FALSE)
if (!inherits(newdata, "xgb.DMatrix"))
newdata <- xgb.DMatrix(newdata, missing = missing, nthread = NVL(object$params[["nthread"]], -1))
newdata <- xgb.DMatrix(newdata, missing = missing)
if (!is.null(object[["feature_names"]]) &&
!is.null(colnames(newdata)) &&
!identical(object[["feature_names"]], colnames(newdata)))
@@ -634,7 +629,7 @@ xgb.attributes <- function(object) {
#' @export
xgb.config <- function(object) {
handle <- xgb.get.handle(object)
.Call(XGBoosterSaveJsonConfig_R, handle)
.Call(XGBoosterSaveJsonConfig_R, handle);
}
#' @rdname xgb.config
@@ -676,7 +671,7 @@ xgb.config <- function(object) {
if (is.null(names(p)) || any(nchar(names(p)) == 0)) {
stop("parameter names cannot be empty strings")
}
names(p) <- gsub(".", "_", names(p), fixed = TRUE)
names(p) <- gsub("\\.", "_", names(p))
p <- lapply(p, function(x) as.character(x)[1])
handle <- xgb.get.handle(object)
for (i in seq_along(p)) {

View File

@@ -36,37 +36,19 @@ xgb.DMatrix <- function(data, info = list(), missing = NA, silent = FALSE, nthre
cnames <- colnames(data)
} else if (inherits(data, "dgCMatrix")) {
handle <- .Call(
XGDMatrixCreateFromCSC_R,
data@p,
data@i,
data@x,
nrow(data),
missing,
as.integer(NVL(nthread, -1))
XGDMatrixCreateFromCSC_R, data@p, data@i, data@x, nrow(data), as.integer(NVL(nthread, -1))
)
cnames <- colnames(data)
} else if (inherits(data, "dgRMatrix")) {
handle <- .Call(
XGDMatrixCreateFromCSR_R,
data@p,
data@j,
data@x,
ncol(data),
missing,
as.integer(NVL(nthread, -1))
XGDMatrixCreateFromCSR_R, data@p, data@j, data@x, ncol(data), as.integer(NVL(nthread, -1))
)
cnames <- colnames(data)
} else if (inherits(data, "dsparseVector")) {
indptr <- c(0L, as.integer(length(data@i)))
ind <- as.integer(data@i) - 1L
handle <- .Call(
XGDMatrixCreateFromCSR_R,
indptr,
ind,
data@x,
length(data),
missing,
as.integer(NVL(nthread, -1))
XGDMatrixCreateFromCSR_R, indptr, ind, data@x, length(data), as.integer(NVL(nthread, -1))
)
} else {
stop("xgb.DMatrix does not support construction from ", typeof(data))
@@ -94,7 +76,7 @@ xgb.get.DMatrix <- function(data, label = NULL, missing = NA, weight = NULL, nth
stop("label must be provided when data is a matrix")
}
dtrain <- xgb.DMatrix(data, label = label, missing = missing, nthread = nthread)
if (!is.null(weight)) {
if (!is.null(weight)){
setinfo(dtrain, "weight", weight)
}
} else {
@@ -236,7 +218,7 @@ getinfo.xgb.DMatrix <- function(object, name, ...) {
}
if (name == "feature_name" || name == "feature_type") {
ret <- .Call(XGDMatrixGetStrFeatureInfo_R, object, name)
} else if (name != "nrow") {
} else if (name != "nrow"){
ret <- .Call(XGDMatrixGetInfo_R, object, name)
} else {
ret <- nrow(object)
@@ -346,6 +328,7 @@ setinfo.xgb.DMatrix <- function(object, name, info, ...) {
return(TRUE)
}
stop("setinfo: unknown info name ", name)
return(FALSE)
}
@@ -435,7 +418,7 @@ print.xgb.DMatrix <- function(x, verbose = FALSE, ...) {
cat(infos)
cnames <- colnames(x)
cat(' colnames:')
if (verbose && !is.null(cnames)) {
if (verbose & !is.null(cnames)) {
cat("\n'")
cat(cnames, sep = "','")
cat("'")

View File

@@ -75,11 +75,9 @@
#' @details
#' The original sample is randomly partitioned into \code{nfold} equal size subsamples.
#'
#' Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model,
#' and the remaining \code{nfold - 1} subsamples are used as training data.
#' Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model, and the remaining \code{nfold - 1} subsamples are used as training data.
#'
#' The cross-validation process is then repeated \code{nrounds} times, with each of the
#' \code{nfold} subsamples used exactly once as the validation data.
#' The cross-validation process is then repeated \code{nrounds} times, with each of the \code{nfold} subsamples used exactly once as the validation data.
#'
#' All observations are used for both training and validation.
#'
@@ -119,10 +117,10 @@
#' print(cv, verbose=TRUE)
#'
#' @export
xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing = NA,
prediction = FALSE, showsd = TRUE, metrics = list(),
xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing = NA,
prediction = FALSE, showsd = TRUE, metrics=list(),
obj = NULL, feval = NULL, stratified = TRUE, folds = NULL, train_folds = NULL,
verbose = TRUE, print_every_n = 1L,
verbose = TRUE, print_every_n=1L,
early_stopping_rounds = NULL, maximize = NULL, callbacks = list(), ...) {
check.deprecation(...)

View File

@@ -38,7 +38,7 @@
#' cat(xgb.dump(bst, with_stats = TRUE, dump_format='json'))
#'
#' @export
xgb.dump <- function(model, fname = NULL, fmap = "", with_stats = FALSE,
xgb.dump <- function(model, fname = NULL, fmap = "", with_stats=FALSE,
dump_format = c("text", "json"), ...) {
check.deprecation(...)
dump_format <- match.arg(dump_format)

View File

@@ -4,7 +4,7 @@
#' @rdname xgb.plot.importance
#' @export
xgb.ggplot.importance <- function(importance_matrix = NULL, top_n = NULL, measure = NULL,
rel_to_first = FALSE, n_clusters = seq_len(10), ...) {
rel_to_first = FALSE, n_clusters = c(1:10), ...) {
importance_matrix <- xgb.plot.importance(importance_matrix, top_n = top_n, measure = measure,
rel_to_first = rel_to_first, plot = FALSE, ...)

View File

@@ -82,7 +82,7 @@
#'
#' @export
xgb.importance <- function(feature_names = NULL, model = NULL, trees = NULL,
data = NULL, label = NULL, target = NULL) {
data = NULL, label = NULL, target = NULL){
if (!(is.null(data) && is.null(label) && is.null(target)))
warning("xgb.importance: parameters 'data', 'label' and 'target' are deprecated")
@@ -104,11 +104,7 @@ xgb.importance <- function(feature_names = NULL, model = NULL, trees = NULL,
XGBoosterFeatureScore_R, model$handle, jsonlite::toJSON(args, auto_unbox = TRUE, null = "null")
)
names(results) <- c("features", "shape", "weight")
if (length(results$shape) == 2) {
n_classes <- results$shape[2]
} else {
n_classes <- 0
}
n_classes <- if (length(results$shape) == 2) { results$shape[2] } else { 0 }
importance <- if (n_classes == 0) {
data.table(Feature = results$features, Weight = results$weight)[order(-abs(Weight))]
} else {

View File

@@ -62,7 +62,7 @@
#'
#' @export
xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
trees = NULL, use_int_id = FALSE, ...) {
trees = NULL, use_int_id = FALSE, ...){
check.deprecation(...)
if (!inherits(model, "xgb.Booster") && !is.character(text)) {
@@ -82,7 +82,7 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
stop("trees: must be a vector of integers.")
}
if (is.null(text)) {
if (is.null(text)){
text <- xgb.dump(model = model, with_stats = TRUE)
}

View File

@@ -102,9 +102,7 @@ xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure
original_mar <- par()$mar
# reset margins so this function doesn't have side effects
on.exit({
par(mar = original_mar)
})
on.exit({par(mar = original_mar)})
mar <- original_mar
if (!is.null(left_margin))
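
Aside (not part of the diff): a minimal sketch of the `on.exit()` pattern this hunk reformats; the registered call restores the graphical parameters when the function returns, even if plotting fails part-way through, so the margin change stays local to the function.

```r
plot_with_wide_margin <- function() {
  original_mar <- par()$mar
  on.exit(par(mar = original_mar))  # undo the margin change on exit
  par(mar = c(6, 4, 1, 1))
  plot(seq_len(10))
}
```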

View File

@@ -61,7 +61,7 @@
#'
#' @export
xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5, plot_width = NULL, plot_height = NULL,
render = TRUE, ...) {
render = TRUE, ...){
if (!requireNamespace("DiagrammeR", quietly = TRUE)) {
stop("DiagrammeR is required for xgb.plot.multi.trees")
}
@@ -97,9 +97,9 @@ xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5,
, by = .(abs.node.position, Feature)
][, .(Text = paste0(
paste0(
Feature[seq_len(min(length(Feature), features_keep))],
Feature[1:min(length(Feature), features_keep)],
" (",
format(Quality[seq_len(min(length(Quality), features_keep))], digits = 5),
format(Quality[1:min(length(Quality), features_keep)], digits = 5),
")"
),
collapse = "\n"

View File

@@ -143,7 +143,7 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
y <- shap_contrib[, f][ord]
x_lim <- range(x, na.rm = TRUE)
y_lim <- range(y, na.rm = TRUE)
do_na <- plot_NA && anyNA(x)
do_na <- plot_NA && any(is.na(x))
if (do_na) {
x_range <- diff(x_lim)
loc_na <- min(x, na.rm = TRUE) + x_range * pos_NA
@@ -272,8 +272,8 @@ xgb.shap.data <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
imp <- xgb.importance(model = model, trees = trees, feature_names = colnames(data))
}
top_n <- top_n[1]
if (top_n < 1 || top_n > 100) stop("top_n: must be an integer within [1, 100]")
features <- imp$Feature[seq_len(min(top_n, NROW(imp)))]
if (top_n < 1 | top_n > 100) stop("top_n: must be an integer within [1, 100]")
features <- imp$Feature[1:min(top_n, NROW(imp))]
}
if (is.character(features)) {
features <- match(features, colnames(data))
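
Aside (not part of the diff): a short illustration of why the refactor on the removed lines favours `seq_len()` over `1:n` and `||` over `|` for scalar conditions.

```r
n <- 0
1:n         # 1 0        -> two iterations by accident when n is zero
seq_len(n)  # integer(0) -> zero iterations, as intended

top_n <- 5
# '||' evaluates a single condition and short-circuits; '|' is element-wise on vectors.
if (top_n < 1 || top_n > 100) stop("top_n: must be an integer within [1, 100]")
```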

View File

@@ -34,7 +34,7 @@
#' The branches that also used for missing values are marked as bold
#' (as in "carrying extra capacity").
#'
#' This function uses \href{https://www.graphviz.org/}{GraphViz} as a backend of DiagrammeR.
#' This function uses \href{http://www.graphviz.org/}{GraphViz} as a backend of DiagrammeR.
#'
#' @return
#'
@@ -68,7 +68,7 @@
#'
#' @export
xgb.plot.tree <- function(feature_names = NULL, model = NULL, trees = NULL, plot_width = NULL, plot_height = NULL,
render = TRUE, show_node_id = FALSE, ...) {
render = TRUE, show_node_id = FALSE, ...){
check.deprecation(...)
if (!inherits(model, "xgb.Booster")) {
stop("model: Has to be an object of class xgb.Booster")

View File

@@ -18,37 +18,17 @@
#' 2.1. Parameters for Tree Booster
#'
#' \itemize{
#' \item{ \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1}
#' when it is added to the current approximation.
#' Used to prevent overfitting by making the boosting process more conservative.
#' Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model
#' more robust to overfitting but slower to compute. Default: 0.3}
#' \item{ \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree.
#' the larger, the more conservative the algorithm will be.}
#' \item \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model more robust to overfitting but slower to compute. Default: 0.3
#' \item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. the larger, the more conservative the algorithm will be.
#' \item \code{max_depth} maximum depth of a tree. Default: 6
#' \item{\code{min_child_weight} minimum sum of instance weight (hessian) needed in a child.
#' If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight,
#' then the building process will give up further partitioning.
#' In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node.
#' The larger, the more conservative the algorithm will be. Default: 1}
#' \item{ \code{subsample} subsample ratio of the training instance.
#' Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees
#' and this will prevent overfitting. It makes computation shorter (because less data to analyse).
#' It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1}
#' \item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
#' \item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
#' \item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
#' \item \code{lambda} L2 regularization term on weights. Default: 1
#' \item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
#' \item{ \code{num_parallel_tree} Experimental parameter. number of trees to grow per round.
#' Useful to test Random Forest through XGBoost
#' (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly.
#' Default: 1}
#' \item{ \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length
#' equals to the number of features in the training data.
#' \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.}
#' \item{ \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions.
#' Each item of the list represents one permitted interaction where specified features are allowed to interact with each other.
#' Feature index values should start from \code{0} (\code{0} references the first column).
#' Leave argument unspecified for no interaction constraints.}
#' \item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through XGBoost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
#' \item \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length equals to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
#' \item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave argument unspecified for no interaction constraints.
#' }
#'
#' 2.2. Parameters for Linear Booster
@@ -62,53 +42,29 @@
#' 3. Task Parameters
#'
#' \itemize{
#' \item{ \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it.
#' The default objective options are below:
#' \item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
#' \itemize{
#' \item \code{reg:squarederror} Regression with squared loss (Default).
#' \item{ \code{reg:squaredlogerror}: regression with squared log loss \eqn{1/2 * (log(pred + 1) - log(label + 1))^2}.
#' All inputs are required to be greater than -1.
#' Also, see metric rmsle for possible issue with this objective.}
#' \item \code{reg:squaredlogerror}: regression with squared log loss \eqn{1/2 * (log(pred + 1) - log(label + 1))^2}. All inputs are required to be greater than -1. Also, see metric rmsle for possible issue with this objective.
#' \item \code{reg:logistic} logistic regression.
#' \item \code{reg:pseudohubererror}: regression with Pseudo Huber loss, a twice differentiable alternative to absolute loss.
#' \item \code{binary:logistic} logistic regression for binary classification. Output probability.
#' \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
#' \item \code{binary:hinge}: hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities.
#' \item{ \code{count:poisson}: Poisson regression for count data, output mean of Poisson distribution.
#' \code{max_delta_step} is set to 0.7 by default in poisson regression (used to safeguard optimization).}
#' \item{ \code{survival:cox}: Cox regression for right censored survival time data (negative values are considered right censored).
#' Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional
#' hazard function \code{h(t) = h0(t) * HR)}.}
#' \item{ \code{survival:aft}: Accelerated failure time model for censored survival time data. See
#' \href{https://xgboost.readthedocs.io/en/latest/tutorials/aft_survival_analysis.html}{Survival Analysis with Accelerated Failure Time}
#' for details.}
#' \item \code{count:poisson}: Poisson regression for count data, output mean of Poisson distribution. \code{max_delta_step} is set to 0.7 by default in poisson regression (used to safeguard optimization).
#' \item \code{survival:cox}: Cox regression for right censored survival time data (negative values are considered right censored). Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional hazard function \code{h(t) = h0(t) * HR)}.
#' \item \code{survival:aft}: Accelerated failure time model for censored survival time data. See \href{https://xgboost.readthedocs.io/en/latest/tutorials/aft_survival_analysis.html}{Survival Analysis with Accelerated Failure Time} for details.
#' \item \code{aft_loss_distribution}: Probability Density Function used by \code{survival:aft} and \code{aft-nloglik} metric.
#' \item{ \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective.
#' Class is represented by a number and should be from 0 to \code{num_class - 1}.}
#' \item{ \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be
#' further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging
#' to each class.}
#' \item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}.
#' \item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
#' \item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss.
#' \item{ \code{rank:ndcg}: Use LambdaMART to perform list-wise ranking where
#' \href{https://en.wikipedia.org/wiki/Discounted_cumulative_gain}{Normalized Discounted Cumulative Gain (NDCG)} is maximized.}
#' \item{ \code{rank:map}: Use LambdaMART to perform list-wise ranking where
#' \href{https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision}{Mean Average Precision (MAP)}
#' is maximized.}
#' \item{ \code{reg:gamma}: gamma regression with log-link.
#' Output is a mean of gamma distribution.
#' It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be
#' \href{https://en.wikipedia.org/wiki/Gamma_distribution#Applications}{gamma-distributed}.}
#' \item{ \code{reg:tweedie}: Tweedie regression with log-link.
#' It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be
#' \href{https://en.wikipedia.org/wiki/Tweedie_distribution#Applications}{Tweedie-distributed}.}
#' \item \code{rank:ndcg}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Discounted_cumulative_gain}{Normalized Discounted Cumulative Gain (NDCG)} is maximized.
#' \item \code{rank:map}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision}{Mean Average Precision (MAP)} is maximized.
#' \item \code{reg:gamma}: gamma regression with log-link. Output is a mean of gamma distribution. It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Gamma_distribution#Applications}{gamma-distributed}.
#' \item \code{reg:tweedie}: Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Tweedie_distribution#Applications}{Tweedie-distributed}.
#' }
#' }
#' \item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
#' \item{ \code{eval_metric} evaluation metrics for validation data.
#' Users can pass a self-defined function to it.
#' Default: metric will be assigned according to objective
#' (rmse for regression, and error for classification, mean average precision for ranking).
#' List is provided in detail section.}
#' \item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: metric will be assigned according to objective(rmse for regression, and error for classification, mean average precision for ranking). List is provided in detail section.
#' }
#'
#' @param data training dataset. \code{xgb.train} accepts only an \code{xgb.DMatrix} as the input.
@@ -185,8 +141,7 @@
#' \item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
#' \item \code{mae} Mean absolute error
#' \item \code{mape} Mean absolute percentage error
#' \item{ \code{auc} Area under the curve.
#' \url{https://en.wikipedia.org/wiki/Receiver_operating_characteristic#'Area_under_curve} for ranking evaluation.}
#' \item \code{auc} Area under the curve. \url{https://en.wikipedia.org/wiki/Receiver_operating_characteristic#'Area_under_curve} for ranking evaluation.
#' \item \code{aucpr} Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation.
#' \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{https://en.wikipedia.org/wiki/NDCG}
#' }
@@ -321,10 +276,6 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
if (is.null(evnames) || any(evnames == ""))
stop("each element of the watchlist must have a name tag")
}
# Handle multiple evaluation metrics given as a list
for (m in params$eval_metric) {
params <- c(params, list(eval_metric = m))
}
# evaluation printing callback
params <- c(params)
@@ -393,7 +344,7 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
xgb.iter.update(bst$handle, dtrain, iteration - 1, obj)
if (length(watchlist) > 0)
bst_evaluation <- xgb.iter.eval(bst$handle, watchlist, iteration - 1, feval) # nolint: object_usage_linter
bst_evaluation <- xgb.iter.eval(bst$handle, watchlist, iteration - 1, feval)
xgb.attr(bst$handle, 'niter') <- iteration - 1
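
Aside (not part of the diff): a minimal sketch of passing the tree-booster and task parameters documented above to `xgb.train()`, using the `agaricus` data shipped with the package; the parameter values are placeholders, not recommendations.

```r
library(xgboost)
data(agaricus.train, package = "xgboost")

dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label, nthread = 2)
params <- list(objective = "binary:logistic", eval_metric = "auc",
               eta = 0.3, max_depth = 6, min_child_weight = 1,
               subsample = 1, nthread = 2)
bst <- xgb.train(params, dtrain, nrounds = 10, watchlist = list(train = dtrain))
```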

1861
R-package/configure vendored

File diff suppressed because it is too large.

View File

@@ -2,25 +2,10 @@
AC_PREREQ(2.69)
AC_INIT([xgboost],[2.0.0],[],[xgboost],[])
AC_INIT([xgboost],[1.7.2],[],[xgboost],[])
: ${R_HOME=`R RHOME`}
if test -z "${R_HOME}"; then
echo "could not determine R_HOME"
exit 1
fi
CXX17=`"${R_HOME}/bin/R" CMD config CXX17`
CXX17STD=`"${R_HOME}/bin/R" CMD config CXX17STD`
CXX="${CXX17} ${CXX17STD}"
CXXFLAGS=`"${R_HOME}/bin/R" CMD config CXXFLAGS`
CC=`"${R_HOME}/bin/R" CMD config CC`
CFLAGS=`"${R_HOME}/bin/R" CMD config CFLAGS`
CPPFLAGS=`"${R_HOME}/bin/R" CMD config CPPFLAGS`
LDFLAGS=`"${R_HOME}/bin/R" CMD config LDFLAGS`
AC_LANG(C++)
# Use this line to set CC variable to a C compiler
AC_PROG_CC
### Check whether backtrace() is part of libc or the external lib libexecinfo
AC_MSG_CHECKING([Backtrace lib])
@@ -55,7 +40,7 @@ then
ac_pkg_openmp=no
AC_MSG_CHECKING([whether OpenMP will work in a package])
AC_LANG_CONFTEST([AC_LANG_PROGRAM([[#include <omp.h>]], [[ return (omp_get_max_threads() <= 1); ]])])
${CXX} -o conftest conftest.cpp ${CPPFLAGS} ${LDFLAGS} ${OPENMP_LIB} ${OPENMP_CXXFLAGS} 2>/dev/null && ./conftest && ac_pkg_openmp=yes
${CC} -o conftest conftest.c ${CPPFLAGS} ${LDFLAGS} ${OPENMP_LIB} ${OPENMP_CXXFLAGS} 2>/dev/null && ./conftest && ac_pkg_openmp=yes
AC_MSG_RESULT([${ac_pkg_openmp}])
if test "${ac_pkg_openmp}" = no; then
OPENMP_CXXFLAGS=''

View File

@@ -1,4 +1,5 @@
# install development version of caret library that contains xgboost models
devtools::install_github("topepo/caret/pkg/caret")
require(caret)
require(xgboost)
require(data.table)
@@ -7,23 +8,14 @@ require(e1071)
# Load Arthritis dataset in memory.
data(Arthritis)
# Create a copy of the dataset with data.table package
# (data.table is 100% compliant with R dataframe but its syntax is a lot more consistent
# and its performance are really good).
# Create a copy of the dataset with data.table package (data.table is 100% compliant with R dataframe but its syntax is a lot more consistent and its performance are really good).
df <- data.table(Arthritis, keep.rownames = FALSE)
# Let's add some new categorical features to see if it helps.
# Of course these feature are highly correlated to the Age feature.
# Usually it's not a good thing in ML, but Tree algorithms (including boosted trees) are able to select the best features,
# even in case of highly correlated features.
# For the first feature we create groups of age by rounding the real age.
# Note that we transform it to factor (categorical data) so the algorithm treat them as independant values.
# Let's add some new categorical features to see if it helps. Of course these feature are highly correlated to the Age feature. Usually it's not a good thing in ML, but Tree algorithms (including boosted trees) are able to select the best features, even in case of highly correlated features.
# For the first feature we create groups of age by rounding the real age. Note that we transform it to factor (categorical data) so the algorithm treat them as independant values.
df[, AgeDiscret := as.factor(round(Age / 10, 0))]
# Here is an even stronger simplification of the real age with an arbitrary split at 30 years old.
# I choose this value based on nothing.
# We will see later if simplifying the information based on arbitrary values is a good strategy
# (I am sure you already have an idea of how well it will work!).
# Here is an even stronger simplification of the real age with an arbitrary split at 30 years old. I choose this value based on nothing. We will see later if simplifying the information based on arbitrary values is a good strategy (I am sure you already have an idea of how well it will work!).
df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
# We remove ID as there is nothing to learn from this feature (it will just add some noise as the dataset is small).
@@ -34,10 +26,9 @@ df[, ID := NULL]
# Here we use 10-fold cross-validation, repeating twice, and using random search for tuning hyper-parameters.
fitControl <- trainControl(method = "repeatedcv", number = 10, repeats = 2, search = "random")
# train a xgbTree model using caret::train
model <- train(factor(Improved) ~ ., data = df, method = "xgbTree", trControl = fitControl)
model <- train(factor(Improved)~., data = df, method = "xgbTree", trControl = fitControl)
# Instead of tree for our boosters, you can also fit a linear regression or logistic regression model
# using xgbLinear
# Instead of tree for our boosters, you can also fit a linear regression or logistic regression model using xgbLinear
# model <- train(factor(Improved)~., data = df, method = "xgbLinear", trControl = fitControl)
# See model results

View File

@@ -7,47 +7,34 @@ if (!require(vcd)) {
}
# According to its documentation, XGBoost works only on numbers.
# Sometimes the dataset we have to work on have categorical data.
# A categorical variable is one which have a fixed number of values.
# By example, if for each observation a variable called "Colour" can have only
# "red", "blue" or "green" as value, it is a categorical variable.
# A categorical variable is one which have a fixed number of values. By example, if for each observation a variable called "Colour" can have only "red", "blue" or "green" as value, it is a categorical variable.
#
# In R, categorical variable is called Factor.
# Type ?factor in console for more information.
#
# In this demo we will see how to transform a dense dataframe with categorical variables to a sparse matrix
# before analyzing it in XGBoost.
# In this demo we will see how to transform a dense dataframe with categorical variables to a sparse matrix before analyzing it in XGBoost.
# The method we are going to see is usually called "one hot encoding".
#load Arthritis dataset in memory.
data(Arthritis)
# create a copy of the dataset with data.table package
# (data.table is 100% compliant with R dataframe but its syntax is a lot more consistent
# and its performance are really good).
# create a copy of the dataset with data.table package (data.table is 100% compliant with R dataframe but its syntax is a lot more consistent and its performance are really good).
df <- data.table(Arthritis, keep.rownames = FALSE)
# Let's have a look to the data.table
cat("Print the dataset\n")
print(df)
# 2 columns have factor type, one has ordinal type
# (ordinal variable is a categorical variable with values which can be ordered, here: None > Some > Marked).
# 2 columns have factor type, one has ordinal type (ordinal variable is a categorical variable with values which can be ordered, here: None > Some > Marked).
cat("Structure of the dataset\n")
str(df)
# Let's add some new categorical features to see if it helps.
# Of course these feature are highly correlated to the Age feature.
# Usually it's not a good thing in ML, but Tree algorithms (including boosted trees) are able to select the best features,
# even in case of highly correlated features.
# Let's add some new categorical features to see if it helps. Of course these feature are highly correlated to the Age feature. Usually it's not a good thing in ML, but Tree algorithms (including boosted trees) are able to select the best features, even in case of highly correlated features.
# For the first feature we create groups of age by rounding the real age.
# Note that we transform it to factor (categorical data) so the algorithm treat them as independent values.
# For the first feature we create groups of age by rounding the real age. Note that we transform it to factor (categorical data) so the algorithm treat them as independent values.
df[, AgeDiscret := as.factor(round(Age / 10, 0))]
# Here is an even stronger simplification of the real age with an arbitrary split at 30 years old.
# I choose this value based on nothing.
# We will see later if simplifying the information based on arbitrary values is a good strategy
# (I am sure you already have an idea of how well it will work!).
# Here is an even stronger simplification of the real age with an arbitrary split at 30 years old. I choose this value based on nothing. We will see later if simplifying the information based on arbitrary values is a good strategy (I am sure you already have an idea of how well it will work!).
df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
# We remove ID as there is nothing to learn from this feature (it will just add some noise as the dataset is small).
@@ -61,10 +48,7 @@ print(levels(df[, Treatment]))
# This method is also called one hot encoding.
# The purpose is to transform each value of each categorical feature in one binary feature.
#
# Let's take, the column Treatment will be replaced by two columns, Placebo, and Treated.
# Each of them will be binary.
# For example an observation which had the value Placebo in column Treatment before the transformation will have, after the transformation,
# the value 1 in the new column Placebo and the value 0 in the new column Treated.
# Let's take, the column Treatment will be replaced by two columns, Placebo, and Treated. Each of them will be binary. For example an observation which had the value Placebo in column Treatment before the transformation will have, after the transformation, the value 1 in the new column Placebo and the value 0 in the new column Treated.
#
# Formulae Improved~.-1 used below means transform all categorical features but column Improved to binary values.
# Column Improved is excluded because it will be our output column, the one we want to predict.
@@ -86,10 +70,7 @@ bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 9,
importance <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst)
print(importance)
# According to the matrix below, the most important feature in this dataset to predict if the treatment will work is the Age.
# The second most important feature is having received a placebo or not.
# The sex is third.
# Then we see our generated features (AgeDiscret). We can see that their contribution is very low (Gain column).
# According to the matrix below, the most important feature in this dataset to predict if the treatment will work is the Age. The second most important feature is having received a placebo or not. The sex is third. Then we see our generated features (AgeDiscret). We can see that their contribution is very low (Gain column).
# Does these result make sense?
# Let's check some Chi2 between each of these features and the outcome.
@@ -101,17 +82,8 @@ print(chisq.test(df$AgeDiscret, df$Y))
# Our first simplification of Age gives a Pearson correlation of 8.
print(chisq.test(df$AgeCat, df$Y))
# The perfectly random split I did between young and old at 30 years old have a low correlation of 2.
# It's a result we may expect as may be in my mind > 30 years is being old (I am 32 and starting feeling old, this may explain that),
# but for the illness we are studying, the age to be vulnerable is not the same.
# Don't let your "gut" lower the quality of your model. In "data science", there is science :-)
# The perfectly random split I did between young and old at 30 years old have a low correlation of 2. It's a result we may expect as may be in my mind > 30 years is being old (I am 32 and starting feeling old, this may explain that), but for the illness we are studying, the age to be vulnerable is not the same. Don't let your "gut" lower the quality of your model. In "data science", there is science :-)
# As you can see, in general destroying information by simplifying it won't improve your model.
# Chi2 just demonstrates that.
# But in more complex cases, creating a new feature based on existing one which makes link with the outcome
# more obvious may help the algorithm and improve the model.
# The case studied here is not enough complex to show that. Check Kaggle forum for some challenging datasets.
# As you can see, in general destroying information by simplifying it won't improve your model. Chi2 just demonstrates that. But in more complex cases, creating a new feature based on existing one which makes link with the outcome more obvious may help the algorithm and improve the model. The case studied here is not enough complex to show that. Check Kaggle forum for some challenging datasets.
# However it's almost always worse when you add some arbitrary rules.
# Moreover, you can notice that even if we have added some not useful new features highly correlated with
# other features, the boosting tree algorithm have been able to choose the best one, which in this case is the Age.
# Linear model may not be that strong in these scenario.
# Moreover, you can notice that even if we have added some not useful new features highly correlated with other features, the boosting tree algorithm have been able to choose the best one, which in this case is the Age. Linear model may not be that strong in these scenario.
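
Aside (not part of the diff): the one-hot step the demo's comments describe, sketched with `sparse.model.matrix()`; it rebuilds the `df` data.table the demo uses and turns every factor column except the `Improved` label into one binary column per level.

```r
library(Matrix)
library(vcd)          # provides the Arthritis data used by the demo
library(data.table)

data(Arthritis)
df <- data.table(Arthritis, keep.rownames = FALSE)
# Improved ~ . - 1 : encode all columns except Improved, and drop the intercept column.
sparse_matrix <- sparse.model.matrix(Improved ~ . - 1, data = df)
head(sparse_matrix)
```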

View File

@@ -12,7 +12,7 @@ cat('running cross validation\n')
# do cross validation, this will print result out as
# [iteration] metric_name:mean_value+std_value
# std_value is standard deviation of the metric
xgb.cv(param, dtrain, nrounds, nfold = 5, metrics = 'error')
xgb.cv(param, dtrain, nrounds, nfold = 5, metrics = {'error'})
cat('running cross validation, disable standard deviation display\n')
# do cross validation, this will print result out as
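
Aside (not part of the diff): in R a braced expression just returns its last value, so the two spellings of the `metrics` argument changed above pass the same string; the plain form is simply clearer.

```r
identical({'error'}, 'error')  # TRUE: the braces add nothing here
```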

View File

@@ -33,7 +33,7 @@ treeInteractions <- function(input_tree, input_max_depth) {
}
# Extract nodes with interactions
interaction_trees <- trees[!is.na(Split) & !is.na(parent_1), # nolint: object_usage_linter
interaction_trees <- trees[!is.na(Split) & !is.na(parent_1),
c('Feature', paste0('parent_feat_', 1:(input_max_depth - 1))),
with = FALSE]
interaction_trees_split <- split(interaction_trees, seq_len(nrow(interaction_trees)))

View File

@@ -24,7 +24,7 @@ accuracy.before <- (sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.te
pred_with_leaf <- predict(bst, dtest, predleaf = TRUE)
head(pred_with_leaf)
create.new.tree.features <- function(model, original.features) {
create.new.tree.features <- function(model, original.features){
pred_with_leaf <- predict(model, original.features, predleaf = TRUE)
cols <- list()
for (i in 1:model$niter) {

View File

@@ -1,4 +1,4 @@
# running all scripts in demo folder, removed during packaging.
# running all scripts in demo folder
demo(basic_walkthrough, package = 'xgboost')
demo(custom_objective, package = 'xgboost')
demo(boost_from_prediction, package = 'xgboost')

View File

@@ -79,9 +79,9 @@ end_of_table <- empty_lines[empty_lines > start_index][1L]
# Read the contents of the table
exported_symbols <- objdump_results[(start_index + 1L):end_of_table]
exported_symbols <- gsub("\t", "", exported_symbols, fixed = TRUE)
exported_symbols <- gsub("\t", "", exported_symbols)
exported_symbols <- gsub(".*\\] ", "", exported_symbols)
exported_symbols <- gsub(" ", "", exported_symbols, fixed = TRUE)
exported_symbols <- gsub(" ", "", exported_symbols)
# Write R.def file
writeLines(

View File

@@ -72,12 +72,12 @@ matplot(xgb.gblinear.history(bst)[[3]], type = 'l')
#### Multiclass classification:
#
dtrain <- xgb.DMatrix(scale(x), label = as.numeric(iris$Species) - 1, nthread = 1)
dtrain <- xgb.DMatrix(scale(x), label = as.numeric(iris$Species) - 1, nthread = 2)
param <- list(booster = "gblinear", objective = "multi:softprob", num_class = 3,
lambda = 0.0003, alpha = 0.0003, nthread = 1)
lambda = 0.0003, alpha = 0.0003, nthread = 2)
# For the default linear updater 'shotgun' it sometimes is helpful
# to use smaller eta to reduce instability
bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 50, eta = 0.5,
bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 70, eta = 0.5,
callbacks = list(cb.gblinear.history()))
# Will plot the coefficient paths separately for each class:
matplot(xgb.gblinear.history(bst, class_index = 0), type = 'l')

View File

@@ -122,10 +122,6 @@ With \code{predinteraction = TRUE}, SHAP values of contributions of interaction
are computed. Note that this operation might be rather expensive in terms of compute and memory.
Since it quadratically depends on the number of features, it is recommended to perform selection
of the most important features first. See below about the format of the returned results.
The \code{predict()} method uses as many threads as defined in \code{xgb.Booster} object (all by default).
If you want to change their number, then assign a new number to \code{nthread} using \code{\link{xgb.parameters<-}}.
Note also that converting a matrix to \code{\link{xgb.DMatrix}} uses multiple threads too.
}
\examples{
## binary classification:

View File

@@ -148,11 +148,9 @@ The cross validation function of xgboost
\details{
The original sample is randomly partitioned into \code{nfold} equal size subsamples.
Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model,
and the remaining \code{nfold - 1} subsamples are used as training data.
Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model, and the remaining \code{nfold - 1} subsamples are used as training data.
The cross-validation process is then repeated \code{nrounds} times, with each of the
\code{nfold} subsamples used exactly once as the validation data.
The cross-validation process is then repeated \code{nrounds} times, with each of the \code{nfold} subsamples used exactly once as the validation data.
All observations are used for both training and validation.

View File

@@ -10,7 +10,7 @@ xgb.ggplot.importance(
top_n = NULL,
measure = NULL,
rel_to_first = FALSE,
n_clusters = seq_len(10),
n_clusters = c(1:10),
...
)

View File

@@ -67,7 +67,7 @@ The "Yes" branches are marked by the "< split_value" label.
The branches that also used for missing values are marked as bold
(as in "carrying extra capacity").
This function uses \href{https://www.graphviz.org/}{GraphViz} as a backend of DiagrammeR.
This function uses \href{http://www.graphviz.org/}{GraphViz} as a backend of DiagrammeR.
}
\examples{
data(agaricus.train, package='xgboost')

View File

@@ -57,37 +57,17 @@ xgboost(
2.1. Parameters for Tree Booster
\itemize{
\item{ \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1}
when it is added to the current approximation.
Used to prevent overfitting by making the boosting process more conservative.
Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model
more robust to overfitting but slower to compute. Default: 0.3}
\item{ \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree.
the larger, the more conservative the algorithm will be.}
\item \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model more robust to overfitting but slower to compute. Default: 0.3
\item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. the larger, the more conservative the algorithm will be.
\item \code{max_depth} maximum depth of a tree. Default: 6
\item{\code{min_child_weight} minimum sum of instance weight (hessian) needed in a child.
If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight,
then the building process will give up further partitioning.
In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node.
The larger, the more conservative the algorithm will be. Default: 1}
\item{ \code{subsample} subsample ratio of the training instance.
Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees
and this will prevent overfitting. It makes computation shorter (because less data to analyse).
It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1}
\item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
\item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
\item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
\item \code{lambda} L2 regularization term on weights. Default: 1
\item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
\item{ \code{num_parallel_tree} Experimental parameter. number of trees to grow per round.
Useful to test Random Forest through XGBoost
(set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly.
Default: 1}
\item{ \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length
equals to the number of features in the training data.
\code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.}
\item{ \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions.
Each item of the list represents one permitted interaction where specified features are allowed to interact with each other.
Feature index values should start from \code{0} (\code{0} references the first column).
Leave argument unspecified for no interaction constraints.}
\item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through XGBoost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
\item \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length equals to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
\item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave argument unspecified for no interaction constraints.
}
2.2. Parameters for Linear Booster
@@ -101,53 +81,29 @@ xgboost(
3. Task Parameters
\itemize{
\item{ \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it.
The default objective options are below:
\item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
\itemize{
\item \code{reg:squarederror} Regression with squared loss (Default).
\item{ \code{reg:squaredlogerror}: regression with squared log loss \eqn{1/2 * (log(pred + 1) - log(label + 1))^2}.
All inputs are required to be greater than -1.
Also, see metric rmsle for possible issue with this objective.}
\item \code{reg:squaredlogerror}: regression with squared log loss \eqn{1/2 * (log(pred + 1) - log(label + 1))^2}. All inputs are required to be greater than -1. Also, see metric rmsle for possible issue with this objective.
\item \code{reg:logistic} logistic regression.
\item \code{reg:pseudohubererror}: regression with Pseudo Huber loss, a twice differentiable alternative to absolute loss.
\item \code{binary:logistic} logistic regression for binary classification. Output probability.
\item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
\item \code{binary:hinge}: hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities.
\item{ \code{count:poisson}: Poisson regression for count data, output mean of Poisson distribution.
\code{max_delta_step} is set to 0.7 by default in poisson regression (used to safeguard optimization).}
\item{ \code{survival:cox}: Cox regression for right censored survival time data (negative values are considered right censored).
Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional
hazard function \code{h(t) = h0(t) * HR)}.}
\item{ \code{survival:aft}: Accelerated failure time model for censored survival time data. See
\href{https://xgboost.readthedocs.io/en/latest/tutorials/aft_survival_analysis.html}{Survival Analysis with Accelerated Failure Time}
for details.}
\item \code{count:poisson}: Poisson regression for count data, output mean of Poisson distribution. \code{max_delta_step} is set to 0.7 by default in poisson regression (used to safeguard optimization).
\item \code{survival:cox}: Cox regression for right censored survival time data (negative values are considered right censored). Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional hazard function \code{h(t) = h0(t) * HR)}.
\item \code{survival:aft}: Accelerated failure time model for censored survival time data. See \href{https://xgboost.readthedocs.io/en/latest/tutorials/aft_survival_analysis.html}{Survival Analysis with Accelerated Failure Time} for details.
\item \code{aft_loss_distribution}: Probability Density Function used by \code{survival:aft} and \code{aft-nloglik} metric.
\item{ \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective.
Class is represented by a number and should be from 0 to \code{num_class - 1}.}
\item{ \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be
further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging
to each class.}
\item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}.
\item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
\item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss.
\item{ \code{rank:ndcg}: Use LambdaMART to perform list-wise ranking where
\href{https://en.wikipedia.org/wiki/Discounted_cumulative_gain}{Normalized Discounted Cumulative Gain (NDCG)} is maximized.}
\item{ \code{rank:map}: Use LambdaMART to perform list-wise ranking where
\href{https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision}{Mean Average Precision (MAP)}
is maximized.}
\item{ \code{reg:gamma}: gamma regression with log-link.
Output is a mean of gamma distribution.
It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be
\href{https://en.wikipedia.org/wiki/Gamma_distribution#Applications}{gamma-distributed}.}
\item{ \code{reg:tweedie}: Tweedie regression with log-link.
It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be
\href{https://en.wikipedia.org/wiki/Tweedie_distribution#Applications}{Tweedie-distributed}.}
\item \code{rank:ndcg}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Discounted_cumulative_gain}{Normalized Discounted Cumulative Gain (NDCG)} is maximized.
\item \code{rank:map}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision}{Mean Average Precision (MAP)} is maximized.
\item \code{reg:gamma}: gamma regression with log-link. Output is a mean of gamma distribution. It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Gamma_distribution#Applications}{gamma-distributed}.
\item \code{reg:tweedie}: Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Tweedie_distribution#Applications}{Tweedie-distributed}.
}
}
\item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
\item{ \code{eval_metric} evaluation metrics for validation data.
Users can pass a self-defined function to it.
Default: metric will be assigned according to objective
(rmse for regression, and error for classification, mean average precision for ranking).
List is provided in detail section.}
\item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: metric will be assigned according to objective (rmse for regression, and error for classification, mean average precision for ranking). List is provided in detail section.
}}
\item{data}{training dataset. \code{xgb.train} accepts only an \code{xgb.DMatrix} as the input.
@@ -267,8 +223,7 @@ The following is the list of built-in metrics for which XGBoost provides optimiz
\item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
\item \code{mae} Mean absolute error
\item \code{mape} Mean absolute percentage error
\item{ \code{auc} Area under the curve.
\url{https://en.wikipedia.org/wiki/Receiver_operating_characteristic#'Area_under_curve} for ranking evaluation.}
\item \code{auc} Area under the curve. \url{https://en.wikipedia.org/wiki/Receiver_operating_characteristic#'Area_under_curve} for ranking evaluation.
\item \code{aucpr} Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation.
\item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{https://en.wikipedia.org/wiki/NDCG}
}
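The parameter documentation above covers the multiclass objectives and the `eval_metric` list. As orientation only (not part of the diff), here is a minimal R sketch of how those parameters are typically passed to `xgb.train`; the toy data, parameter values, and metric choice are illustrative assumptions.

```r
library(xgboost)

# Toy 3-class problem; features and labels are made up for illustration.
set.seed(1)
x <- matrix(rnorm(300), ncol = 3)
y <- sample(0:2, 100, replace = TRUE)   # classes must lie in 0 .. num_class - 1
dtrain <- xgb.DMatrix(x, label = y)

bst <- xgb.train(
  params = list(
    objective   = "multi:softprob",     # per-class probabilities
    num_class   = 3,
    eval_metric = "mlogloss",           # metric chosen explicitly instead of the default
    max_depth   = 2,
    eta         = 0.3,
    nthread     = 1
  ),
  data = dtrain,
  nrounds = 5,
  watchlist = list(train = dtrain)
)

# multi:softprob returns a vector of ndata * nclass values; reshape it into an
# ndata x nclass probability matrix, as described in the documentation above.
prob <- matrix(predict(bst, dtrain), ncol = 3, byrow = TRUE)
```

Swapping `multi:softprob` for `multi:softmax` would instead return the predicted class index directly.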


@@ -3,11 +3,12 @@ PKGROOT=../../
ENABLE_STD_THREAD=1
# _*_ mode: Makefile; _*_
CXX_STD = CXX17
CXX_STD = CXX14
XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\
-DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) -DDMLC_DISABLE_STDIN=1\
-DDMLC_LOG_CUSTOMIZE=1
-DDMLC_LOG_CUSTOMIZE=1 -DXGBOOST_CUSTOMIZE_LOGGER=1\
-DRABIT_CUSTOMIZE_MSG_
# disable the use of thread_local for 32 bit windows:
ifeq ($(R_OSTYPE)$(WIN),windows)
@@ -22,6 +23,7 @@ PKG_LIBS = @OPENMP_CXXFLAGS@ @OPENMP_LIB@ @ENDIAN_FLAG@ @BACKTRACE_LIB@ -pthread
OBJECTS= \
./xgboost_R.o \
./xgboost_custom.o \
./xgboost_assert.o \
./init.o \
$(PKGROOT)/src/metric/metric.o \
$(PKGROOT)/src/metric/elementwise_metric.o \
@@ -32,12 +34,10 @@ OBJECTS= \
$(PKGROOT)/src/objective/objective.o \
$(PKGROOT)/src/objective/regression_obj.o \
$(PKGROOT)/src/objective/multiclass_obj.o \
$(PKGROOT)/src/objective/lambdarank_obj.o \
$(PKGROOT)/src/objective/rank_obj.o \
$(PKGROOT)/src/objective/hinge.o \
$(PKGROOT)/src/objective/aft_obj.o \
$(PKGROOT)/src/objective/adaptive.o \
$(PKGROOT)/src/objective/init_estimation.o \
$(PKGROOT)/src/objective/quantile_obj.o \
$(PKGROOT)/src/gbm/gbm.o \
$(PKGROOT)/src/gbm/gbtree.o \
$(PKGROOT)/src/gbm/gbtree_model.o \
@@ -55,13 +55,10 @@ OBJECTS= \
$(PKGROOT)/src/data/iterative_dmatrix.o \
$(PKGROOT)/src/predictor/predictor.o \
$(PKGROOT)/src/predictor/cpu_predictor.o \
$(PKGROOT)/src/predictor/cpu_treeshap.o \
$(PKGROOT)/src/tree/constraints.o \
$(PKGROOT)/src/tree/param.o \
$(PKGROOT)/src/tree/fit_stump.o \
$(PKGROOT)/src/tree/tree_model.o \
$(PKGROOT)/src/tree/tree_updater.o \
$(PKGROOT)/src/tree/multi_target_tree_model.o \
$(PKGROOT)/src/tree/updater_approx.o \
$(PKGROOT)/src/tree/updater_colmaker.o \
$(PKGROOT)/src/tree/updater_prune.o \
@@ -72,12 +69,9 @@ OBJECTS= \
$(PKGROOT)/src/linear/updater_coordinate.o \
$(PKGROOT)/src/linear/updater_shotgun.o \
$(PKGROOT)/src/learner.o \
$(PKGROOT)/src/context.o \
$(PKGROOT)/src/logging.o \
$(PKGROOT)/src/global_config.o \
$(PKGROOT)/src/collective/communicator.o \
$(PKGROOT)/src/collective/in_memory_communicator.o \
$(PKGROOT)/src/collective/in_memory_handler.o \
$(PKGROOT)/src/collective/socket.o \
$(PKGROOT)/src/common/charconv.o \
$(PKGROOT)/src/common/column_matrix.o \
@@ -90,11 +84,8 @@ OBJECTS= \
$(PKGROOT)/src/common/pseudo_huber.o \
$(PKGROOT)/src/common/quantile.o \
$(PKGROOT)/src/common/random.o \
$(PKGROOT)/src/common/stats.o \
$(PKGROOT)/src/common/survival_util.o \
$(PKGROOT)/src/common/threading_utils.o \
$(PKGROOT)/src/common/ranking_utils.o \
$(PKGROOT)/src/common/quantile_loss_utils.o \
$(PKGROOT)/src/common/timer.o \
$(PKGROOT)/src/common/version.o \
$(PKGROOT)/src/c_api/c_api.o \


@@ -3,11 +3,12 @@ PKGROOT=../../
ENABLE_STD_THREAD=0
# _*_ mode: Makefile; _*_
CXX_STD = CXX17
CXX_STD = CXX14
XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\
-DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) -DDMLC_DISABLE_STDIN=1\
-DDMLC_LOG_CUSTOMIZE=1
-DDMLC_LOG_CUSTOMIZE=1 -DXGBOOST_CUSTOMIZE_LOGGER=1\
-DRABIT_CUSTOMIZE_MSG_
# disable the use of thread_local for 32 bit windows:
ifeq ($(R_OSTYPE)$(WIN),windows)
@@ -22,6 +23,7 @@ PKG_LIBS = $(SHLIB_OPENMP_CXXFLAGS) -DDMLC_CMAKE_LITTLE_ENDIAN=1 $(SHLIB_PTHRE
OBJECTS= \
./xgboost_R.o \
./xgboost_custom.o \
./xgboost_assert.o \
./init.o \
$(PKGROOT)/src/metric/metric.o \
$(PKGROOT)/src/metric/elementwise_metric.o \
@@ -32,12 +34,10 @@ OBJECTS= \
$(PKGROOT)/src/objective/objective.o \
$(PKGROOT)/src/objective/regression_obj.o \
$(PKGROOT)/src/objective/multiclass_obj.o \
$(PKGROOT)/src/objective/lambdarank_obj.o \
$(PKGROOT)/src/objective/rank_obj.o \
$(PKGROOT)/src/objective/hinge.o \
$(PKGROOT)/src/objective/aft_obj.o \
$(PKGROOT)/src/objective/adaptive.o \
$(PKGROOT)/src/objective/init_estimation.o \
$(PKGROOT)/src/objective/quantile_obj.o \
$(PKGROOT)/src/gbm/gbm.o \
$(PKGROOT)/src/gbm/gbtree.o \
$(PKGROOT)/src/gbm/gbtree_model.o \
@@ -55,12 +55,9 @@ OBJECTS= \
$(PKGROOT)/src/data/iterative_dmatrix.o \
$(PKGROOT)/src/predictor/predictor.o \
$(PKGROOT)/src/predictor/cpu_predictor.o \
$(PKGROOT)/src/predictor/cpu_treeshap.o \
$(PKGROOT)/src/tree/constraints.o \
$(PKGROOT)/src/tree/param.o \
$(PKGROOT)/src/tree/fit_stump.o \
$(PKGROOT)/src/tree/tree_model.o \
$(PKGROOT)/src/tree/multi_target_tree_model.o \
$(PKGROOT)/src/tree/tree_updater.o \
$(PKGROOT)/src/tree/updater_approx.o \
$(PKGROOT)/src/tree/updater_colmaker.o \
@@ -72,12 +69,9 @@ OBJECTS= \
$(PKGROOT)/src/linear/updater_coordinate.o \
$(PKGROOT)/src/linear/updater_shotgun.o \
$(PKGROOT)/src/learner.o \
$(PKGROOT)/src/context.o \
$(PKGROOT)/src/logging.o \
$(PKGROOT)/src/global_config.o \
$(PKGROOT)/src/collective/communicator.o \
$(PKGROOT)/src/collective/in_memory_communicator.o \
$(PKGROOT)/src/collective/in_memory_handler.o \
$(PKGROOT)/src/collective/socket.o \
$(PKGROOT)/src/common/charconv.o \
$(PKGROOT)/src/common/column_matrix.o \
@@ -90,11 +84,8 @@ OBJECTS= \
$(PKGROOT)/src/common/pseudo_huber.o \
$(PKGROOT)/src/common/quantile.o \
$(PKGROOT)/src/common/random.o \
$(PKGROOT)/src/common/stats.o \
$(PKGROOT)/src/common/survival_util.o \
$(PKGROOT)/src/common/threading_utils.o \
$(PKGROOT)/src/common/ranking_utils.o \
$(PKGROOT)/src/common/quantile_loss_utils.o \
$(PKGROOT)/src/common/timer.o \
$(PKGROOT)/src/common/version.o \
$(PKGROOT)/src/c_api/c_api.o \


@@ -30,14 +30,15 @@ extern SEXP XGBoosterSaveJsonConfig_R(SEXP handle);
extern SEXP XGBoosterLoadJsonConfig_R(SEXP handle, SEXP value);
extern SEXP XGBoosterSerializeToBuffer_R(SEXP handle);
extern SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw);
extern SEXP XGBoosterPredict_R(SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterPredictFromDMatrix_R(SEXP, SEXP, SEXP);
extern SEXP XGBoosterSaveModel_R(SEXP, SEXP);
extern SEXP XGBoosterSetAttr_R(SEXP, SEXP, SEXP);
extern SEXP XGBoosterSetParam_R(SEXP, SEXP, SEXP);
extern SEXP XGBoosterUpdateOneIter_R(SEXP, SEXP, SEXP);
extern SEXP XGCheckNullPtr_R(SEXP);
extern SEXP XGDMatrixCreateFromCSC_R(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP XGDMatrixCreateFromCSR_R(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP XGDMatrixCreateFromCSC_R(SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP XGDMatrixCreateFromCSR_R(SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP XGDMatrixCreateFromFile_R(SEXP, SEXP);
extern SEXP XGDMatrixCreateFromMat_R(SEXP, SEXP, SEXP);
extern SEXP XGDMatrixGetInfo_R(SEXP, SEXP);
@@ -67,14 +68,15 @@ static const R_CallMethodDef CallEntries[] = {
{"XGBoosterLoadJsonConfig_R", (DL_FUNC) &XGBoosterLoadJsonConfig_R, 2},
{"XGBoosterSerializeToBuffer_R", (DL_FUNC) &XGBoosterSerializeToBuffer_R, 1},
{"XGBoosterUnserializeFromBuffer_R", (DL_FUNC) &XGBoosterUnserializeFromBuffer_R, 2},
{"XGBoosterPredict_R", (DL_FUNC) &XGBoosterPredict_R, 5},
{"XGBoosterPredictFromDMatrix_R", (DL_FUNC) &XGBoosterPredictFromDMatrix_R, 3},
{"XGBoosterSaveModel_R", (DL_FUNC) &XGBoosterSaveModel_R, 2},
{"XGBoosterSetAttr_R", (DL_FUNC) &XGBoosterSetAttr_R, 3},
{"XGBoosterSetParam_R", (DL_FUNC) &XGBoosterSetParam_R, 3},
{"XGBoosterUpdateOneIter_R", (DL_FUNC) &XGBoosterUpdateOneIter_R, 3},
{"XGCheckNullPtr_R", (DL_FUNC) &XGCheckNullPtr_R, 1},
{"XGDMatrixCreateFromCSC_R", (DL_FUNC) &XGDMatrixCreateFromCSC_R, 6},
{"XGDMatrixCreateFromCSR_R", (DL_FUNC) &XGDMatrixCreateFromCSR_R, 6},
{"XGDMatrixCreateFromCSC_R", (DL_FUNC) &XGDMatrixCreateFromCSC_R, 5},
{"XGDMatrixCreateFromCSR_R", (DL_FUNC) &XGDMatrixCreateFromCSR_R, 5},
{"XGDMatrixCreateFromFile_R", (DL_FUNC) &XGDMatrixCreateFromFile_R, 2},
{"XGDMatrixCreateFromMat_R", (DL_FUNC) &XGDMatrixCreateFromMat_R, 3},
{"XGDMatrixGetInfo_R", (DL_FUNC) &XGDMatrixGetInfo_R, 2},


@@ -1,11 +1,11 @@
/**
* Copyright 2014-2023 by XGBoost Contributors
* Copyright 2014-2022 by XGBoost Contributors
*/
#include <dmlc/common.h>
#include <dmlc/omp.h>
#include <xgboost/c_api.h>
#include <xgboost/context.h>
#include <xgboost/data.h>
#include <xgboost/generic_parameters.h>
#include <xgboost/logging.h>
#include <cstdio>
@@ -16,11 +16,9 @@
#include <vector>
#include "../../src/c_api/c_api_error.h"
#include "../../src/c_api/c_api_utils.h" // MakeSparseFromPtr
#include "../../src/common/threading_utils.h"
#include "./xgboost_R.h" // Must follow other includes.
#include "Rinternals.h"
#include "./xgboost_R.h"
/*!
* \brief macro to annotate begin of api
@@ -48,14 +46,14 @@
using dmlc::BeginPtr;
xgboost::Context const *BoosterCtx(BoosterHandle handle) {
xgboost::GenericParameter const *BoosterCtx(BoosterHandle handle) {
CHECK_HANDLE();
auto *learner = static_cast<xgboost::Learner *>(handle);
CHECK(learner);
return learner->Ctx();
}
xgboost::Context const *DMatrixCtx(DMatrixHandle handle) {
xgboost::GenericParameter const *DMatrixCtx(DMatrixHandle handle) {
CHECK_HANDLE();
auto p_m = static_cast<std::shared_ptr<xgboost::DMatrix> *>(handle);
CHECK(p_m);
@@ -116,9 +114,7 @@ XGB_DLL SEXP XGDMatrixCreateFromMat_R(SEXP mat, SEXP missing, SEXP n_threads) {
din = REAL(mat);
}
std::vector<float> data(nrow * ncol);
xgboost::Context ctx;
ctx.nthread = asInteger(n_threads);
std::int32_t threads = ctx.Threads();
int32_t threads = xgboost::common::OmpGetNumThreads(asInteger(n_threads));
xgboost::common::ParallelFor(nrow, threads, [&](xgboost::omp_ulong i) {
for (size_t j = 0; j < ncol; ++j) {
@@ -135,78 +131,66 @@ XGB_DLL SEXP XGDMatrixCreateFromMat_R(SEXP mat, SEXP missing, SEXP n_threads) {
return ret;
}
namespace {
void CreateFromSparse(SEXP indptr, SEXP indices, SEXP data, std::string *indptr_str,
std::string *indices_str, std::string *data_str) {
XGB_DLL SEXP XGDMatrixCreateFromCSC_R(SEXP indptr, SEXP indices, SEXP data,
SEXP num_row, SEXP n_threads) {
SEXP ret;
R_API_BEGIN();
const int *p_indptr = INTEGER(indptr);
const int *p_indices = INTEGER(indices);
const double *p_data = REAL(data);
size_t nindptr = static_cast<size_t>(length(indptr));
size_t ndata = static_cast<size_t>(length(data));
size_t nrow = static_cast<size_t>(INTEGER(num_row)[0]);
std::vector<size_t> col_ptr_(nindptr);
std::vector<unsigned> indices_(ndata);
std::vector<float> data_(ndata);
auto nindptr = static_cast<std::size_t>(length(indptr));
auto ndata = static_cast<std::size_t>(length(data));
CHECK_EQ(ndata, p_indptr[nindptr - 1]);
xgboost::detail::MakeSparseFromPtr(p_indptr, p_indices, p_data, nindptr, indptr_str, indices_str,
data_str);
}
} // namespace
XGB_DLL SEXP XGDMatrixCreateFromCSC_R(SEXP indptr, SEXP indices, SEXP data, SEXP num_row,
SEXP missing, SEXP n_threads) {
SEXP ret;
R_API_BEGIN();
std::int32_t threads = asInteger(n_threads);
using xgboost::Integer;
using xgboost::Json;
using xgboost::Object;
std::string sindptr, sindices, sdata;
CreateFromSparse(indptr, indices, data, &sindptr, &sindices, &sdata);
auto nrow = static_cast<std::size_t>(INTEGER(num_row)[0]);
for (size_t i = 0; i < nindptr; ++i) {
col_ptr_[i] = static_cast<size_t>(p_indptr[i]);
}
int32_t threads = xgboost::common::OmpGetNumThreads(asInteger(n_threads));
xgboost::common::ParallelFor(ndata, threads, [&](xgboost::omp_ulong i) {
indices_[i] = static_cast<unsigned>(p_indices[i]);
data_[i] = static_cast<float>(p_data[i]);
});
DMatrixHandle handle;
Json jconfig{Object{}};
// Construct configuration
jconfig["nthread"] = Integer{threads};
jconfig["missing"] = xgboost::Number{asReal(missing)};
std::string config;
Json::Dump(jconfig, &config);
CHECK_CALL(XGDMatrixCreateFromCSC(sindptr.c_str(), sindices.c_str(), sdata.c_str(), nrow,
config.c_str(), &handle));
CHECK_CALL(XGDMatrixCreateFromCSCEx(BeginPtr(col_ptr_), BeginPtr(indices_),
BeginPtr(data_), nindptr, ndata,
nrow, &handle));
ret = PROTECT(R_MakeExternalPtr(handle, R_NilValue, R_NilValue));
R_RegisterCFinalizerEx(ret, _DMatrixFinalizer, TRUE);
R_API_END();
UNPROTECT(1);
return ret;
}
XGB_DLL SEXP XGDMatrixCreateFromCSR_R(SEXP indptr, SEXP indices, SEXP data, SEXP num_col,
SEXP missing, SEXP n_threads) {
XGB_DLL SEXP XGDMatrixCreateFromCSR_R(SEXP indptr, SEXP indices, SEXP data,
SEXP num_col, SEXP n_threads) {
SEXP ret;
R_API_BEGIN();
std::int32_t threads = asInteger(n_threads);
using xgboost::Integer;
using xgboost::Json;
using xgboost::Object;
std::string sindptr, sindices, sdata;
CreateFromSparse(indptr, indices, data, &sindptr, &sindices, &sdata);
auto ncol = static_cast<std::size_t>(INTEGER(num_col)[0]);
const int *p_indptr = INTEGER(indptr);
const int *p_indices = INTEGER(indices);
const double *p_data = REAL(data);
size_t nindptr = static_cast<size_t>(length(indptr));
size_t ndata = static_cast<size_t>(length(data));
size_t ncol = static_cast<size_t>(INTEGER(num_col)[0]);
std::vector<size_t> row_ptr_(nindptr);
std::vector<unsigned> indices_(ndata);
std::vector<float> data_(ndata);
for (size_t i = 0; i < nindptr; ++i) {
row_ptr_[i] = static_cast<size_t>(p_indptr[i]);
}
int32_t threads = xgboost::common::OmpGetNumThreads(asInteger(n_threads));
xgboost::common::ParallelFor(ndata, threads, [&](xgboost::omp_ulong i) {
indices_[i] = static_cast<unsigned>(p_indices[i]);
data_[i] = static_cast<float>(p_data[i]);
});
DMatrixHandle handle;
Json jconfig{Object{}};
// Construct configuration
jconfig["nthread"] = Integer{threads};
jconfig["missing"] = xgboost::Number{asReal(missing)};
std::string config;
Json::Dump(jconfig, &config);
CHECK_CALL(XGDMatrixCreateFromCSR(sindptr.c_str(), sindices.c_str(), sdata.c_str(), ncol,
config.c_str(), &handle));
CHECK_CALL(XGDMatrixCreateFromCSREx(BeginPtr(row_ptr_), BeginPtr(indices_),
BeginPtr(data_), nindptr, ndata,
ncol, &handle));
ret = PROTECT(R_MakeExternalPtr(handle, R_NilValue, R_NilValue));
R_RegisterCFinalizerEx(ret, _DMatrixFinalizer, TRUE);
R_API_END();
UNPROTECT(1);
@@ -438,6 +422,27 @@ XGB_DLL SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evn
return mkString(ret);
}
XGB_DLL SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask,
SEXP ntree_limit, SEXP training) {
SEXP ret;
R_API_BEGIN();
bst_ulong olen;
const float *res;
CHECK_CALL(XGBoosterPredict(R_ExternalPtrAddr(handle),
R_ExternalPtrAddr(dmat),
asInteger(option_mask),
asInteger(ntree_limit),
asInteger(training),
&olen, &res));
ret = PROTECT(allocVector(REALSXP, olen));
for (size_t i = 0; i < olen; ++i) {
REAL(ret)[i] = res[i];
}
R_API_END();
UNPROTECT(1);
return ret;
}
XGB_DLL SEXP XGBoosterPredictFromDMatrix_R(SEXP handle, SEXP dmat, SEXP json_config) {
SEXP r_out_shape;
SEXP r_out_result;


@@ -59,12 +59,11 @@ XGB_DLL SEXP XGDMatrixCreateFromMat_R(SEXP mat,
* \param indices row indices
* \param data content of the data
* \param num_row number of rows (when it's set to 0, then guess from data)
* \param missing which value to represent missing value
* \param n_threads Number of threads used to construct DMatrix from csc matrix.
* \return created dmatrix
*/
XGB_DLL SEXP XGDMatrixCreateFromCSC_R(SEXP indptr, SEXP indices, SEXP data, SEXP num_row,
SEXP missing, SEXP n_threads);
SEXP n_threads);
/*!
* \brief create a matrix content from CSR format
@@ -72,12 +71,11 @@ XGB_DLL SEXP XGDMatrixCreateFromCSC_R(SEXP indptr, SEXP indices, SEXP data, SEXP
* \param indices column indices
* \param data content of the data
* \param num_col number of columns (when it's set to 0, then guess from data)
* \param missing which value to represent missing value
* \param n_threads Number of threads used to construct DMatrix from csr matrix.
* \return created dmatrix
*/
XGB_DLL SEXP XGDMatrixCreateFromCSR_R(SEXP indptr, SEXP indices, SEXP data, SEXP num_col,
SEXP missing, SEXP n_threads);
SEXP n_threads);
/*!
* \brief create a new dmatrix from sliced content of existing matrix
@@ -178,6 +176,17 @@ XGB_DLL SEXP XGBoosterBoostOneIter_R(SEXP handle, SEXP dtrain, SEXP grad, SEXP h
*/
XGB_DLL SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evnames);
/*!
* \brief (Deprecated) make prediction based on dmat
* \param handle handle
* \param dmat data matrix
* \param option_mask output_margin:1 predict_leaf:2
* \param ntree_limit limit number of trees used in prediction
* \param training Whether the prediction value is used for training.
*/
XGB_DLL SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask,
SEXP ntree_limit, SEXP training);
/*!
* \brief Run prediction on DMatrix, replacing `XGBoosterPredict_R`
* \param handle handle

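As user-level context for the `missing` argument by which the two `XGDMatrixCreateFromCSC_R`/`XGDMatrixCreateFromCSR_R` signatures above differ, here is a small R sketch of how a missing-value sentinel is passed when building a DMatrix from a sparse matrix; the data and the chosen sentinel are illustrative assumptions, not part of the diff.

```r
library(xgboost)
library(Matrix)

# Small sparse column-major matrix (dgCMatrix) with a binary label vector.
set.seed(42)
x <- Matrix::rsparsematrix(50, 4, density = 0.3)
y <- rbinom(50, 1, 0.5)

# In the variant that carries the `missing` argument, the value given here is
# forwarded to the native CSC/CSR constructor, so the same sentinel is honoured
# for sparse input as for dense input.
dtrain <- xgb.DMatrix(x, label = y, missing = 1)
```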

@@ -0,0 +1,26 @@
// Copyright (c) 2014 by Contributors
#include <stdio.h>
#include <stdarg.h>
#include <Rinternals.h>
// implements error handling
void XGBoostAssert_R(int exp, const char *fmt, ...) {
char buf[1024];
if (exp == 0) {
va_list args;
va_start(args, fmt);
vsprintf(buf, fmt, args);
va_end(args);
error("AssertError:%s\n", buf);
}
}
void XGBoostCheck_R(int exp, const char *fmt, ...) {
char buf[1024];
if (exp == 0) {
va_list args;
va_start(args, fmt);
vsprintf(buf, fmt, args);
va_end(args);
error("%s\n", buf);
}
}


@@ -1,51 +0,0 @@
## Install dependencies of R package for testing. The list might not be
## up-to-date, check DESCRIPTION for the latest list and update this one if
## inconsistent is found.
pkgs <- c(
## CI
"caret",
"pkgbuild",
"roxygen2",
"XML",
"cplm",
"e1071",
## suggests
"knitr",
"rmarkdown",
"ggplot2",
"DiagrammeR",
"Ckmeans.1d.dp",
"vcd",
"lintr",
"testthat",
"igraph",
"float",
"titanic",
## imports
"Matrix",
"methods",
"data.table",
"jsonlite"
)
ncpus <- parallel::detectCores()
print(paste0("Using ", ncpus, " cores to install dependencies."))
if (.Platform$OS.type == "unix") {
print("Installing source packages on unix.")
install.packages(
pkgs,
repo = "https://cloud.r-project.org",
dependencies = c("Depends", "Imports", "LinkingTo"),
Ncpus = parallel::detectCores()
)
} else {
print("Installing binary packages on Windows.")
install.packages(
pkgs,
repo = "https://cloud.r-project.org",
dependencies = c("Depends", "Imports", "LinkingTo"),
Ncpus = parallel::detectCores(),
type = "binary"
)
}


@@ -0,0 +1,71 @@
library(lintr)
library(crayon)
my_linters <- list(
absolute_path_linter = lintr::absolute_path_linter,
assignment_linter = lintr::assignment_linter,
closed_curly_linter = lintr::closed_curly_linter,
commas_linter = lintr::commas_linter,
equals_na = lintr::equals_na_linter,
infix_spaces_linter = lintr::infix_spaces_linter,
line_length_linter = lintr::line_length_linter,
no_tab_linter = lintr::no_tab_linter,
object_usage_linter = lintr::object_usage_linter,
object_length_linter = lintr::object_length_linter,
open_curly_linter = lintr::open_curly_linter,
semicolon = lintr::semicolon_terminator_linter(semicolon = c("compound", "trailing")),
seq = lintr::seq_linter,
spaces_inside_linter = lintr::spaces_inside_linter,
spaces_left_parentheses_linter = lintr::spaces_left_parentheses_linter,
trailing_blank_lines_linter = lintr::trailing_blank_lines_linter,
trailing_whitespace_linter = lintr::trailing_whitespace_linter,
true_false = lintr::T_and_F_symbol_linter,
unneeded_concatenation = lintr::unneeded_concatenation_linter
)
results <- lapply(
list.files(path = '.', pattern = '\\.[Rr]$', recursive = TRUE),
function (r_file) {
cat(sprintf("Processing %s ...\n", r_file))
list(r_file = r_file,
output = lintr::lint(filename = r_file, linters = my_linters))
})
num_issue <- Reduce(sum, lapply(results, function (e) length(e$output)))
lint2str <- function(lint_entry) {
color <- function(type) {
switch(type,
"warning" = crayon::magenta,
"error" = crayon::red,
"style" = crayon::blue,
crayon::bold
)
}
paste0(
lapply(lint_entry$output,
function (lint_line) {
paste0(
crayon::bold(lint_entry$r_file, ":",
as.character(lint_line$line_number), ":",
as.character(lint_line$column_number), ": ", sep = ""),
color(lint_line$type)(lint_line$type, ": ", sep = ""),
crayon::bold(lint_line$message), "\n",
lint_line$line, "\n",
lintr:::highlight_string(lint_line$message, lint_line$column_number, lint_line$ranges),
"\n",
collapse = "")
}),
collapse = "")
}
if (num_issue > 0) {
cat(sprintf('R linters found %d issues:\n', num_issue))
for (entry in results) {
if (length(entry$output)) {
cat(paste0('**** ', crayon::bold(entry$r_file), '\n'))
cat(paste0(lint2str(entry), collapse = ''))
}
}
quit(save = 'no', status = 1) # Signal error to parent shell
}


@@ -1,3 +1,6 @@
require(xgboost)
library(Matrix)
context("basic functions")
data(agaricus.train, package = 'xgboost')
@@ -85,18 +88,9 @@ test_that("dart prediction works", {
rnorm(100)
set.seed(1994)
booster_by_xgboost <- xgboost(
data = d,
label = y,
max_depth = 2,
booster = "dart",
rate_drop = 0.5,
one_drop = TRUE,
eta = 1,
nthread = 2,
nrounds = nrounds,
objective = "reg:squarederror"
)
booster_by_xgboost <- xgboost(data = d, label = y, max_depth = 2, booster = "dart",
rate_drop = 0.5, one_drop = TRUE,
eta = 1, nthread = 2, nrounds = nrounds, objective = "reg:squarederror")
pred_by_xgboost_0 <- predict(booster_by_xgboost, newdata = d, ntreelimit = 0)
pred_by_xgboost_1 <- predict(booster_by_xgboost, newdata = d, ntreelimit = nrounds)
expect_true(all(matrix(pred_by_xgboost_0, byrow = TRUE) == matrix(pred_by_xgboost_1, byrow = TRUE)))
@@ -106,19 +100,19 @@ test_that("dart prediction works", {
set.seed(1994)
dtrain <- xgb.DMatrix(data = d, info = list(label = y))
booster_by_train <- xgb.train(
params = list(
booster = "dart",
max_depth = 2,
eta = 1,
rate_drop = 0.5,
one_drop = TRUE,
nthread = 1,
objective = "reg:squarederror"
),
data = dtrain,
nrounds = nrounds
)
booster_by_train <- xgb.train(params = list(
booster = "dart",
max_depth = 2,
eta = 1,
rate_drop = 0.5,
one_drop = TRUE,
nthread = 1,
tree_method = "exact",
objective = "reg:squarederror"
),
data = dtrain,
nrounds = nrounds
)
pred_by_train_0 <- predict(booster_by_train, newdata = dtrain, ntreelimit = 0)
pred_by_train_1 <- predict(booster_by_train, newdata = dtrain, ntreelimit = nrounds)
pred_by_train_2 <- predict(booster_by_train, newdata = dtrain, training = TRUE)
@@ -241,20 +235,12 @@ test_that("train and predict RF with softprob", {
test_that("use of multiple eval metrics works", {
expect_output(
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic",
eval_metric = 'error', eval_metric = 'auc', eval_metric = "logloss")
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic",
eval_metric = 'error', eval_metric = 'auc', eval_metric = "logloss")
, "train-error.*train-auc.*train-logloss")
expect_false(is.null(bst$evaluation_log))
expect_equal(dim(bst$evaluation_log), c(2, 4))
expect_equal(colnames(bst$evaluation_log), c("iter", "train_error", "train_auc", "train_logloss"))
expect_output(
bst2 <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic",
eval_metric = list("error", "auc", "logloss"))
, "train-error.*train-auc.*train-logloss")
expect_false(is.null(bst2$evaluation_log))
expect_equal(dim(bst2$evaluation_log), c(2, 4))
expect_equal(colnames(bst2$evaluation_log), c("iter", "train_error", "train_auc", "train_logloss"))
})
@@ -408,7 +394,7 @@ test_that("colsample_bytree works", {
xgb.importance(model = bst)
# If colsample_bytree works properly, a variety of features should be used
# in the 100 trees
expect_gte(nrow(xgb.importance(model = bst)), 28)
expect_gte(nrow(xgb.importance(model = bst)), 30)
})
test_that("Configuration works", {
@@ -418,7 +404,7 @@ test_that("Configuration works", {
config <- xgb.config(bst)
xgb.config(bst) <- config
reloaded_config <- xgb.config(bst)
expect_equal(config, reloaded_config)
expect_equal(config, reloaded_config);
})
test_that("strict_shape works", {


@@ -1,4 +1,9 @@
# More specific testing of callbacks
require(xgboost)
require(data.table)
require(titanic)
context("callbacks")
data(agaricus.train, package = 'xgboost')
@@ -79,7 +84,7 @@ test_that("cb.evaluation.log works as expected", {
list(c(iter = 1, bst_evaluation), c(iter = 2, bst_evaluation)))
expect_silent(f(finalize = TRUE))
expect_equal(evaluation_log,
data.table::data.table(iter = 1:2, train_auc = c(0.9, 0.9), test_auc = c(0.8, 0.8)))
data.table(iter = 1:2, train_auc = c(0.9, 0.9), test_auc = c(0.8, 0.8)))
bst_evaluation_err <- c('train-auc' = 0.1, 'test-auc' = 0.2)
evaluation_log <- list()
@@ -96,7 +101,7 @@ test_that("cb.evaluation.log works as expected", {
c(iter = 2, c(bst_evaluation, bst_evaluation_err))))
expect_silent(f(finalize = TRUE))
expect_equal(evaluation_log,
data.table::data.table(iter = 1:2,
data.table(iter = 1:2,
train_auc_mean = c(0.9, 0.9), train_auc_std = c(0.1, 0.1),
test_auc_mean = c(0.8, 0.8), test_auc_std = c(0.2, 0.2)))
})
@@ -251,9 +256,6 @@ test_that("early stopping using a specific metric works", {
})
test_that("early stopping works with titanic", {
if (!requireNamespace("titanic")) {
testthat::skip("Optional testing dependency 'titanic' not found.")
}
# This test was inspired by https://github.com/dmlc/xgboost/issues/5935
# It catches possible issues on noLD R
titanic <- titanic::titanic_train
@@ -320,7 +322,7 @@ test_that("prediction in early-stopping xgb.cv works", {
expect_output(
cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.1, nrounds = 20,
early_stopping_rounds = 5, maximize = FALSE, stratified = FALSE,
prediction = TRUE, base_score = 0.5)
prediction = TRUE)
, "Stopping. Best iteration")
expect_false(is.null(cv$best_iteration))


@@ -1,5 +1,7 @@
context('Test models with custom objective')
require(xgboost)
set.seed(1994)
data(agaricus.train, package = 'xgboost')


@@ -1,7 +1,9 @@
library(Matrix)
require(xgboost)
require(Matrix)
context("testing xgb.DMatrix functionality")
data(agaricus.test, package = "xgboost")
data(agaricus.test, package = 'xgboost')
test_data <- agaricus.test$data[1:100, ]
test_label <- agaricus.test$label[1:100]
@@ -11,49 +13,14 @@ test_that("xgb.DMatrix: basic construction", {
# from dense matrix
dtest2 <- xgb.DMatrix(as.matrix(test_data), label = test_label)
expect_equal(getinfo(dtest1, "label"), getinfo(dtest2, "label"))
expect_equal(getinfo(dtest1, 'label'), getinfo(dtest2, 'label'))
expect_equal(dim(dtest1), dim(dtest2))
# from dense integer matrix
#from dense integer matrix
int_data <- as.matrix(test_data)
storage.mode(int_data) <- "integer"
dtest3 <- xgb.DMatrix(int_data, label = test_label)
expect_equal(dim(dtest1), dim(dtest3))
n_samples <- 100
X <- cbind(
x1 = sample(x = 4, size = n_samples, replace = TRUE),
x2 = sample(x = 4, size = n_samples, replace = TRUE),
x3 = sample(x = 4, size = n_samples, replace = TRUE)
)
X <- matrix(X, nrow = n_samples)
y <- rbinom(n = n_samples, size = 1, prob = 1 / 2)
fd <- xgb.DMatrix(X, label = y, missing = 1)
dgc <- as(X, "dgCMatrix")
fdgc <- xgb.DMatrix(dgc, label = y, missing = 1.0)
dgr <- as(X, "dgRMatrix")
fdgr <- xgb.DMatrix(dgr, label = y, missing = 1)
params <- list(tree_method = "hist")
bst_fd <- xgb.train(
params, nrounds = 8, fd, watchlist = list(train = fd)
)
bst_dgr <- xgb.train(
params, nrounds = 8, fdgr, watchlist = list(train = fdgr)
)
bst_dgc <- xgb.train(
params, nrounds = 8, fdgc, watchlist = list(train = fdgc)
)
raw_fd <- xgb.save.raw(bst_fd, raw_format = "ubj")
raw_dgr <- xgb.save.raw(bst_dgr, raw_format = "ubj")
raw_dgc <- xgb.save.raw(bst_dgc, raw_format = "ubj")
expect_equal(raw_fd, raw_dgr)
expect_equal(raw_fd, raw_dgc)
})
test_that("xgb.DMatrix: saving, loading", {
@@ -70,9 +37,9 @@ test_that("xgb.DMatrix: saving, loading", {
# from a libsvm text file
tmp <- c("0 1:1 2:1", "1 3:1", "0 1:1")
tmp_file <- tempfile(fileext = ".libsvm")
tmp_file <- 'tmp.libsvm'
writeLines(tmp, tmp_file)
dtest4 <- xgb.DMatrix(paste(tmp_file, "?format=libsvm", sep = ""), silent = TRUE)
dtest4 <- xgb.DMatrix(tmp_file, silent = TRUE)
expect_equal(dim(dtest4), c(3, 4))
expect_equal(getinfo(dtest4, 'label'), c(0, 1, 0))
@@ -86,7 +53,7 @@ test_that("xgb.DMatrix: saving, loading", {
dtrain <- xgb.DMatrix(tmp_file)
expect_equal(colnames(dtrain), cnames)
ft <- rep(c("c", "q"), each = length(cnames) / 2)
ft <- rep(c("c", "q"), each=length(cnames)/2)
setinfo(dtrain, "feature_type", ft)
expect_equal(ft, getinfo(dtrain, "feature_type"))
})
@@ -156,62 +123,9 @@ test_that("xgb.DMatrix: colnames", {
test_that("xgb.DMatrix: nrow is correct for a very sparse matrix", {
set.seed(123)
nr <- 1000
x <- Matrix::rsparsematrix(nr, 100, density = 0.0005)
x <- rsparsematrix(nr, 100, density = 0.0005)
# we want it very sparse, so that last rows are empty
expect_lt(max(x@i), nr)
dtest <- xgb.DMatrix(x)
expect_equal(dim(dtest), dim(x))
})
test_that("xgb.DMatrix: print", {
data(agaricus.train, package = 'xgboost')
# core DMatrix with just data and labels
dtrain <- xgb.DMatrix(
data = agaricus.train$data
, label = agaricus.train$label
)
txt <- capture.output({
print(dtrain)
})
expect_equal(txt, "xgb.DMatrix dim: 6513 x 126 info: label colnames: yes")
# verbose=TRUE prints feature names
txt <- capture.output({
print(dtrain, verbose = TRUE)
})
expect_equal(txt[[1L]], "xgb.DMatrix dim: 6513 x 126 info: label colnames:")
expect_equal(txt[[2L]], sprintf("'%s'", paste(colnames(dtrain), collapse = "','")))
# DMatrix with weights and base_margin
dtrain <- xgb.DMatrix(
data = agaricus.train$data
, label = agaricus.train$label
, weight = seq_along(agaricus.train$label)
, base_margin = agaricus.train$label
)
txt <- capture.output({
print(dtrain)
})
expect_equal(txt, "xgb.DMatrix dim: 6513 x 126 info: label weight base_margin colnames: yes")
# DMatrix with just features
dtrain <- xgb.DMatrix(
data = agaricus.train$data
)
txt <- capture.output({
print(dtrain)
})
expect_equal(txt, "xgb.DMatrix dim: 6513 x 126 info: NA colnames: yes")
# DMatrix with no column names
data_no_colnames <- agaricus.train$data
colnames(data_no_colnames) <- NULL
dtrain <- xgb.DMatrix(
data = data_no_colnames
)
txt <- capture.output({
print(dtrain)
})
expect_equal(txt, "xgb.DMatrix dim: 6513 x 126 info: NA colnames: no")
})


@@ -1,3 +1,5 @@
library(xgboost)
context("feature weights")
test_that("training with feature weights works", {


@@ -1,3 +1,5 @@
require(xgboost)
context("Garbage Collection Safety Check")
test_that("train and prediction when gctorture is on", {


@@ -1,5 +1,7 @@
context('Test generalized linear models')
require(xgboost)
test_that("gblinear works", {
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')


@@ -1,11 +1,10 @@
library(testthat)
context('Test helper functions')
VCD_AVAILABLE <- requireNamespace("vcd", quietly = TRUE)
.skip_if_vcd_not_available <- function() {
if (!VCD_AVAILABLE) {
testthat::skip("Optional testing dependency 'vcd' not found.")
}
}
require(xgboost)
require(data.table)
require(Matrix)
require(vcd, quietly = TRUE)
float_tolerance <- 5e-6
@@ -13,30 +12,25 @@ float_tolerance <- 5e-6
flag_32bit <- .Machine$sizeof.pointer != 8
set.seed(1982)
data(Arthritis)
df <- data.table(Arthritis, keep.rownames = FALSE)
df[, AgeDiscret := as.factor(round(Age / 10, 0))]
df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
df[, ID := NULL]
sparse_matrix <- sparse.model.matrix(Improved~.-1, data = df) # nolint
label <- df[, ifelse(Improved == "Marked", 1, 0)]
# binary
nrounds <- 12
if (isTRUE(VCD_AVAILABLE)) {
data(Arthritis, package = "vcd")
df <- data.table::data.table(Arthritis, keep.rownames = FALSE)
df[, AgeDiscret := as.factor(round(Age / 10, 0))]
df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
df[, ID := NULL]
sparse_matrix <- Matrix::sparse.model.matrix(Improved~.-1, data = df) # nolint
label <- df[, ifelse(Improved == "Marked", 1, 0)]
bst.Tree <- xgboost(data = sparse_matrix, label = label, max_depth = 9,
eta = 1, nthread = 2, nrounds = nrounds, verbose = 0,
objective = "binary:logistic", booster = "gbtree")
# binary
bst.Tree <- xgboost(data = sparse_matrix, label = label, max_depth = 9,
eta = 1, nthread = 2, nrounds = nrounds, verbose = 0,
objective = "binary:logistic", booster = "gbtree",
base_score = 0.5)
bst.GLM <- xgboost(data = sparse_matrix, label = label,
eta = 1, nthread = 1, nrounds = nrounds, verbose = 0,
objective = "binary:logistic", booster = "gblinear")
bst.GLM <- xgboost(data = sparse_matrix, label = label,
eta = 1, nthread = 1, nrounds = nrounds, verbose = 0,
objective = "binary:logistic", booster = "gblinear",
base_score = 0.5)
feature.names <- colnames(sparse_matrix)
}
feature.names <- colnames(sparse_matrix)
# multiclass
mlabel <- as.numeric(iris$Species) - 1
@@ -51,7 +45,6 @@ mbst.GLM <- xgboost(data = as.matrix(iris[, -5]), label = mlabel, verbose = 0,
test_that("xgb.dump works", {
.skip_if_vcd_not_available()
if (!flag_32bit)
expect_length(xgb.dump(bst.Tree), 200)
dump_file <- file.path(tempdir(), 'xgb.model.dump')
@@ -63,11 +56,10 @@ test_that("xgb.dump works", {
dmp <- xgb.dump(bst.Tree, dump_format = "json")
expect_length(dmp, 1)
if (!flag_32bit)
expect_length(grep('nodeid', strsplit(dmp, '\n', fixed = TRUE)[[1]], fixed = TRUE), 188)
expect_length(grep('nodeid', strsplit(dmp, '\n')[[1]]), 188)
})
test_that("xgb.dump works for gblinear", {
.skip_if_vcd_not_available()
expect_length(xgb.dump(bst.GLM), 14)
# also make sure that it works properly for a sparse model where some coefficients
# are 0 from setting large L1 regularization:
@@ -80,11 +72,10 @@ test_that("xgb.dump works for gblinear", {
# JSON format
dmp <- xgb.dump(bst.GLM.sp, dump_format = "json")
expect_length(dmp, 1)
expect_length(grep('\\d', strsplit(dmp, '\n', fixed = TRUE)[[1]]), 11)
expect_length(grep('\\d', strsplit(dmp, '\n')[[1]]), 11)
})
test_that("predict leafs works", {
.skip_if_vcd_not_available()
# no error for gbtree
expect_error(pred_leaf <- predict(bst.Tree, sparse_matrix, predleaf = TRUE), regexp = NA)
expect_equal(dim(pred_leaf), c(nrow(sparse_matrix), nrounds))
@@ -93,7 +84,6 @@ test_that("predict leafs works", {
})
test_that("predict feature contributions works", {
.skip_if_vcd_not_available()
# gbtree binary classifier
expect_error(pred_contr <- predict(bst.Tree, sparse_matrix, predcontrib = TRUE), regexp = NA)
expect_equal(dim(pred_contr), c(nrow(sparse_matrix), ncol(sparse_matrix) + 1))
@@ -180,9 +170,8 @@ test_that("SHAPs sum to predictions, with or without DART", {
label = y,
nrounds = nrounds)
pr <- function(...) {
pr <- function(...)
predict(fit, newdata = d, ...)
}
pred <- pr()
shap <- pr(predcontrib = TRUE)
shapi <- pr(predinteraction = TRUE)
@@ -197,7 +186,6 @@ test_that("SHAPs sum to predictions, with or without DART", {
})
test_that("xgb-attribute functionality", {
.skip_if_vcd_not_available()
val <- "my attribute value"
list.val <- list(my_attr = val, a = 123, b = 'ok')
list.ch <- list.val[order(names(list.val))]
@@ -231,11 +219,10 @@ test_that("xgb-attribute functionality", {
expect_null(xgb.attributes(bst))
})
if (grepl('Windows', Sys.info()[['sysname']], fixed = TRUE) ||
grepl('Linux', Sys.info()[['sysname']], fixed = TRUE) ||
grepl('Darwin', Sys.info()[['sysname']], fixed = TRUE)) {
if (grepl('Windows', Sys.info()[['sysname']]) ||
grepl('Linux', Sys.info()[['sysname']]) ||
grepl('Darwin', Sys.info()[['sysname']])) {
test_that("xgb-attribute numeric precision", {
.skip_if_vcd_not_available()
# check that lossless conversion works with 17 digits
# numeric -> character -> numeric
X <- 10^runif(100, -20, 20)
@@ -254,7 +241,6 @@ if (grepl('Windows', Sys.info()[['sysname']], fixed = TRUE) ||
}
test_that("xgb.Booster serializing as R object works", {
.skip_if_vcd_not_available()
saveRDS(bst.Tree, 'xgb.model.rds')
bst <- readRDS('xgb.model.rds')
dtrain <- xgb.DMatrix(sparse_matrix, label = label)
@@ -273,7 +259,6 @@ test_that("xgb.Booster serializing as R object works", {
})
test_that("xgb.model.dt.tree works with and without feature names", {
.skip_if_vcd_not_available()
names.dt.trees <- c("Tree", "Node", "ID", "Feature", "Split", "Yes", "No", "Missing", "Quality", "Cover")
dt.tree <- xgb.model.dt.tree(feature_names = feature.names, model = bst.Tree)
expect_equal(names.dt.trees, names(dt.tree))
@@ -293,18 +278,16 @@ test_that("xgb.model.dt.tree works with and without feature names", {
# using integer node ID instead of character
dt.tree.int <- xgb.model.dt.tree(model = bst.Tree, use_int_id = TRUE)
expect_equal(as.integer(data.table::tstrsplit(dt.tree$Yes, '-', fixed = TRUE)[[2]]), dt.tree.int$Yes)
expect_equal(as.integer(data.table::tstrsplit(dt.tree$No, '-', fixed = TRUE)[[2]]), dt.tree.int$No)
expect_equal(as.integer(data.table::tstrsplit(dt.tree$Missing, '-', fixed = TRUE)[[2]]), dt.tree.int$Missing)
expect_equal(as.integer(tstrsplit(dt.tree$Yes, '-')[[2]]), dt.tree.int$Yes)
expect_equal(as.integer(tstrsplit(dt.tree$No, '-')[[2]]), dt.tree.int$No)
expect_equal(as.integer(tstrsplit(dt.tree$Missing, '-')[[2]]), dt.tree.int$Missing)
})
test_that("xgb.model.dt.tree throws error for gblinear", {
.skip_if_vcd_not_available()
expect_error(xgb.model.dt.tree(model = bst.GLM))
})
test_that("xgb.importance works with and without feature names", {
.skip_if_vcd_not_available()
importance.Tree <- xgb.importance(feature_names = feature.names, model = bst.Tree)
if (!flag_32bit)
expect_equal(dim(importance.Tree), c(7, 4))
@@ -362,8 +345,7 @@ test_that("xgb.importance works with and without feature names", {
m <- xgboost::xgboost(
data = as.matrix(data.frame(x = c(0, 1))),
label = c(1, 2),
nrounds = 1,
base_score = 0.5
nrounds = 1
)
df <- xgb.model.dt.tree(model = m)
expect_equal(df$Feature, "Leaf")
@@ -371,7 +353,6 @@ test_that("xgb.importance works with and without feature names", {
})
test_that("xgb.importance works with GLM model", {
.skip_if_vcd_not_available()
importance.GLM <- xgb.importance(feature_names = feature.names, model = bst.GLM)
expect_equal(dim(importance.GLM), c(10, 2))
expect_equal(colnames(importance.GLM), c("Feature", "Weight"))
@@ -387,7 +368,6 @@ test_that("xgb.importance works with GLM model", {
})
test_that("xgb.model.dt.tree and xgb.importance work with a single split model", {
.skip_if_vcd_not_available()
bst1 <- xgboost(data = sparse_matrix, label = label, max_depth = 1,
eta = 1, nthread = 2, nrounds = 1, verbose = 0,
objective = "binary:logistic")
@@ -399,19 +379,16 @@ test_that("xgb.model.dt.tree and xgb.importance work with a single split model",
})
test_that("xgb.plot.tree works with and without feature names", {
.skip_if_vcd_not_available()
expect_silent(xgb.plot.tree(feature_names = feature.names, model = bst.Tree))
expect_silent(xgb.plot.tree(model = bst.Tree))
})
test_that("xgb.plot.multi.trees works with and without feature names", {
.skip_if_vcd_not_available()
xgb.plot.multi.trees(model = bst.Tree, feature_names = feature.names, features_keep = 3)
xgb.plot.multi.trees(model = bst.Tree, features_keep = 3)
})
test_that("xgb.plot.deepness works", {
.skip_if_vcd_not_available()
d2p <- xgb.plot.deepness(model = bst.Tree)
expect_equal(colnames(d2p), c("ID", "Tree", "Depth", "Cover", "Weight"))
xgb.plot.deepness(model = bst.Tree, which = "med.depth")
@@ -419,7 +396,6 @@ test_that("xgb.plot.deepness works", {
})
test_that("xgb.shap.data works when top_n is provided", {
.skip_if_vcd_not_available()
data_list <- xgb.shap.data(data = sparse_matrix, model = bst.Tree, top_n = 2)
expect_equal(names(data_list), c("data", "shap_contrib"))
expect_equal(NCOL(data_list$data), 2)
@@ -437,14 +413,12 @@ test_that("xgb.shap.data works when top_n is provided", {
})
test_that("xgb.shap.data works with subsampling", {
.skip_if_vcd_not_available()
data_list <- xgb.shap.data(data = sparse_matrix, model = bst.Tree, top_n = 2, subsample = 0.8)
expect_equal(NROW(data_list$data), as.integer(0.8 * nrow(sparse_matrix)))
expect_equal(NROW(data_list$data), NROW(data_list$shap_contrib))
})
test_that("prepare.ggplot.shap.data works", {
.skip_if_vcd_not_available()
data_list <- xgb.shap.data(data = sparse_matrix, model = bst.Tree, top_n = 2)
plot_data <- prepare.ggplot.shap.data(data_list, normalize = TRUE)
expect_s3_class(plot_data, "data.frame")
@@ -455,19 +429,17 @@ test_that("prepare.ggplot.shap.data works", {
})
test_that("xgb.plot.shap works", {
.skip_if_vcd_not_available()
sh <- xgb.plot.shap(data = sparse_matrix, model = bst.Tree, top_n = 2, col = 4)
expect_equal(names(sh), c("data", "shap_contrib"))
})
test_that("xgb.plot.shap.summary works", {
.skip_if_vcd_not_available()
expect_silent(xgb.plot.shap.summary(data = sparse_matrix, model = bst.Tree, top_n = 2))
expect_silent(xgb.ggplot.shap.summary(data = sparse_matrix, model = bst.Tree, top_n = 2))
})
test_that("check.deprecation works", {
ttt <- function(a = NNULL, DUMMY = NULL, ...) {
ttt <- function(a = NNULL, DUMMY=NULL, ...) {
check.deprecation(...)
as.list((environment()))
}


@@ -17,7 +17,7 @@ test_that("interaction constraints for regression", {
# Set all observations to have the same x3 values then increment
# by the same amount
preds <- lapply(c(1, 2, 3), function(x) {
preds <- lapply(c(1, 2, 3), function(x){
tmat <- matrix(c(x1, x2, rep(x, 1000)), ncol = 3)
return(predict(bst, tmat))
})


@@ -1,5 +1,7 @@
context('Test prediction of feature interactions')
require(xgboost)
set.seed(123)
test_that("predict feature interactions works", {


@@ -1,4 +1,7 @@
context("Test model IO.")
## some other tests are in test_basic.R
require(xgboost)
require(testthat)
data(agaricus.train, package = "xgboost")
data(agaricus.test, package = "xgboost")


@@ -1,3 +1,6 @@
require(xgboost)
require(jsonlite)
context("Models from previous versions of XGBoost can be loaded")
metadata <- list(
@@ -59,12 +62,11 @@ test_that("Models from previous versions of XGBoost can be loaded", {
bucket <- 'xgboost-ci-jenkins-artifacts'
region <- 'us-west-2'
file_name <- 'xgboost_r_model_compatibility_test.zip'
zipfile <- tempfile(fileext = ".zip")
extract_dir <- tempdir()
zipfile <- file.path(getwd(), file_name)
model_dir <- file.path(getwd(), 'models')
download.file(paste('https://', bucket, '.s3-', region, '.amazonaws.com/', file_name, sep = ''),
destfile = zipfile, mode = 'wb', quiet = TRUE)
unzip(zipfile, exdir = extract_dir, overwrite = TRUE)
model_dir <- file.path(extract_dir, 'models')
unzip(zipfile, overwrite = TRUE)
pred_data <- xgb.DMatrix(matrix(c(0, 0, 0, 0), nrow = 1, ncol = 4))
@@ -76,20 +78,32 @@ test_that("Models from previous versions of XGBoost can be loaded", {
name <- m[3]
is_rds <- endsWith(model_file, '.rds')
is_json <- endsWith(model_file, '.json')
# Expect an R warning when a model is loaded from RDS and it was generated by version < 1.1.x
if (is_rds && compareVersion(model_xgb_ver, '1.1.1.1') < 0) {
booster <- readRDS(model_file)
expect_warning(predict(booster, newdata = pred_data))
booster <- readRDS(model_file)
expect_warning(run_booster_check(booster, name))
} else {
if (is_rds) {
cpp_warning <- capture.output({
# Expect an R warning when a model is loaded from RDS and it was generated by version < 1.1.x
if (is_rds && compareVersion(model_xgb_ver, '1.1.1.1') < 0) {
booster <- readRDS(model_file)
expect_warning(predict(booster, newdata = pred_data))
booster <- readRDS(model_file)
expect_warning(run_booster_check(booster, name))
} else {
booster <- xgb.load(model_file)
if (is_rds) {
booster <- readRDS(model_file)
} else {
booster <- xgb.load(model_file)
}
predict(booster, newdata = pred_data)
run_booster_check(booster, name)
}
predict(booster, newdata = pred_data)
run_booster_check(booster, name)
})
cpp_warning <- paste0(cpp_warning, collapse = ' ')
if (is_rds && compareVersion(model_xgb_ver, '1.1.1.1') >= 0) {
# Expect a C++ warning when a model is loaded from RDS and it was generated by old XGBoost
m <- grepl(paste0('.*If you are loading a serialized model ',
'\\(like pickle in Python, RDS in R\\).*',
'for more details about differences between ',
'saving model and serializing.*'), cpp_warning, perl = TRUE)
expect_true(length(m) > 0 && all(m))
}
})
})


@@ -1,3 +1,5 @@
require(xgboost)
context("monotone constraints")
set.seed(1024)


@@ -1,5 +1,7 @@
context('Test model params and call are exposed to R')
require(xgboost)
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')


@@ -1,5 +1,6 @@
context('Test Poisson regression model')
require(xgboost)
set.seed(1994)
test_that("Poisson regression works", {


@@ -1,12 +1,12 @@
require(xgboost)
require(Matrix)
context('Learning to rank')
test_that('Test ranking with unweighted data', {
X <- Matrix::sparseMatrix(
i = c(2, 3, 7, 9, 12, 15, 17, 18)
, j = c(1, 1, 2, 2, 3, 3, 4, 4)
, x = rep(1.0, 8)
, dims = c(20, 4)
)
X <- sparseMatrix(i = c(2, 3, 7, 9, 12, 15, 17, 18),
j = c(1, 1, 2, 2, 3, 3, 4, 4),
x = rep(1.0, 8), dims = c(20, 4))
y <- c(0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0)
group <- c(5, 5, 5, 5)
dtrain <- xgb.DMatrix(X, label = y, group = group)
@@ -20,12 +20,9 @@ test_that('Test ranking with unweighted data', {
})
test_that('Test ranking with weighted data', {
X <- Matrix::sparseMatrix(
i = c(2, 3, 7, 9, 12, 15, 17, 18)
, j = c(1, 1, 2, 2, 3, 3, 4, 4)
, x = rep(1.0, 8)
, dims = c(20, 4)
)
X <- sparseMatrix(i = c(2, 3, 7, 9, 12, 15, 17, 18),
j = c(1, 1, 2, 2, 3, 3, 4, 4),
x = rep(1.0, 8), dims = c(20, 4))
y <- c(0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0)
group <- c(5, 5, 5, 5)
weight <- c(1.0, 2.0, 3.0, 4.0)


@@ -1,3 +1,5 @@
require(xgboost)
context("update trees in an existing model")
data(agaricus.train, package = 'xgboost')
@@ -13,10 +15,7 @@ test_that("updating the model works", {
watchlist <- list(train = dtrain, test = dtest)
# no-subsampling
p1 <- list(
objective = "binary:logistic", max_depth = 2, eta = 0.05, nthread = 2,
updater = "grow_colmaker,prune"
)
p1 <- list(objective = "binary:logistic", max_depth = 2, eta = 0.05, nthread = 2)
set.seed(11)
bst1 <- xgb.train(p1, dtrain, nrounds = 10, watchlist, verbose = 0)
tr1 <- xgb.model.dt.tree(model = bst1)


@@ -28,9 +28,7 @@ Package loading:
require(xgboost)
require(Matrix)
require(data.table)
if (!require('vcd')) {
install.packages('vcd')
}
if (!require('vcd')) install.packages('vcd')
```
> The **VCD** package is used only for one of its embedded datasets.
@@ -102,7 +100,7 @@ Note that we transform it to `factor` so the algorithm treat these age groups as
Therefore, 20 is no closer to 30 than it is to 60. In short, the distance between ages is lost in this transformation.
```{r}
head(df[, AgeDiscret := as.factor(round(Age / 10, 0))])
head(df[,AgeDiscret := as.factor(round(Age/10,0))])
```
##### Random split into two groups
@@ -110,7 +108,7 @@ head(df[, AgeDiscret := as.factor(round(Age / 10, 0))])
Following is an even stronger simplification of the real age with an arbitrary split at 30 years old. We choose this value **based on nothing**. We will see later if simplifying the information based on arbitrary values is a good strategy (you may already have an idea of how well it will work...).
```{r}
head(df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))])
head(df[,AgeCat:= as.factor(ifelse(Age > 30, "Old", "Young"))])
```
##### Risks in adding correlated features
@@ -126,13 +124,13 @@ Fortunately, decision tree algorithms (including boosted trees) are very robust
We remove ID as there is nothing to learn from this feature (it would just add some noise).
```{r, results='hide'}
df[, ID := NULL]
df[,ID:=NULL]
```
We will list the different values for the column `Treatment`:
```{r}
levels(df[, Treatment])
levels(df[,Treatment])
```
@@ -149,7 +147,7 @@ For example, the column `Treatment` will be replaced by two columns, `TreatmentP
Column `Improved` is excluded because it will be our `label` column, the one we want to predict.
```{r, warning=FALSE,message=FALSE}
sparse_matrix <- sparse.model.matrix(Improved ~ ., data = df)[, -1]
sparse_matrix <- sparse.model.matrix(Improved ~ ., data = df)[,-1]
head(sparse_matrix)
```
@@ -158,7 +156,7 @@ head(sparse_matrix)
Create the output `numeric` vector (not as a sparse `Matrix`):
```{r}
output_vector <- df[, Improved] == "Marked"
output_vector = df[,Improved] == "Marked"
```
1. set `Y` vector to `0`;
@@ -172,7 +170,7 @@ The code below is very usual. For more information, you can look at the document
```{r}
bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 4,
eta = 1, nthread = 2, nrounds = 10, objective = "binary:logistic")
eta = 1, nthread = 2, nrounds = 10,objective = "binary:logistic")
```
@@ -221,7 +219,7 @@ For that purpose we will execute the same function as above but using two more p
importanceRaw <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst, data = sparse_matrix, label = output_vector)
# Cleaning for better display
importanceClean <- importanceRaw[, `:=`(Cover = NULL, Frequency = NULL)]
importanceClean <- importanceRaw[,`:=`(Cover=NULL, Frequency=NULL)]
head(importanceClean)
```
@@ -323,31 +321,16 @@ If you want to try Random Forests algorithm, you can tweak XGBoost parameters!
For instance, to compute a model with 1000 trees, with a 0.5 factor on sampling rows and columns:
```{r, warning=FALSE, message=FALSE}
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
#Random Forest - 1000 trees
bst <- xgboost(
data = train$data
, label = train$label
, max_depth = 4
, num_parallel_tree = 1000
, subsample = 0.5
, colsample_bytree = 0.5
, nrounds = 1
, objective = "binary:logistic"
)
bst <- xgboost(data = train$data, label = train$label, max_depth = 4, num_parallel_tree = 1000, subsample = 0.5, colsample_bytree =0.5, nrounds = 1, objective = "binary:logistic")
#Boosting - 3 rounds
bst <- xgboost(
data = train$data
, label = train$label
, max_depth = 4
, nrounds = 3
, objective = "binary:logistic"
)
bst <- xgboost(data = train$data, label = train$label, max_depth = 4, nrounds = 3, objective = "binary:logistic")
```
> Note that the parameter `nrounds` is set to `1`.


@@ -18,11 +18,13 @@
publisher={Institute of Mathematical Statistics}
}
@misc{
Bache+Lichman:2013 ,
author = "K. Bache and M. Lichman",
year = "2013",
title = "{UCI} Machine Learning Repository",
url = "https://archive.ics.uci.edu/",
url = "http://archive.ics.uci.edu/ml/",
institution = "University of California, Irvine, School of Information and Computer Sciences"
}


@@ -52,9 +52,9 @@ It has several features:
For weekly updated version (highly recommended), install from *GitHub*:
```{r installGithub, eval=FALSE}
install.packages("drat", repos = "https://cran.rstudio.com")
install.packages("drat", repos="https://cran.rstudio.com")
drat:::addRepo("dmlc")
install.packages("xgboost", repos = "http://dmlc.ml/drat/", type = "source")
install.packages("xgboost", repos="http://dmlc.ml/drat/", type = "source")
```
> *Windows* users will need to install [Rtools](https://cran.r-project.org/bin/windows/Rtools/) first.
@@ -101,8 +101,8 @@ Why *split* the dataset in two parts?
In the first part we will build our model. In the second part we will test it and assess its quality. Without dividing the dataset, we would test the model on data which the algorithm has already seen.
```{r datasetLoading, results='hold', message=F, warning=F}
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
```
@@ -152,15 +152,7 @@ We will train decision tree model using the following parameters:
* `nrounds = 2`: there will be two passes on the data, the second one will enhance the model by further reducing the difference between ground truth and prediction.
```{r trainingSparse, message=F, warning=F}
bstSparse <- xgboost(
data = train$data
, label = train$label
, max_depth = 2
, eta = 1
, nthread = 2
, nrounds = 2
, objective = "binary:logistic"
)
bstSparse <- xgboost(data = train$data, label = train$label, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
```
> The more complex the relationship between your features and your `label` is, the more passes you need.
@@ -172,15 +164,7 @@ bstSparse <- xgboost(
Alternatively, you can put your dataset in a *dense* matrix, i.e. a basic **R** matrix.
```{r trainingDense, message=F, warning=F}
bstDense <- xgboost(
data = as.matrix(train$data)
, label = train$label
, max_depth = 2
, eta = 1
, nthread = 2
, nrounds = 2
, objective = "binary:logistic"
)
bstDense <- xgboost(data = as.matrix(train$data), label = train$label, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
```
##### xgb.DMatrix
@@ -189,14 +173,7 @@ bstDense <- xgboost(
```{r trainingDmatrix, message=F, warning=F}
dtrain <- xgb.DMatrix(data = train$data, label = train$label)
bstDMatrix <- xgboost(
data = dtrain
, max_depth = 2
, eta = 1
, nthread = 2
, nrounds = 2
, objective = "binary:logistic"
)
bstDMatrix <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
```
##### Verbose option
@@ -207,41 +184,17 @@ One of the simplest way to see the training progress is to set the `verbose` opt
```{r trainingVerbose0, message=T, warning=F}
# verbose = 0, no message
bst <- xgboost(
data = dtrain
, max_depth = 2
, eta = 1
, nthread = 2
, nrounds = 2
, objective = "binary:logistic"
, verbose = 0
)
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 0)
```
```{r trainingVerbose1, message=T, warning=F}
# verbose = 1, print evaluation metric
bst <- xgboost(
data = dtrain
, max_depth = 2
, eta = 1
, nthread = 2
, nrounds = 2
, objective = "binary:logistic"
, verbose = 1
)
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 1)
```
```{r trainingVerbose2, message=T, warning=F}
# verbose = 2, also print information about tree
bst <- xgboost(
data = dtrain
, max_depth = 2
, eta = 1
, nthread = 2
, nrounds = 2
, objective = "binary:logistic"
, verbose = 2
)
bst <- xgboost(data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 2)
```
## Basic prediction using XGBoost
@@ -314,8 +267,8 @@ Most of the features below have been implemented to help you to improve your mod
For the following advanced features, we need to put data in `xgb.DMatrix` as explained above.
```{r DMatrix, message=F, warning=F}
dtrain <- xgb.DMatrix(data = train$data, label = train$label)
dtest <- xgb.DMatrix(data = test$data, label = test$label)
dtrain <- xgb.DMatrix(data = train$data, label=train$label)
dtest <- xgb.DMatrix(data = test$data, label=test$label)
```
### Measure learning progress with xgb.train
@@ -332,17 +285,9 @@ One way to measure progress in learning of a model is to provide to **XGBoost**
For the purpose of this example, we use the `watchlist` parameter. It is a list of `xgb.DMatrix` objects, each tagged with a name.
```{r watchlist, message=F, warning=F}
watchlist <- list(train = dtrain, test = dtest)
watchlist <- list(train=dtrain, test=dtest)
bst <- xgb.train(
data = dtrain
, max_depth = 2
, eta = 1
, nthread = 2
, nrounds = 2
, watchlist = watchlist
, objective = "binary:logistic"
)
bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nthread = 2, nrounds=2, watchlist=watchlist, objective = "binary:logistic")
```
**XGBoost** has computed at each round the same average error metric as seen above (we set `nrounds` to 2, which is why there are two lines). Obviously, the `train-error` number relates to the training dataset (the one the algorithm learns from) and the `test-error` number to the test dataset.
@@ -354,17 +299,7 @@ If with your own dataset you have not such results, you should think about how y
For a better understanding of the learning progression, you may want to use a specific metric, or even multiple evaluation metrics.
```{r watchlist2, message=F, warning=F}
bst <- xgb.train(
data = dtrain
, max_depth = 2
, eta = 1
, nthread = 2
, nrounds = 2
, watchlist = watchlist
, eval_metric = "error"
, eval_metric = "logloss"
, objective = "binary:logistic"
)
bst <- xgb.train(data=dtrain, max_depth=2, eta=1, nthread = 2, nrounds=2, watchlist=watchlist, eval_metric = "error", eval_metric = "logloss", objective = "binary:logistic")
```
> `eval_metric` allows us to monitor two new metrics for each round, `logloss` and `error`.
@@ -375,17 +310,7 @@ bst <- xgb.train(
Until now, all the learning we have performed has been based on boosted trees. **XGBoost** implements a second algorithm, based on linear boosting. The only difference from the previous command is the `booster = "gblinear"` parameter (and the removal of the `eta` parameter).
```{r linearBoosting, message=F, warning=F}
bst <- xgb.train(
data = dtrain
, booster = "gblinear"
, max_depth = 2
, nthread = 2
, nrounds = 2
, watchlist = watchlist
, eval_metric = "error"
, eval_metric = "logloss"
, objective = "binary:logistic"
)
bst <- xgb.train(data=dtrain, booster = "gblinear", max_depth=2, nthread = 2, nrounds=2, watchlist=watchlist, eval_metric = "error", eval_metric = "logloss", objective = "binary:logistic")
```
In this specific case, *linear boosting* gets slightly better performance metrics than the decision-tree-based algorithm.
@@ -403,15 +328,7 @@ Like saving models, `xgb.DMatrix` object (which groups both dataset and outcome)
xgb.DMatrix.save(dtrain, "dtrain.buffer")
# to load it in, simply call xgb.DMatrix
dtrain2 <- xgb.DMatrix("dtrain.buffer")
bst <- xgb.train(
data = dtrain2
, max_depth = 2
, eta = 1
, nthread = 2
, nrounds = 2
, watchlist = watchlist
, objective = "binary:logistic"
)
bst <- xgb.train(data=dtrain2, max_depth=2, eta=1, nthread = 2, nrounds=2, watchlist=watchlist, objective = "binary:logistic")
```
```{r DMatrixDel, include=FALSE}
@@ -423,9 +340,9 @@ file.remove("dtrain.buffer")
Information can be extracted from an `xgb.DMatrix` using the `getinfo` function. Hereafter we will extract the `label` data.
```{r getinfo, message=F, warning=F}
label <- getinfo(dtest, "label")
label = getinfo(dtest, "label")
pred <- predict(bst, dtest)
err <- as.numeric(sum(as.integer(pred > 0.5) != label)) / length(label)
err <- as.numeric(sum(as.integer(pred > 0.5) != label))/length(label)
print(paste("test-error=", err))
```
@@ -479,7 +396,7 @@ bst2 <- xgb.load("xgboost.model")
pred2 <- predict(bst2, test$data)
# And now the test
print(paste("sum(abs(pred2-pred))=", sum(abs(pred2 - pred))))
print(paste("sum(abs(pred2-pred))=", sum(abs(pred2-pred))))
```
```{r clean, include=FALSE}
@@ -503,7 +420,7 @@ bst3 <- xgb.load(rawVec)
pred3 <- predict(bst3, test$data)
# pred2 should be identical to pred
print(paste("sum(abs(pred3-pred))=", sum(abs(pred2 - pred))))
print(paste("sum(abs(pred3-pred))=", sum(abs(pred2-pred))))
```
> Again `0`? It seems that `XGBoost` works pretty well!


@@ -30,7 +30,7 @@ For the purpose of this tutorial we will load the xgboost, jsonlite, and float p
require(xgboost)
require(jsonlite)
require(float)
options(digits = 22)
options(digits=22)
```
We will create a toy binary logistic model based on the example first provided [here](https://github.com/dmlc/xgboost/issues/3960), so that we can easily understand the structure of the dumped JSON model object. This will allow us to see where discrepancies can occur and how they should be handled.
@@ -50,7 +50,7 @@ labels <- c(1, 1, 1,
0, 0, 0,
0, 0, 0)
data <- data.frame(dates = dates, labels = labels)
data <- data.frame(dates = dates, labels=labels)
bst <- xgboost(
data = as.matrix(data$dates),
@@ -69,7 +69,7 @@ We will now dump the model to JSON and attempt to illustrate a variety of issues
First let's dump the model to JSON:
```{r}
bst_json <- xgb.dump(bst, with_stats = FALSE, dump_format = 'json')
bst_json <- xgb.dump(bst, with_stats = FALSE, dump_format='json')
bst_from_json <- fromJSON(bst_json, simplifyDataFrame = FALSE)
node <- bst_from_json[[1]]
cat(bst_json)
@@ -78,10 +78,10 @@ cat(bst_json)
The tree JSON shown by the above code-chunk tells us that if the data is less than 20180132, the tree will output the value in the first leaf. Otherwise it will output the value in the second leaf. Let's try to reproduce this manually with the data we have and confirm that it matches the model predictions we've already calculated.
```{r}
bst_preds_logodds <- predict(bst, as.matrix(data$dates), outputmargin = TRUE)
bst_preds_logodds <- predict(bst,as.matrix(data$dates), outputmargin = TRUE)
# calculate the logodds values using the JSON representation
bst_from_json_logodds <- ifelse(data$dates < node$split_condition,
bst_from_json_logodds <- ifelse(data$dates<node$split_condition,
node$children[[1]]$leaf,
node$children[[2]]$leaf)
@@ -106,19 +106,19 @@ At this stage two things happened:
To explain this, let's repeat the comparison and round to two decimals:
```{r}
round(bst_preds_logodds, 2) == round(bst_from_json_logodds, 2)
round(bst_preds_logodds,2) == round(bst_from_json_logodds,2)
```
If we round to two decimals, we see that only the elements related to data values of `20180131` don't agree. If we convert the data to floats, they agree:
```{r}
# now convert the dates to floats first
bst_from_json_logodds <- ifelse(fl(data$dates) < node$split_condition,
bst_from_json_logodds <- ifelse(fl(data$dates)<node$split_condition,
node$children[[1]]$leaf,
node$children[[2]]$leaf)
# test that values are equal
round(bst_preds_logodds, 2) == round(bst_from_json_logodds, 2)
round(bst_preds_logodds,2) == round(bst_from_json_logodds,2)
```
What's the lesson? If we are going to work with an imported JSON model, any data must be converted to floats first. In this case, since '20180131' cannot be represented as a 32-bit float, it is rounded up to 20180132, as shown here:
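As a quick numeric check, you can round-trip the offending value through a 32-bit float yourself (a minimal sketch, assuming the `fl()` and `dbl()` helpers from the `float` package loaded above):

```r
# Sketch: 20180131 has no exact 32-bit float representation, so casting it
# rounds it to a neighbouring representable integer (20180132 here).
dbl(fl(20180131))
```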
@@ -143,7 +143,7 @@ None are exactly equal. What happened? Although we've converted the data to 32
```{r}
# now convert the dates to floats first
bst_from_json_logodds <- ifelse(fl(data$dates) < fl(node$split_condition),
bst_from_json_logodds <- ifelse(fl(data$dates)<fl(node$split_condition),
as.numeric(fl(node$children[[1]]$leaf)),
as.numeric(fl(node$children[[2]]$leaf)))
@@ -160,13 +160,12 @@ We were able to get the log-odds to agree, so now let's manually calculate the s
```{r}
bst_preds <- predict(bst, as.matrix(data$dates))
bst_preds <- predict(bst,as.matrix(data$dates))
# calculate the predictions casting doubles to floats
bst_from_json_preds <- ifelse(
fl(data$dates) < fl(node$split_condition)
, as.numeric(1 / (1 + exp(-1 * fl(node$children[[1]]$leaf))))
, as.numeric(1 / (1 + exp(-1 * fl(node$children[[2]]$leaf))))
bst_from_json_preds <- ifelse(fl(data$dates)<fl(node$split_condition),
as.numeric(1/(1+exp(-1*fl(node$children[[1]]$leaf)))),
as.numeric(1/(1+exp(-1*fl(node$children[[2]]$leaf))))
)
# test that values are equal
@@ -178,10 +177,9 @@ None are exactly equal again. What is going on here? Well, since we are using
How do we fix this? We have to ensure we use the correct data types everywhere and the correct operators. If we use only floats, the float library that we have loaded will ensure the 32-bit float exponentiation operator is applied.
```{r}
# calculate the predictions casting doubles to floats
bst_from_json_preds <- ifelse(
fl(data$dates) < fl(node$split_condition)
, as.numeric(fl(1) / (fl(1) + exp(fl(-1) * fl(node$children[[1]]$leaf))))
, as.numeric(fl(1) / (fl(1) + exp(fl(-1) * fl(node$children[[2]]$leaf))))
bst_from_json_preds <- ifelse(fl(data$dates)<fl(node$split_condition),
as.numeric(fl(1)/(fl(1)+exp(fl(-1)*fl(node$children[[1]]$leaf)))),
as.numeric(fl(1)/(fl(1)+exp(fl(-1)*fl(node$children[[2]]$leaf))))
)
# test that values are equal


@@ -1,6 +1,7 @@
<img src="https://xgboost.ai/images/logo/xgboost-logo.svg" width=135/> eXtreme Gradient Boosting
<img src=https://raw.githubusercontent.com/dmlc/dmlc.github.io/master/img/logo-m/xgboost.png width=135/> eXtreme Gradient Boosting
===========
[![Build Status](https://badge.buildkite.com/aca47f40a32735c00a8550540c5eeff6a4c1d246a580cae9b0.svg?branch=master)](https://buildkite.com/xgboost/xgboost-ci)
[![Build Status](https://xgboost-ci.net/job/xgboost/job/master/badge/icon)](https://xgboost-ci.net/blue/organizations/jenkins/xgboost/activity)
[![Build Status](https://img.shields.io/travis/dmlc/xgboost.svg?label=build&logo=travis&branch=master)](https://travis-ci.org/dmlc/xgboost)
[![XGBoost-CI](https://github.com/dmlc/xgboost/workflows/XGBoost-CI/badge.svg?branch=master)](https://github.com/dmlc/xgboost/actions)
[![Documentation Status](https://readthedocs.org/projects/xgboost/badge/?version=latest)](https://xgboost.readthedocs.org)
[![GitHub license](http://dmlc.github.io/img/apache2.svg)](./LICENSE)
@@ -20,7 +21,7 @@
XGBoost is an optimized distributed gradient boosting library designed to be highly ***efficient***, ***flexible*** and ***portable***.
It implements machine learning algorithms under the [Gradient Boosting](https://en.wikipedia.org/wiki/Gradient_boosting) framework.
XGBoost provides a parallel tree boosting (also known as GBDT, GBM) that solves many data science problems in a fast and accurate way.
The same code runs on major distributed environment (Kubernetes, Hadoop, SGE, Dask, Spark, PySpark) and can solve problems beyond billions of examples.
The same code runs on major distributed environment (Kubernetes, Hadoop, SGE, MPI, Dask) and can solve problems beyond billions of examples.
License
-------

cmake/Python_version.in

@@ -0,0 +1 @@
@xgboost_VERSION_MAJOR@.@xgboost_VERSION_MINOR@.@xgboost_VERSION_PATCH@


@@ -8,6 +8,9 @@ macro(enable_sanitizer sanitizer)
if(${sanitizer} MATCHES "address")
find_package(ASan)
set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=address")
if (ASan_FOUND)
link_libraries(${ASan_LIBRARY})
endif (ASan_FOUND)
elseif(${sanitizer} MATCHES "thread")
find_package(TSan)
@@ -19,10 +22,16 @@ macro(enable_sanitizer sanitizer)
elseif(${sanitizer} MATCHES "leak")
find_package(LSan)
set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=leak")
if (LSan_FOUND)
link_libraries(${LSan_LIBRARY})
endif (LSan_FOUND)
elseif(${sanitizer} MATCHES "undefined")
find_package(UBSan)
set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=undefined -fno-sanitize-recover=undefined")
if (UBSan_FOUND)
link_libraries(${UBSan_LIBRARY})
endif (UBSan_FOUND)
else()
message(FATAL_ERROR "Sanitizer ${sanitizer} not supported.")


@@ -124,6 +124,13 @@ function(format_gencode_flags flags out)
endif (CMAKE_VERSION VERSION_GREATER_EQUAL "3.18")
endfunction(format_gencode_flags flags)
macro(enable_nvtx target)
find_package(NVTX REQUIRED)
target_include_directories(${target} PRIVATE "${NVTX_INCLUDE_DIR}")
target_link_libraries(${target} PRIVATE "${NVTX_LIBRARY}")
target_compile_definitions(${target} PRIVATE -DXGBOOST_USE_NVTX=1)
endmacro()
# Set CUDA related flags to target. Must be used after code `format_gencode_flags`.
function(xgboost_set_cuda_flags target)
target_compile_options(${target} PRIVATE
@@ -155,24 +162,33 @@ function(xgboost_set_cuda_flags target)
endif (USE_DEVICE_DEBUG)
if (USE_NVTX)
target_compile_definitions(${target} PRIVATE -DXGBOOST_USE_NVTX=1)
enable_nvtx(${target})
endif (USE_NVTX)
target_compile_definitions(${target} PRIVATE -DXGBOOST_USE_CUDA=1)
target_include_directories(
${target} PRIVATE
${xgboost_SOURCE_DIR}/gputreeshap
${CUDAToolkit_INCLUDE_DIRS})
if (NOT BUILD_WITH_CUDA_CUB)
target_compile_definitions(${target} PRIVATE -DXGBOOST_USE_CUDA=1 -DTHRUST_IGNORE_CUB_VERSION_CHECK=1)
target_include_directories(${target} PRIVATE ${xgboost_SOURCE_DIR}/cub/ ${xgboost_SOURCE_DIR}/gputreeshap)
else ()
target_compile_definitions(${target} PRIVATE -DXGBOOST_USE_CUDA=1)
target_include_directories(${target} PRIVATE ${xgboost_SOURCE_DIR}/gputreeshap)
endif (NOT BUILD_WITH_CUDA_CUB)
if (MSVC)
target_compile_options(${target} PRIVATE
$<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler=/utf-8>)
endif (MSVC)
set_target_properties(${target} PROPERTIES
CUDA_STANDARD 17
CUDA_STANDARD_REQUIRED ON
CUDA_SEPARABLE_COMPILATION OFF)
if (PLUGIN_RMM)
set_target_properties(${target} PROPERTIES
CUDA_STANDARD 17
CUDA_STANDARD_REQUIRED ON
CUDA_SEPARABLE_COMPILATION OFF)
else ()
set_target_properties(${target} PROPERTIES
CUDA_STANDARD 14
CUDA_STANDARD_REQUIRED ON
CUDA_SEPARABLE_COMPILATION OFF)
endif (PLUGIN_RMM)
endfunction(xgboost_set_cuda_flags)
macro(xgboost_link_nccl target)
@@ -189,10 +205,17 @@ endmacro(xgboost_link_nccl)
# compile options
macro(xgboost_target_properties target)
set_target_properties(${target} PROPERTIES
CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON)
if (PLUGIN_RMM)
set_target_properties(${target} PROPERTIES
CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON)
else ()
set_target_properties(${target} PROPERTIES
CXX_STANDARD 14
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON)
endif (PLUGIN_RMM)
if (HIDE_CXX_SYMBOLS)
#-- Hide all C++ symbols
@@ -285,7 +308,7 @@ macro(xgboost_target_link_libraries target)
endif (USE_NCCL)
if (USE_NVTX)
target_link_libraries(${target} PRIVATE CUDA::nvToolsExt)
enable_nvtx(${target})
endif (USE_NVTX)
if (RABIT_BUILD_MPI)


@@ -3,4 +3,7 @@ function (write_version)
configure_file(
${xgboost_SOURCE_DIR}/cmake/version_config.h.in
${xgboost_SOURCE_DIR}/include/xgboost/version_config.h @ONLY)
configure_file(
${xgboost_SOURCE_DIR}/cmake/Python_version.in
${xgboost_SOURCE_DIR}/python-package/xgboost/VERSION @ONLY)
endfunction (write_version)


@@ -1,7 +1,7 @@
set(ASan_LIB_NAME ASan)
find_library(ASan_LIBRARY
NAMES libasan.so libasan.so.6 libasan.so.5 libasan.so.4 libasan.so.3 libasan.so.2 libasan.so.1 libasan.so.0
NAMES libasan.so libasan.so.5 libasan.so.4 libasan.so.3 libasan.so.2 libasan.so.1 libasan.so.0
PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib)
include(FindPackageHandleStandardArgs)


@@ -0,0 +1,26 @@
if (NVTX_LIBRARY)
unset(NVTX_LIBRARY CACHE)
endif (NVTX_LIBRARY)
set(NVTX_LIB_NAME nvToolsExt)
find_path(NVTX_INCLUDE_DIR
NAMES nvToolsExt.h
PATHS ${CUDA_HOME}/include ${CUDA_INCLUDE} /usr/local/cuda/include)
find_library(NVTX_LIBRARY
NAMES nvToolsExt
PATHS ${CUDA_HOME}/lib64 /usr/local/cuda/lib64)
message(STATUS "Using nvtx library: ${NVTX_LIBRARY}")
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(NVTX DEFAULT_MSG
NVTX_INCLUDE_DIR NVTX_LIBRARY)
mark_as_advanced(
NVTX_INCLUDE_DIR
NVTX_LIBRARY
)


@@ -1,11 +1,11 @@
/**
* Copyright 2019-2023 by XGBoost contributors
/*!
* Copyright 2019 XGBoost contributors
*/
#ifndef XGBOOST_VERSION_CONFIG_H_
#define XGBOOST_VERSION_CONFIG_H_
#define XGBOOST_VER_MAJOR @xgboost_VERSION_MAJOR@ /* NOLINT */
#define XGBOOST_VER_MINOR @xgboost_VERSION_MINOR@ /* NOLINT */
#define XGBOOST_VER_PATCH @xgboost_VERSION_PATCH@ /* NOLINT */
#define XGBOOST_VER_MAJOR @xgboost_VERSION_MAJOR@
#define XGBOOST_VER_MINOR @xgboost_VERSION_MINOR@
#define XGBOOST_VER_PATCH @xgboost_VERSION_PATCH@
#endif // XGBOOST_VERSION_CONFIG_H_

cub Submodule

Submodule cub added at af39ee264f


@@ -1,7 +1,7 @@
#!/usr/bin/env python3
import random
import sys
import random
if len(sys.argv) < 2:
print ('Usage:<filename> <k> [nfold = 5]')


@@ -20,10 +20,10 @@ num_round = 2
# 0 means do not save any model except the final round model
save_period = 2
# The path of training data
data = "agaricus.txt.train?format=libsvm"
data = "agaricus.txt.train"
# The path of validation data, used to monitor training process, here [test] sets name of the validation set
eval[test] = "agaricus.txt.test?format=libsvm"
eval[test] = "agaricus.txt.test"
# evaluate on training data as well each round
eval_train = 1
# The path of test data
test:data = "agaricus.txt.test?format=libsvm"
test:data = "agaricus.txt.test"


@@ -21,8 +21,8 @@ num_round = 2
# 0 means do not save any model except the final round model
save_period = 0
# The path of training data
data = "machine.txt.train?format=libsvm"
data = "machine.txt.train"
# The path of validation data, used to monitor training process, here [test] sets name of the validation set
eval[test] = "machine.txt.test?format=libsvm"
eval[test] = "machine.txt.test"
# The path of test data
test:data = "machine.txt.test?format=libsvm"
test:data = "machine.txt.test"


@@ -1,7 +1,7 @@
#!/usr/bin/env python3
import random
import sys
import random
if len(sys.argv) < 2:
print('Usage:<filename> <k> [nfold = 5]')


@@ -1,7 +1,6 @@
#!/usr/bin/env python3
import sys
fo = open(sys.argv[2], 'w')
for l in open(sys.argv[1]):


@@ -6,11 +6,9 @@ Demo for survival analysis (regression). using Accelerated Failure Time (AFT) mo
"""
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import ShuffleSplit
import pandas as pd
import numpy as np
import xgboost as xgb
# The Veterans' Administration Lung Cancer Trial


@@ -6,12 +6,11 @@ Demo for survival analysis (regression) using Accelerated Failure Time (AFT) mod
using Optuna to tune hyperparameters
"""
import numpy as np
import optuna
import pandas as pd
from sklearn.model_selection import ShuffleSplit
import pandas as pd
import numpy as np
import xgboost as xgb
import optuna
# The Veterans' Administration Lung Cancer Trial
# The Statistical Analysis of Failure Time Data by Kalbfleisch J. and Prentice R (1980)


@@ -6,48 +6,37 @@ This demo uses 1D toy data and visualizes how XGBoost fits a tree ensemble. The
model starts out as a flat line and evolves into a step function in order to account for
all ranged labels.
"""
import matplotlib.pyplot as plt
import numpy as np
import xgboost as xgb
import matplotlib.pyplot as plt
plt.rcParams.update({"font.size": 13})
plt.rcParams.update({'font.size': 13})
# Function to visualize censored labels
def plot_censored_labels(
X: np.ndarray, y_lower: np.ndarray, y_upper: np.ndarray
) -> None:
def replace_inf(x: np.ndarray, target_value: float) -> np.ndarray:
def plot_censored_labels(X, y_lower, y_upper):
def replace_inf(x, target_value):
x[np.isinf(x)] = target_value
return x
plt.plot(X, y_lower, "o", label="y_lower", color="blue")
plt.plot(X, y_upper, "o", label="y_upper", color="fuchsia")
plt.vlines(
X,
ymin=replace_inf(y_lower, 0.01),
ymax=replace_inf(y_upper, 1000.0),
label="Range for y",
color="gray",
)
plt.plot(X, y_lower, 'o', label='y_lower', color='blue')
plt.plot(X, y_upper, 'o', label='y_upper', color='fuchsia')
plt.vlines(X, ymin=replace_inf(y_lower, 0.01), ymax=replace_inf(y_upper, 1000),
label='Range for y', color='gray')
# Toy data
X = np.array([1, 2, 3, 4, 5]).reshape((-1, 1))
INF = np.inf
y_lower = np.array([10, 15, -INF, 30, 100])
y_upper = np.array([INF, INF, 20, 50, INF])
y_lower = np.array([ 10, 15, -INF, 30, 100])
y_upper = np.array([INF, INF, 20, 50, INF])
# Visualize toy data
plt.figure(figsize=(5, 4))
plot_censored_labels(X, y_lower, y_upper)
plt.ylim((6, 200))
plt.legend(loc="lower right")
plt.title("Toy data")
plt.xlabel("Input feature")
plt.ylabel("Label")
plt.yscale("log")
plt.legend(loc='lower right')
plt.title('Toy data')
plt.xlabel('Input feature')
plt.ylabel('Label')
plt.yscale('log')
plt.tight_layout()
plt.show(block=True)
@@ -56,83 +45,54 @@ grid_pts = np.linspace(0.8, 5.2, 1000).reshape((-1, 1))
# Train AFT model using XGBoost
dmat = xgb.DMatrix(X)
dmat.set_float_info("label_lower_bound", y_lower)
dmat.set_float_info("label_upper_bound", y_upper)
params = {"max_depth": 3, "objective": "survival:aft", "min_child_weight": 0}
dmat.set_float_info('label_lower_bound', y_lower)
dmat.set_float_info('label_upper_bound', y_upper)
params = {'max_depth': 3, 'objective':'survival:aft', 'min_child_weight': 0}
accuracy_history = []
def plot_intermediate_model_callback(env):
"""Custom callback to plot intermediate models"""
# Compute y_pred = prediction using the intermediate model, at current boosting iteration
y_pred = env.model.predict(dmat)
# "Accuracy" = the number of data points whose ranged label (y_lower, y_upper) includes
# the corresponding predicted label (y_pred)
acc = np.sum(np.logical_and(y_pred >= y_lower, y_pred <= y_upper)/len(X) * 100)
accuracy_history.append(acc)
# Plot ranged labels as well as predictions by the model
plt.subplot(5, 3, env.iteration + 1)
plot_censored_labels(X, y_lower, y_upper)
y_pred_grid_pts = env.model.predict(xgb.DMatrix(grid_pts))
plt.plot(grid_pts, y_pred_grid_pts, 'r-', label='XGBoost AFT model', linewidth=4)
plt.title('Iteration {}'.format(env.iteration), x=0.5, y=0.8)
plt.xlim((0.8, 5.2))
plt.ylim((1 if np.min(y_pred) < 6 else 6, 200))
plt.yscale('log')
class PlotIntermediateModel(xgb.callback.TrainingCallback):
"""Custom callback to plot intermediate models."""
def __init__(self) -> None:
super().__init__()
def after_iteration(
self,
model: xgb.Booster,
epoch: int,
evals_log: xgb.callback.TrainingCallback.EvalsLog,
) -> bool:
"""Run after training is finished."""
# Compute y_pred = prediction using the intermediate model, at current boosting
# iteration
y_pred = model.predict(dmat)
# "Accuracy" = the number of data points whose ranged label (y_lower, y_upper)
# includes the corresponding predicted label (y_pred)
acc = np.sum(
np.logical_and(y_pred >= y_lower, y_pred <= y_upper) / len(X) * 100
)
accuracy_history.append(acc)
# Plot ranged labels as well as predictions by the model
plt.subplot(5, 3, epoch + 1)
plot_censored_labels(X, y_lower, y_upper)
y_pred_grid_pts = model.predict(xgb.DMatrix(grid_pts))
plt.plot(
grid_pts, y_pred_grid_pts, "r-", label="XGBoost AFT model", linewidth=4
)
plt.title("Iteration {}".format(epoch), x=0.5, y=0.8)
plt.xlim((0.8, 5.2))
plt.ylim((1 if np.min(y_pred) < 6 else 6, 200))
plt.yscale("log")
return False
res: xgb.callback.TrainingCallback.EvalsLog = {}
plt.figure(figsize=(12, 13))
bst = xgb.train(
params,
dmat,
15,
[(dmat, "train")],
evals_result=res,
callbacks=[PlotIntermediateModel()],
)
res = {}
plt.figure(figsize=(12,13))
bst = xgb.train(params, dmat, 15, [(dmat, 'train')], evals_result=res,
callbacks=[plot_intermediate_model_callback])
plt.tight_layout()
plt.legend(
loc="lower center",
ncol=4,
bbox_to_anchor=(0.5, 0),
bbox_transform=plt.gcf().transFigure,
)
plt.legend(loc='lower center', ncol=4,
bbox_to_anchor=(0.5, 0),
bbox_transform=plt.gcf().transFigure)
plt.tight_layout()
# Plot negative log likelihood over boosting iterations
plt.figure(figsize=(8, 3))
plt.figure(figsize=(8,3))
plt.subplot(1, 2, 1)
plt.plot(res["train"]["aft-nloglik"], "b-o", label="aft-nloglik")
plt.xlabel("# Boosting Iterations")
plt.legend(loc="best")
plt.plot(res['train']['aft-nloglik'], 'b-o', label='aft-nloglik')
plt.xlabel('# Boosting Iterations')
plt.legend(loc='best')
# Plot "accuracy" over boosting iterations
# "Accuracy" = the number of data points whose ranged label (y_lower, y_upper) includes
# the corresponding predicted label (y_pred)
plt.subplot(1, 2, 2)
plt.plot(accuracy_history, "r-o", label="Accuracy (%)")
plt.xlabel("# Boosting Iterations")
plt.legend(loc="best")
plt.plot(accuracy_history, 'r-o', label='Accuracy (%)')
plt.xlabel('# Boosting Iterations')
plt.legend(loc='best')
plt.tight_layout()
plt.show()


@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.18)
cmake_minimum_required(VERSION 3.13)
project(xgboost-c-examples)
add_subdirectory(basic)


@@ -1,16 +1,13 @@
/**
* Copyright 2019-2023 by XGBoost contributors
/*!
* Copyright 2019 XGBoost contributors
*
* \file c-api-demo.c
* \brief A simple example of using xgboost C API.
*/
#include <assert.h>
#include <stddef.h>
#include <stdint.h> /* uint32_t,uint64_t */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <xgboost/c_api.h>
#define safe_xgboost(call) { \
@@ -21,29 +18,14 @@ if (err != 0) { \
} \
}
/* Make Json encoded array interface. */
static void MakeArrayInterface(size_t data, size_t n, char const* typestr, size_t length,
char* out) {
static char const kTemplate[] =
"{\"data\": [%lu, true], \"shape\": [%lu, %lu], \"typestr\": \"%s\", \"version\": 3}";
memset(out, '\0', length);
sprintf(out, kTemplate, data, n, 1ul, typestr);
}
/* Make Json encoded DMatrix configuration. */
static void MakeConfig(int n_threads, size_t length, char* out) {
static char const kTemplate[] = "{\"missing\": NaN, \"nthread\": %d}";
memset(out, '\0', length);
sprintf(out, kTemplate, n_threads);
}
int main() {
int silent = 0;
int use_gpu = 0; // set to 1 to use the GPU for training
// load the data
DMatrixHandle dtrain, dtest;
safe_xgboost(XGDMatrixCreateFromFile("../../data/agaricus.txt.train?format=libsvm", silent, &dtrain));
safe_xgboost(XGDMatrixCreateFromFile("../../data/agaricus.txt.test?format=libsvm", silent, &dtest));
safe_xgboost(XGDMatrixCreateFromFile("../../data/agaricus.txt.train", silent, &dtrain));
safe_xgboost(XGDMatrixCreateFromFile("../../data/agaricus.txt.test", silent, &dtest));
// create the booster
BoosterHandle booster;
@@ -139,27 +121,17 @@ int main() {
}
{
printf("Sparse Matrix Example (XGDMatrixCreateFromCSR): ");
printf("Sparse Matrix Example (XGDMatrixCreateFromCSREx): ");
const uint64_t indptr[] = {0, 22};
const uint32_t indices[] = {1, 9, 19, 21, 24, 34, 36, 39, 42, 53, 56,
65, 69, 77, 86, 88, 92, 95, 102, 106, 117, 122};
const size_t indptr[] = {0, 22};
const unsigned indices[] = {1, 9, 19, 21, 24, 34, 36, 39, 42, 53, 56, 65,
69, 77, 86, 88, 92, 95, 102, 106, 117, 122};
const float data[] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
DMatrixHandle dmat;
char j_indptr[128];
MakeArrayInterface((size_t)indptr, 2ul, "<u8", sizeof(j_indptr), j_indptr);
char j_indices[128];
MakeArrayInterface((size_t)indices, sizeof(indices) / sizeof(uint32_t), "<u4",
sizeof(j_indices), j_indices);
char j_data[128];
MakeArrayInterface((size_t)data, sizeof(data) / sizeof(float), "<f4", sizeof(j_data), j_data);
char j_config[64];
MakeConfig(0, sizeof(j_config), j_config);
safe_xgboost(XGDMatrixCreateFromCSR(j_indptr, j_indices, j_data, 127, j_config, &dmat));
safe_xgboost(XGDMatrixCreateFromCSREx(indptr, indices, data, 2, 22, 127,
&dmat));
const float* out_result = NULL;
@@ -173,34 +145,25 @@ int main() {
}
{
printf("Sparse Matrix Example (XGDMatrixCreateFromCSC): ");
printf("Sparse Matrix Example (XGDMatrixCreateFromCSCEx): ");
const uint64_t indptr[] = {
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3,
4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7, 7, 8, 8, 8, 9,
9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15,
15, 16, 16, 16, 16, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20,
20, 20, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 22, 22, 22, 22, 22};
const size_t col_ptr[] = {0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7, 7, 8,
8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 11, 11, 11, 11, 11, 11,
11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14,
14, 14, 14, 14, 14, 14, 15, 15, 16, 16, 16, 16, 17, 17, 17, 18, 18, 18,
18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
20, 21, 21, 21, 21, 21, 22, 22, 22, 22, 22};
const uint32_t indices[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
const unsigned indices[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0};
const float data[] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
char j_indptr[128];
MakeArrayInterface((size_t)indptr, 128ul, "<u8", sizeof(j_indptr), j_indptr);
char j_indices[128];
MakeArrayInterface((size_t)indices, sizeof(indices) / sizeof(unsigned), "<u4",
sizeof(j_indices), j_indices);
char j_data[128];
MakeArrayInterface((size_t)data, sizeof(data) / sizeof(float), "<f4", sizeof(j_data), j_data);
char j_config[64];
MakeConfig(0, sizeof(j_config), j_config);
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
DMatrixHandle dmat;
safe_xgboost(XGDMatrixCreateFromCSC(j_indptr, j_indices, j_data, 1, j_config, &dmat));
safe_xgboost(XGDMatrixCreateFromCSCEx(col_ptr, indices, data, 128, 22, 1,
&dmat));
const float* out_result = NULL;


@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.18)
cmake_minimum_required(VERSION 3.13)
project(external-memory-demo LANGUAGES C VERSION 0.0.1)
find_package(xgboost REQUIRED)


@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.18)
cmake_minimum_required(VERSION 3.13)
project(inference-demo LANGUAGES C VERSION 0.0.1)
find_package(xgboost REQUIRED)


@@ -4,14 +4,12 @@ Example of training survival model with Dask on CPU
"""
import os
import dask.dataframe as dd
from dask.distributed import Client, LocalCluster
import xgboost as xgb
import os
from xgboost.dask import DaskDMatrix
import dask.dataframe as dd
from dask.distributed import Client
from dask.distributed import LocalCluster
def main(client):
# Load an example survival data from CSV into a Dask data frame.

Some files were not shown because too many files have changed in this diff.