Compare commits: release_2. ... release_1.

62 Commits
| SHA1 |
|---|
| 36eb41c960 |
| 39ddf40a8d |
| 573f1c7db4 |
| abc80d2a6d |
| e882fb3262 |
| 3218f6cd3c |
| a962611de7 |
| 14476e8868 |
| 03f3879b71 |
| 21d95f3d8f |
| 5cd4015d70 |
| b8c6b86792 |
| 1baebe231b |
| 365da0b8f4 |
| f5f03dfb61 |
| a1c209182d |
| 4be75d852c |
| ba50e6eb62 |
| 36ad160501 |
| c22f6db4bf |
| f15a6d2b19 |
| 08a547f5c2 |
| 60303db2ee |
| df984f9c43 |
| 2f22f8d49b |
| 68d86336d7 |
| 76bdca072a |
| 021e6a842a |
| e5bef4ffce |
| 10bb0a74ef |
| e803d06d8c |
| ccf43d4ba0 |
| dd58c2ac47 |
| 899e4c8988 |
| a2085bf223 |
| 067b704e58 |
| 1a834b2b85 |
| 162b48a1a4 |
| 83a078b7e5 |
| 575fba651b |
| 62ed8b5fef |
| a980e10744 |
| 59c54e361b |
| 60a8c8ebba |
| 58bc225657 |
| 850b53100f |
| 67b657dad0 |
| db14e3feb7 |
| 9372370dda |
| 1136a7e0c3 |
| a347cd512b |
| 9ff0c0832a |
| 534c940a7e |
| 5b76acccff |
| 4bc59ef7c3 |
| e43cd60c0e |
| 3f92970a39 |
| e17f7010bf |
| aa30ce10da |
| 153d995b58 |
| 463313d9be |
| 7cf58a2c65 |
.clang-tidy

```diff
@@ -1,4 +1,4 @@
-Checks: 'modernize-*,-modernize-use-nodiscard,-modernize-concat-nested-namespaces,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,-modernize-avoid-c-arrays,-modernize-use-trailing-return-type,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
+Checks: 'modernize-*,-modernize-make-*,-modernize-use-auto,-modernize-raw-string-literal,-modernize-avoid-c-arrays,-modernize-use-trailing-return-type,google-*,-google-default-arguments,-clang-diagnostic-#pragma-messages,readability-identifier-naming'
 CheckOptions:
   - { key: readability-identifier-naming.ClassCase, value: CamelCase }
   - { key: readability-identifier-naming.StructCase, value: CamelCase }
```
.gitattributes (vendored, deleted file, 18 lines)

```
* text=auto

*.c text eol=lf
*.h text eol=lf
*.cc text eol=lf
*.cuh text eol=lf
*.cu text eol=lf
*.py text eol=lf
*.txt text eol=lf
*.R text eol=lf
*.scala text eol=lf
*.java text eol=lf

*.sh text eol=lf

*.rst text eol=lf
*.md text eol=lf
*.csv text eol=lf
```
.github/dependabot.yml (vendored, deleted file, 31 lines)

```yaml
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates

version: 2
updates:
  - package-ecosystem: "maven"
    directory: "/jvm-packages"
    schedule:
      interval: "daily"
  - package-ecosystem: "maven"
    directory: "/jvm-packages/xgboost4j"
    schedule:
      interval: "daily"
  - package-ecosystem: "maven"
    directory: "/jvm-packages/xgboost4j-gpu"
    schedule:
      interval: "daily"
  - package-ecosystem: "maven"
    directory: "/jvm-packages/xgboost4j-example"
    schedule:
      interval: "daily"
  - package-ecosystem: "maven"
    directory: "/jvm-packages/xgboost4j-spark"
    schedule:
      interval: "daily"
  - package-ecosystem: "maven"
    directory: "/jvm-packages/xgboost4j-spark-gpu"
    schedule:
      interval: "daily"
```
.github/workflows/jvm_tests.yml (vendored, 41 changes)

```diff
@@ -15,16 +15,16 @@ jobs:
         os: [windows-latest, ubuntu-latest, macos-11]
 
     steps:
-    - uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791  # v2.5.0
+    - uses: actions/checkout@v2
       with:
         submodules: 'true'
 
-    - uses: actions/setup-python@7f80679172b057fc5e90d70d197929d454754a5a  # v4.3.0
+    - uses: actions/setup-python@v2
       with:
         python-version: '3.8'
         architecture: 'x64'
 
-    - uses: actions/setup-java@d202f5dbf7256730fb690ec59f6381650114feb2  # v3.6.0
+    - uses: actions/setup-java@v1
       with:
         java-version: 1.8
 
@@ -34,13 +34,13 @@ jobs:
         python -m pip install awscli
 
     - name: Cache Maven packages
-      uses: actions/cache@6998d139ddd3e68c71e9e398d8e40b71a2f39812  # v3.2.5
+      uses: actions/cache@v2
       with:
         path: ~/.m2
         key: ${{ runner.os }}-m2-${{ hashFiles('./jvm-packages/pom.xml') }}
-        restore-keys: ${{ runner.os }}-m2-${{ hashFiles('./jvm-packages/pom.xml') }}
+        restore-keys: ${{ runner.os }}-m2
 
-    - name: Test XGBoost4J (Core)
+    - name: Test XGBoost4J
      run: |
        cd jvm-packages
        mvn test -B -pl :xgboost4j_2.12
@@ -51,14 +51,14 @@ jobs:
       id: extract_branch
       if: |
         (github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')) &&
-        (matrix.os == 'windows-latest' || matrix.os == 'macos-11')
+        matrix.os == 'windows-latest'
 
     - name: Publish artifact xgboost4j.dll to S3
       run: |
         cd lib/
         Rename-Item -Path xgboost4j.dll -NewName xgboost4j_${{ github.sha }}.dll
         dir
-        python -m awscli s3 cp xgboost4j_${{ github.sha }}.dll s3://xgboost-nightly-builds/${{ steps.extract_branch.outputs.branch }}/libxgboost4j/ --acl public-read
+        python -m awscli s3 cp xgboost4j_${{ github.sha }}.dll s3://xgboost-nightly-builds/${{ steps.extract_branch.outputs.branch }}/ --acl public-read
       if: |
         (github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')) &&
         matrix.os == 'windows-latest'
@@ -66,21 +66,8 @@ jobs:
         AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID_IAM_S3_UPLOADER }}
         AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY_IAM_S3_UPLOADER }}
 
-    - name: Publish artifact libxgboost4j.dylib to S3
-      run: |
-        cd lib/
-        mv -v libxgboost4j.dylib libxgboost4j_${{ github.sha }}.dylib
-        ls
-        python -m awscli s3 cp libxgboost4j_${{ github.sha }}.dylib s3://xgboost-nightly-builds/${{ steps.extract_branch.outputs.branch }}/libxgboost4j/ --acl public-read
-      if: |
-        (github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')) &&
-        matrix.os == 'macos-11'
-      env:
-        AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID_IAM_S3_UPLOADER }}
-        AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY_IAM_S3_UPLOADER }}
-
 
-    - name: Test XGBoost4J (Core, Spark, Examples)
+    - name: Test XGBoost4J-Spark
       run: |
         rm -rfv build/
         cd jvm-packages
@@ -88,13 +75,3 @@ jobs:
       if: matrix.os == 'ubuntu-latest' # Distributed training doesn't work on Windows
       env:
         RABIT_MOCK: ON
-
-
-    - name: Build and Test XGBoost4J with scala 2.13
-      run: |
-        rm -rfv build/
-        cd jvm-packages
-        mvn -B clean install test -Pdefault,scala-2.13
-      if: matrix.os == 'ubuntu-latest' # Distributed training doesn't work on Windows
-      env:
-        RABIT_MOCK: ON
```
.github/workflows/main.yml (vendored, 22 changes)

```diff
@@ -19,7 +19,7 @@ jobs:
       matrix:
         os: [macos-11]
     steps:
-    - uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791  # v2.5.0
+    - uses: actions/checkout@v2
       with:
         submodules: 'true'
     - name: Install system packages
@@ -45,7 +45,7 @@ jobs:
       matrix:
         os: [ubuntu-latest]
     steps:
-    - uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791  # v2.5.0
+    - uses: actions/checkout@v2
       with:
         submodules: 'true'
     - name: Install system packages
@@ -66,16 +66,13 @@ jobs:
   c-api-demo:
     name: Test installing XGBoost lib + building the C API demo
     runs-on: ${{ matrix.os }}
-    defaults:
-      run:
-        shell: bash -l {0}
     strategy:
       fail-fast: false
       matrix:
         os: ["ubuntu-latest"]
         python-version: ["3.8"]
     steps:
-    - uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791  # v2.5.0
+    - uses: actions/checkout@v2
       with:
         submodules: 'true'
     - uses: mamba-org/provision-with-micromamba@f347426e5745fe3dfc13ec5baf20496990d0281f  # v14
@@ -85,11 +82,13 @@ jobs:
         environment-name: cpp_test
         environment-file: tests/ci_build/conda_env/cpp_test.yml
     - name: Display Conda env
+      shell: bash -l {0}
       run: |
         conda info
         conda list
 
     - name: Build and install XGBoost static library
+      shell: bash -l {0}
       run: |
         mkdir build
         cd build
@@ -97,6 +96,7 @@ jobs:
         ninja -v install
         cd -
     - name: Build and run C API demo with static
+      shell: bash -l {0}
       run: |
         pushd .
         cd demo/c-api/
@@ -110,12 +110,14 @@ jobs:
         popd
 
     - name: Build and install XGBoost shared library
+      shell: bash -l {0}
       run: |
         cd build
         cmake .. -DBUILD_STATIC_LIB=OFF -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX -GNinja
         ninja -v install
         cd -
     - name: Build and run C API demo with shared
+      shell: bash -l {0}
       run: |
         pushd .
         cd demo/c-api/
@@ -128,14 +130,14 @@ jobs:
         ./tests/ci_build/verify_link.sh ./demo/c-api/build/basic/api-demo
         ./tests/ci_build/verify_link.sh ./demo/c-api/build/external-memory/external-memory-demo
 
-  cpp-lint:
+  lint:
     runs-on: ubuntu-latest
     name: Code linting for C++
     steps:
-    - uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791  # v2.5.0
+    - uses: actions/checkout@v2
       with:
         submodules: 'true'
-    - uses: actions/setup-python@7f80679172b057fc5e90d70d197929d454754a5a  # v4.3.0
+    - uses: actions/setup-python@v2
       with:
         python-version: "3.8"
         architecture: 'x64'
@@ -144,7 +146,7 @@ jobs:
         python -m pip install wheel setuptools cpplint pylint
     - name: Run lint
       run: |
-        python3 dmlc-core/scripts/lint.py xgboost cpp R-package/src
+        LINT_LANG=cpp make lint
 
-        python3 dmlc-core/scripts/lint.py --exclude_path \
-          python-package/xgboost/dmlc-core \
```
.github/workflows/python_tests.yml (vendored, 152 changes)

```diff
@@ -5,10 +5,6 @@ on: [push, pull_request]
 permissions:
   contents: read  # to fetch code (actions/checkout)
 
-defaults:
-  run:
-    shell: bash -l {0}
-
 jobs:
   python-mypy-lint:
     runs-on: ubuntu-latest
@@ -16,27 +12,32 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-latest]
+        python-version: ["3.8"]
     steps:
-    - uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791  # v2.5.0
+    - uses: actions/checkout@v2
       with:
         submodules: 'true'
-    - uses: mamba-org/provision-with-micromamba@f347426e5745fe3dfc13ec5baf20496990d0281f  # v14
+    - uses: conda-incubator/setup-miniconda@v2
       with:
-        cache-downloads: true
-        cache-env: true
-        environment-name: python_lint
+        auto-update-conda: true
+        python-version: ${{ matrix.python-version }}
+        activate-environment: python_lint
         environment-file: tests/ci_build/conda_env/python_lint.yml
     - name: Display Conda env
+      shell: bash -l {0}
       run: |
         conda info
         conda list
     - name: Run mypy
+      shell: bash -l {0}
       run: |
         python tests/ci_build/lint_python.py --format=0 --type-check=1 --pylint=0
     - name: Run formatter
+      shell: bash -l {0}
       run: |
         python tests/ci_build/lint_python.py --format=1 --type-check=0 --pylint=0
     - name: Run pylint
+      shell: bash -l {0}
       run: |
         python tests/ci_build/lint_python.py --format=0 --type-check=0 --pylint=1
 
@@ -54,19 +55,21 @@ jobs:
     - uses: mamba-org/provision-with-micromamba@f347426e5745fe3dfc13ec5baf20496990d0281f  # v14
       with:
         cache-downloads: true
-        cache-env: true
+        cache-env: false
         environment-name: sdist_test
         environment-file: tests/ci_build/conda_env/sdist_test.yml
     - name: Display Conda env
+      shell: bash -l {0}
       run: |
         conda info
         conda list
     - name: Build and install XGBoost
+      shell: bash -l {0}
       run: |
         cd python-package
         python --version
-        python -m build --sdist
-        pip install -v ./dist/xgboost-*.tar.gz --config-settings use_openmp=False
+        python setup.py sdist
+        pip install -v ./dist/xgboost-*.tar.gz
         cd ..
         python -c 'import xgboost'
 
@@ -80,7 +83,7 @@ jobs:
         os: [macos-11, windows-latest]
         python-version: ["3.8"]
     steps:
-    - uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791  # v2.5.0
+    - uses: actions/checkout@v2
       with:
         submodules: 'true'
     - name: Install osx system dependencies
@@ -92,18 +95,17 @@ jobs:
         auto-update-conda: true
         python-version: ${{ matrix.python-version }}
         activate-environment: test
-    - name: Install build
-      run: |
-        conda install -c conda-forge python-build
     - name: Display Conda env
       shell: bash -l {0}
       run: |
         conda info
         conda list
     - name: Build and install XGBoost
       shell: bash -l {0}
       run: |
         cd python-package
         python --version
-        python -m build --sdist
+        python setup.py sdist
         pip install -v ./dist/xgboost-*.tar.gz
         cd ..
         python -c 'import xgboost'
@@ -125,16 +127,18 @@ jobs:
     - uses: mamba-org/provision-with-micromamba@f347426e5745fe3dfc13ec5baf20496990d0281f  # v14
       with:
         cache-downloads: true
-        cache-env: true
+        cache-env: false
         environment-name: macos_test
         environment-file: tests/ci_build/conda_env/macos_cpu_test.yml
 
     - name: Display Conda env
       shell: bash -l {0}
       run: |
         conda info
         conda list
 
     - name: Build XGBoost on macos
       shell: bash -l {0}
       run: |
         brew install ninja
 
@@ -147,34 +151,31 @@ jobs:
         ninja
 
     - name: Install Python package
       shell: bash -l {0}
       run: |
         cd python-package
         python --version
-        pip install -v .
+        python setup.py install
 
     - name: Test Python package
       shell: bash -l {0}
       run: |
         pytest -s -v -rxXs --durations=0 ./tests/python
 
     - name: Test Dask Interface
       run: |
         pytest -s -v -rxXs --durations=0 ./tests/test_distributed/test_with_dask
 
   python-tests-on-win:
     name: Test XGBoost Python package on ${{ matrix.config.os }}
     runs-on: ${{ matrix.config.os }}
     timeout-minutes: 60
     strategy:
       matrix:
         config:
           - {os: windows-latest, python-version: '3.8'}
 
     steps:
-    - uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791  # v2.5.0
+    - uses: actions/checkout@v2
       with:
         submodules: 'true'
 
-    - uses: conda-incubator/setup-miniconda@35d1405e78aa3f784fe3ce9a2eb378d5eeb62169  # v2.1.1
+    - uses: conda-incubator/setup-miniconda@v2
       with:
         auto-update-conda: true
         python-version: ${{ matrix.config.python-version }}
@@ -182,11 +183,13 @@ jobs:
         environment-file: tests/ci_build/conda_env/win64_cpu_test.yml
 
     - name: Display Conda env
+      shell: bash -l {0}
       run: |
         conda info
         conda list
 
     - name: Build XGBoost on Windows
+      shell: bash -l {0}
       run: |
         mkdir build_msvc
         cd build_msvc
@@ -194,105 +197,14 @@ jobs:
         cmake --build . --config Release --parallel $(nproc)
 
     - name: Install Python package
+      shell: bash -l {0}
       run: |
         cd python-package
         python --version
-        pip wheel -v . --wheel-dir dist/
+        python setup.py bdist_wheel --universal
         pip install ./dist/*.whl
 
     - name: Test Python package
       run: |
         pytest -s -v -rxXs --durations=0 ./tests/python
-
-  python-tests-on-ubuntu:
-    name: Test XGBoost Python package on ${{ matrix.config.os }}
-    runs-on: ${{ matrix.config.os }}
-    timeout-minutes: 90
-    strategy:
-      matrix:
-        config:
-          - {os: ubuntu-latest, python-version: "3.8"}
-
-    steps:
-    - uses: actions/checkout@v2
-      with:
-        submodules: 'true'
-
-    - uses: mamba-org/provision-with-micromamba@f347426e5745fe3dfc13ec5baf20496990d0281f  # v14
-      with:
-        cache-downloads: true
-        cache-env: true
-        environment-name: linux_cpu_test
-        environment-file: tests/ci_build/conda_env/linux_cpu_test.yml
-
-    - name: Display Conda env
-      run: |
-        conda info
-        conda list
-
-    - name: Build XGBoost on Ubuntu
-      run: |
-        mkdir build
-        cd build
-        cmake .. -GNinja -DCMAKE_PREFIX_PATH=$CONDA_PREFIX
-        ninja
-
-    - name: Install Python package
-      run: |
-        cd python-package
-        python --version
-        pip install -v .
-
-    - name: Test Python package
-      run: |
-        pytest -s -v -rxXs --durations=0 ./tests/python
-
-    - name: Test Dask Interface
-      run: |
-        pytest -s -v -rxXs --durations=0 ./tests/test_distributed/test_with_dask
-
-    - name: Test PySpark Interface
-      shell: bash -l {0}
-      run: |
-        pytest -s -v -rxXs --durations=0 ./tests/test_distributed/test_with_spark
-
-  python-system-installation-on-ubuntu:
-    name: Test XGBoost Python package System Installation on ${{ matrix.os }}
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        os: [ubuntu-latest]
-
-    steps:
-    - uses: actions/checkout@v2
-      with:
-        submodules: 'true'
-
-    - name: Set up Python 3.8
-      uses: actions/setup-python@v4
-      with:
-        python-version: 3.8
-
-    - name: Install ninja
-      run: |
-        sudo apt-get update && sudo apt-get install -y ninja-build
-
-    - name: Build XGBoost on Ubuntu
-      run: |
-        mkdir build
-        cd build
-        cmake .. -GNinja
-        ninja
-
-    - name: Copy lib to system lib
-      run: |
-        cp lib/* "$(python -c 'import sys; print(sys.base_prefix)')/lib"
-
-    - name: Install XGBoost in Virtual Environment
-      run: |
-        cd python-package
-        pip install virtualenv
-        virtualenv venv
-        source venv/bin/activate && \
-          pip install -v . --config-settings use_system_libxgboost=True && \
-          python -c 'import xgboost'
-        pytest -s -v -rxXs --durations=0 ./tests/python
```
.github/workflows/python_wheels.yml (vendored, 4 changes)

```diff
@@ -17,11 +17,11 @@ jobs:
       - os: macos-latest
         platform_id: macosx_arm64
     steps:
-    - uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791  # v2.5.0
+    - uses: actions/checkout@v2
       with:
         submodules: 'true'
     - name: Setup Python
-      uses: actions/setup-python@7f80679172b057fc5e90d70d197929d454754a5a  # v4.3.0
+      uses: actions/setup-python@v2
       with:
         python-version: "3.8"
     - name: Build wheels
```
.github/workflows/r_nold.yml (vendored, 21 changes)

```diff
@@ -1,4 +1,4 @@
-# Run expensive R tests with the help of rhub. Only triggered by a pull request review
+# Run R tests with noLD R. Only triggered by a pull request review
 # See discussion at https://github.com/dmlc/xgboost/pull/6378
 
 name: XGBoost-R-noLD
@@ -7,6 +7,9 @@ on:
   pull_request_review_comment:
     types: [created]
 
+env:
+  R_PACKAGES: c('XML', 'igraph', 'data.table', 'ggplot2', 'DiagrammeR', 'Ckmeans.1d.dp', 'vcd', 'testthat', 'lintr', 'knitr', 'rmarkdown', 'e1071', 'cplm', 'devtools', 'float', 'titanic')
+
 permissions:
   contents: read  # to fetch code (actions/checkout)
 
@@ -15,22 +18,26 @@ jobs:
     if: github.event.comment.body == '/gha run r-nold-test' && contains('OWNER,MEMBER,COLLABORATOR', github.event.comment.author_association)
     timeout-minutes: 120
     runs-on: ubuntu-latest
-    container:
-      image: rhub/debian-gcc-devel-nold
+    container: rhub/debian-gcc-devel-nold
     steps:
     - name: Install git and system packages
       shell: bash
       run: |
-        apt update && apt install libcurl4-openssl-dev libssl-dev libssh2-1-dev libgit2-dev libglpk-dev libxml2-dev libharfbuzz-dev libfribidi-dev git -y
+        apt-get update && apt-get install -y git libcurl4-openssl-dev libssl-dev libssh2-1-dev libgit2-dev libxml2-dev
 
-    - uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791  # v2.5.0
+    - uses: actions/checkout@v2
       with:
         submodules: 'true'
 
     - name: Install dependencies
-      shell: bash -l {0}
+      shell: bash
       run: |
-        /tmp/R-devel/bin/Rscript -e "source('./R-package/tests/helper_scripts/install_deps.R')"
+        cat > install_libs.R <<EOT
+        install.packages(${{ env.R_PACKAGES }},
+                         repos = 'http://cloud.r-project.org',
+                         dependencies = c('Depends', 'Imports', 'LinkingTo'))
+        EOT
+        /tmp/R-devel/bin/Rscript install_libs.R
 
     - name: Run R tests
       shell: bash
```
.github/workflows/r_tests.yml (vendored, 153 changes)

```diff
@@ -3,7 +3,9 @@ name: XGBoost-R-Tests
 on: [push, pull_request]
 
 env:
+  R_PACKAGES: c('XML', 'data.table', 'ggplot2', 'DiagrammeR', 'Ckmeans.1d.dp', 'vcd', 'testthat', 'lintr', 'knitr', 'rmarkdown', 'e1071', 'cplm', 'devtools', 'float', 'titanic')
   GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }}
+  _R_CHECK_EXAMPLE_TIMING_CPU_TO_ELAPSED_THRESHOLD_: 2.5
 
 permissions:
   contents: read  # to fetch code (actions/checkout)
@@ -21,32 +23,41 @@ jobs:
       RSPM: ${{ matrix.config.rspm }}
 
     steps:
-    - uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791  # v2.5.0
+    - uses: actions/checkout@v2
       with:
         submodules: 'true'
 
-    - uses: r-lib/actions/setup-r@11a22a908006c25fe054c4ef0ac0436b1de3edbe  # v2.6.4
+    - uses: r-lib/actions/setup-r@v2
       with:
         r-version: ${{ matrix.config.r }}
 
     - name: Cache R packages
-      uses: actions/cache@937d24475381cd9c75ae6db12cb4e79714b926ed  # v3.0.11
+      uses: actions/cache@v2
       with:
         path: ${{ env.R_LIBS_USER }}
-        key: ${{ runner.os }}-r-${{ matrix.config.r }}-6-${{ hashFiles('R-package/DESCRIPTION') }}
-        restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-6-${{ hashFiles('R-package/DESCRIPTION') }}
+        key: ${{ runner.os }}-r-${{ matrix.config.r }}-5-${{ hashFiles('R-package/DESCRIPTION') }}
+        restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-5-${{ hashFiles('R-package/DESCRIPTION') }}
 
     - name: Install dependencies
       shell: Rscript {0}
       run: |
-        source("./R-package/tests/helper_scripts/install_deps.R")
+        install.packages(${{ env.R_PACKAGES }},
+                         repos = 'http://cloud.r-project.org',
+                         dependencies = c('Depends', 'Imports', 'LinkingTo'))
+    - name: Install igraph on Windows
+      shell: Rscript {0}
+      if: matrix.config.os == 'windows-latest'
+      run: |
+        install.packages('igraph', type='binary')
 
     - name: Run lintr
       run: |
-        MAKEFLAGS="-j$(nproc)" R CMD INSTALL R-package/
-        Rscript tests/ci_build/lint_r.R $(pwd)
+        cd R-package
+        R CMD INSTALL .
+        # Disable lintr errors for now: https://github.com/dmlc/xgboost/issues/8012
+        Rscript tests/helper_scripts/run_lint.R || true
 
-  test-R-on-Windows:
+  test-with-R:
     runs-on: ${{ matrix.config.os }}
     name: Test R on OS ${{ matrix.config.os }}, R ${{ matrix.config.r }}, Compiler ${{ matrix.config.compiler }}, Build ${{ matrix.config.build }}
     strategy:
@@ -54,82 +65,100 @@ jobs:
       matrix:
         config:
           - {os: windows-latest, r: 'release', compiler: 'mingw', build: 'autotools'}
-          - {os: windows-latest, r: '4.2.0', compiler: 'msvc', build: 'cmake'}
+          - {os: windows-latest, r: 'release', compiler: 'msvc', build: 'cmake'}
+          - {os: windows-latest, r: 'release', compiler: 'mingw', build: 'cmake'}
     env:
       R_REMOTES_NO_ERRORS_FROM_WARNINGS: true
       _R_CHECK_EXAMPLE_TIMING_CPU_TO_ELAPSED_THRESHOLD_: 2.5
       RSPM: ${{ matrix.config.rspm }}
 
     steps:
-    - uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791  # v2.5.0
+    - uses: actions/checkout@v2
       with:
         submodules: 'true'
 
-    - uses: r-lib/actions/setup-r@11a22a908006c25fe054c4ef0ac0436b1de3edbe  # v2.6.4
+    - uses: r-lib/actions/setup-r@v2
       with:
         r-version: ${{ matrix.config.r }}
 
     - name: Cache R packages
-      uses: actions/cache@937d24475381cd9c75ae6db12cb4e79714b926ed  # v3.0.11
+      uses: actions/cache@v2
       with:
         path: ${{ env.R_LIBS_USER }}
-        key: ${{ runner.os }}-r-${{ matrix.config.r }}-6-${{ hashFiles('R-package/DESCRIPTION') }}
-        restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-6-${{ hashFiles('R-package/DESCRIPTION') }}
+        key: ${{ runner.os }}-r-${{ matrix.config.r }}-5-${{ hashFiles('R-package/DESCRIPTION') }}
+        restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-5-${{ hashFiles('R-package/DESCRIPTION') }}
 
-    - uses: actions/setup-python@7f80679172b057fc5e90d70d197929d454754a5a  # v4.3.0
+    - name: Install dependencies
+      shell: Rscript {0}
+      if: matrix.config.os != 'windows-latest'
+      run: |
+        install.packages(${{ env.R_PACKAGES }},
+                         repos = 'http://cloud.r-project.org',
+                         dependencies = c('Depends', 'Imports', 'LinkingTo'))
+
+    - name: Install binary dependencies
+      shell: Rscript {0}
+      if: matrix.config.os == 'windows-latest'
+      run: |
+        install.packages(${{ env.R_PACKAGES }},
+                         type = 'binary',
+                         repos = 'http://cloud.r-project.org',
+                         dependencies = c('Depends', 'Imports', 'LinkingTo'))
+
+    - uses: actions/setup-python@v2
       with:
         python-version: "3.8"
         architecture: 'x64'
 
     - name: Test R
       run: |
         python tests/ci_build/test_r_package.py --compiler='${{ matrix.config.compiler }}' --build-tool='${{ matrix.config.build }}'
 
   test-R-CRAN:
     runs-on: ubuntu-latest
     strategy:
       fail-fast: false
       matrix:
         config:
           - {r: 'release'}
     env:
       _R_CHECK_EXAMPLE_TIMING_CPU_TO_ELAPSED_THRESHOLD_: 2.5
       MAKE: "make -j$(nproc)"
 
     steps:
     - uses: actions/checkout@v2
       with:
         submodules: 'true'
 
     - uses: r-lib/actions/setup-r@v2
       with:
         r-version: ${{ matrix.config.r }}
 
     - uses: r-lib/actions/setup-tinytex@v2
 
     - name: Install system packages
       run: |
         sudo apt-get update && sudo apt-get install libcurl4-openssl-dev libssl-dev libssh2-1-dev libgit2-dev pandoc pandoc-citeproc libglpk-dev
 
     - name: Cache R packages
       uses: actions/cache@v2
       with:
         path: ${{ env.R_LIBS_USER }}
         key: ${{ runner.os }}-r-${{ matrix.config.r }}-5-${{ hashFiles('R-package/DESCRIPTION') }}
         restore-keys: ${{ runner.os }}-r-${{ matrix.config.r }}-5-${{ hashFiles('R-package/DESCRIPTION') }}
 
     - name: Install dependencies
       shell: Rscript {0}
       run: |
-        source("./R-package/tests/helper_scripts/install_deps.R")
+        install.packages(${{ env.R_PACKAGES }},
+                         repos = 'http://cloud.r-project.org',
+                         dependencies = c('Depends', 'Imports', 'LinkingTo'))
+        install.packages('igraph', repos = 'http://cloud.r-project.org', dependencies = c('Depends', 'Imports', 'LinkingTo'))
 
-    - name: Test R
+    - name: Check R Package
       run: |
         python tests/ci_build/test_r_package.py --compiler='${{ matrix.config.compiler }}' --build-tool="${{ matrix.config.build }}" --task=check
 
   test-R-on-Debian:
     name: Test R package on Debian
     runs-on: ubuntu-latest
     container:
       image: rhub/debian-gcc-devel
 
     steps:
     - name: Install system dependencies
       run: |
         # Must run before checkout to have the latest git installed.
         # No need to add pandoc, the container has it figured out.
         apt update && apt install libcurl4-openssl-dev libssl-dev libssh2-1-dev libgit2-dev libglpk-dev libxml2-dev libharfbuzz-dev libfribidi-dev git -y
 
     - name: Trust git cloning project sources
       run: |
         git config --global --add safe.directory "${GITHUB_WORKSPACE}"
 
     - uses: actions/checkout@e2f20e631ae6d7dd3b768f56a5d2af784dd54791  # v2.5.0
       with:
         submodules: 'true'
 
     - name: Install dependencies
       shell: bash -l {0}
       run: |
         /tmp/R-devel/bin/Rscript -e "source('./R-package/tests/helper_scripts/install_deps.R')"
 
     - name: Test R
       shell: bash -l {0}
       run: |
         python3 tests/ci_build/test_r_package.py --r=/tmp/R-devel/bin/R --build-tool=autotools --task=check
 
-    - uses: dorny/paths-filter@v2
-      id: changes
-      with:
-        filters: |
-          r_package:
-            - 'R-package/**'
-
-    - name: Run document check
-      if: steps.changes.outputs.r_package == 'true'
-      run: |
-        python3 tests/ci_build/test_r_package.py --r=/tmp/R-devel/bin/R --task=doc
+        # Print stacktrace upon success of failure
+        make Rcheck || tests/ci_build/print_r_stacktrace.sh fail
+        tests/ci_build/print_r_stacktrace.sh success
```
.github/workflows/scorecards.yml (vendored, 10 changes)

```diff
@@ -27,21 +27,21 @@ jobs:
           persist-credentials: false
 
       - name: "Run analysis"
-        uses: ossf/scorecard-action@08b4669551908b1024bb425080c797723083c031  # tag=v2.2.0
+        uses: ossf/scorecard-action@865b4092859256271290c77adbd10a43f4779972  # tag=v2.0.3
         with:
           results_file: results.sarif
           results_format: sarif
 
-          # Publish the results for public repositories to enable scorecard badges. For more details, see
-          # https://github.com/ossf/scorecard-action#publishing-results.
-          # For private repositories, `publish_results` will automatically be set to `false`, regardless
+          # https://github.com/ossf/scorecard-action#publishing-results.
+          # For private repositories, `publish_results` will automatically be set to `false`, regardless
           # of the value entered here.
           publish_results: true
 
       # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
       # format to the repository Actions tab.
       - name: "Upload artifact"
-        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce  # tag=v3.1.2
+        uses: actions/upload-artifact@6673cd052c4cd6fcf4b4e6e60ea986c889389535  # tag=v3.0.0
         with:
           name: SARIF file
           path: results.sarif
@@ -49,6 +49,6 @@ jobs:
 
       # Upload the results to GitHub's code scanning dashboard.
       - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@7b6664fa89524ee6e3c3e9749402d5afd69b3cd8  # tag=v2.14.1
+        uses: github/codeql-action/upload-sarif@5f532563584d71fdef14ee64d17bafb34f751ce5  # tag=v1.0.26
         with:
           sarif_file: results.sarif
```
.github/workflows/update_rapids.yml (vendored, deleted file, 44 lines)

```yaml
name: update-rapids

on:
  workflow_dispatch:
  schedule:
    - cron: "0 20 * * *"  # Run once daily

permissions:
  pull-requests: write
  contents: write

defaults:
  run:
    shell: bash -l {0}

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

env:
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}  # To use GitHub CLI

jobs:
  update-rapids:
    name: Check latest RAPIDS
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          submodules: 'true'
      - name: Check latest RAPIDS and update conftest.sh
        run: |
          bash tests/buildkite/update-rapids.sh
      - name: Create Pull Request
        uses: peter-evans/create-pull-request@v5
        if: github.ref == 'refs/heads/master'
        with:
          add-paths: |
            tests/buildkite
          branch: create-pull-request/update-rapids
          base: master
          title: "[CI] Update RAPIDS to latest stable"
          commit-message: "[CI] Update RAPIDS to latest stable"
```
.gitignore (vendored, 15 changes)

```diff
@@ -48,7 +48,6 @@ Debug
 *.Rproj
 ./xgboost.mpi
 ./xgboost.mock
-*.bak
 #.Rbuildignore
 R-package.Rproj
 *.cache*
@@ -138,15 +137,5 @@ credentials.csv
 .metals
 .bloop
 
-# python tests
-demo/**/*.txt
-*.dmatrix
-.hypothesis
-__MACOSX/
-model*.json
-
-# R tests
-*.libsvm
-*.rds
-Rplots.pdf
-*.zip
+# hypothesis python tests
+.hypothesis
```
.gitmodules (vendored, 6 changes)

```diff
@@ -2,9 +2,9 @@
 	path = dmlc-core
 	url = https://github.com/dmlc/dmlc-core
 	branch = main
+[submodule "cub"]
+	path = cub
+	url = https://github.com/NVlabs/cub
 [submodule "gputreeshap"]
 	path = gputreeshap
 	url = https://github.com/rapidsai/gputreeshap.git
-[submodule "rocgputreeshap"]
-	path = rocgputreeshap
-	url = https://github.com/ROCmSoftwarePlatform/rocgputreeshap
```

.readthedocs.yaml

```diff
@@ -32,3 +32,4 @@ formats:
 python:
   install:
     - requirements: doc/requirements.txt
+  system_packages: true
```
.travis.yml (new file, 53 lines)

```yaml
sudo: required

dist: bionic

env:
  global:
    - secure: "lqkL5SCM/CBwgVb1GWoOngpojsa0zCSGcvF0O3/45rBT1EpNYtQ4LRJ1+XcHi126vdfGoim/8i7AQhn5eOgmZI8yAPBeoUZ5zSrejD3RUpXr2rXocsvRRP25Z4mIuAGHD9VAHtvTdhBZRVV818W02pYduSzAeaY61q/lU3xmWsE="
    - secure: "mzms6X8uvdhRWxkPBMwx+mDl3d+V1kUpZa7UgjT+dr4rvZMzvKtjKp/O0JZZVogdgZjUZf444B98/7AvWdSkGdkfz2QdmhWmXzNPfNuHtmfCYMdijsgFIGLuD3GviFL/rBiM2vgn32T3QqFiEJiC5StparnnXimPTc9TpXQRq5c="

jobs:
  include:
    - os: linux
      arch: s390x
      env: TASK=s390x_test

# dependent brew packages
# the dependencies from homebrew is installed manually from setup script due to outdated image from travis.
addons:
  homebrew:
    update: false
  apt:
    packages:
      - unzip

before_install:
  - source tests/travis/travis_setup_env.sh

install:
  - source tests/travis/setup.sh

script:
  - tests/travis/run_test.sh

cache:
  directories:
    - ${HOME}/.cache/usr
    - ${HOME}/.cache/pip

before_cache:
  - tests/travis/travis_before_cache.sh

after_failure:
  - tests/travis/travis_after_failure.sh

after_success:
  - tree build
  - bash <(curl -s https://codecov.io/bash) -a '-o src/ src/*.c'

notifications:
  email:
    on_success: change
    on_failure: always
```
CITATION (1 change)

```diff
@@ -15,3 +15,4 @@
   address = {New York, NY, USA},
   keywords = {large-scale machine learning},
 }
+
```
CMakeLists.txt (101 changes)

```diff
@@ -1,5 +1,5 @@
 cmake_minimum_required(VERSION 3.18 FATAL_ERROR)
-project(xgboost LANGUAGES CXX C VERSION 2.0.1)
+project(xgboost LANGUAGES CXX C VERSION 1.7.6)
 include(cmake/Utils.cmake)
 list(APPEND CMAKE_MODULE_PATH "${xgboost_SOURCE_DIR}/cmake/modules")
 cmake_policy(SET CMP0022 NEW)
@@ -14,24 +14,8 @@ endif ((${CMAKE_VERSION} VERSION_GREATER 3.13) OR (${CMAKE_VERSION} VERSION_EQUA
 message(STATUS "CMake version ${CMAKE_VERSION}")
 
-# Check compiler versions
-# Use recent compilers to ensure that std::filesystem is available
-if(MSVC)
-  if(MSVC_VERSION LESS 1920)
-    message(FATAL_ERROR "Need Visual Studio 2019 or newer to build XGBoost")
-  endif()
-elseif(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
-  if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "8.1")
-    message(FATAL_ERROR "Need GCC 8.1 or newer to build XGBoost")
-  endif()
-elseif(CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
-  if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "11.0")
-    message(FATAL_ERROR "Need Xcode 11.0 (AppleClang 11.0) or newer to build XGBoost")
-  endif()
-elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
-  if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "9.0")
-    message(FATAL_ERROR "Need Clang 9.0 or newer to build XGBoost")
-  endif()
-endif()
+if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0)
+  message(FATAL_ERROR "GCC version must be at least 5.0!")
+endif()
 
 include(${xgboost_SOURCE_DIR}/cmake/FindPrefetchIntrinsics.cmake)
@@ -58,23 +42,18 @@ option(ENABLE_ALL_WARNINGS "Enable all compiler warnings. Only effective for GCC
 option(LOG_CAPI_INVOCATION "Log all C API invocations for debugging" OFF)
 option(GOOGLE_TEST "Build google tests" OFF)
 option(USE_DMLC_GTEST "Use google tests bundled with dmlc-core submodule" OFF)
-option(USE_DEVICE_DEBUG "Generate CUDA/HIP device debug info." OFF)
+option(USE_DEVICE_DEBUG "Generate CUDA device debug info." OFF)
 option(USE_NVTX "Build with cuda profiling annotations. Developers only." OFF)
 set(NVTX_HEADER_DIR "" CACHE PATH "Path to the stand-alone nvtx header")
 option(RABIT_MOCK "Build rabit with mock" OFF)
 option(HIDE_CXX_SYMBOLS "Build shared library and hide all C++ symbols" OFF)
-option(KEEP_BUILD_ARTIFACTS_IN_BINARY_DIR "Output build artifacts in CMake binary dir" OFF)
 ## CUDA
 option(USE_CUDA "Build with GPU acceleration" OFF)
-option(USE_PER_THREAD_DEFAULT_STREAM "Build with per-thread default stream" ON)
 option(USE_NCCL "Build with NCCL to enable distributed GPU support." OFF)
 option(BUILD_WITH_SHARED_NCCL "Build with shared NCCL library." OFF)
+option(BUILD_WITH_CUDA_CUB "Build with cub in CUDA installation" OFF)
 set(GPU_COMPUTE_VER "" CACHE STRING
   "Semicolon separated list of compute versions to be built against, e.g. '35;61'")
-## HIP
-option(USE_HIP "Build with GPU acceleration" OFF)
-option(USE_RCCL "Build with RCCL to enable distributed GPU support." OFF)
-option(BUILD_WITH_SHARED_RCCL "Build with shared RCCL library." OFF)
 ## Copied From dmlc
 option(USE_HDFS "Build with HDFS support" OFF)
 option(USE_AZURE "Build with AZURE support" OFF)
@@ -97,7 +76,6 @@ option(ADD_PKGCONFIG "Add xgboost.pc into system." ON)
 if (USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug)))
   message(SEND_ERROR "Do not enable `USE_DEBUG_OUTPUT' with release build.")
 endif (USE_DEBUG_OUTPUT AND (NOT (CMAKE_BUILD_TYPE MATCHES Debug)))
 
 if (USE_NCCL AND NOT (USE_CUDA))
   message(SEND_ERROR "`USE_NCCL` must be enabled with `USE_CUDA` flag.")
 endif (USE_NCCL AND NOT (USE_CUDA))
@@ -107,17 +85,6 @@ endif (USE_DEVICE_DEBUG AND NOT (USE_CUDA))
 if (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
   message(SEND_ERROR "Build XGBoost with -DUSE_NCCL=ON to enable BUILD_WITH_SHARED_NCCL.")
 endif (BUILD_WITH_SHARED_NCCL AND (NOT USE_NCCL))
 
-if (USE_RCCL AND NOT (USE_HIP))
-  message(SEND_ERROR "`USE_RCCL` must be enabled with `USE_HIP` flag.")
-endif (USE_RCCL AND NOT (USE_HIP))
-if (USE_DEVICE_DEBUG AND NOT (USE_HIP))
-  message(SEND_ERROR "`USE_DEVICE_DEBUG` must be enabled with `USE_HIP` flag.")
-endif (USE_DEVICE_DEBUG AND NOT (USE_HIP))
-if (BUILD_WITH_SHARED_RCCL AND (NOT USE_RCCL))
-  message(SEND_ERROR "Build XGBoost with -DUSE_RCCL=ON to enable BUILD_WITH_SHARED_RCCL.")
-endif (BUILD_WITH_SHARED_RCCL AND (NOT USE_RCCL))
-
 if (JVM_BINDINGS AND R_LIB)
   message(SEND_ERROR "`R_LIB' is not compatible with `JVM_BINDINGS' as they both have customized configurations.")
 endif (JVM_BINDINGS AND R_LIB)
@@ -131,15 +98,9 @@ endif (USE_AVX)
 if (PLUGIN_LZ4)
   message(SEND_ERROR "The option 'PLUGIN_LZ4' is removed from XGBoost.")
 endif (PLUGIN_LZ4)
 
 if (PLUGIN_RMM AND NOT (USE_CUDA))
   message(SEND_ERROR "`PLUGIN_RMM` must be enabled with `USE_CUDA` flag.")
 endif (PLUGIN_RMM AND NOT (USE_CUDA))
 
-if (PLUGIN_RMM AND NOT (USE_HIP))
-  message(SEND_ERROR "`PLUGIN_RMM` must be enabled with `USE_HIP` flag.")
-endif (PLUGIN_RMM AND NOT (USE_HIP))
-
 if (PLUGIN_RMM AND NOT ((CMAKE_CXX_COMPILER_ID STREQUAL "Clang") OR (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")))
   message(SEND_ERROR "`PLUGIN_RMM` must be used with GCC or Clang compiler.")
 endif (PLUGIN_RMM AND NOT ((CMAKE_CXX_COMPILER_ID STREQUAL "Clang") OR (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")))
@@ -154,6 +115,9 @@ endif (ENABLE_ALL_WARNINGS)
 if (BUILD_STATIC_LIB AND (R_LIB OR JVM_BINDINGS))
   message(SEND_ERROR "Cannot build a static library libxgboost.a when R or JVM packages are enabled.")
 endif (BUILD_STATIC_LIB AND (R_LIB OR JVM_BINDINGS))
+if (PLUGIN_RMM AND (NOT BUILD_WITH_CUDA_CUB))
+  message(SEND_ERROR "Cannot build with RMM using cub submodule.")
+endif (PLUGIN_RMM AND (NOT BUILD_WITH_CUDA_CUB))
 if (PLUGIN_FEDERATED)
   if (CMAKE_CROSSCOMPILING)
     message(SEND_ERROR "Cannot cross compile with federated learning support")
@@ -189,27 +153,11 @@ if (USE_CUDA)
   format_gencode_flags("${GPU_COMPUTE_VER}" GEN_CODE)
   add_subdirectory(${PROJECT_SOURCE_DIR}/gputreeshap)
 
   find_package(CUDAToolkit REQUIRED)
+  if ((${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 11.4) AND (NOT BUILD_WITH_CUDA_CUB))
+    set(BUILD_WITH_CUDA_CUB ON)
+  endif ()
 endif (USE_CUDA)
 
-if (USE_HIP)
-  set(USE_OPENMP ON CACHE BOOL "HIP requires OpenMP" FORCE)
-  # `export CXX=' is ignored by CMake HIP.
-  set(CMAKE_HIP_HOST_COMPILER ${CMAKE_CXX_COMPILER})
-  message(STATUS "Configured HIP host compiler: ${CMAKE_HIP_HOST_COMPILER}")
-
-  enable_language(HIP)
-  find_package(hip REQUIRED)
-  find_package(rocthrust REQUIRED)
-  find_package(hipcub REQUIRED)
-
-  set(CMAKE_HIP_FLAGS "${CMAKE_HIP_FLAGS} -I${HIP_INCLUDE_DIRS} -I${HIP_INCLUDE_DIRS}/hip")
-  set(CMAKE_HIP_FLAGS "${CMAKE_HIP_FLAGS} -Wunused-result -w")
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D__HIP_PLATFORM_AMD__")
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -I${HIP_INCLUDE_DIRS}")
-  add_subdirectory(${PROJECT_SOURCE_DIR}/rocgputreeshap)
-endif (USE_HIP)
-
 if (FORCE_COLORED_OUTPUT AND (CMAKE_GENERATOR STREQUAL "Ninja") AND
     ((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") OR
      (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")))
@@ -249,10 +197,6 @@ if (USE_NCCL)
   find_package(Nccl REQUIRED)
 endif (USE_NCCL)
 
-if (USE_RCCL)
-  find_package(rccl REQUIRED)
-endif (USE_RCCL)
-
 # dmlc-core
 msvc_use_static_runtime()
 if (FORCE_SHARED_CRT)
@@ -277,11 +221,6 @@ endif (RABIT_BUILD_MPI)
 add_subdirectory(${xgboost_SOURCE_DIR}/src)
 target_link_libraries(objxgboost PUBLIC dmlc)
 
-# Link -lstdc++fs for GCC 8.x
-if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS "9.0")
-  target_link_libraries(objxgboost PUBLIC stdc++fs)
-endif()
-
 # Exports some R specific definitions and objects
 if (R_LIB)
   add_subdirectory(${xgboost_SOURCE_DIR}/R-package)
@@ -297,15 +236,6 @@ add_subdirectory(${xgboost_SOURCE_DIR}/plugin)
 
 if (PLUGIN_RMM)
   find_package(rmm REQUIRED)
 
-  # Patch the rmm targets so they reference the static cudart
-  # Remove this patch once RMM stops specifying cudart requirement
-  # (since RMM is a header-only library, it should not specify cudart in its CMake config)
-  get_target_property(rmm_link_libs rmm::rmm INTERFACE_LINK_LIBRARIES)
-  list(REMOVE_ITEM rmm_link_libs CUDA::cudart)
-  list(APPEND rmm_link_libs CUDA::cudart_static)
-  set_target_properties(rmm::rmm PROPERTIES INTERFACE_LINK_LIBRARIES "${rmm_link_libs}")
-  get_target_property(rmm_link_libs rmm::rmm INTERFACE_LINK_LIBRARIES)
 endif (PLUGIN_RMM)
 
 #-- library
@@ -346,13 +276,8 @@ if (JVM_BINDINGS)
   xgboost_target_defs(xgboost4j)
 endif (JVM_BINDINGS)
 
-if (KEEP_BUILD_ARTIFACTS_IN_BINARY_DIR)
-  set_output_directory(runxgboost ${xgboost_BINARY_DIR})
-  set_output_directory(xgboost ${xgboost_BINARY_DIR}/lib)
-else ()
-  set_output_directory(runxgboost ${xgboost_SOURCE_DIR})
-  set_output_directory(xgboost ${xgboost_SOURCE_DIR}/lib)
-endif ()
+set_output_directory(runxgboost ${xgboost_SOURCE_DIR})
+set_output_directory(xgboost ${xgboost_SOURCE_DIR}/lib)
 # Ensure these two targets do not build simultaneously, as they produce outputs with conflicting names
 add_dependencies(xgboost runxgboost)
```
Makefile (new file, 145 lines)

```make
ifndef DMLC_CORE
	DMLC_CORE = dmlc-core
endif

ifndef RABIT
	RABIT = rabit
endif

ROOTDIR = $(CURDIR)

# workarounds for some buggy old make & msys2 versions seen in windows
ifeq (NA, $(shell test ! -d "$(ROOTDIR)" && echo NA ))
	$(warning Attempting to fix non-existing ROOTDIR [$(ROOTDIR)])
	ROOTDIR := $(shell pwd)
	$(warning New ROOTDIR [$(ROOTDIR)] $(shell test -d "$(ROOTDIR)" && echo " is OK" ))
endif
MAKE_OK := $(shell "$(MAKE)" -v 2> /dev/null)
ifndef MAKE_OK
	$(warning Attempting to recover non-functional MAKE [$(MAKE)])
	MAKE := $(shell which make 2> /dev/null)
	MAKE_OK := $(shell "$(MAKE)" -v 2> /dev/null)
endif
$(warning MAKE [$(MAKE)] - $(if $(MAKE_OK),checked OK,PROBLEM))

include $(DMLC_CORE)/make/dmlc.mk

# set compiler defaults for OSX versus *nix
# let people override either
OS := $(shell uname)
ifeq ($(OS), Darwin)
	ifndef CC
		export CC = $(if $(shell which clang), clang, gcc)
	endif
	ifndef CXX
		export CXX = $(if $(shell which clang++), clang++, g++)
	endif
else
	# linux defaults
	ifndef CC
		export CC = gcc
	endif
	ifndef CXX
		export CXX = g++
	endif
endif

export CFLAGS= -DDMLC_LOG_CUSTOMIZE=1 -std=c++14 -Wall -Wno-unknown-pragmas -Iinclude $(ADD_CFLAGS)
CFLAGS += -I$(DMLC_CORE)/include -I$(RABIT)/include -I$(GTEST_PATH)/include

ifeq ($(TEST_COVER), 1)
	CFLAGS += -g -O0 -fprofile-arcs -ftest-coverage
else
	CFLAGS += -O3 -funroll-loops
endif

ifndef LINT_LANG
	LINT_LANG= "all"
endif

# specify tensor path
.PHONY: clean all lint clean_all doxygen rcpplint pypack Rpack Rbuild Rcheck

build/%.o: src/%.cc
	@mkdir -p $(@D)
	$(CXX) $(CFLAGS) -MM -MT build/$*.o $< >build/$*.d
	$(CXX) -c $(CFLAGS) $< -o $@

# The should be equivalent to $(ALL_OBJ) except for build/cli_main.o
amalgamation/xgboost-all0.o: amalgamation/xgboost-all0.cc
	$(CXX) -c $(CFLAGS) $< -o $@

rcpplint:
	python3 dmlc-core/scripts/lint.py xgboost ${LINT_LANG} R-package/src

lint: rcpplint
	python3 dmlc-core/scripts/lint.py --exclude_path python-package/xgboost/dmlc-core \
	  python-package/xgboost/include python-package/xgboost/lib \
	  python-package/xgboost/make python-package/xgboost/rabit \
	  python-package/xgboost/src --pylint-rc ${PWD}/python-package/.pylintrc xgboost \
	  ${LINT_LANG} include src python-package

ifeq ($(TEST_COVER), 1)
cover: check
	@- $(foreach COV_OBJ, $(COVER_OBJ), \
		gcov -pbcul -o $(shell dirname $(COV_OBJ)) $(COV_OBJ) > gcov.log || cat gcov.log; \
	)
endif

clean:
	$(RM) -rf build lib bin *~ */*~ */*/*~ */*/*/*~ */*.o */*/*.o */*/*/*.o #xgboost
	$(RM) -rf build_tests *.gcov tests/cpp/xgboost_test
	if [ -d "R-package/src" ]; then \
		cd R-package/src; \
		$(RM) -rf rabit src include dmlc-core amalgamation *.so *.dll; \
		cd $(ROOTDIR); \
	fi

clean_all: clean
	cd $(DMLC_CORE); "$(MAKE)" clean; cd $(ROOTDIR)
	cd $(RABIT); "$(MAKE)" clean; cd $(ROOTDIR)

# create pip source dist (sdist) pack for PyPI
pippack: clean_all
	cd python-package; python setup.py sdist; mv dist/*.tar.gz ..; cd ..

# Script to make a clean installable R package.
Rpack: clean_all
	rm -rf xgboost xgboost*.tar.gz
	cp -r R-package xgboost
	rm -rf xgboost/src/*.o xgboost/src/*.so xgboost/src/*.dll
	rm -rf xgboost/src/*/*.o
	rm -rf xgboost/demo/*.model xgboost/demo/*.buffer xgboost/demo/*.txt
	rm -rf xgboost/demo/runall.R
	cp -r src xgboost/src/src
	cp -r include xgboost/src/include
	cp -r amalgamation xgboost/src/amalgamation
	mkdir -p xgboost/src/rabit
	cp -r rabit/include xgboost/src/rabit/include
	cp -r rabit/src xgboost/src/rabit/src
	rm -rf xgboost/src/rabit/src/*.o
	mkdir -p xgboost/src/dmlc-core
	cp -r dmlc-core/include xgboost/src/dmlc-core/include
	cp -r dmlc-core/src xgboost/src/dmlc-core/src
	cp ./LICENSE xgboost
	cat R-package/src/Makevars.in|sed '2s/.*/PKGROOT=./' > xgboost/src/Makevars.in
	cat R-package/src/Makevars.win|sed '2s/.*/PKGROOT=./' > xgboost/src/Makevars.win
	rm -f xgboost/src/Makevars.win-e # OSX sed create this extra file; remove it
	bash R-package/remove_warning_suppression_pragma.sh
	bash xgboost/remove_warning_suppression_pragma.sh
	rm xgboost/remove_warning_suppression_pragma.sh
	rm xgboost/CMakeLists.txt
	rm -rfv xgboost/tests/helper_scripts/

R ?= R

Rbuild: Rpack
	$(R) CMD build xgboost
	rm -rf xgboost

Rcheck: Rbuild
	$(R) CMD check --as-cran xgboost*.tar.gz

-include build/*.d
-include build/*/*.d
```
NEWS.md (219 changes)

@@ -3,225 +3,6 @@ XGBoost Change Log

This file records the changes in xgboost library in reverse chronological order.

The 1.7.x change-log entries below are present only on the release_2 side of the comparison; the release_1 side removes them.

## 1.7.6 (2023 Jun 16)

This is a patch release for bug fixes. The CRAN package for the R binding is kept at 1.7.5.

### Bug Fixes
* Fix distributed training with mixed dense and sparse partitions. (#9272)
* Fix monotone constraints on CPU with large trees. (#9122)
* [spark] Make the spark model have the same UID as its estimator (#9022)
* Optimize prediction with `QuantileDMatrix`. (#9096)

### Document
* Improve doxygen (#8959)
* Update the cuDF pip index URL. (#9106)

### Maintenance
* Fix tests with pandas 2.0. (#9014)

## 1.7.5 (2023 Mar 30)

This is a patch release for bug fixes.

* C++ requirement is updated to C++-17, along with which, CUDA 11.8 is used as the default CTK. (#8860, #8855, #8853)
* Fix import for pyspark ranker. (#8692)
* Fix Windows binary wheel to be compatible with Poetry (#8991)
* Fix GPU hist with column sampling. (#8850)
* Make sure iterative DMatrix is properly initialized. (#8997)
* [R] Update link in document. (#8998)

## 1.7.4 (2023 Feb 16)

This is a patch release for bug fixes.

* [R] Fix OpenMP detection on macOS. (#8684)
* [Python] Make sure input numpy array is aligned. (#8690)
* Fix feature interaction with column sampling in gpu_hist evaluator. (#8754)
* Fix GPU L1 error. (#8749)
* [PySpark] Fix feature types param (#8772)
* Fix ranking with quantile dmatrix and group weight. (#8762)

## 1.7.3 (2023 Jan 6)

This is a patch release for bug fixes.

* [Breaking] XGBoost Sklearn estimator method `get_params` no longer returns internally configured values. (#8634)
* Fix linalg iterator, which may crash the L1 error. (#8603)
* Fix loading pickled GPU model with a CPU-only XGBoost build. (#8632)
* Fix inference with unseen categories with categorical features. (#8591, #8602)
* CI fixes. (#8620, #8631, #8579)

## v1.7.2 (2022 Dec 8)

This is a patch release for bug fixes.

* Work with newer thrust and libcudacxx (#8432)
* Support null value in CUDA array interface namespace. (#8486)
* Use `getsockname` instead of `SO_DOMAIN` on AIX. (#8437)
* [pyspark] Make QDM optional based on a cuDF check (#8471)
* [pyspark] sort qid for SparkRanker. (#8497)
* [dask] Properly await async method client.wait_for_workers. (#8558)
* [R] Fix CRAN test notes. (#8428)
* [doc] Fix outdated document [skip ci]. (#8527)
* [CI] Fix github action mismatched glibcxx. (#8551)

## v1.7.1 (2022 Nov 3)

This is a patch release to incorporate the following hotfix:

* Add back xgboost.rabit for backwards compatibility (#8411)

## v1.7.0 (2022 Oct 20)

We are excited to announce the feature packed XGBoost 1.7 release. The release note will walk through some of the major new features first, then make a summary for other improvements and language-binding-specific changes.

### PySpark

XGBoost 1.7 features initial support for PySpark integration. The new interface is adapted from the existing PySpark XGBoost interface developed by databricks with additional features like `QuantileDMatrix` and the rapidsai plugin (GPU pipeline) support. The new Spark XGBoost Python estimators not only benefit from PySpark ml facilities for powerful distributed computing but also enjoy the rest of the Python ecosystem. Users can define a custom objective, callbacks, and metrics in Python and use them with this interface on distributed clusters. The support is labeled as experimental with more features to come in future releases. For a brief introduction please visit the tutorial on XGBoost's [document page](https://xgboost.readthedocs.io/en/latest/tutorials/spark_estimator.html). (#8355, #8344, #8335, #8284, #8271, #8283, #8250, #8231, #8219, #8245, #8217, #8200, #8173, #8172, #8145, #8117, #8131, #8088, #8082, #8085, #8066, #8068, #8067, #8020, #8385)

Due to its initial support status, the new interface has some limitations; categorical features and multi-output models are not yet supported.
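To make the estimator description above concrete, here is a minimal sketch of the `xgboost.spark` interface; the toy DataFrame and the `num_workers` value are illustrative assumptions, not part of the change log:

```python
from pyspark.ml.linalg import Vectors
from pyspark.sql import SparkSession
from xgboost.spark import SparkXGBRegressor

spark = SparkSession.builder.getOrCreate()

# Any Spark DataFrame with a vector "features" column and a numeric
# "label" column works; this toy frame is only for illustration.
df = spark.createDataFrame(
    [(Vectors.dense(1.0, 2.0), 1.0), (Vectors.dense(3.0, 4.0), 0.0)],
    ["features", "label"],
)

# num_workers controls how many Spark tasks participate in training.
regressor = SparkXGBRegressor(features_col="features", label_col="label", num_workers=2)
model = regressor.fit(df)
model.transform(df).show()
```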
|
||||
### Development of categorical data support

This release brings more progress on the experimental support for categorical features. In 1.7, XGBoost can handle missing values in categorical features and features a new parameter, `max_cat_threshold`, which limits the number of categories that can be used in split evaluation. The parameter is enabled when the partitioning algorithm is used and helps prevent over-fitting. Also, the sklearn interface can now accept the `feature_types` parameter, so that data types other than dataframes can be used for categorical features, as shown in the sketch below. (#8280, #7821, #8285, #8080, #7948, #7858, #7853, #8212, #7957, #7937, #7934)

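A hedged sketch of the new parameters through the sklearn interface; the synthetic data, the threshold value, and the `"q"`/`"c"` markers (numeric/categorical, the convention used by `feature_types`) are assumptions for illustration:

```python
import numpy as np
import xgboost as xgb

rng = np.random.default_rng(0)
# Second column holds integer category codes; no dataframe is needed.
X = np.column_stack([rng.random(100), rng.integers(0, 8, 100)])
y = rng.random(100)

reg = xgb.XGBRegressor(
    tree_method="hist",
    enable_categorical=True,
    max_cat_threshold=32,        # cap on categories considered per split
    feature_types=["q", "c"],    # mark the second column as categorical
)
reg.fit(X, y)
```
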
### Experimental support for federated learning and new communication collective

An exciting addition to XGBoost is the experimental federated learning support. Federated learning is implemented with a gRPC federated server that aggregates allreduce calls, and federated clients that train on local data using existing tree methods (approx, hist, gpu_hist). Currently, this only supports horizontal federated learning (samples are split across participants, and each participant has all the features and labels). Future plans include vertical federated learning (features split across participants) and stronger privacy guarantees with homomorphic encryption and differential privacy. See the [demo with NVFlare integration](demo/nvflare/README.md) for example usage with NVFlare.

As part of this work, XGBoost 1.7 has replaced the old rabit module with the new collective module as the network communication interface, with added support for runtime backend selection. In previous versions, the backend was defined at compile time and could not be changed once built. In this new release, users can choose between `rabit` and `federated`. (#8029, #8351, #8350, #8342, #8340, #8325, #8279, #8181, #8027, #7958, #7831, #7879, #8257, #8316, #8242, #8057, #8203, #8038, #7965, #7930, #7911)

The feature is available in the public PyPI binary package for testing.

### Quantile DMatrix

Before 1.7, XGBoost had an internal data structure called `DeviceQuantileDMatrix` (and its distributed version). We have now extended its support to the CPU and renamed it to `QuantileDMatrix`. This data structure is used for optimizing memory usage for the `hist` and `gpu_hist` tree methods. The new feature helps reduce CPU memory usage significantly, especially for dense data. The new `QuantileDMatrix` can be initialized from both CPU and GPU data, and regardless of where the data comes from, the constructed instance can be used by both the CPU and GPU algorithms, including training and prediction (with some conversion overhead if the device of the data and the training algorithm don't match). Also, a new parameter `ref` is added to `QuantileDMatrix`, which can be used to construct validation/test datasets, as shown below. Lastly, it is used as the default in the scikit-learn interface when a supported tree method is specified by users. (#7889, #7923, #8136, #8215, #8284, #8268, #8220, #8346, #8327, #8130, #8116, #8103, #8094, #8086, #7898, #8060, #8019, #8045, #7901, #7912, #7922)

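A minimal sketch with synthetic data; passing the training `QuantileDMatrix` as `ref` makes the validation set reuse the training set's quantile cuts:

```python
import numpy as np
import xgboost as xgb

rng = np.random.default_rng(0)
X_train, y_train = rng.random((1000, 10)), rng.random(1000)
X_valid, y_valid = rng.random((200, 10)), rng.random(200)

Xy_train = xgb.QuantileDMatrix(X_train, y_train)
# Reuse the training histogram cuts for the validation data.
Xy_valid = xgb.QuantileDMatrix(X_valid, y_valid, ref=Xy_train)

booster = xgb.train(
    {"tree_method": "hist"},
    Xy_train,
    num_boost_round=10,
    evals=[(Xy_valid, "valid")],
)
```
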
### Mean absolute error

The mean absolute error is a new member of the collection of objectives in XGBoost. It's noteworthy since MAE has a zero Hessian, which is unusual for XGBoost, as XGBoost relies on Newton optimization. Without valid Hessian values, convergence can be slow. As part of the support for MAE, we added line searches to the XGBoost training algorithm to overcome the difficulty of training without valid Hessian values. In the future, we will extend the line search to other objectives where it's appropriate, for faster convergence. (#8343, #8107, #7812, #8380)

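Using the new objective is a one-line change; `reg:absoluteerror` is the parameter name for MAE, and the data below is synthetic:

```python
import numpy as np
import xgboost as xgb

rng = np.random.default_rng(0)
X, y = rng.random((256, 8)), rng.random(256)

# Train with the mean absolute error objective.
reg = xgb.XGBRegressor(objective="reg:absoluteerror", n_estimators=50)
reg.fit(X, y)
```
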
### XGBoost on Browser

With the help of the [pyodide](https://github.com/pyodide/pyodide) project, you can now run XGBoost in the browser. (#7954, #8369)

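A hedged sketch of trying this out in a Pyodide console; `micropip` is Pyodide's package installer, and whether the `xgboost` wheel is available depends on the Pyodide distribution in use:

```python
import micropip
await micropip.install("xgboost")  # top-level await works in the Pyodide REPL

import xgboost as xgb
print(xgb.__version__)
```
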
### Experimental IPv6 Support for Dask

With the growing adoption of the new internet protocol, XGBoost has joined the club. In the latest release, the Dask interface can be used on IPv6 clusters; see XGBoost's Dask tutorial for details. (#8225, #8234)

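A hedged sketch, assuming a Dask scheduler is already listening on an IPv6 address (the loopback address and port below are illustrative) and that `X` and `y` are Dask collections:

```python
from dask.distributed import Client
from xgboost import dask as dxgb

client = Client("tcp://[::1]:8786")  # scheduler on the IPv6 loopback address

clf = dxgb.DaskXGBClassifier(tree_method="hist")
clf.client = client
# clf.fit(X, y)  # X, y: dask.array / dask.dataframe collections
```
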
### Optimizations

We have new optimizations for both the `hist` and `gpu_hist` tree methods to make XGBoost's training even more efficient.

* Hist

  Hist now supports an optional by-column histogram build, which is automatically configured based on various conditions of the input data. This helps the XGBoost CPU hist algorithm scale better with different shapes of training datasets. (#8233, #8259) Also, the histogram build kernel can now better utilize CPU registers. (#8218)

* GPU Hist

  GPU hist performance is significantly improved for wide datasets. GPU hist now supports batched node build, which reduces kernel latency and increases throughput. The improvement is particularly significant when growing deep trees with the default ``depthwise`` policy. (#7919, #8073, #8051, #8118, #7867, #7964, #8026)

### Breaking Changes

Breaking changes made in the 1.7 release are summarized below.

- The `grow_local_histmaker` updater is removed. This updater was rarely used in practice and had no tests. We decided to remove it and have XGBoost focus on other, more efficient algorithms. (#7992, #8091)
- The single-precision histogram is removed due to its lack of accuracy caused by significant floating point error. In some cases the error can be difficult to detect due to log-scale operations, which makes the parameter dangerous to use. (#7892, #7828)
- Deprecated CUDA architectures are no longer supported in the release binaries. (#7774)
- As part of the federated learning development, the `rabit` module is replaced with the new `collective` module. It's a drop-in replacement with added runtime backend selection; see the federated learning section for more details. (#8257)

### General new features and improvements

Before diving into package-specific changes, some general new features other than those listed at the beginning are summarized here.

* Users of `DMatrix` and `QuantileDMatrix` can get the data back from XGBoost. In previous versions, only getters for meta info like labels were available. The new method is available in Python (`DMatrix::get_data`) and C; see the sketch after this list. (#8269, #8323)
* In previous versions, the GPU histogram tree method could generate phantom gradients for missing values due to floating point error. We fixed this error in this release, and XGBoost is now much better equipped to handle floating point errors when training on GPU. (#8274, #8246)
* Parameter validation is no longer experimental. (#8206)
* C pointer parameters and JSON parameters are rigorously checked. (#8254)
* Improved handling of JSON model input. (#7953, #7918)
* Support IBM i OS. (#7920, #8178)

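A minimal sketch of the new getter with synthetic data; `get_data` returns the predictor matrix as a `scipy.sparse` CSR matrix:

```python
import numpy as np
import xgboost as xgb

rng = np.random.default_rng(0)
X, y = rng.random((32, 4)), rng.random(32)

Xy = xgb.DMatrix(X, label=y)
csr = Xy.get_data()   # scipy.sparse.csr_matrix with the same shape as X
print(csr.shape)      # (32, 4)
```
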
### Fixes

Some noteworthy bug fixes that are not related to a specific language binding are listed in this section.

* Rename the misspelled config parameter for pseudo-Huber. (#7904)
* Fix feature weights with nested column sampling. (#8100)
* Fix loading DMatrix binary in a distributed environment. (#8149)
* Force auc.cc to be statically linked for unusual compiler platforms. (#8039)
* New logic for detecting libomp on macOS. (#8384)

### Python Package

* Python 3.8 is now the minimum required Python version. (#8071)
* More progress on type hint support. Except for the new PySpark interface, the XGBoost module is fully typed. (#7742, #7945, #8302, #7914, #8052)
* XGBoost now validates the feature names in `inplace_predict`, which also affects the predict function in scikit-learn estimators, as it uses `inplace_predict` internally; see the sketch after this list. (#8359)
* Users can now get the data from `DMatrix` using `DMatrix::get_data` or `QuantileDMatrix::get_data`.
* Show the `libxgboost.so` path in build info. (#7893)
* Raise an import error when using the sklearn module while scikit-learn is missing. (#8049)
* Use `config_context` in the sklearn interface. (#8141)
* Pandas dataframe handling is refactored to reduce data fragmentation. (#7843)
* Support more pandas nullable types. (#8262)
* Remove the pyarrow workaround. (#7884)

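A sketch of the new validation with a toy pandas DataFrame; predicting with reordered columns now raises an error instead of silently mis-aligning features:

```python
import pandas as pd
import xgboost as xgb

df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [3.0, 2.0, 1.0]})
y = [0.0, 1.0, 0.0]

model = xgb.XGBRegressor(n_estimators=4).fit(df, y)
model.predict(df)  # fine: column names match training

try:
    model.predict(df[["b", "a"]])   # reordered columns
except ValueError as err:
    print(err)                      # feature names mismatch
```
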
* Binary wheel size

  We aim to enable as many features as possible in XGBoost's default binary distribution on PyPI (the package installed with pip), but there's an upper limit on the size of the binary wheel. In 1.7, XGBoost reduces the size of the wheel by pruning unused CUDA architectures. (#8179, #8152, #8150)

* Fixes

  Some noteworthy fixes are listed here:

  - Fix the Dask interface with the latest cupy. (#8210)
  - Check cuDF lazily to avoid potential errors with cuda-python. (#8084)
  - Fix a potential error in the DMatrix constructor on 32-bit platforms. (#8369)

* Maintenance work

  - The linter script is moved from dmlc-core to XGBoost, with added support for formatting, mypy, and parallel runs, along with some fixes. (#7967, #8101, #8216)
  - We now require the use of `isort` and `black` for selected files. (#8137, #8096)
  - Code cleanups. (#7827)
  - Deprecate `use_label_encoder` in XGBClassifier. The label encoder has already been deprecated and removed in the previous version; these changes only affect the indicator parameter. (#7822)
  - Remove the use of distutils. (#7770)
  - Refactoring and fixes for tests. (#8077, #8064, #8078, #8076, #8013, #8010, #8244, #7833)

* Documents

  - [dask] Fix a potential error in the demo. (#8079)
  - Improved documentation for the ranker. (#8356, #8347)
  - Indicate the lack of py-xgboost-gpu on Windows. (#8127)
  - Clarification for feature importance. (#8151)
  - Simplify the Python getting-started example. (#8153)

### R Package

We briefly summarize improvements for the R package here:

* Feature info, including names and types, is now passed to DMatrix in preparation for categorical feature support. (#804)
* XGBoost 1.7 can now gracefully load old R models from RDS for better compatibility with third-party tuning libraries. (#7864)
* The R package can now be built with parallel compilation, along with fixes for warnings in CRAN tests. (#8330)
* Emit an error early if DiagrammeR is missing. (#8037)
* Fix the R package Windows build. (#8065)

### JVM Packages

The consistency between the JVM packages and other language bindings is greatly improved in 1.7; improvements range from the model serialization format to the default values of hyper-parameters.

* The Java package now supports feature names and feature types for DMatrix, in preparation for categorical feature support. (#7966)
* Models trained by the JVM packages can now be safely used with other language bindings. (#7896, #7907)
* Users can specify the model format when saving models with a stream. (#7940, #7955)
* The default values for training parameters are now sourced from XGBoost directly, which helps the JVM packages stay consistent with other packages. (#7938)
* Set the correct objective if the user doesn't explicitly set it. (#7781)
* Auto-detection of MUSL is replaced by system properties. (#7921)
* Improved error message for launching the tracker. (#7952, #7968)
* Fix a race condition in parameter configuration. (#8025)
* [Breaking] `timeoutRequestWorkers` is now removed. With the support for barrier mode, this parameter is no longer needed. (#7839)
* Dependency updates. (#7791, #8157, #7801, #8240)

### Documents

- The document for the C interface is greatly improved and is now displayed on the [sphinx document page](https://xgboost.readthedocs.io/en/latest/c.html). Thanks to the breathe project, you can view the C API just like the Python API. (#8300)
- We now avoid using XGBoost's internal text parser in demos and recommend that users use dedicated libraries for loading data whenever feasible. (#7753)
- Python survival training demos are now displayed in the [sphinx gallery](https://xgboost.readthedocs.io/en/latest/python/survival-examples/index.html). (#8328)
- Fixes for typos, links, formatting, and grammar. (#7800, #7832, #7861, #8099, #8163, #8166, #8229, #8028, #8214, #7777, #7905, #8270, #8309, d70e59fef, #7806)
- Updated the winning solution under README.md. (#7862)
- New security policy. (#8360)
- The GPU document is overhauled as we consider CUDA support to be feature-complete. (#8378)

### Maintenance

* Code refactoring and cleanups. (#7850, #7826, #7910, #8332, #8204)
* Reduce compiler warnings. (#7768, #7916, #8046, #8059, #7974, #8031, #8022)
* Compiler workarounds. (#8211, #8314, #8226, #8093)
* Dependency updates. (#8001, #7876, #7973, #8298, #7816)
* Remove warnings emitted in previous versions. (#7815)
* Small fixes made during development. (#8008)

### CI and Tests

* We overhauled the CI infrastructure to reduce the CI cost and lighten the maintenance burden. Jenkins is replaced with Buildkite for better automation, with which finer control of test runs is implemented to reduce overall cost. Also, we refactored some of the existing tests to reduce their runtime, reduced the size of the Docker images, and removed multi-GPU C++ tests. Lastly, `pytest-timeout` is added as an optional dependency for running Python tests to keep the test time in check. (#7772, #8291, #8286, #8276, #8306, #8287, #8243, #8313, #8235, #8288, #8303, #8142, #8092, #8333, #8312, #8348)
* New documents on how to reproduce the CI environment. (#7971, #8297)
* Improved automation for the JVM release. (#7882)
* GitHub Actions security-related updates. (#8263, #8267, #8360)
* Other fixes and maintenance work. (#8154, #7848, #8069, #7943)
* Small updates and fixes to the GitHub Actions pipelines. (#8364, #8321, #8241, #7950, #8011)

## v1.6.1 (2022 May 9)

This is a patch release for bug fixes and Spark barrier mode support. The R package is unchanged.

@@ -16,6 +16,7 @@ target_compile_definitions(xgboost-r
-DDMLC_LOG_BEFORE_THROW=0
-DDMLC_DISABLE_STDIN=1
-DDMLC_LOG_CUSTOMIZE=1
-DRABIT_CUSTOMIZE_MSG_
-DRABIT_STRICT_CXX98_)
target_include_directories(xgboost-r
PRIVATE

@@ -1,8 +1,8 @@
Package: xgboost
Type: Package
Title: Extreme Gradient Boosting
Version: 2.0.1.1
Date: 2023-10-12
Version: 1.7.6.1
Date: 2023-06-16
Authors@R: c(
person("Tianqi", "Chen", role = c("aut"),
email = "tianqi.tchen@gmail.com"),
@@ -54,8 +54,10 @@ Suggests:
Ckmeans.1d.dp (>= 3.3.1),
vcd (>= 1.3),
testthat,
lintr,
igraph (>= 1.0.1),
float,
crayon,
titanic
Depends:
R (>= 3.3.0)

@@ -70,7 +70,7 @@ cb.print.evaluation <- function(period = 1, showsd = TRUE) {
|
||||
i == env$begin_iteration ||
|
||||
i == env$end_iteration) {
|
||||
stdev <- if (showsd) env$bst_evaluation_err else NULL
|
||||
msg <- .format_eval_string(i, env$bst_evaluation, stdev)
|
||||
msg <- format.eval.string(i, env$bst_evaluation, stdev)
|
||||
cat(msg, '\n')
|
||||
}
|
||||
}
|
||||
@@ -114,7 +114,7 @@ cb.evaluation.log <- function() {
|
||||
if (is.null(mnames) || any(mnames == ""))
|
||||
stop("bst_evaluation must have non-empty names")
|
||||
|
||||
mnames <<- gsub('-', '_', names(env$bst_evaluation), fixed = TRUE)
|
||||
mnames <<- gsub('-', '_', names(env$bst_evaluation))
|
||||
if (!is.null(env$bst_evaluation_err))
|
||||
mnames <<- c(paste0(mnames, '_mean'), paste0(mnames, '_std'))
|
||||
}
|
||||
@@ -185,7 +185,7 @@ cb.reset.parameters <- function(new_params) {
|
||||
|
||||
if (typeof(new_params) != "list")
|
||||
stop("'new_params' must be a list")
|
||||
pnames <- gsub(".", "_", names(new_params), fixed = TRUE)
|
||||
pnames <- gsub("\\.", "_", names(new_params))
|
||||
nrounds <- NULL
|
||||
|
||||
# run some checks in the beginning
|
||||
@@ -300,9 +300,9 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
|
||||
if (length(env$bst_evaluation) == 0)
|
||||
stop("For early stopping, watchlist must have at least one element")
|
||||
|
||||
eval_names <- gsub('-', '_', names(env$bst_evaluation), fixed = TRUE)
|
||||
eval_names <- gsub('-', '_', names(env$bst_evaluation))
|
||||
if (!is.null(metric_name)) {
|
||||
metric_idx <<- which(gsub('-', '_', metric_name, fixed = TRUE) == eval_names)
|
||||
metric_idx <<- which(gsub('-', '_', metric_name) == eval_names)
|
||||
if (length(metric_idx) == 0)
|
||||
stop("'metric_name' for early stopping is not one of the following:\n",
|
||||
paste(eval_names, collapse = ' '), '\n')
|
||||
@@ -319,7 +319,7 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
|
||||
|
||||
# maximize is usually NULL when not set in xgb.train and built-in metrics
|
||||
if (is.null(maximize))
|
||||
maximize <<- grepl('(_auc|_map|_ndcg|_pre)', metric_name)
|
||||
maximize <<- grepl('(_auc|_map|_ndcg)', metric_name)
|
||||
|
||||
if (verbose && NVL(env$rank, 0) == 0)
|
||||
cat("Will train until ", metric_name, " hasn't improved in ",
|
||||
@@ -380,9 +380,7 @@ cb.early.stop <- function(stopping_rounds, maximize = FALSE,
|
||||
if ((maximize && score > best_score) ||
|
||||
(!maximize && score < best_score)) {
|
||||
|
||||
best_msg <<- .format_eval_string(
|
||||
i, env$bst_evaluation, env$bst_evaluation_err
|
||||
)
|
||||
best_msg <<- format.eval.string(i, env$bst_evaluation, env$bst_evaluation_err)
|
||||
best_score <<- score
|
||||
best_iteration <<- i
|
||||
best_ntreelimit <<- best_iteration * env$num_parallel_tree
|
||||
@@ -513,7 +511,7 @@ cb.cv.predict <- function(save_models = FALSE) {
|
||||
if (save_models) {
|
||||
env$basket$models <- lapply(env$bst_folds, function(fd) {
|
||||
xgb.attr(fd$bst, 'niter') <- env$end_iteration - 1
|
||||
xgb.Booster.complete(xgb.handleToBooster(handle = fd$bst, raw = NULL), saveraw = TRUE)
|
||||
xgb.Booster.complete(xgb.handleToBooster(fd$bst), saveraw = TRUE)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -594,12 +592,12 @@ cb.cv.predict <- function(save_models = FALSE) {
|
||||
#'
|
||||
#' #### Multiclass classification:
|
||||
#' #
|
||||
#' dtrain <- xgb.DMatrix(scale(x), label = as.numeric(iris$Species) - 1, nthread = 1)
|
||||
#' dtrain <- xgb.DMatrix(scale(x), label = as.numeric(iris$Species) - 1, nthread = 2)
|
||||
#' param <- list(booster = "gblinear", objective = "multi:softprob", num_class = 3,
|
||||
#' lambda = 0.0003, alpha = 0.0003, nthread = 1)
|
||||
#' lambda = 0.0003, alpha = 0.0003, nthread = 2)
|
||||
#' # For the default linear updater 'shotgun' it sometimes is helpful
|
||||
#' # to use smaller eta to reduce instability
|
||||
#' bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 50, eta = 0.5,
|
||||
#' bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 70, eta = 0.5,
|
||||
#' callbacks = list(cb.gblinear.history()))
|
||||
#' # Will plot the coefficient paths separately for each class:
|
||||
#' matplot(xgb.gblinear.history(bst, class_index = 0), type = 'l')
|
||||
@@ -613,15 +611,13 @@ cb.cv.predict <- function(save_models = FALSE) {
|
||||
#' matplot(xgb.gblinear.history(bst, class_index = 0)[[1]], type = 'l')
|
||||
#'
|
||||
#' @export
|
||||
cb.gblinear.history <- function(sparse = FALSE) {
|
||||
cb.gblinear.history <- function(sparse=FALSE) {
|
||||
coefs <- NULL
|
||||
|
||||
init <- function(env) {
|
||||
# xgb.train(): bst will be present
|
||||
# xgb.cv(): bst_folds will be present
|
||||
if (is.null(env$bst) && is.null(env$bst_folds)) {
|
||||
stop("Parent frame has neither 'bst' nor 'bst_folds'")
|
||||
}
|
||||
if (!is.null(env$bst)) { # xgb.train:
|
||||
} else if (!is.null(env$bst_folds)) { # xgb.cv:
|
||||
} else stop("Parent frame has neither 'bst' nor 'bst_folds'")
|
||||
}
|
||||
|
||||
# convert from list to (sparse) matrix
|
||||
@@ -661,7 +657,7 @@ cb.gblinear.history <- function(sparse = FALSE) {
|
||||
} else { # xgb.cv:
|
||||
cf <- vector("list", length(env$bst_folds))
|
||||
for (i in seq_along(env$bst_folds)) {
|
||||
dmp <- xgb.dump(xgb.handleToBooster(handle = env$bst_folds[[i]]$bst, raw = NULL))
|
||||
dmp <- xgb.dump(xgb.handleToBooster(env$bst_folds[[i]]$bst))
|
||||
cf[[i]] <- as.numeric(grep('(booster|bias|weigh)', dmp, invert = TRUE, value = TRUE))
|
||||
if (sparse) cf[[i]] <- as(cf[[i]], "sparseVector")
|
||||
}
|
||||
@@ -756,7 +752,7 @@ xgb.gblinear.history <- function(model, class_index = NULL) {
|
||||
#
|
||||
|
||||
# Format the evaluation metric string
|
||||
.format_eval_string <- function(iter, eval_res, eval_err = NULL) {
|
||||
format.eval.string <- function(iter, eval_res, eval_err = NULL) {
|
||||
if (length(eval_res) == 0)
|
||||
stop('no evaluation results')
|
||||
enames <- names(eval_res)
|
||||
|
||||
@@ -38,11 +38,11 @@ check.booster.params <- function(params, ...) {
|
||||
stop("params must be a list")
|
||||
|
||||
# in R interface, allow for '.' instead of '_' in parameter names
|
||||
names(params) <- gsub(".", "_", names(params), fixed = TRUE)
|
||||
names(params) <- gsub("\\.", "_", names(params))
|
||||
|
||||
# merge parameters from the params and the dots-expansion
|
||||
dot_params <- list(...)
|
||||
names(dot_params) <- gsub(".", "_", names(dot_params), fixed = TRUE)
|
||||
names(dot_params) <- gsub("\\.", "_", names(dot_params))
|
||||
if (length(intersect(names(params),
|
||||
names(dot_params))) > 0)
|
||||
stop("Same parameters in 'params' and in the call are not allowed. Please check your 'params' list.")
|
||||
@@ -82,7 +82,7 @@ check.booster.params <- function(params, ...) {
|
||||
|
||||
# interaction constraints parser (convert from list of column indices to string)
|
||||
if (!is.null(params[['interaction_constraints']]) &&
|
||||
typeof(params[['interaction_constraints']]) != "character") {
|
||||
typeof(params[['interaction_constraints']]) != "character"){
|
||||
# check input class
|
||||
if (!identical(class(params[['interaction_constraints']]), 'list')) stop('interaction_constraints should be class list')
|
||||
if (!all(unique(sapply(params[['interaction_constraints']], class)) %in% c('numeric', 'integer'))) {
|
||||
@@ -140,7 +140,7 @@ check.custom.eval <- function(env = parent.frame()) {
|
||||
|
||||
|
||||
# Update a booster handle for an iteration with dtrain data
|
||||
xgb.iter.update <- function(booster_handle, dtrain, iter, obj) {
|
||||
xgb.iter.update <- function(booster_handle, dtrain, iter, obj = NULL) {
|
||||
if (!identical(class(booster_handle), "xgb.Booster.handle")) {
|
||||
stop("booster_handle must be of xgb.Booster.handle class")
|
||||
}
|
||||
@@ -163,7 +163,7 @@ xgb.iter.update <- function(booster_handle, dtrain, iter, obj) {
|
||||
# Evaluate one iteration.
|
||||
# Returns a named vector of evaluation metrics
|
||||
# with the names in a 'datasetname-metricname' format.
|
||||
xgb.iter.eval <- function(booster_handle, watchlist, iter, feval) {
|
||||
xgb.iter.eval <- function(booster_handle, watchlist, iter, feval = NULL) {
|
||||
if (!identical(class(booster_handle), "xgb.Booster.handle"))
|
||||
stop("class of booster_handle must be xgb.Booster.handle")
|
||||
|
||||
@@ -234,7 +234,7 @@ generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
|
||||
y <- factor(y)
|
||||
}
|
||||
}
|
||||
folds <- xgb.createFolds(y = y, k = nfold)
|
||||
folds <- xgb.createFolds(y, nfold)
|
||||
} else {
|
||||
# make simple non-stratified folds
|
||||
kstep <- length(rnd_idx) %/% nfold
|
||||
@@ -251,7 +251,8 @@ generate.cv.folds <- function(nfold, nrows, stratified, label, params) {
|
||||
# Creates CV folds stratified by the values of y.
|
||||
# It was borrowed from caret::createFolds and simplified
|
||||
# by always returning an unnamed list of fold indices.
|
||||
xgb.createFolds <- function(y, k) {
|
||||
xgb.createFolds <- function(y, k = 10)
|
||||
{
|
||||
if (is.numeric(y)) {
|
||||
## Group the numeric data based on their magnitudes
|
||||
## and sample within those groups.
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
# Construct an internal xgboost Booster and return a handle to it.
|
||||
# internal utility function
|
||||
xgb.Booster.handle <- function(params, cachelist, modelfile, handle) {
|
||||
xgb.Booster.handle <- function(params = list(), cachelist = list(),
|
||||
modelfile = NULL, handle = NULL) {
|
||||
if (typeof(cachelist) != "list" ||
|
||||
!all(vapply(cachelist, inherits, logical(1), what = 'xgb.DMatrix'))) {
|
||||
stop("cachelist must be a list of xgb.DMatrix objects")
|
||||
@@ -11,7 +12,7 @@ xgb.Booster.handle <- function(params, cachelist, modelfile, handle) {
|
||||
## A filename
|
||||
handle <- .Call(XGBoosterCreate_R, cachelist)
|
||||
modelfile <- path.expand(modelfile)
|
||||
.Call(XGBoosterLoadModel_R, handle, enc2utf8(modelfile[1]))
|
||||
.Call(XGBoosterLoadModel_R, handle, modelfile[1])
|
||||
class(handle) <- "xgb.Booster.handle"
|
||||
if (length(params) > 0) {
|
||||
xgb.parameters(handle) <- params
|
||||
@@ -43,7 +44,7 @@ xgb.Booster.handle <- function(params, cachelist, modelfile, handle) {
|
||||
|
||||
# Convert xgb.Booster.handle to xgb.Booster
|
||||
# internal utility function
|
||||
xgb.handleToBooster <- function(handle, raw) {
|
||||
xgb.handleToBooster <- function(handle, raw = NULL) {
|
||||
bst <- list(handle = handle, raw = raw)
|
||||
class(bst) <- "xgb.Booster"
|
||||
return(bst)
|
||||
@@ -128,12 +129,7 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
|
||||
stop("argument type must be xgb.Booster")
|
||||
|
||||
if (is.null.handle(object$handle)) {
|
||||
object$handle <- xgb.Booster.handle(
|
||||
params = list(),
|
||||
cachelist = list(),
|
||||
modelfile = object$raw,
|
||||
handle = object$handle
|
||||
)
|
||||
object$handle <- xgb.Booster.handle(modelfile = object$raw, handle = object$handle)
|
||||
} else {
|
||||
if (is.null(object$raw) && saveraw) {
|
||||
object$raw <- xgb.serialize(object$handle)
|
||||
@@ -218,10 +214,6 @@ xgb.Booster.complete <- function(object, saveraw = TRUE) {
|
||||
#' Since it quadratically depends on the number of features, it is recommended to perform selection
|
||||
#' of the most important features first. See below about the format of the returned results.
|
||||
#'
|
||||
#' The \code{predict()} method uses as many threads as defined in \code{xgb.Booster} object (all by default).
|
||||
#' If you want to change their number, then assign a new number to \code{nthread} using \code{\link{xgb.parameters<-}}.
|
||||
#' Note also that converting a matrix to \code{\link{xgb.DMatrix}} uses multiple threads too.
|
||||
#'
|
||||
#' @return
|
||||
#' The return type is different depending whether \code{strict_shape} is set to \code{TRUE}. By default,
|
||||
#' for regression or binary classification, it returns a vector of length \code{nrows(newdata)}.
|
||||
@@ -479,7 +471,7 @@ predict.xgb.Booster <- function(object, newdata, missing = NA, outputmargin = FA
|
||||
#' @export
|
||||
predict.xgb.Booster.handle <- function(object, ...) {
|
||||
|
||||
bst <- xgb.handleToBooster(handle = object, raw = NULL)
|
||||
bst <- xgb.handleToBooster(object)
|
||||
|
||||
ret <- predict(bst, ...)
|
||||
return(ret)
|
||||
@@ -638,7 +630,7 @@ xgb.attributes <- function(object) {
|
||||
#' @export
|
||||
xgb.config <- function(object) {
|
||||
handle <- xgb.get.handle(object)
|
||||
.Call(XGBoosterSaveJsonConfig_R, handle)
|
||||
.Call(XGBoosterSaveJsonConfig_R, handle);
|
||||
}
|
||||
|
||||
#' @rdname xgb.config
|
||||
@@ -680,7 +672,7 @@ xgb.config <- function(object) {
|
||||
if (is.null(names(p)) || any(nchar(names(p)) == 0)) {
|
||||
stop("parameter names cannot be empty strings")
|
||||
}
|
||||
names(p) <- gsub(".", "_", names(p), fixed = TRUE)
|
||||
names(p) <- gsub("\\.", "_", names(p))
|
||||
p <- lapply(p, function(x) as.character(x)[1])
|
||||
handle <- xgb.get.handle(object)
|
||||
for (i in seq_along(p)) {
|
||||
|
||||
@@ -36,37 +36,19 @@ xgb.DMatrix <- function(data, info = list(), missing = NA, silent = FALSE, nthre
|
||||
cnames <- colnames(data)
|
||||
} else if (inherits(data, "dgCMatrix")) {
|
||||
handle <- .Call(
|
||||
XGDMatrixCreateFromCSC_R,
|
||||
data@p,
|
||||
data@i,
|
||||
data@x,
|
||||
nrow(data),
|
||||
missing,
|
||||
as.integer(NVL(nthread, -1))
|
||||
XGDMatrixCreateFromCSC_R, data@p, data@i, data@x, nrow(data), as.integer(NVL(nthread, -1))
|
||||
)
|
||||
cnames <- colnames(data)
|
||||
} else if (inherits(data, "dgRMatrix")) {
|
||||
handle <- .Call(
|
||||
XGDMatrixCreateFromCSR_R,
|
||||
data@p,
|
||||
data@j,
|
||||
data@x,
|
||||
ncol(data),
|
||||
missing,
|
||||
as.integer(NVL(nthread, -1))
|
||||
XGDMatrixCreateFromCSR_R, data@p, data@j, data@x, ncol(data), as.integer(NVL(nthread, -1))
|
||||
)
|
||||
cnames <- colnames(data)
|
||||
} else if (inherits(data, "dsparseVector")) {
|
||||
indptr <- c(0L, as.integer(length(data@i)))
|
||||
ind <- as.integer(data@i) - 1L
|
||||
handle <- .Call(
|
||||
XGDMatrixCreateFromCSR_R,
|
||||
indptr,
|
||||
ind,
|
||||
data@x,
|
||||
length(data),
|
||||
missing,
|
||||
as.integer(NVL(nthread, -1))
|
||||
XGDMatrixCreateFromCSR_R, indptr, ind, data@x, length(data), as.integer(NVL(nthread, -1))
|
||||
)
|
||||
} else {
|
||||
stop("xgb.DMatrix does not support construction from ", typeof(data))
|
||||
@@ -88,13 +70,13 @@ xgb.DMatrix <- function(data, info = list(), missing = NA, silent = FALSE, nthre
|
||||
|
||||
# get dmatrix from data, label
|
||||
# internal helper method
|
||||
xgb.get.DMatrix <- function(data, label, missing, weight, nthread) {
|
||||
xgb.get.DMatrix <- function(data, label = NULL, missing = NA, weight = NULL, nthread = NULL) {
|
||||
if (inherits(data, "dgCMatrix") || is.matrix(data)) {
|
||||
if (is.null(label)) {
|
||||
stop("label must be provided when data is a matrix")
|
||||
}
|
||||
dtrain <- xgb.DMatrix(data, label = label, missing = missing, nthread = nthread)
|
||||
if (!is.null(weight)) {
|
||||
if (!is.null(weight)){
|
||||
setinfo(dtrain, "weight", weight)
|
||||
}
|
||||
} else {
|
||||
@@ -236,7 +218,7 @@ getinfo.xgb.DMatrix <- function(object, name, ...) {
|
||||
}
|
||||
if (name == "feature_name" || name == "feature_type") {
|
||||
ret <- .Call(XGDMatrixGetStrFeatureInfo_R, object, name)
|
||||
} else if (name != "nrow") {
|
||||
} else if (name != "nrow"){
|
||||
ret <- .Call(XGDMatrixGetInfo_R, object, name)
|
||||
} else {
|
||||
ret <- nrow(object)
|
||||
@@ -346,6 +328,7 @@ setinfo.xgb.DMatrix <- function(object, name, info, ...) {
|
||||
return(TRUE)
|
||||
}
|
||||
stop("setinfo: unknown info name ", name)
|
||||
return(FALSE)
|
||||
}
|
||||
|
||||
|
||||
@@ -435,7 +418,7 @@ print.xgb.DMatrix <- function(x, verbose = FALSE, ...) {
|
||||
cat(infos)
|
||||
cnames <- colnames(x)
|
||||
cat(' colnames:')
|
||||
if (verbose && !is.null(cnames)) {
|
||||
if (verbose & !is.null(cnames)) {
|
||||
cat("\n'")
|
||||
cat(cnames, sep = "','")
|
||||
cat("'")
|
||||
|
||||
@@ -75,11 +75,9 @@
|
||||
#' @details
|
||||
#' The original sample is randomly partitioned into \code{nfold} equal size subsamples.
|
||||
#'
|
||||
#' Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model,
|
||||
#' and the remaining \code{nfold - 1} subsamples are used as training data.
|
||||
#' Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model, and the remaining \code{nfold - 1} subsamples are used as training data.
|
||||
#'
|
||||
#' The cross-validation process is then repeated \code{nrounds} times, with each of the
|
||||
#' \code{nfold} subsamples used exactly once as the validation data.
|
||||
#' The cross-validation process is then repeated \code{nrounds} times, with each of the \code{nfold} subsamples used exactly once as the validation data.
|
||||
#'
|
||||
#' All observations are used for both training and validation.
|
||||
#'
|
||||
@@ -119,10 +117,10 @@
|
||||
#' print(cv, verbose=TRUE)
|
||||
#'
|
||||
#' @export
|
||||
xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing = NA,
|
||||
prediction = FALSE, showsd = TRUE, metrics = list(),
|
||||
xgb.cv <- function(params=list(), data, nrounds, nfold, label = NULL, missing = NA,
|
||||
prediction = FALSE, showsd = TRUE, metrics=list(),
|
||||
obj = NULL, feval = NULL, stratified = TRUE, folds = NULL, train_folds = NULL,
|
||||
verbose = TRUE, print_every_n = 1L,
|
||||
verbose = TRUE, print_every_n=1L,
|
||||
early_stopping_rounds = NULL, maximize = NULL, callbacks = list(), ...) {
|
||||
|
||||
check.deprecation(...)
|
||||
@@ -135,6 +133,9 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing
|
||||
check.custom.obj()
|
||||
check.custom.eval()
|
||||
|
||||
#if (is.null(params[['eval_metric']]) && is.null(feval))
|
||||
# stop("Either 'eval_metric' or 'feval' must be provided for CV")
|
||||
|
||||
# Check the labels
|
||||
if ((inherits(data, 'xgb.DMatrix') && is.null(getinfo(data, 'label'))) ||
|
||||
(!inherits(data, 'xgb.DMatrix') && is.null(label))) {
|
||||
@@ -158,6 +159,10 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing
|
||||
folds <- generate.cv.folds(nfold, nrow(data), stratified, cv_label, params)
|
||||
}
|
||||
|
||||
# Potential TODO: sequential CV
|
||||
#if (strategy == 'sequential')
|
||||
# stop('Sequential CV strategy is not yet implemented')
|
||||
|
||||
# verbosity & evaluation printing callback:
|
||||
params <- c(params, list(silent = 1))
|
||||
print_every_n <- max(as.integer(print_every_n), 1L)
|
||||
@@ -187,13 +192,7 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing
|
||||
|
||||
# create the booster-folds
|
||||
# train_folds
|
||||
dall <- xgb.get.DMatrix(
|
||||
data = data,
|
||||
label = label,
|
||||
missing = missing,
|
||||
weight = NULL,
|
||||
nthread = params$nthread
|
||||
)
|
||||
dall <- xgb.get.DMatrix(data, label, missing, nthread = params$nthread)
|
||||
bst_folds <- lapply(seq_along(folds), function(k) {
|
||||
dtest <- slice(dall, folds[[k]])
|
||||
# code originally contributed by @RolandASc on stackoverflow
|
||||
@@ -201,12 +200,7 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing
|
||||
dtrain <- slice(dall, unlist(folds[-k]))
|
||||
else
|
||||
dtrain <- slice(dall, train_folds[[k]])
|
||||
handle <- xgb.Booster.handle(
|
||||
params = params,
|
||||
cachelist = list(dtrain, dtest),
|
||||
modelfile = NULL,
|
||||
handle = NULL
|
||||
)
|
||||
handle <- xgb.Booster.handle(params, list(dtrain, dtest))
|
||||
list(dtrain = dtrain, bst = handle, watchlist = list(train = dtrain, test = dtest), index = folds[[k]])
|
||||
})
|
||||
rm(dall)
|
||||
@@ -227,18 +221,8 @@ xgb.cv <- function(params = list(), data, nrounds, nfold, label = NULL, missing
|
||||
for (f in cb$pre_iter) f()
|
||||
|
||||
msg <- lapply(bst_folds, function(fd) {
|
||||
xgb.iter.update(
|
||||
booster_handle = fd$bst,
|
||||
dtrain = fd$dtrain,
|
||||
iter = iteration - 1,
|
||||
obj = obj
|
||||
)
|
||||
xgb.iter.eval(
|
||||
booster_handle = fd$bst,
|
||||
watchlist = fd$watchlist,
|
||||
iter = iteration - 1,
|
||||
feval = feval
|
||||
)
|
||||
xgb.iter.update(fd$bst, fd$dtrain, iteration - 1, obj)
|
||||
xgb.iter.eval(fd$bst, fd$watchlist, iteration - 1, feval)
|
||||
})
|
||||
msg <- simplify2array(msg)
|
||||
bst_evaluation <- rowMeans(msg)
|
||||
|
||||
@@ -38,7 +38,7 @@
|
||||
#' cat(xgb.dump(bst, with_stats = TRUE, dump_format='json'))
|
||||
#'
|
||||
#' @export
|
||||
xgb.dump <- function(model, fname = NULL, fmap = "", with_stats = FALSE,
|
||||
xgb.dump <- function(model, fname = NULL, fmap = "", with_stats=FALSE,
|
||||
dump_format = c("text", "json"), ...) {
|
||||
check.deprecation(...)
|
||||
dump_format <- match.arg(dump_format)
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
#' @rdname xgb.plot.importance
|
||||
#' @export
|
||||
xgb.ggplot.importance <- function(importance_matrix = NULL, top_n = NULL, measure = NULL,
|
||||
rel_to_first = FALSE, n_clusters = seq_len(10), ...) {
|
||||
rel_to_first = FALSE, n_clusters = c(1:10), ...) {
|
||||
|
||||
importance_matrix <- xgb.plot.importance(importance_matrix, top_n = top_n, measure = measure,
|
||||
rel_to_first = rel_to_first, plot = FALSE, ...)
|
||||
@@ -142,7 +142,6 @@ xgb.ggplot.shap.summary <- function(data, shap_contrib = NULL, features = NULL,
|
||||
#'
|
||||
#' @return A data.table containing the observation ID, the feature name, the
|
||||
#' feature value (normalized if specified), and the SHAP contribution value.
|
||||
#' @noRd
|
||||
prepare.ggplot.shap.data <- function(data_list, normalize = FALSE) {
|
||||
data <- data_list[["data"]]
|
||||
shap_contrib <- data_list[["shap_contrib"]]
|
||||
@@ -171,7 +170,6 @@ prepare.ggplot.shap.data <- function(data_list, normalize = FALSE) {
|
||||
#' @param x Numeric vector
|
||||
#'
|
||||
#' @return Numeric vector with mean 0 and sd 1.
|
||||
#' @noRd
|
||||
normalize <- function(x) {
|
||||
loc <- mean(x, na.rm = TRUE)
|
||||
scale <- stats::sd(x, na.rm = TRUE)
|
||||
@@ -183,7 +181,7 @@ normalize <- function(x) {
|
||||
# ... the plots
|
||||
# cols number of columns
|
||||
# internal utility function
|
||||
multiplot <- function(..., cols) {
|
||||
multiplot <- function(..., cols = 1) {
|
||||
plots <- list(...)
|
||||
num_plots <- length(plots)
|
||||
|
||||
|
||||
@@ -82,7 +82,7 @@
|
||||
#'
|
||||
#' @export
|
||||
xgb.importance <- function(feature_names = NULL, model = NULL, trees = NULL,
|
||||
data = NULL, label = NULL, target = NULL) {
|
||||
data = NULL, label = NULL, target = NULL){
|
||||
|
||||
if (!(is.null(data) && is.null(label) && is.null(target)))
|
||||
warning("xgb.importance: parameters 'data', 'label' and 'target' are deprecated")
|
||||
@@ -104,11 +104,7 @@ xgb.importance <- function(feature_names = NULL, model = NULL, trees = NULL,
|
||||
XGBoosterFeatureScore_R, model$handle, jsonlite::toJSON(args, auto_unbox = TRUE, null = "null")
|
||||
)
|
||||
names(results) <- c("features", "shape", "weight")
|
||||
if (length(results$shape) == 2) {
|
||||
n_classes <- results$shape[2]
|
||||
} else {
|
||||
n_classes <- 0
|
||||
}
|
||||
n_classes <- if (length(results$shape) == 2) { results$shape[2] } else { 0 }
|
||||
importance <- if (n_classes == 0) {
|
||||
data.table(Feature = results$features, Weight = results$weight)[order(-abs(Weight))]
|
||||
} else {
|
||||
|
||||
@@ -35,12 +35,7 @@ xgb.load <- function(modelfile) {
|
||||
if (is.null(modelfile))
|
||||
stop("xgb.load: modelfile cannot be NULL")
|
||||
|
||||
handle <- xgb.Booster.handle(
|
||||
params = list(),
|
||||
cachelist = list(),
|
||||
modelfile = modelfile,
|
||||
handle = NULL
|
||||
)
|
||||
handle <- xgb.Booster.handle(modelfile = modelfile)
|
||||
# re-use modelfile if it is raw so we do not need to serialize
|
||||
if (typeof(modelfile) == "raw") {
|
||||
warning(
|
||||
@@ -50,9 +45,9 @@ xgb.load <- function(modelfile) {
|
||||
" `xgb.unserialize` instead. "
|
||||
)
|
||||
)
|
||||
bst <- xgb.handleToBooster(handle = handle, raw = modelfile)
|
||||
bst <- xgb.handleToBooster(handle, modelfile)
|
||||
} else {
|
||||
bst <- xgb.handleToBooster(handle = handle, raw = NULL)
|
||||
bst <- xgb.handleToBooster(handle, NULL)
|
||||
}
|
||||
bst <- xgb.Booster.complete(bst, saveraw = TRUE)
|
||||
return(bst)
|
||||
|
||||
@@ -62,7 +62,7 @@
|
||||
#'
|
||||
#' @export
|
||||
xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
|
||||
trees = NULL, use_int_id = FALSE, ...) {
|
||||
trees = NULL, use_int_id = FALSE, ...){
|
||||
check.deprecation(...)
|
||||
|
||||
if (!inherits(model, "xgb.Booster") && !is.character(text)) {
|
||||
@@ -82,11 +82,12 @@ xgb.model.dt.tree <- function(feature_names = NULL, model = NULL, text = NULL,
|
||||
stop("trees: must be a vector of integers.")
|
||||
}
|
||||
|
||||
if (is.null(text)) {
|
||||
if (is.null(text)){
|
||||
text <- xgb.dump(model = model, with_stats = TRUE)
|
||||
}
|
||||
|
||||
if (length(text) < 2 || !any(grepl('leaf=(\\d+)', text))) {
|
||||
if (length(text) < 2 ||
|
||||
sum(grepl('leaf=(\\d+)', text)) < 1) {
|
||||
stop("Non-tree model detected! This function can only be used with tree models.")
|
||||
}
|
||||
|
||||
|
||||
@@ -136,7 +136,7 @@ get.leaf.depth <- function(dt_tree) {
|
||||
# list of paths to each leaf in a tree
|
||||
paths <- lapply(paths_tmp$vpath, names)
|
||||
# combine into a resulting path lengths table for a tree
|
||||
data.table(Depth = lengths(paths), ID = To[Leaf == TRUE])
|
||||
data.table(Depth = sapply(paths, length), ID = To[Leaf == TRUE])
|
||||
}, by = Tree]
|
||||
}
|
||||
|
||||
|
||||
@@ -102,9 +102,7 @@ xgb.plot.importance <- function(importance_matrix = NULL, top_n = NULL, measure
|
||||
original_mar <- par()$mar
|
||||
|
||||
# reset margins so this function doesn't have side effects
|
||||
on.exit({
|
||||
par(mar = original_mar)
|
||||
})
|
||||
on.exit({par(mar = original_mar)})
|
||||
|
||||
mar <- original_mar
|
||||
if (!is.null(left_margin))
|
||||
|
||||
@@ -61,7 +61,7 @@
|
||||
#'
|
||||
#' @export
|
||||
xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5, plot_width = NULL, plot_height = NULL,
|
||||
render = TRUE, ...) {
|
||||
render = TRUE, ...){
|
||||
if (!requireNamespace("DiagrammeR", quietly = TRUE)) {
|
||||
stop("DiagrammeR is required for xgb.plot.multi.trees")
|
||||
}
|
||||
@@ -97,9 +97,9 @@ xgb.plot.multi.trees <- function(model, feature_names = NULL, features_keep = 5,
|
||||
, by = .(abs.node.position, Feature)
|
||||
][, .(Text = paste0(
|
||||
paste0(
|
||||
Feature[seq_len(min(length(Feature), features_keep))],
|
||||
Feature[1:min(length(Feature), features_keep)],
|
||||
" (",
|
||||
format(Quality[seq_len(min(length(Quality), features_keep))], digits = 5),
|
||||
format(Quality[1:min(length(Quality), features_keep)], digits = 5),
|
||||
")"
|
||||
),
|
||||
collapse = "\n"
|
||||
|
||||
@@ -143,7 +143,7 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
|
||||
y <- shap_contrib[, f][ord]
|
||||
x_lim <- range(x, na.rm = TRUE)
|
||||
y_lim <- range(y, na.rm = TRUE)
|
||||
do_na <- plot_NA && anyNA(x)
|
||||
do_na <- plot_NA && any(is.na(x))
|
||||
if (do_na) {
|
||||
x_range <- diff(x_lim)
|
||||
loc_na <- min(x, na.rm = TRUE) + x_range * pos_NA
|
||||
@@ -193,7 +193,7 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
|
||||
#' hence allows us to see which features have a negative / positive contribution
|
||||
#' on the model prediction, and whether the contribution is different for larger
|
||||
#' or smaller values of the feature. We effectively try to replicate the
|
||||
#' \code{summary_plot} function from https://github.com/shap/shap.
|
||||
#' \code{summary_plot} function from https://github.com/slundberg/shap.
|
||||
#'
|
||||
#' @inheritParams xgb.plot.shap
|
||||
#'
|
||||
@@ -202,7 +202,7 @@ xgb.plot.shap <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
|
||||
#'
|
||||
#' @examples # See \code{\link{xgb.plot.shap}}.
|
||||
#' @seealso \code{\link{xgb.plot.shap}}, \code{\link{xgb.ggplot.shap.summary}},
|
||||
#' \url{https://github.com/shap/shap}
|
||||
#' \url{https://github.com/slundberg/shap}
|
||||
xgb.plot.shap.summary <- function(data, shap_contrib = NULL, features = NULL, top_n = 10, model = NULL,
|
||||
trees = NULL, target_class = NULL, approxcontrib = FALSE, subsample = NULL) {
|
||||
# Only ggplot implementation is available.
|
||||
@@ -272,8 +272,8 @@ xgb.shap.data <- function(data, shap_contrib = NULL, features = NULL, top_n = 1,
|
||||
imp <- xgb.importance(model = model, trees = trees, feature_names = colnames(data))
|
||||
}
|
||||
top_n <- top_n[1]
|
||||
if (top_n < 1 || top_n > 100) stop("top_n: must be an integer within [1, 100]")
|
||||
features <- imp$Feature[seq_len(min(top_n, NROW(imp)))]
|
||||
if (top_n < 1 | top_n > 100) stop("top_n: must be an integer within [1, 100]")
|
||||
features <- imp$Feature[1:min(top_n, NROW(imp))]
|
||||
}
|
||||
if (is.character(features)) {
|
||||
features <- match(features, colnames(data))
|
||||
|
||||
@@ -68,7 +68,7 @@
|
||||
#'
|
||||
#' @export
|
||||
xgb.plot.tree <- function(feature_names = NULL, model = NULL, trees = NULL, plot_width = NULL, plot_height = NULL,
|
||||
render = TRUE, show_node_id = FALSE, ...) {
|
||||
render = TRUE, show_node_id = FALSE, ...){
|
||||
check.deprecation(...)
|
||||
if (!inherits(model, "xgb.Booster")) {
|
||||
stop("model: Has to be an object of class xgb.Booster")
|
||||
|
||||
@@ -43,6 +43,6 @@ xgb.save <- function(model, fname) {
|
||||
}
|
||||
model <- xgb.Booster.complete(model, saveraw = FALSE)
|
||||
fname <- path.expand(fname)
|
||||
.Call(XGBoosterSaveModel_R, model$handle, enc2utf8(fname[1]))
|
||||
.Call(XGBoosterSaveModel_R, model$handle, fname[1])
|
||||
return(TRUE)
|
||||
}
|
||||
|
||||
@@ -18,37 +18,17 @@
|
||||
#' 2.1. Parameters for Tree Booster
|
||||
#'
|
||||
#' \itemize{
|
||||
#' \item{ \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1}
|
||||
#' when it is added to the current approximation.
|
||||
#' Used to prevent overfitting by making the boosting process more conservative.
|
||||
#' Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model
|
||||
#' more robust to overfitting but slower to compute. Default: 0.3}
|
||||
#' \item{ \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree.
|
||||
#' the larger, the more conservative the algorithm will be.}
|
||||
#' \item \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model more robust to overfitting but slower to compute. Default: 0.3
|
||||
#' \item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. the larger, the more conservative the algorithm will be.
|
||||
#' \item \code{max_depth} maximum depth of a tree. Default: 6
|
||||
#' \item{\code{min_child_weight} minimum sum of instance weight (hessian) needed in a child.
|
||||
#' If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight,
|
||||
#' then the building process will give up further partitioning.
|
||||
#' In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node.
|
||||
#' The larger, the more conservative the algorithm will be. Default: 1}
|
||||
#' \item{ \code{subsample} subsample ratio of the training instance.
|
||||
#' Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees
|
||||
#' and this will prevent overfitting. It makes computation shorter (because less data to analyse).
|
||||
#' It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1}
|
||||
#' \item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
|
||||
#' \item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
|
||||
#' \item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
|
||||
#' \item \code{lambda} L2 regularization term on weights. Default: 1
|
||||
#' \item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
|
||||
#' \item{ \code{num_parallel_tree} Experimental parameter. number of trees to grow per round.
|
||||
#' Useful to test Random Forest through XGBoost
|
||||
#' (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly.
|
||||
#' Default: 1}
|
||||
#' \item{ \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length
|
||||
#' equals to the number of features in the training data.
|
||||
#' \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.}
|
||||
#' \item{ \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions.
|
||||
#' Each item of the list represents one permitted interaction where specified features are allowed to interact with each other.
|
||||
#' Feature index values should start from \code{0} (\code{0} references the first column).
|
||||
#' Leave argument unspecified for no interaction constraints.}
|
||||
#' \item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through XGBoost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
|
||||
#' \item \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length equals to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
|
||||
#' \item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave argument unspecified for no interaction constraints.
|
||||
#' }
|
||||
#'
|
||||
#' 2.2. Parameters for Linear Booster
|
||||
@@ -62,53 +42,29 @@
|
||||
#' 3. Task Parameters
|
||||
#'
|
||||
#' \itemize{
|
||||
#' \item{ \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it.
|
||||
#' The default objective options are below:
|
||||
#' \item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
|
||||
#' \itemize{
|
||||
#' \item \code{reg:squarederror} Regression with squared loss (Default).
|
||||
#' \item{ \code{reg:squaredlogerror}: regression with squared log loss \eqn{1/2 * (log(pred + 1) - log(label + 1))^2}.
|
||||
#' All inputs are required to be greater than -1.
|
||||
#' Also, see metric rmsle for possible issue with this objective.}
|
||||
#' \item \code{reg:squaredlogerror}: regression with squared log loss \eqn{1/2 * (log(pred + 1) - log(label + 1))^2}. All inputs are required to be greater than -1. Also, see metric rmsle for possible issue with this objective.
|
||||
#' \item \code{reg:logistic} logistic regression.
|
||||
#' \item \code{reg:pseudohubererror}: regression with Pseudo Huber loss, a twice differentiable alternative to absolute loss.
|
||||
#' \item \code{binary:logistic} logistic regression for binary classification. Output probability.
|
||||
#' \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
|
||||
#' \item \code{binary:hinge}: hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities.
|
||||
#' \item{ \code{count:poisson}: Poisson regression for count data, output mean of Poisson distribution.
|
||||
#' \code{max_delta_step} is set to 0.7 by default in poisson regression (used to safeguard optimization).}
|
||||
#' \item{ \code{survival:cox}: Cox regression for right censored survival time data (negative values are considered right censored).
|
||||
#' Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional
|
||||
#' hazard function \code{h(t) = h0(t) * HR)}.}
|
||||
#' \item{ \code{survival:aft}: Accelerated failure time model for censored survival time data. See
|
||||
#' \href{https://xgboost.readthedocs.io/en/latest/tutorials/aft_survival_analysis.html}{Survival Analysis with Accelerated Failure Time}
|
||||
#' for details.}
|
||||
#' \item \code{count:poisson}: Poisson regression for count data, output mean of Poisson distribution. \code{max_delta_step} is set to 0.7 by default in poisson regression (used to safeguard optimization).
|
||||
#' \item \code{survival:cox}: Cox regression for right censored survival time data (negative values are considered right censored). Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional hazard function \code{h(t) = h0(t) * HR)}.
|
||||
#' \item \code{survival:aft}: Accelerated failure time model for censored survival time data. See \href{https://xgboost.readthedocs.io/en/latest/tutorials/aft_survival_analysis.html}{Survival Analysis with Accelerated Failure Time} for details.
|
||||
#' \item \code{aft_loss_distribution}: Probability Density Function used by \code{survival:aft} and \code{aft-nloglik} metric.
#' \item{ \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective.
#' Class is represented by a number and should be from 0 to \code{num_class - 1}.}
#' \item{ \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be
#' further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging
#' to each class.}
#' \item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}.
#' \item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
#' \item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss.
#' \item{ \code{rank:ndcg}: Use LambdaMART to perform list-wise ranking where
#' \href{https://en.wikipedia.org/wiki/Discounted_cumulative_gain}{Normalized Discounted Cumulative Gain (NDCG)} is maximized.}
#' \item{ \code{rank:map}: Use LambdaMART to perform list-wise ranking where
#' \href{https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision}{Mean Average Precision (MAP)}
#' is maximized.}
#' \item{ \code{reg:gamma}: gamma regression with log-link.
#' Output is a mean of gamma distribution.
#' It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be
#' \href{https://en.wikipedia.org/wiki/Gamma_distribution#Applications}{gamma-distributed}.}
#' \item{ \code{reg:tweedie}: Tweedie regression with log-link.
#' It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be
#' \href{https://en.wikipedia.org/wiki/Tweedie_distribution#Applications}{Tweedie-distributed}.}
#' \item \code{rank:ndcg}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Discounted_cumulative_gain}{Normalized Discounted Cumulative Gain (NDCG)} is maximized.
#' \item \code{rank:map}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision}{Mean Average Precision (MAP)} is maximized.
#' \item \code{reg:gamma}: gamma regression with log-link. Output is a mean of gamma distribution. It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Gamma_distribution#Applications}{gamma-distributed}.
#' \item \code{reg:tweedie}: Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Tweedie_distribution#Applications}{Tweedie-distributed}.
#' }
#' }
#' \item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
#' \item{ \code{eval_metric} evaluation metrics for validation data.
#' Users can pass a self-defined function to it.
#' Default: metric will be assigned according to objective
#' (rmse for regression, and error for classification, mean average precision for ranking).
#' List is provided in detail section.}
#' \item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: metric will be assigned according to objective (rmse for regression, and error for classification, mean average precision for ranking). List is provided in detail section.
#' }
#'
#' @param data training dataset. \code{xgb.train} accepts only an \code{xgb.DMatrix} as the input.
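As a quick illustration of the multiclass objectives documented above, here is a minimal sketch using the iris data. It assumes only the xgb.train()/xgb.DMatrix() interface documented here, and the multi:softprob output shape described above (ndata * nclass, reshapeable to a matrix):

library(xgboost)
x <- as.matrix(iris[, 1:4])
y <- as.numeric(iris$Species) - 1  # classes must be coded 0 .. num_class - 1
dtrain <- xgb.DMatrix(x, label = y)
params <- list(objective = "multi:softprob", num_class = 3)
bst <- xgb.train(params, dtrain, nrounds = 10)
pred <- predict(bst, x)
# one probability per (row, class); reshape into an ndata x nclass matrix
prob <- matrix(pred, ncol = 3, byrow = TRUE)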
@@ -185,8 +141,7 @@
#' \item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
#' \item \code{mae} Mean absolute error
#' \item \code{mape} Mean absolute percentage error
#' \item{ \code{auc} Area under the curve.
#' \url{https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_curve} for ranking evaluation.}
#' \item \code{auc} Area under the curve. \url{https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_curve} for ranking evaluation.
#' \item \code{aucpr} Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation.
#' \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{https://en.wikipedia.org/wiki/NDCG}
#' }
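Since the documentation above notes that eval_metric also accepts a self-defined function, a small sketch of that shape follows. The (preds, dtrain) signature and the returned list are the conventions of the R package's feval mechanism; bst/dtrain/params are assumed from a surrounding session:

evalerror <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  err <- mean(as.numeric(preds > 0.5) != labels)
  # a custom metric returns its name and value as a list
  list(metric = "custom_error", value = err)
}
bst <- xgb.train(params, dtrain, nrounds = 10,
                 watchlist = list(train = dtrain), feval = evalerror)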
@@ -321,10 +276,6 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
    if (is.null(evnames) || any(evnames == ""))
      stop("each element of the watchlist must have a name tag")
  }
  # Handle multiple evaluation metrics given as a list
  for (m in params$eval_metric) {
    params <- c(params, list(eval_metric = m))
  }

  # evaluation printing callback
  params <- c(params)
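The loop above flattens a list-valued eval_metric into repeated scalar entries; a sketch of what that expansion looks like from the caller's side (values are hypothetical):

params <- list(objective = "binary:logistic",
               eval_metric = list("rmse", "auc"))
# after the loop, params gains one scalar entry per requested metric:
#   eval_metric = "rmse"
#   eval_metric = "auc"
# which is the repeated-parameter form the native booster expects when
# several metrics are evaluated each round.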
@@ -363,13 +314,8 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),
  is_update <- NVL(params[['process_type']], '.') == 'update'

  # Construct a booster (either a new one or load from xgb_model)
  handle <- xgb.Booster.handle(
    params = params,
    cachelist = append(watchlist, dtrain),
    modelfile = xgb_model,
    handle = NULL
  )
  bst <- xgb.handleToBooster(handle = handle, raw = NULL)
  handle <- xgb.Booster.handle(params, append(watchlist, dtrain), xgb_model)
  bst <- xgb.handleToBooster(handle)

  # extract parameters that can affect the relationship b/w #trees and #iterations
  num_class <- max(as.numeric(NVL(params[['num_class']], 1)), 1)
@@ -395,21 +341,10 @@ xgb.train <- function(params = list(), data, nrounds, watchlist = list(),

    for (f in cb$pre_iter) f()

    xgb.iter.update(
      booster_handle = bst$handle,
      dtrain = dtrain,
      iter = iteration - 1,
      obj = obj
    )
    xgb.iter.update(bst$handle, dtrain, iteration - 1, obj)

    if (length(watchlist) > 0) {
      bst_evaluation <- xgb.iter.eval( # nolint: object_usage_linter
        booster_handle = bst$handle,
        watchlist = watchlist,
        iter = iteration - 1,
        feval = feval
      )
    }
    if (length(watchlist) > 0)
      bst_evaluation <- xgb.iter.eval(bst$handle, watchlist, iteration - 1, feval)

  xgb.attr(bst$handle, 'niter') <- iteration - 1

@@ -10,13 +10,7 @@ xgboost <- function(data = NULL, label = NULL, missing = NA, weight = NULL,
                    save_period = NULL, save_name = "xgboost.model",
                    xgb_model = NULL, callbacks = list(), ...) {
  merged <- check.booster.params(params, ...)
  dtrain <- xgb.get.DMatrix(
    data = data,
    label = label,
    missing = missing,
    weight = weight,
    nthread = merged$nthread
  )
  dtrain <- xgb.get.DMatrix(data, label, missing, weight, nthread = merged$nthread)

  watchlist <- list(train = dtrain)

18 R-package/configure vendored
@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.71 for xgboost 2.0.1.
# Generated by GNU Autoconf 2.71 for xgboost 1.7.6.
#
#
# Copyright (C) 1992-1996, 1998-2017, 2020-2021 Free Software Foundation,
@@ -607,8 +607,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='xgboost'
PACKAGE_TARNAME='xgboost'
PACKAGE_VERSION='2.0.1'
PACKAGE_STRING='xgboost 2.0.1'
PACKAGE_VERSION='1.7.6'
PACKAGE_STRING='xgboost 1.7.6'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''

@@ -1225,7 +1225,7 @@ if test "$ac_init_help" = "long"; then
  # Omit some internal or obsolete options to make the list less imposing.
  # This message is too long to be a string in the A/UX 3.1 sh.
  cat <<_ACEOF
\`configure' configures xgboost 2.0.1 to adapt to many kinds of systems.
\`configure' configures xgboost 1.7.6 to adapt to many kinds of systems.

Usage: $0 [OPTION]... [VAR=VALUE]...

@@ -1287,7 +1287,7 @@ fi

if test -n "$ac_init_help"; then
  case $ac_init_help in
    short | recursive ) echo "Configuration of xgboost 2.0.1:";;
    short | recursive ) echo "Configuration of xgboost 1.7.6:";;
  esac
  cat <<\_ACEOF

@@ -1367,7 +1367,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
  cat <<\_ACEOF
xgboost configure 2.0.1
xgboost configure 1.7.6
generated by GNU Autoconf 2.71

Copyright (C) 2021 Free Software Foundation, Inc.
@@ -1533,7 +1533,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.

It was created by xgboost $as_me 2.0.1, which was
It was created by xgboost $as_me 1.7.6, which was
generated by GNU Autoconf 2.71. Invocation command line was

  $ $0$ac_configure_args_raw
@@ -3412,7 +3412,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by xgboost $as_me 2.0.1, which was
This file was extended by xgboost $as_me 1.7.6, which was
generated by GNU Autoconf 2.71. Invocation command line was

  CONFIG_FILES = $CONFIG_FILES
@@ -3467,7 +3467,7 @@ ac_cs_config_escaped=`printf "%s\n" "$ac_cs_config" | sed "s/^ //; s/'/'\\\\\\\\
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config='$ac_cs_config_escaped'
ac_cs_version="\\
xgboost config.status 2.0.1
xgboost config.status 1.7.6
configured by $0, generated by GNU Autoconf 2.71,
with options \\"\$ac_cs_config\\"


@@ -2,7 +2,7 @@

AC_PREREQ(2.69)

AC_INIT([xgboost],[2.0.1],[],[xgboost],[])
AC_INIT([xgboost],[1.7.6],[],[xgboost],[])

: ${R_HOME=`R RHOME`}
if test -z "${R_HOME}"; then

@@ -1,4 +1,5 @@
# install development version of caret library that contains xgboost models
devtools::install_github("topepo/caret/pkg/caret")
require(caret)
require(xgboost)
require(data.table)
@@ -7,23 +8,14 @@ require(e1071)

# Load Arthritis dataset in memory.
data(Arthritis)
# Create a copy of the dataset with data.table package
# (data.table is 100% compliant with R dataframe but its syntax is a lot more consistent
# and its performance is really good).
# Create a copy of the dataset with data.table package (data.table is 100% compliant with R dataframe but its syntax is a lot more consistent and its performance is really good).
df <- data.table(Arthritis, keep.rownames = FALSE)

# Let's add some new categorical features to see if it helps.
# Of course these features are highly correlated to the Age feature.
# Usually it's not a good thing in ML, but Tree algorithms (including boosted trees) are able to select the best features,
# even in case of highly correlated features.
# For the first feature we create groups of age by rounding the real age.
# Note that we transform it to factor (categorical data) so the algorithm treats them as independent values.
# Let's add some new categorical features to see if it helps. Of course these features are highly correlated to the Age feature. Usually it's not a good thing in ML, but Tree algorithms (including boosted trees) are able to select the best features, even in case of highly correlated features.
# For the first feature we create groups of age by rounding the real age. Note that we transform it to factor (categorical data) so the algorithm treats them as independent values.
df[, AgeDiscret := as.factor(round(Age / 10, 0))]

# Here is an even stronger simplification of the real age with an arbitrary split at 30 years old.
# I chose this value based on nothing.
# We will see later if simplifying the information based on arbitrary values is a good strategy
# (I am sure you already have an idea of how well it will work!).
# Here is an even stronger simplification of the real age with an arbitrary split at 30 years old. I chose this value based on nothing. We will see later if simplifying the information based on arbitrary values is a good strategy (I am sure you already have an idea of how well it will work!).
df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]

# We remove ID as there is nothing to learn from this feature (it will just add some noise as the dataset is small).
@@ -34,10 +26,9 @@ df[, ID := NULL]
# Here we use 10-fold cross-validation, repeating twice, and using random search for tuning hyper-parameters.
fitControl <- trainControl(method = "repeatedcv", number = 10, repeats = 2, search = "random")
# train an xgbTree model using caret::train
model <- train(factor(Improved) ~ ., data = df, method = "xgbTree", trControl = fitControl)
model <- train(factor(Improved)~., data = df, method = "xgbTree", trControl = fitControl)

# Instead of trees for our boosters, you can also fit a linear regression or logistic regression model
# using xgbLinear
# Instead of trees for our boosters, you can also fit a linear regression or logistic regression model using xgbLinear
# model <- train(factor(Improved)~., data = df, method = "xgbLinear", trControl = fitControl)

# See model results

@@ -7,47 +7,34 @@ if (!require(vcd)) {
}
# According to its documentation, XGBoost works only on numbers.
# Sometimes the dataset we have to work on has categorical data.
# A categorical variable is one which has a fixed number of values.
# For example, if for each observation a variable called "Colour" can have only
# "red", "blue" or "green" as value, it is a categorical variable.
# A categorical variable is one which has a fixed number of values. For example, if for each observation a variable called "Colour" can have only "red", "blue" or "green" as value, it is a categorical variable.
#
# In R, a categorical variable is called a Factor.
# Type ?factor in the console for more information.
#
# In this demo we will see how to transform a dense dataframe with categorical variables to a sparse matrix
# before analyzing it in XGBoost.
# In this demo we will see how to transform a dense dataframe with categorical variables to a sparse matrix before analyzing it in XGBoost.
# The method we are going to see is usually called "one hot encoding".

# Load Arthritis dataset in memory.
data(Arthritis)

# create a copy of the dataset with data.table package
# (data.table is 100% compliant with R dataframe but its syntax is a lot more consistent
# and its performance is really good).
# create a copy of the dataset with data.table package (data.table is 100% compliant with R dataframe but its syntax is a lot more consistent and its performance is really good).
df <- data.table(Arthritis, keep.rownames = FALSE)

# Let's have a look at the data.table
cat("Print the dataset\n")
print(df)

# 2 columns have factor type, one has ordinal type
# (an ordinal variable is a categorical variable with values which can be ordered, here: None > Some > Marked).
# 2 columns have factor type, one has ordinal type (an ordinal variable is a categorical variable with values which can be ordered, here: None > Some > Marked).
cat("Structure of the dataset\n")
str(df)

# Let's add some new categorical features to see if it helps.
# Of course these features are highly correlated to the Age feature.
# Usually it's not a good thing in ML, but Tree algorithms (including boosted trees) are able to select the best features,
# even in case of highly correlated features.
# Let's add some new categorical features to see if it helps. Of course these features are highly correlated to the Age feature. Usually it's not a good thing in ML, but Tree algorithms (including boosted trees) are able to select the best features, even in case of highly correlated features.

# For the first feature we create groups of age by rounding the real age.
# Note that we transform it to factor (categorical data) so the algorithm treats them as independent values.
# For the first feature we create groups of age by rounding the real age. Note that we transform it to factor (categorical data) so the algorithm treats them as independent values.
df[, AgeDiscret := as.factor(round(Age / 10, 0))]

# Here is an even stronger simplification of the real age with an arbitrary split at 30 years old.
# I chose this value based on nothing.
# We will see later if simplifying the information based on arbitrary values is a good strategy
# (I am sure you already have an idea of how well it will work!).
# Here is an even stronger simplification of the real age with an arbitrary split at 30 years old. I chose this value based on nothing. We will see later if simplifying the information based on arbitrary values is a good strategy (I am sure you already have an idea of how well it will work!).
df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]

# We remove ID as there is nothing to learn from this feature (it will just add some noise as the dataset is small).
@@ -61,10 +48,7 @@ print(levels(df[, Treatment]))
# This method is also called one hot encoding.
# The purpose is to transform each value of each categorical feature into one binary feature.
#
# For example, the column Treatment will be replaced by two columns, Placebo and Treated.
# Each of them will be binary.
# An observation which had the value Placebo in column Treatment before the transformation will have, after the transformation,
# the value 1 in the new column Placebo and the value 0 in the new column Treated.
# For example, the column Treatment will be replaced by two columns, Placebo and Treated. Each of them will be binary. An observation which had the value Placebo in column Treatment before the transformation will have, after the transformation, the value 1 in the new column Placebo and the value 0 in the new column Treated.
#
# The formula Improved~.-1 used below means: transform all categorical features but column Improved to binary values.
# Column Improved is excluded because it will be our output column, the one we want to predict.
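# A compact sketch of the one-hot step described above, presumably what the elided next
# line of this demo does (sparse.model.matrix() comes from the Matrix package, which the
# sparse-matrix workflow here relies on; the formula is the one discussed above):
library(Matrix)
sparse_matrix <- sparse.model.matrix(Improved ~ . - 1, data = df)
head(sparse_matrix)
# every level of each factor (e.g. Placebo / Treated) becomes its own 0/1 column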
@@ -86,10 +70,7 @@ bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 9,

importance <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst)
print(importance)
# According to the matrix below, the most important feature in this dataset to predict if the treatment will work is the Age.
# The second most important feature is having received a placebo or not.
# The sex is third.
# Then we see our generated features (AgeDiscret). We can see that their contribution is very low (Gain column).
# According to the matrix below, the most important feature in this dataset to predict if the treatment will work is the Age. The second most important feature is having received a placebo or not. The sex is third. Then we see our generated features (AgeDiscret). We can see that their contribution is very low (Gain column).

# Do these results make sense?
# Let's check some Chi2 between each of these features and the outcome.
@@ -101,17 +82,8 @@ print(chisq.test(df$AgeDiscret, df$Y))
# Our first simplification of Age gives a Pearson correlation of 8.

print(chisq.test(df$AgeCat, df$Y))
# The perfectly random split I did between young and old at 30 years old has a low correlation of 2.
# It's a result we might expect, as maybe in my mind being over 30 years old means being old (I am 32 and starting to feel old, this may explain that),
# but for the illness we are studying, the age at which one is vulnerable is not the same.
# Don't let your "gut" lower the quality of your model. In "data science", there is science :-)
# The perfectly random split I did between young and old at 30 years old has a low correlation of 2. It's a result we might expect, as maybe in my mind being over 30 years old means being old (I am 32 and starting to feel old, this may explain that), but for the illness we are studying, the age at which one is vulnerable is not the same. Don't let your "gut" lower the quality of your model. In "data science", there is science :-)

# As you can see, in general destroying information by simplifying it won't improve your model.
# Chi2 just demonstrates that.
# But in more complex cases, creating a new feature based on an existing one which makes the link with the outcome
# more obvious may help the algorithm and improve the model.
# The case studied here is not complex enough to show that. Check the Kaggle forum for some challenging datasets.
# As you can see, in general destroying information by simplifying it won't improve your model. Chi2 just demonstrates that. But in more complex cases, creating a new feature based on an existing one which makes the link with the outcome more obvious may help the algorithm and improve the model. The case studied here is not complex enough to show that. Check the Kaggle forum for some challenging datasets.
# However it's almost always worse when you add some arbitrary rules.
# Moreover, you can notice that even if we have added some not-useful new features highly correlated with
# other features, the boosting tree algorithm has been able to choose the best one, which in this case is the Age.
# Linear models may not be that strong in this scenario.
# Moreover, you can notice that even if we have added some not-useful new features highly correlated with other features, the boosting tree algorithm has been able to choose the best one, which in this case is the Age. Linear models may not be that strong in this scenario.

@@ -12,7 +12,7 @@ cat('running cross validation\n')
# do cross validation, this will print result out as
# [iteration]  metric_name:mean_value+std_value
# std_value is standard deviation of the metric
xgb.cv(param, dtrain, nrounds, nfold = 5, metrics = 'error')
xgb.cv(param, dtrain, nrounds, nfold = 5, metrics = {'error'})

cat('running cross validation, disable standard deviation display\n')
# do cross validation, this will print result out as

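# For reference, the "disable standard deviation display" variant this demo refers to is
# driven by a flag on xgb.cv. A minimal sketch (showsd is the flag name assumed here,
# based on the R package's xgb.cv interface; param/dtrain/nrounds as above):
xgb.cv(param, dtrain, nrounds, nfold = 5, metrics = 'error', showsd = FALSE)
# prints [iteration] metric_name:mean_value, without the +std_value suffix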
@@ -33,7 +33,7 @@ treeInteractions <- function(input_tree, input_max_depth) {
  }

  # Extract nodes with interactions
  interaction_trees <- trees[!is.na(Split) & !is.na(parent_1), # nolint: object_usage_linter
  interaction_trees <- trees[!is.na(Split) & !is.na(parent_1),
                             c('Feature', paste0('parent_feat_', 1:(input_max_depth - 1))),
                             with = FALSE]
  interaction_trees_split <- split(interaction_trees, seq_len(nrow(interaction_trees)))
@@ -44,7 +44,7 @@ treeInteractions <- function(input_tree, input_max_depth) {

  # Remove non-interactions (same variable)
  interaction_list <- lapply(interaction_list, unique) # remove same variables
  interaction_length <- lengths(interaction_list)
  interaction_length <- sapply(interaction_list, length)
  interaction_list <- interaction_list[interaction_length > 1]
  interaction_list <- unique(lapply(interaction_list, sort))
  return(interaction_list)

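# The change above swaps sapply(x, length) for lengths(), the idiomatic base-R way to get
# per-element lengths of a list; a one-line sketch of the equivalence:
lengths(list(a = 1:2, b = 3))         # c(a = 2L, b = 1L)
sapply(list(a = 1:2, b = 3), length)  # same values, computed less directly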
@@ -24,7 +24,7 @@ accuracy.before <- (sum((predict(bst, agaricus.test$data) >= 0.5) == agaricus.te
pred_with_leaf <- predict(bst, dtest, predleaf = TRUE)
head(pred_with_leaf)

create.new.tree.features <- function(model, original.features) {
create.new.tree.features <- function(model, original.features){
  pred_with_leaf <- predict(model, original.features, predleaf = TRUE)
  cols <- list()
  for (i in 1:model$niter) {

@@ -1,4 +1,4 @@
# running all scripts in demo folder, removed during packaging.
# running all scripts in demo folder
demo(basic_walkthrough, package = 'xgboost')
demo(custom_objective, package = 'xgboost')
demo(boost_from_prediction, package = 'xgboost')

@@ -79,9 +79,9 @@ end_of_table <- empty_lines[empty_lines > start_index][1L]

# Read the contents of the table
exported_symbols <- objdump_results[(start_index + 1L):end_of_table]
exported_symbols <- gsub("\t", "", exported_symbols, fixed = TRUE)
exported_symbols <- gsub("\t", "", exported_symbols)
exported_symbols <- gsub(".*\\] ", "", exported_symbols)
exported_symbols <- gsub(" ", "", exported_symbols, fixed = TRUE)
exported_symbols <- gsub(" ", "", exported_symbols)

# Write R.def file
writeLines(

@@ -72,12 +72,12 @@ matplot(xgb.gblinear.history(bst)[[3]], type = 'l')

#### Multiclass classification:
#
dtrain <- xgb.DMatrix(scale(x), label = as.numeric(iris$Species) - 1, nthread = 1)
dtrain <- xgb.DMatrix(scale(x), label = as.numeric(iris$Species) - 1, nthread = 2)
param <- list(booster = "gblinear", objective = "multi:softprob", num_class = 3,
              lambda = 0.0003, alpha = 0.0003, nthread = 1)
              lambda = 0.0003, alpha = 0.0003, nthread = 2)
# For the default linear updater 'shotgun' it sometimes is helpful
# to use smaller eta to reduce instability
bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 50, eta = 0.5,
bst <- xgb.train(param, dtrain, list(tr=dtrain), nrounds = 70, eta = 0.5,
                 callbacks = list(cb.gblinear.history()))
# Will plot the coefficient paths separately for each class:
matplot(xgb.gblinear.history(bst, class_index = 0), type = 'l')

18 R-package/man/normalize.Rd Normal file
@@ -0,0 +1,18 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.ggplot.R
\name{normalize}
\alias{normalize}
\title{Scale feature value to have mean 0, standard deviation 1}
\usage{
normalize(x)
}
\arguments{
\item{x}{Numeric vector}
}
\value{
Numeric vector with mean 0 and sd 1.
}
\description{
This is used to compare multiple features on the same plot.
Internal utility function.
}
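The documented behaviour pins the function down almost entirely; a plausible one-liner consistent with this help page (the real body lives in R/xgb.ggplot.R, so this is a sketch, not the source):

normalize <- function(x) (x - mean(x)) / sd(x)
normalize(c(1, 2, 3))  # -1 0 1: mean 0, sd 1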
@@ -122,10 +122,6 @@ With \code{predinteraction = TRUE}, SHAP values of contributions of interaction
are computed. Note that this operation might be rather expensive in terms of compute and memory.
Since it quadratically depends on the number of features, it is recommended to perform selection
of the most important features first. See below about the format of the returned results.

The \code{predict()} method uses as many threads as defined in \code{xgb.Booster} object (all by default).
If you want to change their number, then assign a new number to \code{nthread} using \code{\link{xgb.parameters<-}}.
Note also that converting a matrix to \code{\link{xgb.DMatrix}} uses multiple threads too.
}
\examples{
## binary classification:

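A short sketch of the thread-count adjustment mentioned in the details above (xgb.parameters<- is the documented setter; bst/dtest and the value 4 are illustrative):

xgb.parameters(bst) <- list(nthread = 4)
pred <- predict(bst, dtest)  # prediction now runs on 4 threads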
27 R-package/man/prepare.ggplot.shap.data.Rd Normal file
@@ -0,0 +1,27 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.ggplot.R
\name{prepare.ggplot.shap.data}
\alias{prepare.ggplot.shap.data}
\title{Combine and melt feature values and SHAP contributions for sample
observations.}
\usage{
prepare.ggplot.shap.data(data_list, normalize = FALSE)
}
\arguments{
\item{data_list}{List containing 'data' and 'shap_contrib' returned by
\code{xgb.shap.data()}.}

\item{normalize}{Whether to standardize feature values to have mean 0 and
standard deviation 1 (useful for comparing multiple features on the same
plot). Default \code{FALSE}.}
}
\value{
A data.table containing the observation ID, the feature name, the
feature value (normalized if specified), and the SHAP contribution value.
}
\description{
Conforms to data format required for ggplot functions.
}
\details{
Internal utility function.
}
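Putting the pieces of this help page together, the intended call shape is roughly the following. Both functions are internal utilities named only on this page, and xgb.shap.data()'s own signature is not documented here, so treat that call as an assumption:

data_list <- xgb.shap.data(data = X, model = bst, top_n = 5)  # assumed signature
plot_data <- prepare.ggplot.shap.data(data_list, normalize = TRUE)
# long format: one row per (observation, feature), carrying the feature value
# (standardized here, since normalize = TRUE) and its SHAP contribution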
@@ -148,11 +148,9 @@ The cross validation function of xgboost
\details{
The original sample is randomly partitioned into \code{nfold} equal size subsamples.

Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model,
and the remaining \code{nfold - 1} subsamples are used as training data.
Of the \code{nfold} subsamples, a single subsample is retained as the validation data for testing the model, and the remaining \code{nfold - 1} subsamples are used as training data.

The cross-validation process is then repeated \code{nrounds} times, with each of the
\code{nfold} subsamples used exactly once as the validation data.
The cross-validation process is then repeated \code{nrounds} times, with each of the \code{nfold} subsamples used exactly once as the validation data.

All observations are used for both training and validation.

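To make the procedure concrete, a minimal sketch of a 5-fold run on the bundled agaricus data (dataset and interface as shipped with the package; parameter values are illustrative only):

library(xgboost)
data(agaricus.train, package = "xgboost")
dtrain <- xgb.DMatrix(agaricus.train$data, label = agaricus.train$label)
cv <- xgb.cv(params = list(objective = "binary:logistic", max_depth = 2),
             data = dtrain, nrounds = 10, nfold = 5, metrics = "error")
# each of the 5 subsamples is used exactly once as the validation fold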
@@ -10,7 +10,7 @@ xgb.ggplot.importance(
  top_n = NULL,
  measure = NULL,
  rel_to_first = FALSE,
  n_clusters = seq_len(10),
  n_clusters = c(1:10),
  ...
)

@@ -67,12 +67,12 @@ Each point (observation) is coloured based on its feature value. The plot
hence allows us to see which features have a negative / positive contribution
on the model prediction, and whether the contribution is different for larger
or smaller values of the feature. We effectively try to replicate the
\code{summary_plot} function from https://github.com/shap/shap.
\code{summary_plot} function from https://github.com/slundberg/shap.
}
\examples{
# See \code{\link{xgb.plot.shap}}.
}
\seealso{
\code{\link{xgb.plot.shap}}, \code{\link{xgb.ggplot.shap.summary}},
\url{https://github.com/shap/shap}
\url{https://github.com/slundberg/shap}
}

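A sketch of producing the summary plot this page describes, reusing the agaricus data and a fitted booster bst from the surrounding help pages (only the data/model/top_n arguments are assumed here):

xgb.ggplot.shap.summary(data = agaricus.train$data, model = bst, top_n = 5)
# one row of points per feature, coloured by feature value, as described above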
@@ -57,37 +57,17 @@ xgboost(
2.1. Parameters for Tree Booster

\itemize{
  \item{ \code{eta} controls the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1}
  when it is added to the current approximation.
  Used to prevent overfitting by making the boosting process more conservative.
  Lower value for \code{eta} implies larger value for \code{nrounds}: a low \code{eta} value means a model
  more robust to overfitting but slower to compute. Default: 0.3}
  \item{ \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree.
  The larger, the more conservative the algorithm will be.}
  \item \code{eta} controls the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. Lower value for \code{eta} implies larger value for \code{nrounds}: a low \code{eta} value means a model more robust to overfitting but slower to compute. Default: 0.3
  \item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. The larger, the more conservative the algorithm will be.
  \item \code{max_depth} maximum depth of a tree. Default: 6
  \item{ \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child.
  If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight,
  then the building process will give up further partitioning.
  In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node.
  The larger, the more conservative the algorithm will be. Default: 1}
  \item{ \code{subsample} subsample ratio of the training instances.
  Setting it to 0.5 means that xgboost randomly collects half of the data instances to grow trees
  and this will prevent overfitting. It makes computation shorter (because less data to analyse).
  It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1}
  \item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
  \item \code{subsample} subsample ratio of the training instances. Setting it to 0.5 means that xgboost randomly collects half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
  \item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
  \item \code{lambda} L2 regularization term on weights. Default: 1
  \item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
  \item{ \code{num_parallel_tree} Experimental parameter. Number of trees to grow per round.
  Useful to test Random Forest through XGBoost
  (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly.
  Default: 1}
  \item{ \code{monotone_constraints} A numerical vector consisting of \code{1}, \code{0} and \code{-1} with its length
  equal to the number of features in the training data.
  \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.}
  \item{ \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions.
  Each item of the list represents one permitted interaction where specified features are allowed to interact with each other.
  Feature index values should start from \code{0} (\code{0} references the first column).
  Leave argument unspecified for no interaction constraints.}
  \item \code{num_parallel_tree} Experimental parameter. Number of trees to grow per round. Useful to test Random Forest through XGBoost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
  \item \code{monotone_constraints} A numerical vector consisting of \code{1}, \code{0} and \code{-1} with its length equal to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
  \item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave argument unspecified for no interaction constraints.
}
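Before moving on to the linear booster, a hedged sketch pulling the tree-booster knobs above into one params list (values are illustrative, not recommendations; dtrain and xgb.train as in the surrounding examples):

params <- list(
  booster = "gbtree",
  eta = 0.1,               # smaller eta, so plan for a larger nrounds
  max_depth = 6,
  min_child_weight = 1,
  gamma = 0,               # larger values make the algorithm more conservative
  subsample = 0.5,         # grow each tree on half of the instances
  colsample_bytree = 0.8,
  lambda = 1, alpha = 0    # L2 / L1 regularization on weights
)
bst <- xgb.train(params, dtrain, nrounds = 100)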

2.2. Parameters for Linear Booster
@@ -101,53 +81,29 @@ xgboost(
3. Task Parameters

\itemize{
  \item{ \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it.
  The default objective options are below:
  \item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
  \itemize{
    \item \code{reg:squarederror} Regression with squared loss (Default).
    \item{ \code{reg:squaredlogerror}: regression with squared log loss \eqn{1/2 * (log(pred + 1) - log(label + 1))^2}.
    All inputs are required to be greater than -1.
    Also, see metric rmsle for possible issue with this objective.}
    \item \code{reg:squaredlogerror}: regression with squared log loss \eqn{1/2 * (log(pred + 1) - log(label + 1))^2}. All inputs are required to be greater than -1. Also, see metric rmsle for possible issue with this objective.
    \item \code{reg:logistic} logistic regression.
    \item \code{reg:pseudohubererror}: regression with Pseudo Huber loss, a twice differentiable alternative to absolute loss.
    \item \code{binary:logistic} logistic regression for binary classification. Output probability.
    \item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
    \item \code{binary:hinge}: hinge loss for binary classification. This makes predictions of 0 or 1, rather than producing probabilities.
    \item{ \code{count:poisson}: Poisson regression for count data, output mean of Poisson distribution.
    \code{max_delta_step} is set to 0.7 by default in poisson regression (used to safeguard optimization).}
    \item{ \code{survival:cox}: Cox regression for right censored survival time data (negative values are considered right censored).
    Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional
    hazard function \code{h(t) = h0(t) * HR)}.}
    \item{ \code{survival:aft}: Accelerated failure time model for censored survival time data. See
    \href{https://xgboost.readthedocs.io/en/latest/tutorials/aft_survival_analysis.html}{Survival Analysis with Accelerated Failure Time}
    for details.}
    \item \code{count:poisson}: Poisson regression for count data, output mean of Poisson distribution. \code{max_delta_step} is set to 0.7 by default in poisson regression (used to safeguard optimization).
    \item \code{survival:cox}: Cox regression for right censored survival time data (negative values are considered right censored). Note that predictions are returned on the hazard ratio scale (i.e., as HR = exp(marginal_prediction) in the proportional hazard function \code{h(t) = h0(t) * HR)}.
    \item \code{survival:aft}: Accelerated failure time model for censored survival time data. See \href{https://xgboost.readthedocs.io/en/latest/tutorials/aft_survival_analysis.html}{Survival Analysis with Accelerated Failure Time} for details.
    \item \code{aft_loss_distribution}: Probability Density Function used by \code{survival:aft} and \code{aft-nloglik} metric.
    \item{ \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective.
    Class is represented by a number and should be from 0 to \code{num_class - 1}.}
    \item{ \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be
    further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging
    to each class.}
    \item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}.
    \item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
    \item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss.
    \item{ \code{rank:ndcg}: Use LambdaMART to perform list-wise ranking where
    \href{https://en.wikipedia.org/wiki/Discounted_cumulative_gain}{Normalized Discounted Cumulative Gain (NDCG)} is maximized.}
    \item{ \code{rank:map}: Use LambdaMART to perform list-wise ranking where
    \href{https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision}{Mean Average Precision (MAP)}
    is maximized.}
    \item{ \code{reg:gamma}: gamma regression with log-link.
    Output is a mean of gamma distribution.
    It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be
    \href{https://en.wikipedia.org/wiki/Gamma_distribution#Applications}{gamma-distributed}.}
    \item{ \code{reg:tweedie}: Tweedie regression with log-link.
    It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be
    \href{https://en.wikipedia.org/wiki/Tweedie_distribution#Applications}{Tweedie-distributed}.}
    \item \code{rank:ndcg}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Discounted_cumulative_gain}{Normalized Discounted Cumulative Gain (NDCG)} is maximized.
    \item \code{rank:map}: Use LambdaMART to perform list-wise ranking where \href{https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Mean_average_precision}{Mean Average Precision (MAP)} is maximized.
    \item \code{reg:gamma}: gamma regression with log-link. Output is a mean of gamma distribution. It might be useful, e.g., for modeling insurance claims severity, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Gamma_distribution#Applications}{gamma-distributed}.
    \item \code{reg:tweedie}: Tweedie regression with log-link. It might be useful, e.g., for modeling total loss in insurance, or for any outcome that might be \href{https://en.wikipedia.org/wiki/Tweedie_distribution#Applications}{Tweedie-distributed}.
  }
  }
  \item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
  \item{ \code{eval_metric} evaluation metrics for validation data.
  Users can pass a self-defined function to it.
  Default: metric will be assigned according to objective
  (rmse for regression, and error for classification, mean average precision for ranking).
  List is provided in detail section.}
  \item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: metric will be assigned according to objective (rmse for regression, and error for classification, mean average precision for ranking). List is provided in detail section.
}}

\item{data}{training dataset. \code{xgb.train} accepts only an \code{xgb.DMatrix} as the input.
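Of the task parameters above, the ranking objectives are the least obvious to wire up from R, since they need query-group sizes attached to the DMatrix. A sketch, where x and relevance are hypothetical feature and label data, and setinfo() with the "group" field is the assumed mechanism:

dtrain <- xgb.DMatrix(x, label = relevance)
setinfo(dtrain, "group", c(10, 14, 9))  # three queries of 10, 14 and 9 documents
bst <- xgb.train(list(objective = "rank:pairwise"), dtrain, nrounds = 50)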
@@ -267,8 +223,7 @@ The following is the list of built-in metrics for which XGBoost provides optimiz
  \item \code{merror} Multiclass classification error rate. It is calculated as \code{(# wrong cases) / (# all cases)}.
  \item \code{mae} Mean absolute error
  \item \code{mape} Mean absolute percentage error
  \item{ \code{auc} Area under the curve.
  \url{https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_curve} for ranking evaluation.}
  \item \code{auc} Area under the curve. \url{https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_curve} for ranking evaluation.
  \item \code{aucpr} Area under the PR curve. \url{https://en.wikipedia.org/wiki/Precision_and_recall} for ranking evaluation.
  \item \code{ndcg} Normalized Discounted Cumulative Gain (for ranking task). \url{https://en.wikipedia.org/wiki/NDCG}
}

@@ -7,7 +7,8 @@ CXX_STD = CXX17

XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\
  -DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) -DDMLC_DISABLE_STDIN=1\
  -DDMLC_LOG_CUSTOMIZE=1
  -DDMLC_LOG_CUSTOMIZE=1 -DXGBOOST_CUSTOMIZE_LOGGER=1\
  -DRABIT_CUSTOMIZE_MSG_

# disable the use of thread_local for 32 bit windows:
ifeq ($(R_OSTYPE)$(WIN),windows)
@@ -32,12 +33,10 @@ OBJECTS= \
  $(PKGROOT)/src/objective/objective.o \
  $(PKGROOT)/src/objective/regression_obj.o \
  $(PKGROOT)/src/objective/multiclass_obj.o \
  $(PKGROOT)/src/objective/lambdarank_obj.o \
  $(PKGROOT)/src/objective/rank_obj.o \
  $(PKGROOT)/src/objective/hinge.o \
  $(PKGROOT)/src/objective/aft_obj.o \
  $(PKGROOT)/src/objective/adaptive.o \
  $(PKGROOT)/src/objective/init_estimation.o \
  $(PKGROOT)/src/objective/quantile_obj.o \
  $(PKGROOT)/src/gbm/gbm.o \
  $(PKGROOT)/src/gbm/gbtree.o \
  $(PKGROOT)/src/gbm/gbtree_model.o \
@@ -47,7 +46,6 @@ OBJECTS= \
  $(PKGROOT)/src/data/data.o \
  $(PKGROOT)/src/data/sparse_page_raw_format.o \
  $(PKGROOT)/src/data/ellpack_page.o \
  $(PKGROOT)/src/data/file_iterator.o \
  $(PKGROOT)/src/data/gradient_index.o \
  $(PKGROOT)/src/data/gradient_index_page_source.o \
  $(PKGROOT)/src/data/gradient_index_format.o \
@@ -56,36 +54,27 @@ OBJECTS= \
  $(PKGROOT)/src/data/iterative_dmatrix.o \
  $(PKGROOT)/src/predictor/predictor.o \
  $(PKGROOT)/src/predictor/cpu_predictor.o \
  $(PKGROOT)/src/predictor/cpu_treeshap.o \
  $(PKGROOT)/src/tree/constraints.o \
  $(PKGROOT)/src/tree/param.o \
  $(PKGROOT)/src/tree/fit_stump.o \
  $(PKGROOT)/src/tree/tree_model.o \
  $(PKGROOT)/src/tree/tree_updater.o \
  $(PKGROOT)/src/tree/multi_target_tree_model.o \
  $(PKGROOT)/src/tree/updater_approx.o \
  $(PKGROOT)/src/tree/updater_colmaker.o \
  $(PKGROOT)/src/tree/updater_prune.o \
  $(PKGROOT)/src/tree/updater_quantile_hist.o \
  $(PKGROOT)/src/tree/updater_refresh.o \
  $(PKGROOT)/src/tree/updater_sync.o \
  $(PKGROOT)/src/tree/hist/param.o \
  $(PKGROOT)/src/tree/hist/histogram.o \
  $(PKGROOT)/src/linear/linear_updater.o \
  $(PKGROOT)/src/linear/updater_coordinate.o \
  $(PKGROOT)/src/linear/updater_shotgun.o \
  $(PKGROOT)/src/learner.o \
  $(PKGROOT)/src/context.o \
  $(PKGROOT)/src/logging.o \
  $(PKGROOT)/src/global_config.o \
  $(PKGROOT)/src/collective/communicator.o \
  $(PKGROOT)/src/collective/in_memory_communicator.o \
  $(PKGROOT)/src/collective/in_memory_handler.o \
  $(PKGROOT)/src/collective/socket.o \
  $(PKGROOT)/src/common/charconv.o \
  $(PKGROOT)/src/common/column_matrix.o \
  $(PKGROOT)/src/common/common.o \
  $(PKGROOT)/src/common/error_msg.o \
  $(PKGROOT)/src/common/hist_util.o \
  $(PKGROOT)/src/common/host_device_vector.o \
  $(PKGROOT)/src/common/io.o \
@@ -94,11 +83,8 @@ OBJECTS= \
  $(PKGROOT)/src/common/pseudo_huber.o \
  $(PKGROOT)/src/common/quantile.o \
  $(PKGROOT)/src/common/random.o \
  $(PKGROOT)/src/common/stats.o \
  $(PKGROOT)/src/common/survival_util.o \
  $(PKGROOT)/src/common/threading_utils.o \
  $(PKGROOT)/src/common/ranking_utils.o \
  $(PKGROOT)/src/common/quantile_loss_utils.o \
  $(PKGROOT)/src/common/timer.o \
  $(PKGROOT)/src/common/version.o \
  $(PKGROOT)/src/c_api/c_api.o \

@@ -7,7 +7,8 @@ CXX_STD = CXX17

XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\
  -DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) -DDMLC_DISABLE_STDIN=1\
  -DDMLC_LOG_CUSTOMIZE=1
  -DDMLC_LOG_CUSTOMIZE=1 -DXGBOOST_CUSTOMIZE_LOGGER=1\
  -DRABIT_CUSTOMIZE_MSG_

# disable the use of thread_local for 32 bit windows:
ifeq ($(R_OSTYPE)$(WIN),windows)
@@ -32,12 +33,10 @@ OBJECTS= \
  $(PKGROOT)/src/objective/objective.o \
  $(PKGROOT)/src/objective/regression_obj.o \
  $(PKGROOT)/src/objective/multiclass_obj.o \
  $(PKGROOT)/src/objective/lambdarank_obj.o \
  $(PKGROOT)/src/objective/rank_obj.o \
  $(PKGROOT)/src/objective/hinge.o \
  $(PKGROOT)/src/objective/aft_obj.o \
  $(PKGROOT)/src/objective/adaptive.o \
  $(PKGROOT)/src/objective/init_estimation.o \
  $(PKGROOT)/src/objective/quantile_obj.o \
  $(PKGROOT)/src/gbm/gbm.o \
  $(PKGROOT)/src/gbm/gbtree.o \
  $(PKGROOT)/src/gbm/gbtree_model.o \
@@ -47,7 +46,6 @@ OBJECTS= \
  $(PKGROOT)/src/data/data.o \
  $(PKGROOT)/src/data/sparse_page_raw_format.o \
  $(PKGROOT)/src/data/ellpack_page.o \
  $(PKGROOT)/src/data/file_iterator.o \
  $(PKGROOT)/src/data/gradient_index.o \
  $(PKGROOT)/src/data/gradient_index_page_source.o \
  $(PKGROOT)/src/data/gradient_index_format.o \
@@ -56,12 +54,9 @@ OBJECTS= \
  $(PKGROOT)/src/data/iterative_dmatrix.o \
  $(PKGROOT)/src/predictor/predictor.o \
  $(PKGROOT)/src/predictor/cpu_predictor.o \
  $(PKGROOT)/src/predictor/cpu_treeshap.o \
  $(PKGROOT)/src/tree/constraints.o \
  $(PKGROOT)/src/tree/param.o \
  $(PKGROOT)/src/tree/fit_stump.o \
  $(PKGROOT)/src/tree/tree_model.o \
  $(PKGROOT)/src/tree/multi_target_tree_model.o \
  $(PKGROOT)/src/tree/tree_updater.o \
  $(PKGROOT)/src/tree/updater_approx.o \
  $(PKGROOT)/src/tree/updater_colmaker.o \
@@ -69,23 +64,17 @@ OBJECTS= \
  $(PKGROOT)/src/tree/updater_quantile_hist.o \
  $(PKGROOT)/src/tree/updater_refresh.o \
  $(PKGROOT)/src/tree/updater_sync.o \
  $(PKGROOT)/src/tree/hist/param.o \
  $(PKGROOT)/src/tree/hist/histogram.o \
  $(PKGROOT)/src/linear/linear_updater.o \
  $(PKGROOT)/src/linear/updater_coordinate.o \
  $(PKGROOT)/src/linear/updater_shotgun.o \
  $(PKGROOT)/src/learner.o \
  $(PKGROOT)/src/context.o \
  $(PKGROOT)/src/logging.o \
  $(PKGROOT)/src/global_config.o \
  $(PKGROOT)/src/collective/communicator.o \
  $(PKGROOT)/src/collective/in_memory_communicator.o \
  $(PKGROOT)/src/collective/in_memory_handler.o \
  $(PKGROOT)/src/collective/socket.o \
  $(PKGROOT)/src/common/charconv.o \
  $(PKGROOT)/src/common/column_matrix.o \
  $(PKGROOT)/src/common/common.o \
  $(PKGROOT)/src/common/error_msg.o \
  $(PKGROOT)/src/common/hist_util.o \
  $(PKGROOT)/src/common/host_device_vector.o \
  $(PKGROOT)/src/common/io.o \
@@ -94,11 +83,8 @@ OBJECTS= \
  $(PKGROOT)/src/common/pseudo_huber.o \
  $(PKGROOT)/src/common/quantile.o \
  $(PKGROOT)/src/common/random.o \
  $(PKGROOT)/src/common/stats.o \
  $(PKGROOT)/src/common/survival_util.o \
  $(PKGROOT)/src/common/threading_utils.o \
  $(PKGROOT)/src/common/ranking_utils.o \
  $(PKGROOT)/src/common/quantile_loss_utils.o \
  $(PKGROOT)/src/common/timer.o \
  $(PKGROOT)/src/common/version.o \
  $(PKGROOT)/src/c_api/c_api.o \

@@ -30,14 +30,15 @@ extern SEXP XGBoosterSaveJsonConfig_R(SEXP handle);
extern SEXP XGBoosterLoadJsonConfig_R(SEXP handle, SEXP value);
extern SEXP XGBoosterSerializeToBuffer_R(SEXP handle);
extern SEXP XGBoosterUnserializeFromBuffer_R(SEXP handle, SEXP raw);
extern SEXP XGBoosterPredict_R(SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP XGBoosterPredictFromDMatrix_R(SEXP, SEXP, SEXP);
extern SEXP XGBoosterSaveModel_R(SEXP, SEXP);
extern SEXP XGBoosterSetAttr_R(SEXP, SEXP, SEXP);
extern SEXP XGBoosterSetParam_R(SEXP, SEXP, SEXP);
extern SEXP XGBoosterUpdateOneIter_R(SEXP, SEXP, SEXP);
extern SEXP XGCheckNullPtr_R(SEXP);
extern SEXP XGDMatrixCreateFromCSC_R(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP XGDMatrixCreateFromCSR_R(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP XGDMatrixCreateFromCSC_R(SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP XGDMatrixCreateFromCSR_R(SEXP, SEXP, SEXP, SEXP, SEXP);
extern SEXP XGDMatrixCreateFromFile_R(SEXP, SEXP);
extern SEXP XGDMatrixCreateFromMat_R(SEXP, SEXP, SEXP);
extern SEXP XGDMatrixGetInfo_R(SEXP, SEXP);
@@ -67,14 +68,15 @@ static const R_CallMethodDef CallEntries[] = {
  {"XGBoosterLoadJsonConfig_R", (DL_FUNC) &XGBoosterLoadJsonConfig_R, 2},
  {"XGBoosterSerializeToBuffer_R", (DL_FUNC) &XGBoosterSerializeToBuffer_R, 1},
  {"XGBoosterUnserializeFromBuffer_R", (DL_FUNC) &XGBoosterUnserializeFromBuffer_R, 2},
  {"XGBoosterPredict_R", (DL_FUNC) &XGBoosterPredict_R, 5},
  {"XGBoosterPredictFromDMatrix_R", (DL_FUNC) &XGBoosterPredictFromDMatrix_R, 3},
  {"XGBoosterSaveModel_R", (DL_FUNC) &XGBoosterSaveModel_R, 2},
  {"XGBoosterSetAttr_R", (DL_FUNC) &XGBoosterSetAttr_R, 3},
  {"XGBoosterSetParam_R", (DL_FUNC) &XGBoosterSetParam_R, 3},
  {"XGBoosterUpdateOneIter_R", (DL_FUNC) &XGBoosterUpdateOneIter_R, 3},
  {"XGCheckNullPtr_R", (DL_FUNC) &XGCheckNullPtr_R, 1},
  {"XGDMatrixCreateFromCSC_R", (DL_FUNC) &XGDMatrixCreateFromCSC_R, 6},
  {"XGDMatrixCreateFromCSR_R", (DL_FUNC) &XGDMatrixCreateFromCSR_R, 6},
  {"XGDMatrixCreateFromCSC_R", (DL_FUNC) &XGDMatrixCreateFromCSC_R, 5},
  {"XGDMatrixCreateFromCSR_R", (DL_FUNC) &XGDMatrixCreateFromCSR_R, 5},
  {"XGDMatrixCreateFromFile_R", (DL_FUNC) &XGDMatrixCreateFromFile_R, 2},
  {"XGDMatrixCreateFromMat_R", (DL_FUNC) &XGDMatrixCreateFromMat_R, 3},
  {"XGDMatrixGetInfo_R", (DL_FUNC) &XGDMatrixGetInfo_R, 2},

@@ -1,11 +1,11 @@
/**
 * Copyright 2014-2023 by XGBoost Contributors
 * Copyright 2014-2022 by XGBoost Contributors
 */
#include <dmlc/common.h>
#include <dmlc/omp.h>
#include <xgboost/c_api.h>
#include <xgboost/context.h>
#include <xgboost/data.h>
#include <xgboost/generic_parameters.h>
#include <xgboost/logging.h>

#include <cstdio>
@@ -16,11 +16,9 @@
#include <vector>

#include "../../src/c_api/c_api_error.h"
#include "../../src/c_api/c_api_utils.h"  // MakeSparseFromPtr
#include "../../src/common/threading_utils.h"

#include "./xgboost_R.h"  // Must follow other includes.
#include "Rinternals.h"
#include "./xgboost_R.h"

/*!
 * \brief macro to annotate begin of api
@@ -48,14 +46,14 @@

using dmlc::BeginPtr;

xgboost::Context const *BoosterCtx(BoosterHandle handle) {
xgboost::GenericParameter const *BoosterCtx(BoosterHandle handle) {
  CHECK_HANDLE();
  auto *learner = static_cast<xgboost::Learner *>(handle);
  CHECK(learner);
  return learner->Ctx();
}

xgboost::Context const *DMatrixCtx(DMatrixHandle handle) {
xgboost::GenericParameter const *DMatrixCtx(DMatrixHandle handle) {
  CHECK_HANDLE();
  auto p_m = static_cast<std::shared_ptr<xgboost::DMatrix> *>(handle);
  CHECK(p_m);
@@ -116,29 +114,13 @@ XGB_DLL SEXP XGDMatrixCreateFromMat_R(SEXP mat, SEXP missing, SEXP n_threads) {
    din = REAL(mat);
  }
  std::vector<float> data(nrow * ncol);
  xgboost::Context ctx;
  ctx.nthread = asInteger(n_threads);
  std::int32_t threads = ctx.Threads();

  if (is_int) {
    xgboost::common::ParallelFor(nrow, threads, [&](xgboost::omp_ulong i) {
      for (size_t j = 0; j < ncol; ++j) {
        auto v = iin[i + nrow * j];
        if (v == NA_INTEGER) {
          data[i * ncol + j] = std::numeric_limits<float>::quiet_NaN();
        } else {
          data[i * ncol + j] = static_cast<float>(v);
        }
      }
    });
  } else {
    xgboost::common::ParallelFor(nrow, threads, [&](xgboost::omp_ulong i) {
      for (size_t j = 0; j < ncol; ++j) {
        data[i * ncol + j] = din[i + nrow * j];
      }
    });
  }
  int32_t threads = xgboost::common::OmpGetNumThreads(asInteger(n_threads));

  xgboost::common::ParallelFor(nrow, threads, [&](xgboost::omp_ulong i) {
    for (size_t j = 0; j < ncol; ++j) {
      data[i * ncol + j] = is_int ? static_cast<float>(iin[i + nrow * j]) : din[i + nrow * j];
    }
  });
  DMatrixHandle handle;
  CHECK_CALL(XGDMatrixCreateFromMat_omp(BeginPtr(data), nrow, ncol,
                                        asReal(missing), &handle, threads));
@@ -149,78 +131,66 @@ XGB_DLL SEXP XGDMatrixCreateFromMat_R(SEXP mat, SEXP missing, SEXP n_threads) {
  return ret;
}

namespace {
void CreateFromSparse(SEXP indptr, SEXP indices, SEXP data, std::string *indptr_str,
                      std::string *indices_str, std::string *data_str) {
XGB_DLL SEXP XGDMatrixCreateFromCSC_R(SEXP indptr, SEXP indices, SEXP data,
                                      SEXP num_row, SEXP n_threads) {
  SEXP ret;
  R_API_BEGIN();
  const int *p_indptr = INTEGER(indptr);
  const int *p_indices = INTEGER(indices);
  const double *p_data = REAL(data);
  size_t nindptr = static_cast<size_t>(length(indptr));
  size_t ndata = static_cast<size_t>(length(data));
  size_t nrow = static_cast<size_t>(INTEGER(num_row)[0]);
  std::vector<size_t> col_ptr_(nindptr);
  std::vector<unsigned> indices_(ndata);
  std::vector<float> data_(ndata);

  auto nindptr = static_cast<std::size_t>(length(indptr));
  auto ndata = static_cast<std::size_t>(length(data));
  CHECK_EQ(ndata, p_indptr[nindptr - 1]);
  xgboost::detail::MakeSparseFromPtr(p_indptr, p_indices, p_data, nindptr, indptr_str, indices_str,
                                     data_str);
}
}  // namespace

XGB_DLL SEXP XGDMatrixCreateFromCSC_R(SEXP indptr, SEXP indices, SEXP data, SEXP num_row,
                                      SEXP missing, SEXP n_threads) {
  SEXP ret;
  R_API_BEGIN();
  std::int32_t threads = asInteger(n_threads);

  using xgboost::Integer;
  using xgboost::Json;
  using xgboost::Object;

  std::string sindptr, sindices, sdata;
  CreateFromSparse(indptr, indices, data, &sindptr, &sindices, &sdata);
  auto nrow = static_cast<std::size_t>(INTEGER(num_row)[0]);

  for (size_t i = 0; i < nindptr; ++i) {
    col_ptr_[i] = static_cast<size_t>(p_indptr[i]);
  }
  int32_t threads = xgboost::common::OmpGetNumThreads(asInteger(n_threads));
  xgboost::common::ParallelFor(ndata, threads, [&](xgboost::omp_ulong i) {
    indices_[i] = static_cast<unsigned>(p_indices[i]);
    data_[i] = static_cast<float>(p_data[i]);
  });
  DMatrixHandle handle;
  Json jconfig{Object{}};
  // Construct configuration
  jconfig["nthread"] = Integer{threads};
  jconfig["missing"] = xgboost::Number{asReal(missing)};
  std::string config;
  Json::Dump(jconfig, &config);
  CHECK_CALL(XGDMatrixCreateFromCSC(sindptr.c_str(), sindices.c_str(), sdata.c_str(), nrow,
                                    config.c_str(), &handle));

  CHECK_CALL(XGDMatrixCreateFromCSCEx(BeginPtr(col_ptr_), BeginPtr(indices_),
                                      BeginPtr(data_), nindptr, ndata,
                                      nrow, &handle));
  ret = PROTECT(R_MakeExternalPtr(handle, R_NilValue, R_NilValue));

  R_RegisterCFinalizerEx(ret, _DMatrixFinalizer, TRUE);
  R_API_END();
  UNPROTECT(1);
  return ret;
}

XGB_DLL SEXP XGDMatrixCreateFromCSR_R(SEXP indptr, SEXP indices, SEXP data, SEXP num_col,
                                      SEXP missing, SEXP n_threads) {
XGB_DLL SEXP XGDMatrixCreateFromCSR_R(SEXP indptr, SEXP indices, SEXP data,
                                      SEXP num_col, SEXP n_threads) {
  SEXP ret;
  R_API_BEGIN();
  std::int32_t threads = asInteger(n_threads);

  using xgboost::Integer;
  using xgboost::Json;
  using xgboost::Object;

  std::string sindptr, sindices, sdata;
  CreateFromSparse(indptr, indices, data, &sindptr, &sindices, &sdata);
  auto ncol = static_cast<std::size_t>(INTEGER(num_col)[0]);
  const int *p_indptr = INTEGER(indptr);
  const int *p_indices = INTEGER(indices);
  const double *p_data = REAL(data);
  size_t nindptr = static_cast<size_t>(length(indptr));
  size_t ndata = static_cast<size_t>(length(data));
  size_t ncol = static_cast<size_t>(INTEGER(num_col)[0]);
  std::vector<size_t> row_ptr_(nindptr);
  std::vector<unsigned> indices_(ndata);
  std::vector<float> data_(ndata);

  for (size_t i = 0; i < nindptr; ++i) {
    row_ptr_[i] = static_cast<size_t>(p_indptr[i]);
  }
  int32_t threads = xgboost::common::OmpGetNumThreads(asInteger(n_threads));
  xgboost::common::ParallelFor(ndata, threads, [&](xgboost::omp_ulong i) {
    indices_[i] = static_cast<unsigned>(p_indices[i]);
    data_[i] = static_cast<float>(p_data[i]);
  });
  DMatrixHandle handle;
  Json jconfig{Object{}};
  // Construct configuration
  jconfig["nthread"] = Integer{threads};
  jconfig["missing"] = xgboost::Number{asReal(missing)};
  std::string config;
  Json::Dump(jconfig, &config);
  CHECK_CALL(XGDMatrixCreateFromCSR(sindptr.c_str(), sindices.c_str(), sdata.c_str(), ncol,
                                    config.c_str(), &handle));
  CHECK_CALL(XGDMatrixCreateFromCSREx(BeginPtr(row_ptr_), BeginPtr(indices_),
                                      BeginPtr(data_), nindptr, ndata,
                                      ncol, &handle));
  ret = PROTECT(R_MakeExternalPtr(handle, R_NilValue, R_NilValue));

  R_RegisterCFinalizerEx(ret, _DMatrixFinalizer, TRUE);
  R_API_END();
  UNPROTECT(1);
@@ -452,6 +422,27 @@ XGB_DLL SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evn
|
||||
return mkString(ret);
|
||||
}
|
||||
|
||||
XGB_DLL SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask,
|
||||
SEXP ntree_limit, SEXP training) {
|
||||
SEXP ret;
|
||||
R_API_BEGIN();
|
||||
bst_ulong olen;
|
||||
const float *res;
|
||||
CHECK_CALL(XGBoosterPredict(R_ExternalPtrAddr(handle),
|
||||
R_ExternalPtrAddr(dmat),
|
||||
asInteger(option_mask),
|
||||
asInteger(ntree_limit),
|
||||
asInteger(training),
|
||||
&olen, &res));
|
||||
ret = PROTECT(allocVector(REALSXP, olen));
|
||||
for (size_t i = 0; i < olen; ++i) {
|
||||
REAL(ret)[i] = res[i];
|
||||
}
|
||||
R_API_END();
|
||||
UNPROTECT(1);
|
||||
return ret;
|
||||
}
|
||||
|
||||
XGB_DLL SEXP XGBoosterPredictFromDMatrix_R(SEXP handle, SEXP dmat, SEXP json_config) {
|
||||
SEXP r_out_shape;
|
||||
SEXP r_out_result;
|
||||
|
||||
@@ -59,12 +59,11 @@ XGB_DLL SEXP XGDMatrixCreateFromMat_R(SEXP mat,
|
||||
* \param indices row indices
|
||||
* \param data content of the data
|
||||
* \param num_row numer of rows (when it's set to 0, then guess from data)
|
||||
* \param missing which value to represent missing value
|
||||
* \param n_threads Number of threads used to construct DMatrix from csc matrix.
|
||||
* \return created dmatrix
|
||||
*/
|
||||
XGB_DLL SEXP XGDMatrixCreateFromCSC_R(SEXP indptr, SEXP indices, SEXP data, SEXP num_row,
|
||||
SEXP missing, SEXP n_threads);
|
||||
SEXP n_threads);
|
||||
|
||||
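For orientation, this CSC entry point is reached from R through `xgb.DMatrix()`; a minimal sketch (toy values, for illustration only):

    library(Matrix)
    library(xgboost)
    # A dgCMatrix stores its data in CSC layout; the @p, @i and @x slots map to
    # the indptr, indices and data arguments of the C routine declared above.
    m <- sparseMatrix(i = c(1, 3, 2, 4), j = c(1, 1, 2, 3), x = c(1, 2, 3, 4))
    dtrain <- xgb.DMatrix(m, label = c(0, 1, 0, 1), missing = NA)
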
/*!
 * \brief create a matrix content from CSR format
@@ -72,12 +71,11 @@ XGB_DLL SEXP XGDMatrixCreateFromCSC_R(SEXP indptr, SEXP indices, SEXP data, SEXP
 * \param indices column indices
 * \param data content of the data
 * \param num_col number of columns (when it's set to 0, then guess from data)
 * \param missing which value to represent missing value
 * \param n_threads Number of threads used to construct DMatrix from csr matrix.
 * \return created dmatrix
 */
XGB_DLL SEXP XGDMatrixCreateFromCSR_R(SEXP indptr, SEXP indices, SEXP data, SEXP num_col,
                                      SEXP missing, SEXP n_threads);
                                      SEXP n_threads);

/*!
 * \brief create a new dmatrix from sliced content of existing matrix
@@ -178,6 +176,17 @@ XGB_DLL SEXP XGBoosterBoostOneIter_R(SEXP handle, SEXP dtrain, SEXP grad, SEXP h
 */
XGB_DLL SEXP XGBoosterEvalOneIter_R(SEXP handle, SEXP iter, SEXP dmats, SEXP evnames);

/*!
 * \brief (Deprecated) make prediction based on dmat
 * \param handle handle
 * \param dmat data matrix
 * \param option_mask output_margin:1 predict_leaf:2
 * \param ntree_limit limit number of trees used in prediction
 * \param training Whether the prediction value is used for training.
 */
XGB_DLL SEXP XGBoosterPredict_R(SEXP handle, SEXP dmat, SEXP option_mask,
                                SEXP ntree_limit, SEXP training);

/*!
 * \brief Run prediction on DMatrix, replacing `XGBoosterPredict_R`
 * \param handle handle

@@ -32,7 +32,7 @@ namespace common {
bool CheckNAN(double v) {
  return ISNAN(v);
}
#if !defined(XGBOOST_USE_CUDA) && !defined(XGBOOST_USE_HIP)
#if !defined(XGBOOST_USE_CUDA)
double LogGamma(double v) {
  return lgammafn(v);
}

@@ -1,51 +0,0 @@
## Install dependencies of R package for testing. The list might not be
## up-to-date, check DESCRIPTION for the latest list and update this one if
## any inconsistency is found.
pkgs <- c(
  ## CI
  "caret",
  "pkgbuild",
  "roxygen2",
  "XML",
  "cplm",
  "e1071",
  ## suggests
  "knitr",
  "rmarkdown",
  "ggplot2",
  "DiagrammeR",
  "Ckmeans.1d.dp",
  "vcd",
  "lintr",
  "testthat",
  "igraph",
  "float",
  "titanic",
  ## imports
  "Matrix",
  "methods",
  "data.table",
  "jsonlite"
)

ncpus <- parallel::detectCores()
print(paste0("Using ", ncpus, " cores to install dependencies."))

if (.Platform$OS.type == "unix") {
  print("Installing source packages on unix.")
  install.packages(
    pkgs,
    repo = "https://cloud.r-project.org",
    dependencies = c("Depends", "Imports", "LinkingTo"),
    Ncpus = parallel::detectCores()
  )
} else {
  print("Installing binary packages on Windows.")
  install.packages(
    pkgs,
    repo = "https://cloud.r-project.org",
    dependencies = c("Depends", "Imports", "LinkingTo"),
    Ncpus = parallel::detectCores(),
    type = "binary"
  )
}
R-package/tests/helper_scripts/run_lint.R (new file)
@@ -0,0 +1,71 @@
library(lintr)
library(crayon)

my_linters <- list(
  absolute_path_linter = lintr::absolute_path_linter,
  assignment_linter = lintr::assignment_linter,
  closed_curly_linter = lintr::closed_curly_linter,
  commas_linter = lintr::commas_linter,
  equals_na = lintr::equals_na_linter,
  infix_spaces_linter = lintr::infix_spaces_linter,
  line_length_linter = lintr::line_length_linter,
  no_tab_linter = lintr::no_tab_linter,
  object_usage_linter = lintr::object_usage_linter,
  object_length_linter = lintr::object_length_linter,
  open_curly_linter = lintr::open_curly_linter,
  semicolon = lintr::semicolon_terminator_linter(semicolon = c("compound", "trailing")),
  seq = lintr::seq_linter,
  spaces_inside_linter = lintr::spaces_inside_linter,
  spaces_left_parentheses_linter = lintr::spaces_left_parentheses_linter,
  trailing_blank_lines_linter = lintr::trailing_blank_lines_linter,
  trailing_whitespace_linter = lintr::trailing_whitespace_linter,
  true_false = lintr::T_and_F_symbol_linter,
  unneeded_concatenation = lintr::unneeded_concatenation_linter
)

results <- lapply(
  list.files(path = '.', pattern = '\\.[Rr]$', recursive = TRUE),
  function (r_file) {
    cat(sprintf("Processing %s ...\n", r_file))
    list(r_file = r_file,
         output = lintr::lint(filename = r_file, linters = my_linters))
  })
num_issue <- Reduce(sum, lapply(results, function (e) length(e$output)))

lint2str <- function(lint_entry) {
  color <- function(type) {
    switch(type,
           "warning" = crayon::magenta,
           "error" = crayon::red,
           "style" = crayon::blue,
           crayon::bold
    )
  }

  paste0(
    lapply(lint_entry$output,
           function (lint_line) {
             paste0(
               crayon::bold(lint_entry$r_file, ":",
                            as.character(lint_line$line_number), ":",
                            as.character(lint_line$column_number), ": ", sep = ""),
               color(lint_line$type)(lint_line$type, ": ", sep = ""),
               crayon::bold(lint_line$message), "\n",
               lint_line$line, "\n",
               lintr:::highlight_string(lint_line$message, lint_line$column_number, lint_line$ranges),
               "\n",
               collapse = "")
           }),
    collapse = "")
}

if (num_issue > 0) {
  cat(sprintf('R linters found %d issues:\n', num_issue))
  for (entry in results) {
    if (length(entry$output)) {
      cat(paste0('**** ', crayon::bold(entry$r_file), '\n'))
      cat(paste0(lint2str(entry), collapse = ''))
    }
  }
  quit(save = 'no', status = 1) # Signal error to parent shell
}
@@ -1,3 +1,6 @@
require(xgboost)
library(Matrix)

context("basic functions")

data(agaricus.train, package = 'xgboost')
@@ -85,18 +88,9 @@ test_that("dart prediction works", {
    rnorm(100)

  set.seed(1994)
  booster_by_xgboost <- xgboost(
    data = d,
    label = y,
    max_depth = 2,
    booster = "dart",
    rate_drop = 0.5,
    one_drop = TRUE,
    eta = 1,
    nthread = 2,
    nrounds = nrounds,
    objective = "reg:squarederror"
  )
  booster_by_xgboost <- xgboost(data = d, label = y, max_depth = 2, booster = "dart",
                                rate_drop = 0.5, one_drop = TRUE,
                                eta = 1, nthread = 2, nrounds = nrounds, objective = "reg:squarederror")
  pred_by_xgboost_0 <- predict(booster_by_xgboost, newdata = d, ntreelimit = 0)
  pred_by_xgboost_1 <- predict(booster_by_xgboost, newdata = d, ntreelimit = nrounds)
  expect_true(all(matrix(pred_by_xgboost_0, byrow = TRUE) == matrix(pred_by_xgboost_1, byrow = TRUE)))
@@ -106,19 +100,19 @@ test_that("dart prediction works", {

  set.seed(1994)
  dtrain <- xgb.DMatrix(data = d, info = list(label = y))
  booster_by_train <- xgb.train(
    params = list(
      booster = "dart",
      max_depth = 2,
      eta = 1,
      rate_drop = 0.5,
      one_drop = TRUE,
      nthread = 1,
      objective = "reg:squarederror"
    ),
    data = dtrain,
    nrounds = nrounds
  )
  booster_by_train <- xgb.train(params = list(
      booster = "dart",
      max_depth = 2,
      eta = 1,
      rate_drop = 0.5,
      one_drop = TRUE,
      nthread = 1,
      tree_method = "exact",
      objective = "reg:squarederror"
    ),
    data = dtrain,
    nrounds = nrounds
  )
  pred_by_train_0 <- predict(booster_by_train, newdata = dtrain, ntreelimit = 0)
  pred_by_train_1 <- predict(booster_by_train, newdata = dtrain, ntreelimit = nrounds)
  pred_by_train_2 <- predict(booster_by_train, newdata = dtrain, training = TRUE)
@@ -241,20 +235,12 @@ test_that("train and predict RF with softprob", {
test_that("use of multiple eval metrics works", {
  expect_output(
    bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
                   eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic",
                   eval_metric = 'error', eval_metric = 'auc', eval_metric = "logloss")
                   eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic",
                   eval_metric = 'error', eval_metric = 'auc', eval_metric = "logloss")
  , "train-error.*train-auc.*train-logloss")
  expect_false(is.null(bst$evaluation_log))
  expect_equal(dim(bst$evaluation_log), c(2, 4))
  expect_equal(colnames(bst$evaluation_log), c("iter", "train_error", "train_auc", "train_logloss"))
  expect_output(
    bst2 <- xgboost(data = train$data, label = train$label, max_depth = 2,
                    eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic",
                    eval_metric = list("error", "auc", "logloss"))
  , "train-error.*train-auc.*train-logloss")
  expect_false(is.null(bst2$evaluation_log))
  expect_equal(dim(bst2$evaluation_log), c(2, 4))
  expect_equal(colnames(bst2$evaluation_log), c("iter", "train_error", "train_auc", "train_logloss"))
})

@@ -408,7 +394,7 @@ test_that("colsample_bytree works", {
  xgb.importance(model = bst)
  # If colsample_bytree works properly, a variety of features should be used
  # in the 100 trees
  expect_gte(nrow(xgb.importance(model = bst)), 28)
  expect_gte(nrow(xgb.importance(model = bst)), 30)
})

test_that("Configuration works", {
@@ -418,7 +404,7 @@ test_that("Configuration works", {
  config <- xgb.config(bst)
  xgb.config(bst) <- config
  reloaded_config <- xgb.config(bst)
  expect_equal(config, reloaded_config)
  expect_equal(config, reloaded_config);
})

test_that("strict_shape works", {

@@ -1,4 +1,9 @@
# More specific testing of callbacks

require(xgboost)
require(data.table)
require(titanic)

context("callbacks")

data(agaricus.train, package = 'xgboost')
@@ -79,7 +84,7 @@ test_that("cb.evaluation.log works as expected", {
               list(c(iter = 1, bst_evaluation), c(iter = 2, bst_evaluation)))
  expect_silent(f(finalize = TRUE))
  expect_equal(evaluation_log,
               data.table::data.table(iter = 1:2, train_auc = c(0.9, 0.9), test_auc = c(0.8, 0.8)))
               data.table(iter = 1:2, train_auc = c(0.9, 0.9), test_auc = c(0.8, 0.8)))

  bst_evaluation_err <- c('train-auc' = 0.1, 'test-auc' = 0.2)
  evaluation_log <- list()
@@ -96,7 +101,7 @@ test_that("cb.evaluation.log works as expected", {
               c(iter = 2, c(bst_evaluation, bst_evaluation_err))))
  expect_silent(f(finalize = TRUE))
  expect_equal(evaluation_log,
               data.table::data.table(iter = 1:2,
               data.table(iter = 1:2,
                          train_auc_mean = c(0.9, 0.9), train_auc_std = c(0.1, 0.1),
                          test_auc_mean = c(0.8, 0.8), test_auc_std = c(0.2, 0.2)))
})
@@ -251,9 +256,6 @@ test_that("early stopping using a specific metric works", {
})

test_that("early stopping works with titanic", {
  if (!requireNamespace("titanic")) {
    testthat::skip("Optional testing dependency 'titanic' not found.")
  }
  # This test was inspired by https://github.com/dmlc/xgboost/issues/5935
  # It catches possible issues on noLD R
  titanic <- titanic::titanic_train
@@ -320,7 +322,7 @@ test_that("prediction in early-stopping xgb.cv works", {
  expect_output(
    cv <- xgb.cv(param, dtrain, nfold = 5, eta = 0.1, nrounds = 20,
                 early_stopping_rounds = 5, maximize = FALSE, stratified = FALSE,
                 prediction = TRUE, base_score = 0.5)
                 prediction = TRUE)
  , "Stopping. Best iteration")

  expect_false(is.null(cv$best_iteration))

@@ -1,5 +1,7 @@
context('Test models with custom objective')

require(xgboost)

set.seed(1994)

data(agaricus.train, package = 'xgboost')

@@ -1,7 +1,9 @@
library(Matrix)
require(xgboost)
require(Matrix)

context("testing xgb.DMatrix functionality")

data(agaricus.test, package = "xgboost")
data(agaricus.test, package = 'xgboost')
test_data <- agaricus.test$data[1:100, ]
test_label <- agaricus.test$label[1:100]

@@ -11,85 +13,14 @@ test_that("xgb.DMatrix: basic construction", {

  # from dense matrix
  dtest2 <- xgb.DMatrix(as.matrix(test_data), label = test_label)
  expect_equal(getinfo(dtest1, "label"), getinfo(dtest2, "label"))
  expect_equal(getinfo(dtest1, 'label'), getinfo(dtest2, 'label'))
  expect_equal(dim(dtest1), dim(dtest2))

  # from dense integer matrix
  #from dense integer matrix
  int_data <- as.matrix(test_data)
  storage.mode(int_data) <- "integer"
  dtest3 <- xgb.DMatrix(int_data, label = test_label)
  expect_equal(dim(dtest1), dim(dtest3))

  n_samples <- 100
  X <- cbind(
    x1 = sample(x = 4, size = n_samples, replace = TRUE),
    x2 = sample(x = 4, size = n_samples, replace = TRUE),
    x3 = sample(x = 4, size = n_samples, replace = TRUE)
  )
  X <- matrix(X, nrow = n_samples)
  y <- rbinom(n = n_samples, size = 1, prob = 1 / 2)

  fd <- xgb.DMatrix(X, label = y, missing = 1)

  dgc <- as(X, "dgCMatrix")
  fdgc <- xgb.DMatrix(dgc, label = y, missing = 1.0)

  dgr <- as(X, "dgRMatrix")
  fdgr <- xgb.DMatrix(dgr, label = y, missing = 1)

  params <- list(tree_method = "hist")
  bst_fd <- xgb.train(
    params, nrounds = 8, fd, watchlist = list(train = fd)
  )
  bst_dgr <- xgb.train(
    params, nrounds = 8, fdgr, watchlist = list(train = fdgr)
  )
  bst_dgc <- xgb.train(
    params, nrounds = 8, fdgc, watchlist = list(train = fdgc)
  )

  raw_fd <- xgb.save.raw(bst_fd, raw_format = "ubj")
  raw_dgr <- xgb.save.raw(bst_dgr, raw_format = "ubj")
  raw_dgc <- xgb.save.raw(bst_dgc, raw_format = "ubj")

  expect_equal(raw_fd, raw_dgr)
  expect_equal(raw_fd, raw_dgc)
})

test_that("xgb.DMatrix: NA", {
  n_samples <- 3
  x <- cbind(
    x1 = sample(x = 4, size = n_samples, replace = TRUE),
    x2 = sample(x = 4, size = n_samples, replace = TRUE)
  )
  x[1, "x1"] <- NA

  m <- xgb.DMatrix(x)
  xgb.DMatrix.save(m, "int.dmatrix")

  x <- matrix(as.numeric(x), nrow = n_samples, ncol = 2)
  colnames(x) <- c("x1", "x2")
  m <- xgb.DMatrix(x)

  xgb.DMatrix.save(m, "float.dmatrix")

  iconn <- file("int.dmatrix", "rb")
  fconn <- file("float.dmatrix", "rb")

  expect_equal(file.size("int.dmatrix"), file.size("float.dmatrix"))

  bytes <- file.size("int.dmatrix")
  idmatrix <- readBin(iconn, "raw", n = bytes)
  fdmatrix <- readBin(fconn, "raw", n = bytes)

  expect_equal(length(idmatrix), length(fdmatrix))
  expect_equal(idmatrix, fdmatrix)

  close(iconn)
  close(fconn)

  file.remove("int.dmatrix")
  file.remove("float.dmatrix")
})

test_that("xgb.DMatrix: saving, loading", {
|
||||
@@ -106,10 +37,9 @@ test_that("xgb.DMatrix: saving, loading", {
|
||||
|
||||
# from a libsvm text file
|
||||
tmp <- c("0 1:1 2:1", "1 3:1", "0 1:1")
|
||||
tmp_file <- tempfile(fileext = ".libsvm")
|
||||
tmp_file <- 'tmp.libsvm'
|
||||
writeLines(tmp, tmp_file)
|
||||
expect_true(file.exists(tmp_file))
|
||||
dtest4 <- xgb.DMatrix(paste(tmp_file, "?format=libsvm", sep = ""), silent = TRUE)
|
||||
dtest4 <- xgb.DMatrix(tmp_file, silent = TRUE)
|
||||
expect_equal(dim(dtest4), c(3, 4))
|
||||
expect_equal(getinfo(dtest4, 'label'), c(0, 1, 0))
|
||||
|
||||
@@ -123,7 +53,7 @@ test_that("xgb.DMatrix: saving, loading", {
|
||||
dtrain <- xgb.DMatrix(tmp_file)
|
||||
expect_equal(colnames(dtrain), cnames)
|
||||
|
||||
ft <- rep(c("c", "q"), each = length(cnames) / 2)
|
||||
ft <- rep(c("c", "q"), each=length(cnames)/2)
|
||||
setinfo(dtrain, "feature_type", ft)
|
||||
expect_equal(ft, getinfo(dtrain, "feature_type"))
|
||||
})
|
||||
@@ -193,62 +123,9 @@ test_that("xgb.DMatrix: colnames", {
|
||||
test_that("xgb.DMatrix: nrow is correct for a very sparse matrix", {
|
||||
set.seed(123)
|
||||
nr <- 1000
|
||||
x <- Matrix::rsparsematrix(nr, 100, density = 0.0005)
|
||||
x <- rsparsematrix(nr, 100, density = 0.0005)
|
||||
# we want it very sparse, so that last rows are empty
|
||||
expect_lt(max(x@i), nr)
|
||||
dtest <- xgb.DMatrix(x)
|
||||
expect_equal(dim(dtest), dim(x))
|
||||
})
|
||||
|
||||
test_that("xgb.DMatrix: print", {
|
||||
data(agaricus.train, package = 'xgboost')
|
||||
|
||||
# core DMatrix with just data and labels
|
||||
dtrain <- xgb.DMatrix(
|
||||
data = agaricus.train$data
|
||||
, label = agaricus.train$label
|
||||
)
|
||||
txt <- capture.output({
|
||||
print(dtrain)
|
||||
})
|
||||
expect_equal(txt, "xgb.DMatrix dim: 6513 x 126 info: label colnames: yes")
|
||||
|
||||
# verbose=TRUE prints feature names
|
||||
txt <- capture.output({
|
||||
print(dtrain, verbose = TRUE)
|
||||
})
|
||||
expect_equal(txt[[1L]], "xgb.DMatrix dim: 6513 x 126 info: label colnames:")
|
||||
expect_equal(txt[[2L]], sprintf("'%s'", paste(colnames(dtrain), collapse = "','")))
|
||||
|
||||
# DMatrix with weights and base_margin
|
||||
dtrain <- xgb.DMatrix(
|
||||
data = agaricus.train$data
|
||||
, label = agaricus.train$label
|
||||
, weight = seq_along(agaricus.train$label)
|
||||
, base_margin = agaricus.train$label
|
||||
)
|
||||
txt <- capture.output({
|
||||
print(dtrain)
|
||||
})
|
||||
expect_equal(txt, "xgb.DMatrix dim: 6513 x 126 info: label weight base_margin colnames: yes")
|
||||
|
||||
# DMatrix with just features
|
||||
dtrain <- xgb.DMatrix(
|
||||
data = agaricus.train$data
|
||||
)
|
||||
txt <- capture.output({
|
||||
print(dtrain)
|
||||
})
|
||||
expect_equal(txt, "xgb.DMatrix dim: 6513 x 126 info: NA colnames: yes")
|
||||
|
||||
# DMatrix with no column names
|
||||
data_no_colnames <- agaricus.train$data
|
||||
colnames(data_no_colnames) <- NULL
|
||||
dtrain <- xgb.DMatrix(
|
||||
data = data_no_colnames
|
||||
)
|
||||
txt <- capture.output({
|
||||
print(dtrain)
|
||||
})
|
||||
expect_equal(txt, "xgb.DMatrix dim: 6513 x 126 info: NA colnames: no")
|
||||
})
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
library(xgboost)
|
||||
|
||||
context("feature weights")
|
||||
|
||||
test_that("training with feature weights works", {
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
require(xgboost)
|
||||
|
||||
context("Garbage Collection Safety Check")
|
||||
|
||||
test_that("train and prediction when gctorture is on", {
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
context('Test generalized linear models')
|
||||
|
||||
require(xgboost)
|
||||
|
||||
test_that("gblinear works", {
|
||||
data(agaricus.train, package = 'xgboost')
|
||||
data(agaricus.test, package = 'xgboost')
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
library(testthat)
|
||||
context('Test helper functions')
|
||||
|
||||
VCD_AVAILABLE <- requireNamespace("vcd", quietly = TRUE)
|
||||
.skip_if_vcd_not_available <- function() {
|
||||
if (!VCD_AVAILABLE) {
|
||||
testthat::skip("Optional testing dependency 'vcd' not found.")
|
||||
}
|
||||
}
|
||||
require(xgboost)
|
||||
require(data.table)
|
||||
require(Matrix)
|
||||
require(vcd, quietly = TRUE)
|
||||
|
||||
float_tolerance <- 5e-6
|
||||
|
||||
@@ -13,30 +12,25 @@ float_tolerance <- 5e-6
|
||||
flag_32bit <- .Machine$sizeof.pointer != 8
|
||||
|
||||
set.seed(1982)
|
||||
data(Arthritis)
|
||||
df <- data.table(Arthritis, keep.rownames = FALSE)
|
||||
df[, AgeDiscret := as.factor(round(Age / 10, 0))]
|
||||
df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
|
||||
df[, ID := NULL]
|
||||
sparse_matrix <- sparse.model.matrix(Improved~.-1, data = df) # nolint
|
||||
label <- df[, ifelse(Improved == "Marked", 1, 0)]
|
||||
|
||||
# binary
|
||||
nrounds <- 12
|
||||
if (isTRUE(VCD_AVAILABLE)) {
|
||||
data(Arthritis, package = "vcd")
|
||||
df <- data.table::data.table(Arthritis, keep.rownames = FALSE)
|
||||
df[, AgeDiscret := as.factor(round(Age / 10, 0))]
|
||||
df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))]
|
||||
df[, ID := NULL]
|
||||
sparse_matrix <- Matrix::sparse.model.matrix(Improved~.-1, data = df) # nolint
|
||||
label <- df[, ifelse(Improved == "Marked", 1, 0)]
|
||||
bst.Tree <- xgboost(data = sparse_matrix, label = label, max_depth = 9,
|
||||
eta = 1, nthread = 2, nrounds = nrounds, verbose = 0,
|
||||
objective = "binary:logistic", booster = "gbtree")
|
||||
|
||||
# binary
|
||||
bst.Tree <- xgboost(data = sparse_matrix, label = label, max_depth = 9,
|
||||
eta = 1, nthread = 2, nrounds = nrounds, verbose = 0,
|
||||
objective = "binary:logistic", booster = "gbtree",
|
||||
base_score = 0.5)
|
||||
bst.GLM <- xgboost(data = sparse_matrix, label = label,
|
||||
eta = 1, nthread = 1, nrounds = nrounds, verbose = 0,
|
||||
objective = "binary:logistic", booster = "gblinear")
|
||||
|
||||
bst.GLM <- xgboost(data = sparse_matrix, label = label,
|
||||
eta = 1, nthread = 1, nrounds = nrounds, verbose = 0,
|
||||
objective = "binary:logistic", booster = "gblinear",
|
||||
base_score = 0.5)
|
||||
|
||||
feature.names <- colnames(sparse_matrix)
|
||||
}
|
||||
feature.names <- colnames(sparse_matrix)
|
||||
|
||||
# multiclass
|
||||
mlabel <- as.numeric(iris$Species) - 1
|
||||
@@ -51,7 +45,6 @@ mbst.GLM <- xgboost(data = as.matrix(iris[, -5]), label = mlabel, verbose = 0,
|
||||
|
||||
|
||||
test_that("xgb.dump works", {
|
||||
.skip_if_vcd_not_available()
|
||||
if (!flag_32bit)
|
||||
expect_length(xgb.dump(bst.Tree), 200)
|
||||
dump_file <- file.path(tempdir(), 'xgb.model.dump')
|
||||
@@ -63,11 +56,10 @@ test_that("xgb.dump works", {
|
||||
dmp <- xgb.dump(bst.Tree, dump_format = "json")
|
||||
expect_length(dmp, 1)
|
||||
if (!flag_32bit)
|
||||
expect_length(grep('nodeid', strsplit(dmp, '\n', fixed = TRUE)[[1]], fixed = TRUE), 188)
|
||||
expect_length(grep('nodeid', strsplit(dmp, '\n')[[1]]), 188)
|
||||
})
|
||||
|
||||
test_that("xgb.dump works for gblinear", {
|
||||
.skip_if_vcd_not_available()
|
||||
expect_length(xgb.dump(bst.GLM), 14)
|
||||
# also make sure that it works properly for a sparse model where some coefficients
|
||||
# are 0 from setting large L1 regularization:
|
||||
@@ -80,11 +72,10 @@ test_that("xgb.dump works for gblinear", {
|
||||
# JSON format
|
||||
dmp <- xgb.dump(bst.GLM.sp, dump_format = "json")
|
||||
expect_length(dmp, 1)
|
||||
expect_length(grep('\\d', strsplit(dmp, '\n', fixed = TRUE)[[1]]), 11)
|
||||
expect_length(grep('\\d', strsplit(dmp, '\n')[[1]]), 11)
|
||||
})
|
||||
|
||||
test_that("predict leafs works", {
|
||||
.skip_if_vcd_not_available()
|
||||
# no error for gbtree
|
||||
expect_error(pred_leaf <- predict(bst.Tree, sparse_matrix, predleaf = TRUE), regexp = NA)
|
||||
expect_equal(dim(pred_leaf), c(nrow(sparse_matrix), nrounds))
|
||||
@@ -93,7 +84,6 @@ test_that("predict leafs works", {
|
||||
})
|
||||
|
||||
test_that("predict feature contributions works", {
|
||||
.skip_if_vcd_not_available()
|
||||
# gbtree binary classifier
|
||||
expect_error(pred_contr <- predict(bst.Tree, sparse_matrix, predcontrib = TRUE), regexp = NA)
|
||||
expect_equal(dim(pred_contr), c(nrow(sparse_matrix), ncol(sparse_matrix) + 1))
|
||||
@@ -180,16 +170,15 @@ test_that("SHAPs sum to predictions, with or without DART", {
|
||||
label = y,
|
||||
nrounds = nrounds)
|
||||
|
||||
pr <- function(...) {
|
||||
pr <- function(...)
|
||||
predict(fit, newdata = d, ...)
|
||||
}
|
||||
pred <- pr()
|
||||
shap <- pr(predcontrib = TRUE)
|
||||
shapi <- pr(predinteraction = TRUE)
|
||||
tol <- 1e-5
|
||||
|
||||
expect_equal(rowSums(shap), pred, tol = tol)
|
||||
expect_equal(rowSums(shapi), pred, tol = tol)
|
||||
expect_equal(apply(shapi, 1, sum), pred, tol = tol)
|
||||
for (i in seq_len(nrow(d)))
|
||||
for (f in list(rowSums, colSums))
|
||||
expect_equal(f(shapi[i, , ]), shap[i, ], tol = tol)
|
||||
@@ -197,7 +186,6 @@ test_that("SHAPs sum to predictions, with or without DART", {
|
||||
})
|
||||
|
||||
test_that("xgb-attribute functionality", {
|
||||
.skip_if_vcd_not_available()
|
||||
val <- "my attribute value"
|
||||
list.val <- list(my_attr = val, a = 123, b = 'ok')
|
||||
list.ch <- list.val[order(names(list.val))]
|
||||
@@ -231,11 +219,10 @@ test_that("xgb-attribute functionality", {
|
||||
expect_null(xgb.attributes(bst))
|
||||
})
|
||||
|
||||
if (grepl('Windows', Sys.info()[['sysname']], fixed = TRUE) ||
|
||||
grepl('Linux', Sys.info()[['sysname']], fixed = TRUE) ||
|
||||
grepl('Darwin', Sys.info()[['sysname']], fixed = TRUE)) {
|
||||
if (grepl('Windows', Sys.info()[['sysname']]) ||
|
||||
grepl('Linux', Sys.info()[['sysname']]) ||
|
||||
grepl('Darwin', Sys.info()[['sysname']])) {
|
||||
test_that("xgb-attribute numeric precision", {
|
||||
.skip_if_vcd_not_available()
|
||||
# check that lossless conversion works with 17 digits
|
||||
# numeric -> character -> numeric
|
||||
X <- 10^runif(100, -20, 20)
|
||||
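    # An aside to illustrate the property exercised here (a sketch, not part of
    # the diff): printing a double with 17 significant digits is enough for a
    # lossless numeric -> character -> numeric round trip under IEEE 754.
    x <- 10^runif(1, -20, 20)
    stopifnot(identical(as.numeric(sprintf("%.17g", x)), x))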
@@ -254,7 +241,6 @@ if (grepl('Windows', Sys.info()[['sysname']], fixed = TRUE) ||
}

test_that("xgb.Booster serializing as R object works", {
  .skip_if_vcd_not_available()
  saveRDS(bst.Tree, 'xgb.model.rds')
  bst <- readRDS('xgb.model.rds')
  dtrain <- xgb.DMatrix(sparse_matrix, label = label)
@@ -273,7 +259,6 @@ test_that("xgb.Booster serializing as R object works", {
})

test_that("xgb.model.dt.tree works with and without feature names", {
  .skip_if_vcd_not_available()
  names.dt.trees <- c("Tree", "Node", "ID", "Feature", "Split", "Yes", "No", "Missing", "Quality", "Cover")
  dt.tree <- xgb.model.dt.tree(feature_names = feature.names, model = bst.Tree)
  expect_equal(names.dt.trees, names(dt.tree))
@@ -293,18 +278,16 @@ test_that("xgb.model.dt.tree works with and without feature names", {

  # using integer node ID instead of character
  dt.tree.int <- xgb.model.dt.tree(model = bst.Tree, use_int_id = TRUE)
  expect_equal(as.integer(data.table::tstrsplit(dt.tree$Yes, '-', fixed = TRUE)[[2]]), dt.tree.int$Yes)
  expect_equal(as.integer(data.table::tstrsplit(dt.tree$No, '-', fixed = TRUE)[[2]]), dt.tree.int$No)
  expect_equal(as.integer(data.table::tstrsplit(dt.tree$Missing, '-', fixed = TRUE)[[2]]), dt.tree.int$Missing)
  expect_equal(as.integer(tstrsplit(dt.tree$Yes, '-')[[2]]), dt.tree.int$Yes)
  expect_equal(as.integer(tstrsplit(dt.tree$No, '-')[[2]]), dt.tree.int$No)
  expect_equal(as.integer(tstrsplit(dt.tree$Missing, '-')[[2]]), dt.tree.int$Missing)
})

test_that("xgb.model.dt.tree throws error for gblinear", {
  .skip_if_vcd_not_available()
  expect_error(xgb.model.dt.tree(model = bst.GLM))
})

test_that("xgb.importance works with and without feature names", {
  .skip_if_vcd_not_available()
  importance.Tree <- xgb.importance(feature_names = feature.names, model = bst.Tree)
  if (!flag_32bit)
    expect_equal(dim(importance.Tree), c(7, 4))
@@ -362,8 +345,7 @@ test_that("xgb.importance works with and without feature names", {
  m <- xgboost::xgboost(
    data = as.matrix(data.frame(x = c(0, 1))),
    label = c(1, 2),
    nrounds = 1,
    base_score = 0.5
    nrounds = 1
  )
  df <- xgb.model.dt.tree(model = m)
  expect_equal(df$Feature, "Leaf")
@@ -371,7 +353,6 @@ test_that("xgb.importance works with and without feature names", {
})

test_that("xgb.importance works with GLM model", {
  .skip_if_vcd_not_available()
  importance.GLM <- xgb.importance(feature_names = feature.names, model = bst.GLM)
  expect_equal(dim(importance.GLM), c(10, 2))
  expect_equal(colnames(importance.GLM), c("Feature", "Weight"))
@@ -387,7 +368,6 @@ test_that("xgb.importance works with GLM model", {
})

test_that("xgb.model.dt.tree and xgb.importance work with a single split model", {
  .skip_if_vcd_not_available()
  bst1 <- xgboost(data = sparse_matrix, label = label, max_depth = 1,
                  eta = 1, nthread = 2, nrounds = 1, verbose = 0,
                  objective = "binary:logistic")
@@ -399,19 +379,16 @@ test_that("xgb.model.dt.tree and xgb.importance work with a single split model",
})

test_that("xgb.plot.tree works with and without feature names", {
  .skip_if_vcd_not_available()
  expect_silent(xgb.plot.tree(feature_names = feature.names, model = bst.Tree))
  expect_silent(xgb.plot.tree(model = bst.Tree))
})

test_that("xgb.plot.multi.trees works with and without feature names", {
  .skip_if_vcd_not_available()
  xgb.plot.multi.trees(model = bst.Tree, feature_names = feature.names, features_keep = 3)
  xgb.plot.multi.trees(model = bst.Tree, features_keep = 3)
})

test_that("xgb.plot.deepness works", {
  .skip_if_vcd_not_available()
  d2p <- xgb.plot.deepness(model = bst.Tree)
  expect_equal(colnames(d2p), c("ID", "Tree", "Depth", "Cover", "Weight"))
  xgb.plot.deepness(model = bst.Tree, which = "med.depth")
@@ -419,7 +396,6 @@ test_that("xgb.plot.deepness works", {
})

test_that("xgb.shap.data works when top_n is provided", {
  .skip_if_vcd_not_available()
  data_list <- xgb.shap.data(data = sparse_matrix, model = bst.Tree, top_n = 2)
  expect_equal(names(data_list), c("data", "shap_contrib"))
  expect_equal(NCOL(data_list$data), 2)
@@ -437,14 +413,12 @@ test_that("xgb.shap.data works when top_n is provided", {
})

test_that("xgb.shap.data works with subsampling", {
  .skip_if_vcd_not_available()
  data_list <- xgb.shap.data(data = sparse_matrix, model = bst.Tree, top_n = 2, subsample = 0.8)
  expect_equal(NROW(data_list$data), as.integer(0.8 * nrow(sparse_matrix)))
  expect_equal(NROW(data_list$data), NROW(data_list$shap_contrib))
})

test_that("prepare.ggplot.shap.data works", {
  .skip_if_vcd_not_available()
  data_list <- xgb.shap.data(data = sparse_matrix, model = bst.Tree, top_n = 2)
  plot_data <- prepare.ggplot.shap.data(data_list, normalize = TRUE)
  expect_s3_class(plot_data, "data.frame")
@@ -455,19 +429,17 @@ test_that("prepare.ggplot.shap.data works", {
})

test_that("xgb.plot.shap works", {
  .skip_if_vcd_not_available()
  sh <- xgb.plot.shap(data = sparse_matrix, model = bst.Tree, top_n = 2, col = 4)
  expect_equal(names(sh), c("data", "shap_contrib"))
})

test_that("xgb.plot.shap.summary works", {
  .skip_if_vcd_not_available()
  expect_silent(xgb.plot.shap.summary(data = sparse_matrix, model = bst.Tree, top_n = 2))
  expect_silent(xgb.ggplot.shap.summary(data = sparse_matrix, model = bst.Tree, top_n = 2))
})

test_that("check.deprecation works", {
  ttt <- function(a = NNULL, DUMMY = NULL, ...) {
  ttt <- function(a = NNULL, DUMMY=NULL, ...) {
    check.deprecation(...)
    as.list((environment()))
  }

@@ -17,7 +17,7 @@ test_that("interaction constraints for regression", {

  # Set all observations to have the same x3 values then increment
  # by the same amount
  preds <- lapply(c(1, 2, 3), function(x) {
  preds <- lapply(c(1, 2, 3), function(x){
    tmat <- matrix(c(x1, x2, rep(x, 1000)), ncol = 3)
    return(predict(bst, tmat))
  })

@@ -1,5 +1,7 @@
context('Test prediction of feature interactions')

require(xgboost)

set.seed(123)

test_that("predict feature interactions works", {

@@ -1,4 +1,7 @@
context("Test model IO.")
## some other tests are in test_basic.R
require(xgboost)
require(testthat)

data(agaricus.train, package = "xgboost")
data(agaricus.test, package = "xgboost")

@@ -1,3 +1,6 @@
require(xgboost)
require(jsonlite)

context("Models from previous versions of XGBoost can be loaded")

metadata <- list(
@@ -59,12 +62,11 @@ test_that("Models from previous versions of XGBoost can be loaded", {
  bucket <- 'xgboost-ci-jenkins-artifacts'
  region <- 'us-west-2'
  file_name <- 'xgboost_r_model_compatibility_test.zip'
  zipfile <- tempfile(fileext = ".zip")
  extract_dir <- tempdir()
  zipfile <- file.path(getwd(), file_name)
  model_dir <- file.path(getwd(), 'models')
  download.file(paste('https://', bucket, '.s3-', region, '.amazonaws.com/', file_name, sep = ''),
                destfile = zipfile, mode = 'wb', quiet = TRUE)
  unzip(zipfile, exdir = extract_dir, overwrite = TRUE)
  model_dir <- file.path(extract_dir, 'models')
  unzip(zipfile, overwrite = TRUE)

  pred_data <- xgb.DMatrix(matrix(c(0, 0, 0, 0), nrow = 1, ncol = 4))

@@ -76,20 +78,32 @@ test_that("Models from previous versions of XGBoost can be loaded", {
    name <- m[3]
    is_rds <- endsWith(model_file, '.rds')
    is_json <- endsWith(model_file, '.json')
    # Expect an R warning when a model is loaded from RDS and it was generated by version < 1.1.x
    if (is_rds && compareVersion(model_xgb_ver, '1.1.1.1') < 0) {
      booster <- readRDS(model_file)
      expect_warning(predict(booster, newdata = pred_data))
      booster <- readRDS(model_file)
      expect_warning(run_booster_check(booster, name))
    } else {
      if (is_rds) {

    cpp_warning <- capture.output({
      # Expect an R warning when a model is loaded from RDS and it was generated by version < 1.1.x
      if (is_rds && compareVersion(model_xgb_ver, '1.1.1.1') < 0) {
        booster <- readRDS(model_file)
        expect_warning(predict(booster, newdata = pred_data))
        booster <- readRDS(model_file)
        expect_warning(run_booster_check(booster, name))
      } else {
        booster <- xgb.load(model_file)
        if (is_rds) {
          booster <- readRDS(model_file)
        } else {
          booster <- xgb.load(model_file)
        }
        predict(booster, newdata = pred_data)
        run_booster_check(booster, name)
      }
      predict(booster, newdata = pred_data)
      run_booster_check(booster, name)
    })
    cpp_warning <- paste0(cpp_warning, collapse = ' ')
    if (is_rds && compareVersion(model_xgb_ver, '1.1.1.1') >= 0) {
      # Expect a C++ warning when a model is loaded from RDS and it was generated by old XGBoost
      m <- grepl(paste0('.*If you are loading a serialized model ',
                        '\\(like pickle in Python, RDS in R\\).*',
                        'for more details about differences between ',
                        'saving model and serializing.*'), cpp_warning, perl = TRUE)
      expect_true(length(m) > 0 && all(m))
    }
  })
})

@@ -1,3 +1,5 @@
require(xgboost)

context("monotone constraints")

set.seed(1024)

@@ -1,5 +1,7 @@
context('Test model params and call are exposed to R')

require(xgboost)

data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')

@@ -1,5 +1,6 @@
context('Test Poisson regression model')

require(xgboost)
set.seed(1994)

test_that("Poisson regression works", {

@@ -1,12 +1,12 @@
require(xgboost)
require(Matrix)

context('Learning to rank')

test_that('Test ranking with unweighted data', {
  X <- Matrix::sparseMatrix(
    i = c(2, 3, 7, 9, 12, 15, 17, 18)
    , j = c(1, 1, 2, 2, 3, 3, 4, 4)
    , x = rep(1.0, 8)
    , dims = c(20, 4)
  )
  X <- sparseMatrix(i = c(2, 3, 7, 9, 12, 15, 17, 18),
                    j = c(1, 1, 2, 2, 3, 3, 4, 4),
                    x = rep(1.0, 8), dims = c(20, 4))
  y <- c(0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0)
  group <- c(5, 5, 5, 5)
  dtrain <- xgb.DMatrix(X, label = y, group = group)
@@ -20,12 +20,9 @@ test_that('Test ranking with unweighted data', {
})

test_that('Test ranking with weighted data', {
  X <- Matrix::sparseMatrix(
    i = c(2, 3, 7, 9, 12, 15, 17, 18)
    , j = c(1, 1, 2, 2, 3, 3, 4, 4)
    , x = rep(1.0, 8)
    , dims = c(20, 4)
  )
  X <- sparseMatrix(i = c(2, 3, 7, 9, 12, 15, 17, 18),
                    j = c(1, 1, 2, 2, 3, 3, 4, 4),
                    x = rep(1.0, 8), dims = c(20, 4))
  y <- c(0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0)
  group <- c(5, 5, 5, 5)
  weight <- c(1.0, 2.0, 3.0, 4.0)

@@ -1,21 +0,0 @@
context("Test Unicode handling")

data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
train <- agaricus.train
test <- agaricus.test
set.seed(1994)

test_that("Can save and load models with Unicode paths", {
  nrounds <- 2
  bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
                 eta = 1, nthread = 2, nrounds = nrounds, objective = "binary:logistic",
                 eval_metric = "error")
  tmpdir <- tempdir()
  lapply(c("모델.json", "がうる・ぐら.json", "类继承.ubj"), function(x) {
    path <- file.path(tmpdir, x)
    xgb.save(bst, path)
    bst2 <- xgb.load(path)
    expect_equal(predict(bst, test$data), predict(bst2, test$data))
  })
})
@@ -1,3 +1,5 @@
require(xgboost)

context("update trees in an existing model")

data(agaricus.train, package = 'xgboost')
@@ -13,10 +15,7 @@ test_that("updating the model works", {
  watchlist <- list(train = dtrain, test = dtest)

  # no-subsampling
  p1 <- list(
    objective = "binary:logistic", max_depth = 2, eta = 0.05, nthread = 2,
    updater = "grow_colmaker,prune"
  )
  p1 <- list(objective = "binary:logistic", max_depth = 2, eta = 0.05, nthread = 2)
  set.seed(11)
  bst1 <- xgb.train(p1, dtrain, nrounds = 10, watchlist, verbose = 0)
  tr1 <- xgb.model.dt.tree(model = bst1)

@@ -28,9 +28,7 @@ Package loading:
require(xgboost)
require(Matrix)
require(data.table)
if (!require('vcd')) {
  install.packages('vcd')
}
if (!require('vcd')) install.packages('vcd')
```

> **VCD** package is used for one of its embedded datasets only.
@@ -51,24 +49,24 @@ A *categorical* variable has a fixed number of different values. For instance, i
>
> Type `?factor` in the console for more information.

To answer the question above we will convert *categorical* variables to `numeric` ones.
To answer the question above we will convert *categorical* variables to `numeric` one.

### Conversion from categorical to numeric variables

#### Looking at the raw data

In this Vignette we will see how to transform a *dense* `data.frame` (*dense* = the majority of the matrix is non-zero) with *categorical* variables to a very *sparse* matrix (*sparse* = lots of zero entries in the matrix) of `numeric` features.
In this Vignette we will see how to transform a *dense* `data.frame` (*dense* = few zeroes in the matrix) with *categorical* variables to a very *sparse* matrix (*sparse* = lots of zero in the matrix) of `numeric` features.

The method we are going to see is usually called [one-hot encoding](https://en.wikipedia.org/wiki/One-hot).

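As a tiny sketch of the idea on a toy factor (the column name here is made up), before we apply it to the real dataset:

```r
df_toy <- data.frame(color = factor(c("red", "green", "red", "blue")))
# each level of the factor becomes its own binary {0, 1} column
model.matrix(~ color - 1, data = df_toy)
```
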
The first step is to load the `Arthritis` dataset in memory and wrap it with the `data.table` package.
The first step is to load `Arthritis` dataset in memory and wrap it with `data.table` package.

```{r, results='hide'}
data(Arthritis)
df <- data.table(Arthritis, keep.rownames = FALSE)
```

> `data.table` is 100% compliant with **R** `data.frame` but its syntax is more consistent and its performance for large dataset is [best in class](https://stackoverflow.com/questions/21435339/data-table-vs-dplyr-can-one-do-something-well-the-other-cant-or-does-poorly) (`dplyr` from **R** and `Pandas` from **Python** [included](https://github.com/Rdatatable/data.table/wiki/Benchmarks-%3A-Grouping)). Some parts of **XGBoost's** **R** package use `data.table`.
> `data.table` is 100% compliant with **R** `data.frame` but its syntax is more consistent and its performance for large dataset is [best in class](https://stackoverflow.com/questions/21435339/data-table-vs-dplyr-can-one-do-something-well-the-other-cant-or-does-poorly) (`dplyr` from **R** and `Pandas` from **Python** [included](https://github.com/Rdatatable/data.table/wiki/Benchmarks-%3A-Grouping)). Some parts of **XGBoost** **R** package use `data.table`.

The first thing we want to do is to have a look at the first few lines of the `data.table`:

@@ -95,22 +93,22 @@ We will add some new *categorical* features to see if it helps.

##### Grouping per 10 years

For the first features we create groups of age by rounding the real age.
For the first feature we create groups of age by rounding the real age.

Note that we transform it to `factor` so the algorithm treats these age groups as independent values.
Note that we transform it to `factor` so the algorithm treat these age groups as independent values.

Therefore, 20 is not closer to 30 than 60. In other words, the distance between ages is lost in this transformation.
Therefore, 20 is not closer to 30 than 60. To make it short, the distance between ages is lost in this transformation.

```{r}
head(df[, AgeDiscret := as.factor(round(Age / 10, 0))])
head(df[,AgeDiscret := as.factor(round(Age/10,0))])
```

##### Randomly split into two groups
##### Random split into two groups

The following is an even stronger simplification of the real age with an arbitrary split at 30 years old. I choose this value **based on nothing**. We will see later if simplifying the information based on arbitrary values is a good strategy (you may already have an idea of how well it will work...).
Following is an even stronger simplification of the real age with an arbitrary split at 30 years old. We choose this value **based on nothing**. We will see later if simplifying the information based on arbitrary values is a good strategy (you may already have an idea of how well it will work...).

```{r}
head(df[, AgeCat := as.factor(ifelse(Age > 30, "Old", "Young"))])
head(df[,AgeCat:= as.factor(ifelse(Age > 30, "Old", "Young"))])
```

##### Risks in adding correlated features
@@ -119,20 +117,20 @@ These new features are highly correlated to the `Age` feature because they are s

For many machine learning algorithms, using correlated features is not a good idea. It may sometimes make prediction less accurate, and most of the time make interpretation of the model almost impossible. GLM, for instance, assumes that the features are uncorrelated.

Fortunately, decision tree algorithms (including boosted trees) are very robust to these features. Therefore we don't have to do anything to manage this situation.
Fortunately, decision tree algorithms (including boosted trees) are very robust to these features. Therefore we have nothing to do to manage this situation.

##### Cleaning data

We remove ID as there is nothing to learn from this feature (it would just add some noise).

```{r, results='hide'}
df[, ID := NULL]
df[,ID:=NULL]
```

We will list the different values for the column `Treatment`:

```{r}
levels(df[, Treatment])
levels(df[,Treatment])
```

@@ -144,12 +142,12 @@ We will use the [dummy contrast coding](https://stats.oarc.ucla.edu/r/library/r-

The purpose is to transform each value of each *categorical* feature into a *binary* feature `{0, 1}`.

For example, the column `Treatment` will be replaced by two columns, `TreatmentPlacebo`, and `TreatmentTreated`. Each of them will be *binary*. Therefore, an observation which has the value `Placebo` in column `Treatment` before the transformation will have the value `1` in the new column `TreatmentPlacebo` and the value `0` in the new column `TreatmentTreated` after the transformation. The column `TreatmentPlacebo` will disappear during the contrast encoding, as it would be absorbed into a common constant intercept column.
For example, the column `Treatment` will be replaced by two columns, `TreatmentPlacebo`, and `TreatmentTreated`. Each of them will be *binary*. Therefore, an observation which has the value `Placebo` in column `Treatment` before the transformation will have after the transformation the value `1` in the new column `TreatmentPlacebo` and the value `0` in the new column `TreatmentTreated`. The column `TreatmentPlacebo` will disappear during the contrast encoding, as it would be absorbed into a common constant intercept column.

Column `Improved` is excluded because it will be our `label` column, the one we want to predict.

```{r, warning=FALSE,message=FALSE}
sparse_matrix <- sparse.model.matrix(Improved ~ ., data = df)[, -1]
sparse_matrix <- sparse.model.matrix(Improved ~ ., data = df)[,-1]
head(sparse_matrix)
```

@@ -158,7 +156,7 @@ head(sparse_matrix)

Create the output `numeric` vector (not as a sparse `Matrix`):

```{r}
output_vector <- df[, Improved] == "Marked"
output_vector = df[,Improved] == "Marked"
```

1. set `Y` vector to `0`;

@@ -172,13 +170,17 @@ The code below is very usual. For more information, you can look at the document

```{r}
bst <- xgboost(data = sparse_matrix, label = output_vector, max_depth = 4,
               eta = 1, nthread = 2, nrounds = 10, objective = "binary:logistic")
               eta = 1, nthread = 2, nrounds = 10,objective = "binary:logistic")

```

You can see some `train-logloss: 0.XXXXX` lines followed by a number. It decreases. Each line shows how well the model explains the data. Lower is better.
You can see some `train-error: 0.XXXXX` lines followed by a number. It decreases. Each line shows how well the model explains your data. Lower is better.

A small value for training error may be a symptom of [overfitting](https://en.wikipedia.org/wiki/Overfitting), meaning the model will not accurately predict unseen values.
A small value for training error may be a symptom of [overfitting](https://en.wikipedia.org/wiki/Overfitting), meaning the model will not accurately predict the future values.

> Here you can see the numbers decrease until line 7 and then increase.
>
> It probably means we are overfitting. To fix that I should reduce the number of rounds to `nrounds = 4`. I will leave things as they are because I don't really care for the purpose of this example :-)

Feature importance
------------------
@@ -195,35 +197,64 @@ importance <- xgb.importance(feature_names = colnames(sparse_matrix), model = bs
head(importance)
```

> The column `Gain` provides the information we are looking for.
> The column `Gain` provide the information we are looking for.
>
> As you can see, features are classified by `Gain`.

`Gain` is the improvement in accuracy brought by a feature to the branches it is on. The idea is that before adding a new split on a feature X to the branch there were some wrongly classified elements; after adding the split on this feature, there are two new branches, and each of these branches is more accurate (one branch saying if your observation is on this branch then it should be classified as `1`, and the other branch saying the exact opposite).
`Gain` is the improvement in accuracy brought by a feature to the branches it is on. The idea is that before adding a new split on a feature X to the branch there was some wrongly classified elements, after adding the split on this feature, there are two new branches, and each of these branch is more accurate (one branch saying if your observation is on this branch then it should be classified as `1`, and the other branch saying the exact opposite).

`Cover` is related to the second order derivative (or Hessian) of the loss function with respect to a particular variable; thus, a large value indicates a variable has a large potential impact on the loss function and so is important.
`Cover` measures the relative quantity of observations concerned by a feature.

`Frequency` is a simpler way to measure the `Gain`. It just counts the number of times a feature is used in all generated trees. You should not use it (unless you know why you want to use it).

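As a small illustration (reusing the `importance` table computed above), each measure induces its own ranking of the features:

```r
importance[order(-Gain)]       # improvement in accuracy brought by the splits
importance[order(-Cover)]      # second-order (Hessian) weight of the splits
importance[order(-Frequency)]  # raw count of splits using each feature
```
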
#### Improvement in the interpretability of feature importance data.table

We can go deeper in the analysis of the model. In the `data.table` above, we have discovered which features count in predicting if the illness will go or not. But we don't yet know the role of these features. For instance, one of the questions we may want to answer would be: does receiving a placebo treatment help to recover from the illness?

One simple solution is to count the co-occurrences of a feature and a class of the classification.

For that purpose we will execute the same function as above but using two more parameters, `data` and `label`.

```{r}
importanceRaw <- xgb.importance(feature_names = colnames(sparse_matrix), model = bst, data = sparse_matrix, label = output_vector)

# Cleaning for better display
importanceClean <- importanceRaw[,`:=`(Cover=NULL, Frequency=NULL)]

head(importanceClean)
```

> In the table above we have removed two unneeded columns and selected only the first lines.

The first thing you notice is the new column `Split`. It is the split applied to the feature on a branch of one of the trees. Each split is present, therefore a feature can appear several times in this table. Here we can see the feature `Age` is used several times with different splits.

How is the split applied to count the co-occurrences? It is always `<`. For instance, in the second line, we measure the number of persons under 61.5 years with the illness gone after the treatment.

The two other new columns are `RealCover` and `RealCover %`. The first column measures the number of observations in the dataset where the split is respected and the label marked as `1`. The second column is the percentage of the whole population that `RealCover` represents.

Therefore, according to our findings, getting a placebo doesn't seem to help but being younger than 61 years may help (which seems logical).

> You may wonder how to interpret the `< 1.00001` on the first line. Basically, in a sparse `Matrix`, there is no `0`, therefore, looking for one-hot-encoded categorical observations validating the rule `< 1.00001` is like just looking for `1` for this feature.

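A toy sketch of that last point (independent of the model above): in a sparse one-hot matrix only the non-zero entries are stored, and they are all `1`:

```r
library(Matrix)
m <- sparse.model.matrix(~ color - 1, data = data.frame(color = factor(c("red", "blue", "red"))))
m@x                 # the stored entries: all equal to 1
all(m@x < 1.00001)  # TRUE: the rule selects exactly the stored one-hot values
```
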
### Plotting the feature importance

All these things are nice, but it would be even better to plot the results.

```{r, fig.width=8, fig.height=5, fig.align='center'}
xgb.plot.importance(importance_matrix = importance)
```

Running this line of code, you should get a bar chart showing the importance of the 6 features (containing the same data as the output we saw earlier, but displaying it visually for easier consumption). Note that `xgb.ggplot.importance` is also available for all the ggplot2 fans!

Features have automatically been divided into 2 clusters: the interesting features... and the others.

> Depending on the dataset and the learning parameters, you may have more than two clusters. The default limit is `10`, but you can increase it; look at the function documentation for more information.

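For instance, the sketch below raises the limit (it is not from the original vignette; `n_clusters` is the relevant argument of the ggplot variant, see `?xgb.ggplot.importance` for the exact interface):

```{r, eval=FALSE}
# allow the bars to be grouped into up to 20 clusters instead of the default 10
xgb.ggplot.importance(importance_matrix = importance, n_clusters = c(1:20))
```
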
According to the plot above, the most important features in this dataset to predict if the treatment will work are:

* An individual's age;
* Having received a placebo or not;
* Gender;
* Our generated feature AgeDiscret. We can see that its contribution is very low.

### Do these results make sense?

@@ -237,84 +268,69 @@

```{r, warning=FALSE, message=FALSE}
c2 <- chisq.test(df$Age, output_vector)
print(c2)
```

The Pearson chi-squared statistic between Age and illness disappearing is **`r round(c2$statistic, 2)`**.

```{r, warning=FALSE, message=FALSE}
c2 <- chisq.test(df$AgeDiscret, output_vector)
print(c2)
```

Our first simplification of Age gives a Pearson chi-squared statistic of **`r round(c2$statistic, 2)`**.

```{r, warning=FALSE, message=FALSE}
c2 <- chisq.test(df$AgeCat, output_vector)
print(c2)
```

The perfectly random split we did between young and old at 30 years old has a low chi-squared statistic of **2.36**. This suggests that, for the particular illness we are studying, the age at which someone is vulnerable to this disease is likely very different from 30.

Moral of the story: don't let your *gut* lower the quality of your model.

In *data science*, there is the word *science* :-)

Conclusion
----------

As you can see, in general *destroying information by simplifying it won't improve your model*. **Chi2** just demonstrates that.

But in more complex cases, creating a new feature from an existing one may help the algorithm and improve the model.

The case studied here is not complex enough to show that. Check the [Kaggle website](https://www.kaggle.com/) for some challenging datasets.

Moreover, you can see that even if we have added some new features which are not very useful/highly correlated with other features, the boosting tree algorithm was still able to choose the best one (which in this case is the Age).

Linear models may not perform as well.

Special Note: What about Random Forests™?
-----------------------------------------

As you may know, the [Random Forests](https://en.wikipedia.org/wiki/Random_forest) algorithm is a cousin of boosting; both are part of the [ensemble learning](https://en.wikipedia.org/wiki/Ensemble_learning) family.

Both train several decision trees for one dataset. The *main* difference is that in Random Forests, trees are independent, while in boosting the `N+1`-st tree focuses its learning on the loss, i.e. on what has not been well modeled by tree `N`.

This difference can have an impact on an edge case in feature importance analysis: *correlated features*.

Imagine two perfectly correlated features, feature `A` and feature `B`. For one specific tree, if the algorithm needs one of them, it will choose randomly (this is true in both boosting and Random Forests).

However, in Random Forests this random choice is made for each tree, because each tree is independent from the others. Therefore, approximately (and depending on your parameters) 50% of the trees will choose feature `A` and the other 50% will choose feature `B`. So the *importance* of the information contained in `A` and `B` (which is the same, because they are perfectly correlated) is diluted across `A` and `B`: you won't easily know this information is important for predicting what you want to predict! It is even worse when you have 10 correlated features...

In boosting, once a specific link between feature and outcome has been learned by the algorithm, it will try not to refocus on it (in theory that is what happens; reality is not always that simple). Therefore, all the importance will be on feature `A` or on feature `B` (but not both). You will know that one feature has an important role in the link between the observations and the label. It is still up to you to search for the features correlated to the one detected as important, if you need to know all of them.

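A small experiment makes this dilution visible. The sketch below uses made-up data and is not part of the original vignette: `x2` is an exact copy of `x1`, so a forest-style model splits the importance between the two copies, while a boosted model tends to concentrate it on one of them.

```{r, eval=FALSE}
set.seed(1)
n  <- 1000
x1 <- rnorm(n)
x2 <- x1                                    # perfectly correlated copy of x1
x3 <- rnorm(n)                              # an unrelated feature
y  <- as.numeric(x1 + rnorm(n, sd = 0.1) > 0)
X  <- cbind(x1 = x1, x2 = x2, x3 = x3)

# forest-style: many parallel trees grown in a single round, with column subsampling
rf <- xgboost(data = X, label = y, nrounds = 1, num_parallel_tree = 200,
              subsample = 0.5, colsample_bytree = 0.5,
              objective = "binary:logistic", verbose = 0)
xgb.importance(model = rf)   # Gain is diluted across x1 and x2

# boosting: sequential trees, importance tends to concentrate on one copy
gbm <- xgboost(data = X, label = y, nrounds = 20,
               objective = "binary:logistic", verbose = 0)
xgb.importance(model = gbm)
```
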

If you want to try the Random Forests algorithm, you can tweak XGBoost parameters!

For instance, to compute a model with 1000 trees, with a 0.5 factor on sampling rows and columns:

```{r, warning=FALSE, message=FALSE}
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
train <- agaricus.train
test <- agaricus.test

# Random Forest - 1000 trees
bst <- xgboost(
  data = train$data
  , label = train$label
  , max_depth = 4
  , num_parallel_tree = 1000
  , subsample = 0.5
  , colsample_bytree = 0.5
  , nrounds = 1
  , objective = "binary:logistic"
)

# Boosting - 3 rounds
bst <- xgboost(
  data = train$data
  , label = train$label
  , max_depth = 4
  , nrounds = 3
  , objective = "binary:logistic"
)
```

> Note that for the Random Forest the parameter `nrounds` is set to `1`: the 1000 trees are all grown within that single round through `num_parallel_tree`.

@@ -18,11 +18,13 @@

publisher={Institute of Mathematical Statistics}
}

@misc{Bache+Lichman:2013,
  author = "K. Bache and M. Lichman",
  year = "2013",
  title = "{UCI} Machine Learning Repository",
  url = "https://archive.ics.uci.edu/",
  institution = "University of California, Irvine, School of Information and Computer Sciences"
}

@@ -52,9 +52,9 @@ It has several features:

For the weekly updated version (highly recommended), install from *GitHub*:

```{r installGithub, eval=FALSE}
install.packages("drat", repos = "https://cran.rstudio.com")
drat:::addRepo("dmlc")
install.packages("xgboost", repos = "http://dmlc.ml/drat/", type = "source")
```

> *Windows* users will need to install [Rtools](https://cran.r-project.org/bin/windows/Rtools/) first.

@@ -101,8 +101,8 @@ Why *split* the dataset in two parts?

In the first part we will build our model. In the second part we will test it and assess its quality. Without dividing the dataset, we would test the model on data the algorithm has already seen.

```{r datasetLoading, results='hold', message=F, warning=F}
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
train <- agaricus.train
test <- agaricus.test
```

@@ -152,15 +152,7 @@ We will train decision tree model using the following parameters:

* `nrounds = 2`: there will be two passes on the data; the second one will enhance the model by further reducing the difference between ground truth and prediction.

```{r trainingSparse, message=F, warning=F}
bstSparse <- xgboost(
  data = train$data
  , label = train$label
  , max_depth = 2
  , eta = 1
  , nthread = 2
  , nrounds = 2
  , objective = "binary:logistic"
)
```

> The more complex the relationship between your features and your `label`, the more passes you need.

@@ -172,15 +164,7 @@ bstSparse <- xgboost(

Alternatively, you can put your dataset in a *dense* matrix, i.e. a basic **R** matrix.

```{r trainingDense, message=F, warning=F}
bstDense <- xgboost(
  data = as.matrix(train$data)
  , label = train$label
  , max_depth = 2
  , eta = 1
  , nthread = 2
  , nrounds = 2
  , objective = "binary:logistic"
)
```

##### xgb.DMatrix

@@ -189,14 +173,7 @@ bstDense <- xgboost(

```{r trainingDmatrix, message=F, warning=F}
dtrain <- xgb.DMatrix(data = train$data, label = train$label)
bstDMatrix <- xgboost(
  data = dtrain
  , max_depth = 2
  , eta = 1
  , nthread = 2
  , nrounds = 2
  , objective = "binary:logistic"
)
```

##### Verbose option

@@ -207,41 +184,17 @@ One of the simplest way to see the training progress is to set the `verbose` opt

```{r trainingVerbose0, message=T, warning=F}
# verbose = 0, no message
bst <- xgboost(
  data = dtrain
  , max_depth = 2
  , eta = 1
  , nthread = 2
  , nrounds = 2
  , objective = "binary:logistic"
  , verbose = 0
)
```

```{r trainingVerbose1, message=T, warning=F}
# verbose = 1, print evaluation metric
bst <- xgboost(
  data = dtrain
  , max_depth = 2
  , eta = 1
  , nthread = 2
  , nrounds = 2
  , objective = "binary:logistic"
  , verbose = 1
)
```

```{r trainingVerbose2, message=T, warning=F}
# verbose = 2, also print information about tree
bst <- xgboost(
  data = dtrain
  , max_depth = 2
  , eta = 1
  , nthread = 2
  , nrounds = 2
  , objective = "binary:logistic"
  , verbose = 2
)
```

## Basic prediction using XGBoost
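
The hunk below skips the body of this section; as a reminder of the basic flow (a sketch reusing the `bst` model and `test` set from above, not the section's original text), prediction is a single call:

```{r, eval=FALSE}
# with objective = "binary:logistic", predict() returns probabilities of the positive class
pred <- predict(bst, test$data)
head(pred)
```
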
@@ -314,8 +267,8 @@ Most of the features below have been implemented to help you to improve your mod

For the following advanced features, we need to put data in `xgb.DMatrix` as explained above.

```{r DMatrix, message=F, warning=F}
dtrain <- xgb.DMatrix(data = train$data, label = train$label)
dtest <- xgb.DMatrix(data = test$data, label = test$label)
```

### Measure learning progress with xgb.train

@@ -332,17 +285,9 @@ One way to measure progress in learning of a model is to provide to **XGBoost**

For the purpose of this example, we use the `watchlist` parameter. It is a list of `xgb.DMatrix` objects, each of them tagged with a name.

```{r watchlist, message=F, warning=F}
watchlist <- list(train = dtrain, test = dtest)

bst <- xgb.train(
  data = dtrain
  , max_depth = 2
  , eta = 1
  , nthread = 2
  , nrounds = 2
  , watchlist = watchlist
  , objective = "binary:logistic"
)
```

**XGBoost** has computed at each round the same average error metric as seen above (we set `nrounds` to 2, which is why we have two lines). Obviously, the `train-error` number is related to the training dataset (the one the algorithm learns from) and the `test-error` number to the test dataset.

@@ -354,17 +299,7 @@ If with your own dataset you have not such results, you should think about how y

For a better understanding of the learning progression, you may want to have some specific metric or even use multiple evaluation metrics.

```{r watchlist2, message=F, warning=F}
bst <- xgb.train(
  data = dtrain
  , max_depth = 2
  , eta = 1
  , nthread = 2
  , nrounds = 2
  , watchlist = watchlist
  , eval_metric = "error"
  , eval_metric = "logloss"
  , objective = "binary:logistic"
)
```

> `eval_metric` allows us to monitor two new metrics for each round, `logloss` and `error`.

@@ -375,17 +310,7 @@ bst <- xgb.train(

Until now, all the learning we have performed was based on boosted trees. **XGBoost** implements a second algorithm, based on linear boosting. The only difference with the previous command is the `booster = "gblinear"` parameter (and the removal of the `eta` parameter).

```{r linearBoosting, message=F, warning=F}
bst <- xgb.train(
  data = dtrain
  , booster = "gblinear"
  , max_depth = 2
  , nthread = 2
  , nrounds = 2
  , watchlist = watchlist
  , eval_metric = "error"
  , eval_metric = "logloss"
  , objective = "binary:logistic"
)
```

In this specific case, *linear boosting* gets slightly better performance metrics than the decision-tree-based algorithm.

@@ -403,15 +328,7 @@ Like saving models, `xgb.DMatrix` object (which groups both dataset and outcome)

```{r}
xgb.DMatrix.save(dtrain, "dtrain.buffer")
# to load it in, simply call xgb.DMatrix
dtrain2 <- xgb.DMatrix("dtrain.buffer")
bst <- xgb.train(
  data = dtrain2
  , max_depth = 2
  , eta = 1
  , nthread = 2
  , nrounds = 2
  , watchlist = watchlist
  , objective = "binary:logistic"
)
```

```{r DMatrixDel, include=FALSE}
file.remove("dtrain.buffer")
```

@@ -423,9 +340,9 @@ file.remove("dtrain.buffer")

Information can be extracted from an `xgb.DMatrix` using the `getinfo` function. Hereafter we will extract the `label` data.

```{r getinfo, message=F, warning=F}
label <- getinfo(dtest, "label")
pred <- predict(bst, dtest)
err <- as.numeric(sum(as.integer(pred > 0.5) != label)) / length(label)
print(paste("test-error=", err))
```

@@ -479,7 +396,7 @@ bst2 <- xgb.load("xgboost.model")

```{r}
bst2 <- xgb.load("xgboost.model")
pred2 <- predict(bst2, test$data)

# And now the test
print(paste("sum(abs(pred2-pred))=", sum(abs(pred2 - pred))))
```

```{r clean, include=FALSE}
file.remove("xgboost.model")
```

@@ -503,7 +420,7 @@ bst3 <- xgb.load(rawVec)

```{r}
bst3 <- xgb.load(rawVec)
pred3 <- predict(bst3, test$data)

# pred3 should be identical to pred
print(paste("sum(abs(pred3-pred))=", sum(abs(pred3 - pred))))
```

> Again `0`? It seems that `XGBoost` works pretty well!

@@ -30,7 +30,7 @@ For the purpose of this tutorial we will load the xgboost, jsonlite, and float p

```{r}
require(xgboost)
require(jsonlite)
require(float)
options(digits = 22)
```

We will create a toy binary logistic model based on the example first provided [here](https://github.com/dmlc/xgboost/issues/3960), so that we can easily understand the structure of the dumped JSON model object. This will allow us to understand where discrepancies can occur and how they should be handled.

@@ -50,10 +50,10 @@ labels <- c(1, 1, 1,

```{r}
labels <- c(1, 1, 1,
            0, 0, 0,
            0, 0, 0)

data <- data.frame(dates = dates, labels = labels)

bst <- xgboost(
  data = as.matrix(data$dates),
  label = labels,
  nthread = 2,
  nrounds = 1,
  objective = "binary:logistic"  # completed from the text above: a toy binary logistic model
)
```

@@ -69,7 +69,7 @@ We will now dump the model to JSON and attempt to illustrate a variety of issues

First let's dump the model to JSON:

```{r}
bst_json <- xgb.dump(bst, with_stats = FALSE, dump_format = 'json')
bst_from_json <- fromJSON(bst_json, simplifyDataFrame = FALSE)
node <- bst_from_json[[1]]
cat(bst_json)
```

@@ -78,10 +78,10 @@ cat(bst_json)

The tree JSON shown by the above code-chunk tells us that if the data is less than 20180132, the tree will output the value in the first leaf. Otherwise it will output the value in the second leaf. Let's try to reproduce this manually with the data we have and confirm that it matches the model predictions we've already calculated.

```{r}
bst_preds_logodds <- predict(bst, as.matrix(data$dates), outputmargin = TRUE)

# calculate the logodds values using the JSON representation
bst_from_json_logodds <- ifelse(data$dates < node$split_condition,
                                node$children[[1]]$leaf,
                                node$children[[2]]$leaf)
```

@@ -106,19 +106,19 @@ At this stage two things happened:

To explain this, let's repeat the comparison and round to two decimals:

```{r}
round(bst_preds_logodds, 2) == round(bst_from_json_logodds, 2)
```

If we round to two decimals, we see that only the elements related to data values of `20180131` don't agree. If we convert the data to floats, they agree:

```{r}
# now convert the dates to floats first
bst_from_json_logodds <- ifelse(fl(data$dates) < node$split_condition,
                                node$children[[1]]$leaf,
                                node$children[[2]]$leaf)

# test that values are equal
round(bst_preds_logodds, 2) == round(bst_from_json_logodds, 2)
```

What's the lesson? If we are going to work with an imported JSON model, any data must be converted to floats first. In this case, since '20180131' cannot be represented as a 32-bit float, it is rounded up to 20180132, as shown here:

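As a quick check of that claim (a sketch, not a chunk from the original vignette), the `float` package shows the rounding directly:

```{r, eval=FALSE}
# 20180131 has no exact 32-bit representation; the nearest float is 20180132
dbl(fl(20180131))
```
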
@@ -143,7 +143,7 @@ None are exactly equal. What happened? Although we've converted the data to 32

```{r}
# now convert the dates to floats first
bst_from_json_logodds <- ifelse(fl(data$dates) < fl(node$split_condition),
                                as.numeric(fl(node$children[[1]]$leaf)),
                                as.numeric(fl(node$children[[2]]$leaf)))
```

@@ -160,13 +160,12 @@ We were able to get the log-odds to agree, so now let's manually calculate the s

```{r}
bst_preds <- predict(bst, as.matrix(data$dates))

# calculate the predictions casting doubles to floats
bst_from_json_preds <- ifelse(
  fl(data$dates) < fl(node$split_condition)
  , as.numeric(1 / (1 + exp(-1 * fl(node$children[[1]]$leaf))))
  , as.numeric(1 / (1 + exp(-1 * fl(node$children[[2]]$leaf))))
)

# test that values are equal
```

@@ -178,10 +177,9 @@ None are exactly equal again. What is going on here? Well, since we are using

How do we fix this? We have to ensure we use the correct data types everywhere and the correct operators. If we use only floats, the float library that we have loaded will ensure the 32-bit float exponentiation operator is applied.

```{r}
# calculate the predictions casting doubles to floats
bst_from_json_preds <- ifelse(
  fl(data$dates) < fl(node$split_condition)
  , as.numeric(fl(1) / (fl(1) + exp(fl(-1) * fl(node$children[[1]]$leaf))))
  , as.numeric(fl(1) / (fl(1) + exp(fl(-1) * fl(node$children[[2]]$leaf))))
)

# test that values are equal
```

@@ -1,6 +1,7 @@

<img src="https://xgboost.ai/images/logo/xgboost-logo.svg" width=135/> eXtreme Gradient Boosting
===========

[Build Status](https://buildkite.com/xgboost/xgboost-ci)
[Build Status](https://xgboost-ci.net/blue/organizations/jenkins/xgboost/activity)
[Build Status](https://travis-ci.org/dmlc/xgboost)
[XGBoost-CI](https://github.com/dmlc/xgboost/actions)
[Documentation Status](https://xgboost.readthedocs.org)
[License](./LICENSE)

@@ -20,7 +21,7 @@

XGBoost is an optimized distributed gradient boosting library designed to be highly ***efficient***, ***flexible*** and ***portable***.
It implements machine learning algorithms under the [Gradient Boosting](https://en.wikipedia.org/wiki/Gradient_boosting) framework.
XGBoost provides a parallel tree boosting (also known as GBDT, GBM) that solves many data science problems in a fast and accurate way.
The same code runs on major distributed environments (Kubernetes, Hadoop, SGE, Dask, Spark, PySpark) and can solve problems beyond billions of examples.

License
-------

@@ -48,6 +49,7 @@ Become a sponsor and get a logo here. See details at [Sponsoring the XGBoost Pro

<a href="https://www.nvidia.com/en-us/" target="_blank"><img src="https://raw.githubusercontent.com/xgboost-ai/xgboost-ai.github.io/master/images/sponsors/nvidia.jpg" alt="NVIDIA" width="72" height="72"></a>
<a href="https://www.intel.com/" target="_blank"><img src="https://images.opencollective.com/intel-corporation/2fa85c1/logo/256.png" width="72" height="72"></a>
<a href="https://getkoffie.com/?utm_source=opencollective&utm_medium=github&utm_campaign=xgboost" target="_blank"><img src="https://images.opencollective.com/koffielabs/f391ab8/logo/256.png" width="72" height="72"></a>

### Backers
[[Become a backer](https://opencollective.com/xgboost#backer)]

1 cmake/Python_version.in Normal file
@@ -0,0 +1 @@
@xgboost_VERSION_MAJOR@.@xgboost_VERSION_MINOR@.@xgboost_VERSION_PATCH@
@@ -8,6 +8,9 @@ macro(enable_sanitizer sanitizer)
  if(${sanitizer} MATCHES "address")
    find_package(ASan)
    set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=address")
    if (ASan_FOUND)
      link_libraries(${ASan_LIBRARY})
    endif (ASan_FOUND)

  elseif(${sanitizer} MATCHES "thread")
    find_package(TSan)
@@ -19,10 +22,16 @@ macro(enable_sanitizer sanitizer)
  elseif(${sanitizer} MATCHES "leak")
    find_package(LSan)
    set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=leak")
    if (LSan_FOUND)
      link_libraries(${LSan_LIBRARY})
    endif (LSan_FOUND)

  elseif(${sanitizer} MATCHES "undefined")
    find_package(UBSan)
    set(SAN_COMPILE_FLAGS "${SAN_COMPILE_FLAGS} -fsanitize=undefined -fno-sanitize-recover=undefined")
    if (UBSan_FOUND)
      link_libraries(${UBSan_LIBRARY})
    endif (UBSan_FOUND)

  else()
    message(FATAL_ERROR "Sanitizer ${sanitizer} not supported.")
@@ -90,8 +90,8 @@ function(format_gencode_flags flags out)
  endif()
  # Set up architecture flags
  if(NOT flags)
    if (CUDA_VERSION VERSION_GREATER_EQUAL "11.8")
      set(flags "50;60;70;80;90")
    elseif (CUDA_VERSION VERSION_GREATER_EQUAL "11.0")
      set(flags "50;60;70;80")
    elseif(CUDA_VERSION VERSION_GREATER_EQUAL "10.0")
@@ -124,6 +124,13 @@ function(format_gencode_flags flags out)
  endif (CMAKE_VERSION VERSION_GREATER_EQUAL "3.18")
endfunction(format_gencode_flags flags)

macro(enable_nvtx target)
  find_package(NVTX REQUIRED)
  target_include_directories(${target} PRIVATE "${NVTX_INCLUDE_DIR}")
  target_link_libraries(${target} PRIVATE "${NVTX_LIBRARY}")
  target_compile_definitions(${target} PRIVATE -DXGBOOST_USE_NVTX=1)
endmacro()

# Set CUDA related flags to target. Must be used after code `format_gencode_flags`.
function(xgboost_set_cuda_flags target)
  target_compile_options(${target} PRIVATE
@@ -133,11 +140,6 @@ function(xgboost_set_cuda_flags target)
    $<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler=${OpenMP_CXX_FLAGS}>
    $<$<COMPILE_LANGUAGE:CUDA>:-Xfatbin=-compress-all>)

  if (USE_PER_THREAD_DEFAULT_STREAM)
    target_compile_options(${target} PRIVATE
      $<$<COMPILE_LANGUAGE:CUDA>:--default-stream per-thread>)
  endif (USE_PER_THREAD_DEFAULT_STREAM)

  if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.18")
    set_property(TARGET ${target} PROPERTY CUDA_ARCHITECTURES ${CMAKE_CUDA_ARCHITECTURES})
  endif (CMAKE_VERSION VERSION_GREATER_EQUAL "3.18")
@@ -160,14 +162,16 @@ function(xgboost_set_cuda_flags target)
  endif (USE_DEVICE_DEBUG)

  if (USE_NVTX)
    enable_nvtx(${target})
  endif (USE_NVTX)

  target_compile_definitions(${target} PRIVATE -DXGBOOST_USE_CUDA=1)
  target_include_directories(
    ${target} PRIVATE
    ${xgboost_SOURCE_DIR}/gputreeshap
    ${CUDAToolkit_INCLUDE_DIRS})

  if (NOT BUILD_WITH_CUDA_CUB)
    target_compile_definitions(${target} PRIVATE -DXGBOOST_USE_CUDA=1 -DTHRUST_IGNORE_CUB_VERSION_CHECK=1)
    target_include_directories(${target} PRIVATE ${xgboost_SOURCE_DIR}/cub/ ${xgboost_SOURCE_DIR}/gputreeshap)
  else ()
    target_compile_definitions(${target} PRIVATE -DXGBOOST_USE_CUDA=1)
    target_include_directories(${target} PRIVATE ${xgboost_SOURCE_DIR}/gputreeshap)
  endif (NOT BUILD_WITH_CUDA_CUB)

  if (MSVC)
    target_compile_options(${target} PRIVATE
@@ -177,27 +181,9 @@ function(xgboost_set_cuda_flags target)
  set_target_properties(${target} PROPERTIES
    CUDA_STANDARD 17
    CUDA_STANDARD_REQUIRED ON
    CUDA_SEPARABLE_COMPILATION OFF
    CUDA_RUNTIME_LIBRARY Static)
endfunction(xgboost_set_cuda_flags)

# Set HIP related flags to target.
function(xgboost_set_hip_flags target)
  if (USE_DEVICE_DEBUG)
    target_compile_options(${target} PRIVATE
      $<$<AND:$<CONFIG:DEBUG>,$<COMPILE_LANGUAGE:HIP>>:-G>)
  endif (USE_DEVICE_DEBUG)

  target_compile_definitions(${target} PRIVATE -DXGBOOST_USE_HIP=1)
  target_include_directories(${target} PRIVATE ${xgboost_SOURCE_DIR}/rocgputreeshap)
  target_include_directories(${target} PRIVATE ${xgboost_SOURCE_DIR}/warp-primitives/include)

  set_target_properties(${target} PROPERTIES
    HIP_STANDARD 17
    HIP_STANDARD_REQUIRED ON
    HIP_SEPARABLE_COMPILATION OFF)
endfunction(xgboost_set_hip_flags)

macro(xgboost_link_nccl target)
  if (BUILD_STATIC_LIB)
    target_include_directories(${target} PUBLIC ${NCCL_INCLUDE_DIR})
@@ -210,20 +196,6 @@ macro(xgboost_link_nccl target)
  endif (BUILD_STATIC_LIB)
endmacro(xgboost_link_nccl)

macro(xgboost_link_rccl target)
  if(BUILD_STATIC_LIB)
    target_include_directories(${target} PUBLIC ${RCCL_INCLUDE_DIR}/rccl)
    target_compile_definitions(${target} PUBLIC -DXGBOOST_USE_RCCL=1)
    target_link_directories(${target} PUBLIC ${HIP_LIB_INSTALL_DIR})
    target_link_libraries(${target} PUBLIC ${RCCL_LIBRARY})
  else()
    target_include_directories(${target} PRIVATE ${RCCL_INCLUDE_DIR}/rccl)
    target_compile_definitions(${target} PRIVATE -DXGBOOST_USE_RCCL=1)
    target_link_directories(${target} PUBLIC ${HIP_LIB_INSTALL_DIR})
    target_link_libraries(${target} PRIVATE ${RCCL_LIBRARY})
  endif()
endmacro()

# compile options
macro(xgboost_target_properties target)
  set_target_properties(${target} PROPERTIES
@@ -246,10 +218,6 @@ macro(xgboost_target_properties target)
      -Xcompiler=-Wall -Xcompiler=-Wextra -Xcompiler=-Wno-expansion-to-defined,
      -Wall -Wextra -Wno-expansion-to-defined>
    )
    target_compile_options(${target} PUBLIC
      $<IF:$<COMPILE_LANGUAGE:HIP>,
      -Wall -Wextra >
    )
  endif(ENABLE_ALL_WARNINGS)

  target_compile_options(${target}
@@ -315,13 +283,8 @@ macro(xgboost_target_link_libraries target)
  if (USE_CUDA)
    xgboost_set_cuda_flags(${target})
    target_link_libraries(${target} PUBLIC CUDA::cudart_static)
  endif (USE_CUDA)

  if (USE_HIP)
    xgboost_set_hip_flags(${target})
  endif (USE_HIP)

  if (PLUGIN_RMM)
    target_link_libraries(${target} PRIVATE rmm::rmm)
  endif (PLUGIN_RMM)
@@ -330,12 +293,8 @@ macro(xgboost_target_link_libraries target)
    xgboost_link_nccl(${target})
  endif (USE_NCCL)

  if(USE_RCCL)
    xgboost_link_rccl(${target})
  endif()

  if (USE_NVTX)
    enable_nvtx(${target})
  endif (USE_NVTX)

  if (RABIT_BUILD_MPI)
@@ -3,4 +3,7 @@ function (write_version)
  configure_file(
    ${xgboost_SOURCE_DIR}/cmake/version_config.h.in
    ${xgboost_SOURCE_DIR}/include/xgboost/version_config.h @ONLY)
  configure_file(
    ${xgboost_SOURCE_DIR}/cmake/Python_version.in
    ${xgboost_SOURCE_DIR}/python-package/xgboost/VERSION @ONLY)
endfunction (write_version)
@@ -1,7 +1,7 @@
set(ASan_LIB_NAME ASan)

find_library(ASan_LIBRARY
  NAMES libasan.so libasan.so.6 libasan.so.5 libasan.so.4 libasan.so.3 libasan.so.2 libasan.so.1 libasan.so.0
  PATHS ${SANITIZER_PATH} /usr/lib64 /usr/lib /usr/local/lib64 /usr/local/lib ${CMAKE_PREFIX_PATH}/lib)

include(FindPackageHandleStandardArgs)
26 cmake/modules/FindNVTX.cmake Normal file
@@ -0,0 +1,26 @@
if (NVTX_LIBRARY)
  unset(NVTX_LIBRARY CACHE)
endif (NVTX_LIBRARY)

set(NVTX_LIB_NAME nvToolsExt)

find_path(NVTX_INCLUDE_DIR
  NAMES nvToolsExt.h
  PATHS ${CUDA_HOME}/include ${CUDA_INCLUDE} /usr/local/cuda/include)

find_library(NVTX_LIBRARY
  NAMES nvToolsExt
  PATHS ${CUDA_HOME}/lib64 /usr/local/cuda/lib64)

message(STATUS "Using nvtx library: ${NVTX_LIBRARY}")

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(NVTX DEFAULT_MSG
  NVTX_INCLUDE_DIR NVTX_LIBRARY)

mark_as_advanced(
  NVTX_INCLUDE_DIR
  NVTX_LIBRARY
)
@@ -52,11 +52,11 @@ endif (BUILD_WITH_SHARED_NCCL)
find_path(NCCL_INCLUDE_DIR
  NAMES nccl.h
  HINTS ${NCCL_ROOT}/include $ENV{NCCL_ROOT}/include)

find_library(NCCL_LIBRARY
  NAMES ${NCCL_LIB_NAME}
  HINTS ${NCCL_ROOT}/lib $ENV{NCCL_ROOT}/lib/)

message(STATUS "Using nccl library: ${NCCL_LIBRARY}")
@@ -1,11 +1,11 @@
/**
 * Copyright 2019-2023 by XGBoost contributors
 */
#ifndef XGBOOST_VERSION_CONFIG_H_
#define XGBOOST_VERSION_CONFIG_H_

#define XGBOOST_VER_MAJOR @xgboost_VERSION_MAJOR@ /* NOLINT */
#define XGBOOST_VER_MINOR @xgboost_VERSION_MINOR@ /* NOLINT */
#define XGBOOST_VER_PATCH @xgboost_VERSION_PATCH@ /* NOLINT */

#endif  // XGBOOST_VERSION_CONFIG_H_
@@ -3,8 +3,6 @@
set(USE_OPENMP @USE_OPENMP@)
set(USE_CUDA @USE_CUDA@)
set(USE_NCCL @USE_NCCL@)
set(USE_HIP @USE_HIP@)
set(USE_RCCL @USE_RCCL@)
set(XGBOOST_BUILD_STATIC_LIB @BUILD_STATIC_LIB@)

include(CMakeFindDependencyMacro)
@@ -17,9 +15,6 @@ if (XGBOOST_BUILD_STATIC_LIB)
  if(USE_CUDA)
    find_dependency(CUDA)
  endif()
  if(USE_HIP)
    find_dependency(HIP)
  endif()
  # nccl should be linked statically if xgboost is built as static library.
endif (XGBOOST_BUILD_STATIC_LIB)
cub Submodule added at af39ee264f
@@ -1,7 +1,7 @@
#!/usr/bin/env python3

import random
import sys

if len(sys.argv) < 2:
    print('Usage:<filename> <k> [nfold = 5]')
@@ -20,10 +20,10 @@ num_round = 2
# 0 means do not save any model except the final round model
save_period = 2
# The path of training data
data = "agaricus.txt.train?format=libsvm"
# The path of validation data, used to monitor training process, here [test] sets name of the validation set
eval[test] = "agaricus.txt.test?format=libsvm"
# evaluate on training data as well each round
eval_train = 1
# The path of test data
test:data = "agaricus.txt.test?format=libsvm"
@@ -21,8 +21,8 @@ num_round = 2
# 0 means do not save any model except the final round model
save_period = 0
# The path of training data
data = "machine.txt.train?format=libsvm"
# The path of validation data, used to monitor training process, here [test] sets name of the validation set
eval[test] = "machine.txt.test?format=libsvm"
# The path of test data
test:data = "machine.txt.test?format=libsvm"
Some files were not shown because too many files have changed in this diff.