Compare commits

...

18 Commits

Author SHA1 Message Date
Jiaming Yuan
36eb41c960 Bump version to 1.7.6 (#9305) 2023-06-16 03:33:16 +08:00
Jiaming Yuan
39ddf40a8d [backport] Optimize prediction with QuantileDMatrix. (#9096) (#9303) 2023-06-15 23:32:03 +08:00
Jiaming Yuan
573f1c7db4 [backport] Fix monotone constraints on CPU. (#9122) (#9287)
* [backport] Fix monotone constraints on CPU. (#9122)
2023-06-11 17:51:25 +08:00
Jiaming Yuan
abc80d2a6d [backport] Improve doxygen (#8959) (#9284)
* Remove Sphinx build from GH Action

* Build Doxygen as part of RTD build

* Add jQuery

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2023-06-11 13:22:23 +08:00
Jiaming Yuan
e882fb3262 [backport] [spark] Make spark model have the same UID with its estimator (#9022) (#9285)
Signed-off-by: Weichen Xu <weichen.xu@databricks.com>
Co-authored-by: WeichenXu <weichen.xu@databricks.com>
2023-06-11 13:18:23 +08:00
Jiaming Yuan
3218f6cd3c [backport] Disable dense opt for distributed training. (#9272) (#9288) 2023-06-11 11:08:45 +08:00
Jiaming Yuan
a962611de7 Disable SHAP test on 1.7 (#9290) 2023-06-11 02:13:36 +08:00
Jiaming Yuan
14476e8868 [backport] Fix tests with pandas 2.0. (#9014) (#9289)
* Fix tests with pandas 2.0.

- `is_categorical` is replaced by `is_categorical_dtype`.
- one hot encoding returns boolean type instead of integer type.
2023-06-11 00:52:44 +08:00
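For reference, a minimal Python sketch of the two pandas 2.0 behavior changes this commit accounts for (assumes pandas >= 2.0; names and data are illustrative):

import pandas as pd
from pandas.api.types import is_categorical_dtype

s = pd.Series(["a", "b", "a"], dtype="category")
# pandas.api.types.is_categorical was removed; is_categorical_dtype replaces it.
assert is_categorical_dtype(s)

# One hot encoding via get_dummies now produces boolean columns instead of integer 0/1.
dummies = pd.get_dummies(s)
assert all(dtype == bool for dtype in dummies.dtypes)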
Jiaming Yuan
03f3879b71 [backport] [doc] fix the cudf installation [skip ci] (#9106) (#9286)
Co-authored-by: Bobby Wang <wbo4958@gmail.com>
2023-06-10 04:09:27 +08:00
Jiaming Yuan
21d95f3d8f [backport] [doc][R] Update link. (#8998) (#9001) 2023-03-30 20:02:31 +08:00
Jiaming Yuan
5cd4015d70 [backport] Fill column size. (#8997) 2023-03-30 15:21:42 +08:00
Jiaming Yuan
b8c6b86792 Bump version to 1.7.5. (#8994) 2023-03-29 21:41:10 +08:00
Jiaming Yuan
1baebe231b [backport] [CI] Fix Windows wheel to be compatible with Poetry (#8991) (#8992)
* [CI] Fix Windows wheel to be compatible with Poetry

Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2023-03-29 14:26:20 +08:00
Jiaming Yuan
365da0b8f4 [backport] [doc] Add missing document for pyspark ranker. (#8692) (#8990) 2023-03-29 12:02:51 +08:00
Jiaming Yuan
f5f03dfb61 [backport] Update dmlc-core to get C++17 deprecation warning (#8855) (#8982)
Co-authored-by: Rong Ou <rong.ou@gmail.com>
2023-03-27 21:31:30 +08:00
Jiaming Yuan
a1c209182d [backport] Update c++ requirement to 17 for the R package. (#8860) (#8983) 2023-03-27 18:24:25 +08:00
Jiaming Yuan
4be75d852c [backport] Fix scope of feature set pointers (#8850) (#8972)
---------

Co-authored-by: Rory Mitchell <r.a.mitchell.nz@gmail.com>
2023-03-27 00:33:08 +08:00
Jiaming Yuan
ba50e6eb62 [backport] [CI] Require C++17 + CMake 3.18; Use CUDA 11.8 in CI (#8853) (#8971)
Co-authored-by: Philip Hyunsu Cho <chohyu01@cs.washington.edu>
2023-03-26 00:10:03 +08:00
78 changed files with 669 additions and 449 deletions

View File

@@ -148,66 +148,13 @@ jobs:
run: |
LINT_LANG=cpp make lint
doxygen:
runs-on: ubuntu-latest
name: Generate C/C++ API doc using Doxygen
steps:
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: actions/setup-python@v2
with:
python-version: "3.8"
architecture: 'x64'
- name: Install system packages
run: |
sudo apt-get install -y --no-install-recommends doxygen graphviz ninja-build
python -m pip install wheel setuptools
python -m pip install awscli
- name: Run Doxygen
run: |
mkdir build
cd build
cmake .. -DBUILD_C_DOC=ON -GNinja
ninja -v doc_doxygen
- name: Extract branch name
shell: bash
run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
id: extract_branch
if: github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')
- name: Publish
run: |
cd build/
tar cvjf ${{ steps.extract_branch.outputs.branch }}.tar.bz2 doc_doxygen/
python -m awscli s3 cp ./${{ steps.extract_branch.outputs.branch }}.tar.bz2 s3://xgboost-docs/doxygen/ --acl public-read
if: github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID_IAM_S3_UPLOADER }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY_IAM_S3_UPLOADER }}
sphinx:
runs-on: ubuntu-latest
name: Build docs using Sphinx
steps:
- uses: actions/checkout@v2
with:
submodules: 'true'
- uses: actions/setup-python@v2
with:
python-version: "3.8"
architecture: 'x64'
- name: Install system packages
run: |
sudo apt-get install -y --no-install-recommends graphviz
python -m pip install wheel setuptools
python -m pip install -r doc/requirements.txt
- name: Extract branch name
shell: bash
run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
id: extract_branch
if: github.ref == 'refs/heads/master' || contains(github.ref, 'refs/heads/release_')
- name: Run Sphinx
run: |
make -C doc html
env:
SPHINX_GIT_BRANCH: ${{ steps.extract_branch.outputs.branch }}
python3 dmlc-core/scripts/lint.py --exclude_path \
python-package/xgboost/dmlc-core \
python-package/xgboost/include \
python-package/xgboost/lib \
python-package/xgboost/rabit \
python-package/xgboost/src \
--pylint-rc python-package/.pylintrc \
xgboost \
cpp \
include src python-package

View File

@@ -1,5 +1,5 @@
cmake_minimum_required(VERSION 3.14 FATAL_ERROR)
project(xgboost LANGUAGES CXX C VERSION 1.7.4)
cmake_minimum_required(VERSION 3.18 FATAL_ERROR)
project(xgboost LANGUAGES CXX C VERSION 1.7.6)
include(cmake/Utils.cmake)
list(APPEND CMAKE_MODULE_PATH "${xgboost_SOURCE_DIR}/cmake/modules")
cmake_policy(SET CMP0022 NEW)
@@ -168,9 +168,6 @@ find_package(Threads REQUIRED)
if (USE_OPENMP)
if (APPLE)
# Require CMake 3.16+ on Mac OSX, as previous versions of CMake had trouble locating
# OpenMP on Mac. See https://github.com/dmlc/xgboost/pull/5146#issuecomment-568312706
cmake_minimum_required(VERSION 3.16)
find_package(OpenMP)
if (NOT OpenMP_FOUND)
# Try again with extra path info; required for libomp 15+ from Homebrew

View File

@@ -31,7 +31,7 @@ if (USE_OPENMP)
endif (USE_OPENMP)
set_target_properties(
xgboost-r PROPERTIES
CXX_STANDARD 14
CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON)

View File

@@ -1,8 +1,8 @@
Package: xgboost
Type: Package
Title: Extreme Gradient Boosting
Version: 1.7.4.1
Date: 2023-02-15
Version: 1.7.6.1
Date: 2023-06-16
Authors@R: c(
person("Tianqi", "Chen", role = c("aut"),
email = "tianqi.tchen@gmail.com"),
@@ -66,5 +66,6 @@ Imports:
methods,
data.table (>= 1.9.6),
jsonlite (>= 1.0),
RoxygenNote: 7.2.2
SystemRequirements: GNU make, C++14
RoxygenNote: 7.2.3
Encoding: UTF-8
SystemRequirements: GNU make, C++17

View File

@@ -1,9 +1,9 @@
Copyright (c) 2014 by Tianqi Chen and Contributors
Copyright (c) 2014-2023, Tianqi Chen and XGBoost Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software

View File

@@ -34,7 +34,7 @@
#' The branches that also used for missing values are marked as bold
#' (as in "carrying extra capacity").
#'
#' This function uses \href{http://www.graphviz.org/}{GraphViz} as a backend of DiagrammeR.
#' This function uses \href{https://www.graphviz.org/}{GraphViz} as a backend of DiagrammeR.
#'
#' @return
#'

R-package/configure (vendored)
View File

@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.71 for xgboost 1.7.4.
# Generated by GNU Autoconf 2.71 for xgboost 1.7.6.
#
#
# Copyright (C) 1992-1996, 1998-2017, 2020-2021 Free Software Foundation,
@@ -607,8 +607,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='xgboost'
PACKAGE_TARNAME='xgboost'
PACKAGE_VERSION='1.7.4'
PACKAGE_STRING='xgboost 1.7.4'
PACKAGE_VERSION='1.7.6'
PACKAGE_STRING='xgboost 1.7.6'
PACKAGE_BUGREPORT=''
PACKAGE_URL=''
@@ -1225,7 +1225,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
\`configure' configures xgboost 1.7.4 to adapt to many kinds of systems.
\`configure' configures xgboost 1.7.6 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1287,7 +1287,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
short | recursive ) echo "Configuration of xgboost 1.7.4:";;
short | recursive ) echo "Configuration of xgboost 1.7.6:";;
esac
cat <<\_ACEOF
@@ -1367,7 +1367,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
xgboost configure 1.7.4
xgboost configure 1.7.6
generated by GNU Autoconf 2.71
Copyright (C) 2021 Free Software Foundation, Inc.
@@ -1533,7 +1533,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
It was created by xgboost $as_me 1.7.4, which was
It was created by xgboost $as_me 1.7.6, which was
generated by GNU Autoconf 2.71. Invocation command line was
$ $0$ac_configure_args_raw
@@ -2096,9 +2096,9 @@ if test -z "${R_HOME}"; then
exit 1
fi
CXX14=`"${R_HOME}/bin/R" CMD config CXX14`
CXX14STD=`"${R_HOME}/bin/R" CMD config CXX14STD`
CXX="${CXX14} ${CXX14STD}"
CXX17=`"${R_HOME}/bin/R" CMD config CXX17`
CXX17STD=`"${R_HOME}/bin/R" CMD config CXX17STD`
CXX="${CXX17} ${CXX17STD}"
CXXFLAGS=`"${R_HOME}/bin/R" CMD config CXXFLAGS`
CC=`"${R_HOME}/bin/R" CMD config CC`
@@ -3412,7 +3412,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by xgboost $as_me 1.7.4, which was
This file was extended by xgboost $as_me 1.7.6, which was
generated by GNU Autoconf 2.71. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -3467,7 +3467,7 @@ ac_cs_config_escaped=`printf "%s\n" "$ac_cs_config" | sed "s/^ //; s/'/'\\\\\\\\
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config='$ac_cs_config_escaped'
ac_cs_version="\\
xgboost config.status 1.7.4
xgboost config.status 1.7.6
configured by $0, generated by GNU Autoconf 2.71,
with options \\"\$ac_cs_config\\"

View File

@@ -2,7 +2,7 @@
AC_PREREQ(2.69)
AC_INIT([xgboost],[1.7.4],[],[xgboost],[])
AC_INIT([xgboost],[1.7.6],[],[xgboost],[])
: ${R_HOME=`R RHOME`}
if test -z "${R_HOME}"; then
@@ -10,9 +10,9 @@ if test -z "${R_HOME}"; then
exit 1
fi
CXX14=`"${R_HOME}/bin/R" CMD config CXX14`
CXX14STD=`"${R_HOME}/bin/R" CMD config CXX14STD`
CXX="${CXX14} ${CXX14STD}"
CXX17=`"${R_HOME}/bin/R" CMD config CXX17`
CXX17STD=`"${R_HOME}/bin/R" CMD config CXX17STD`
CXX="${CXX17} ${CXX17STD}"
CXXFLAGS=`"${R_HOME}/bin/R" CMD config CXXFLAGS`
CC=`"${R_HOME}/bin/R" CMD config CC`

View File

@@ -67,7 +67,7 @@ The "Yes" branches are marked by the "< split_value" label.
The branches that also used for missing values are marked as bold
(as in "carrying extra capacity").
This function uses \href{http://www.graphviz.org/}{GraphViz} as a backend of DiagrammeR.
This function uses \href{https://www.graphviz.org/}{GraphViz} as a backend of DiagrammeR.
}
\examples{
data(agaricus.train, package='xgboost')

View File

@@ -3,7 +3,7 @@ PKGROOT=../../
ENABLE_STD_THREAD=1
# _*_ mode: Makefile; _*_
CXX_STD = CXX14
CXX_STD = CXX17
XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\
-DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) -DDMLC_DISABLE_STDIN=1\

View File

@@ -3,7 +3,7 @@ PKGROOT=../../
ENABLE_STD_THREAD=0
# _*_ mode: Makefile; _*_
CXX_STD = CXX14
CXX_STD = CXX17
XGB_RFLAGS = -DXGBOOST_STRICT_R_MODE=1 -DDMLC_LOG_BEFORE_THROW=0\
-DDMLC_ENABLE_STD_THREAD=$(ENABLE_STD_THREAD) -DDMLC_DISABLE_STDIN=1\

View File

@@ -178,17 +178,10 @@ function(xgboost_set_cuda_flags target)
$<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler=/utf-8>)
endif (MSVC)
if (PLUGIN_RMM)
set_target_properties(${target} PROPERTIES
CUDA_STANDARD 17
CUDA_STANDARD_REQUIRED ON
CUDA_SEPARABLE_COMPILATION OFF)
else ()
set_target_properties(${target} PROPERTIES
CUDA_STANDARD 14
CUDA_STANDARD_REQUIRED ON
CUDA_SEPARABLE_COMPILATION OFF)
endif (PLUGIN_RMM)
set_target_properties(${target} PROPERTIES
CUDA_STANDARD 17
CUDA_STANDARD_REQUIRED ON
CUDA_SEPARABLE_COMPILATION OFF)
endfunction(xgboost_set_cuda_flags)
macro(xgboost_link_nccl target)
@@ -205,17 +198,10 @@ endmacro(xgboost_link_nccl)
# compile options
macro(xgboost_target_properties target)
if (PLUGIN_RMM)
set_target_properties(${target} PROPERTIES
CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON)
else ()
set_target_properties(${target} PROPERTIES
CXX_STANDARD 14
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON)
endif (PLUGIN_RMM)
set_target_properties(${target} PROPERTIES
CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON)
if (HIDE_CXX_SYMBOLS)
#-- Hide all C++ symbols

View File

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.13)
cmake_minimum_required(VERSION 3.18)
project(xgboost-c-examples)
add_subdirectory(basic)

View File

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.13)
cmake_minimum_required(VERSION 3.18)
project(external-memory-demo LANGUAGES C VERSION 0.0.1)
find_package(xgboost REQUIRED)

View File

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.13)
cmake_minimum_required(VERSION 3.18)
project(inference-demo LANGUAGES C VERSION 0.0.1)
find_package(xgboost REQUIRED)

View File

@@ -8,5 +8,5 @@ As a result it's changing quite often and we don't maintain its stability. Alon
plugin system (see ``plugin/example`` in XGBoost's source tree), users can utilize some
existing c++ headers for gaining more access to the internal of XGBoost.
* `C++ interface documentation (latest master branch) <https://xgboost.readthedocs.io/en/latest/dev/files.html>`_
* `C++ interface documentation (latest master branch) <./dev/files.html>`_
* `C++ interface documentation (last stable release) <https://xgboost.readthedocs.io/en/stable/dev/files.html>`_

View File

@@ -10,7 +10,7 @@ simply look at function comments in ``include/xgboost/c_api.h``. The reference i
to sphinx with the help of breathe, which doesn't contain links to examples but might be
easier to read. For the original doxygen pages please visit:
* `C API documentation (latest master branch) <https://xgboost.readthedocs.io/en/latest/dev/c__api_8h.html>`_
* `C API documentation (latest master branch) <./dev/c__api_8h.html>`_
* `C API documentation (last stable release) <https://xgboost.readthedocs.io/en/stable/dev/c__api_8h.html>`_
***************

View File

@@ -11,54 +11,107 @@
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from subprocess import call
from sh.contrib import git
import urllib.request
from urllib.error import HTTPError
import sys
import re
import os
import re
import shutil
import subprocess
import sys
import tarfile
import urllib.request
import warnings
from urllib.error import HTTPError
git_branch = os.getenv('SPHINX_GIT_BRANCH', default=None)
from sh.contrib import git
CURR_PATH = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
PROJECT_ROOT = os.path.normpath(os.path.join(CURR_PATH, os.path.pardir))
TMP_DIR = os.path.join(CURR_PATH, "tmp")
DOX_DIR = "doxygen"
def run_doxygen():
"""Run the doxygen make command in the designated folder."""
curdir = os.path.normpath(os.path.abspath(os.path.curdir))
if os.path.exists(TMP_DIR):
print(f"Delete directory {TMP_DIR}")
shutil.rmtree(TMP_DIR)
else:
print(f"Create directory {TMP_DIR}")
os.mkdir(TMP_DIR)
try:
os.chdir(PROJECT_ROOT)
if not os.path.exists(DOX_DIR):
os.mkdir(DOX_DIR)
os.chdir(os.path.join(PROJECT_ROOT, DOX_DIR))
print(
"Build doxygen at {}".format(
os.path.join(PROJECT_ROOT, DOX_DIR, "doc_doxygen")
)
)
subprocess.check_call(["cmake", "..", "-DBUILD_C_DOC=ON", "-GNinja"])
subprocess.check_call(["ninja", "doc_doxygen"])
src = os.path.join(PROJECT_ROOT, DOX_DIR, "doc_doxygen", "html")
dest = os.path.join(TMP_DIR, "dev")
print(f"Copy directory {src} -> {dest}")
shutil.copytree(src, dest)
except OSError as e:
sys.stderr.write("doxygen execution failed: %s" % e)
finally:
os.chdir(curdir)
def is_readthedocs_build():
if os.environ.get("READTHEDOCS", None) == "True":
return True
warnings.warn(
"Skipping Doxygen build... You won't have documentation for C/C++ functions. "
"Set environment variable READTHEDOCS=True if you want to build Doxygen. "
"(If you do opt in, make sure to install Doxygen, Graphviz, CMake, and C++ compiler "
"on your system.)"
)
return False
if is_readthedocs_build():
run_doxygen()
git_branch = os.getenv("SPHINX_GIT_BRANCH", default=None)
if not git_branch:
# If SPHINX_GIT_BRANCH environment variable is not given, run git
# to determine branch name
git_branch = [
re.sub(r'origin/', '', x.lstrip(' ')) for x in str(
git.branch('-r', '--contains', 'HEAD')).rstrip('\n').split('\n')
re.sub(r"origin/", "", x.lstrip(" "))
for x in str(git.branch("-r", "--contains", "HEAD")).rstrip("\n").split("\n")
]
git_branch = [x for x in git_branch if 'HEAD' not in x]
git_branch = [x for x in git_branch if "HEAD" not in x]
else:
git_branch = [git_branch]
print('git_branch = {}'.format(git_branch[0]))
print("git_branch = {}".format(git_branch[0]))
try:
filename, _ = urllib.request.urlretrieve(
'https://s3-us-west-2.amazonaws.com/xgboost-docs/{}.tar.bz2'.format(
git_branch[0]))
call(
'if [ -d tmp ]; then rm -rf tmp; fi; mkdir -p tmp/jvm; cd tmp/jvm; tar xvf {}'
.format(filename),
shell=True)
f"https://s3-us-west-2.amazonaws.com/xgboost-docs/{git_branch[0]}.tar.bz2"
)
if not os.path.exists(TMP_DIR):
print(f"Create directory {TMP_DIR}")
os.mkdir(TMP_DIR)
jvm_doc_dir = os.path.join(TMP_DIR, "jvm")
if os.path.exists(jvm_doc_dir):
print(f"Delete directory {jvm_doc_dir}")
shutil.rmtree(jvm_doc_dir)
print(f"Create directory {jvm_doc_dir}")
os.mkdir(jvm_doc_dir)
with tarfile.open(filename, "r:bz2") as t:
t.extractall(jvm_doc_dir)
except HTTPError:
print('JVM doc not found. Skipping...')
try:
filename, _ = urllib.request.urlretrieve(
'https://s3-us-west-2.amazonaws.com/xgboost-docs/doxygen/{}.tar.bz2'.
format(git_branch[0]))
call(
'mkdir -p tmp/dev; cd tmp/dev; tar xvf {}; mv doc_doxygen/html/* .; rm -rf doc_doxygen'
.format(filename),
shell=True)
except HTTPError:
print('C API doc not found. Skipping...')
print("JVM doc not found. Skipping...")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
CURR_PATH = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
PROJECT_ROOT = os.path.normpath(os.path.join(CURR_PATH, os.path.pardir))
libpath = os.path.join(PROJECT_ROOT, "python-package/")
sys.path.insert(0, libpath)
sys.path.insert(0, CURR_PATH)
@@ -81,50 +134,56 @@ release = xgboost.__version__
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
'matplotlib.sphinxext.plot_directive',
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
"matplotlib.sphinxext.plot_directive",
"sphinxcontrib.jquery",
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.mathjax",
"sphinx.ext.intersphinx",
"sphinx_gallery.gen_gallery",
'breathe',
'recommonmark'
"breathe",
"recommonmark",
]
sphinx_gallery_conf = {
# path to your example scripts
"examples_dirs": ["../demo/guide-python", "../demo/dask", "../demo/aft_survival"],
# path to where to save gallery generated output
"gallery_dirs": ["python/examples", "python/dask-examples", "python/survival-examples"],
"gallery_dirs": [
"python/examples",
"python/dask-examples",
"python/survival-examples",
],
"matplotlib_animations": True,
}
autodoc_typehints = "description"
graphviz_output_format = 'png'
plot_formats = [('svg', 300), ('png', 100), ('hires.png', 300)]
graphviz_output_format = "png"
plot_formats = [("svg", 300), ("png", 100), ("hires.png", 300)]
plot_html_show_source_link = False
plot_html_show_formats = False
# Breathe extension variables
DOX_DIR = "doxygen"
breathe_projects = {
"xgboost": os.path.join(PROJECT_ROOT, DOX_DIR, "doc_doxygen/xml")
}
breathe_projects = {}
if is_readthedocs_build():
breathe_projects = {
"xgboost": os.path.join(PROJECT_ROOT, DOX_DIR, "doc_doxygen/xml")
}
breathe_default_project = "xgboost"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -133,7 +192,7 @@ master_doc = 'index'
# Usually you set "language" from the command line for these cases.
language = "en"
autoclass_content = 'both'
autoclass_content = "both"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
@@ -143,8 +202,10 @@ autoclass_content = 'both'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
html_extra_path = ['./tmp']
exclude_patterns = ["_build"]
html_extra_path = []
if is_readthedocs_build():
html_extra_path = [TMP_DIR]
# The reST default role (used for this markup: `text`) to use for all
# documents.
@@ -162,7 +223,7 @@ html_extra_path = ['./tmp']
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
@@ -185,27 +246,24 @@ html_logo = "https://raw.githubusercontent.com/dmlc/dmlc.github.io/master/img/lo
html_css_files = ["css/custom.css"]
html_sidebars = {
'**': ['logo-text.html', 'globaltoc.html', 'searchbox.html']
}
html_sidebars = {"**": ["logo-text.html", "globaltoc.html", "searchbox.html"]}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_static_path = ["_static"]
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
htmlhelp_basename = project + "doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
}
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '%s.tex' % project, project, author, 'manual'),
(master_doc, "%s.tex" % project, project, author, "manual"),
]
intersphinx_mapping = {
@@ -220,30 +278,5 @@ intersphinx_mapping = {
}
# hook for doxygen
def run_doxygen():
"""Run the doxygen make command in the designated folder."""
curdir = os.path.normpath(os.path.abspath(os.path.curdir))
try:
os.chdir(PROJECT_ROOT)
if not os.path.exists(DOX_DIR):
os.mkdir(DOX_DIR)
os.chdir(os.path.join(PROJECT_ROOT, DOX_DIR))
subprocess.check_call(["cmake", "..", "-DBUILD_C_DOC=ON", "-GNinja"])
subprocess.check_call(["ninja", "doc_doxygen"])
except OSError as e:
sys.stderr.write("doxygen execution failed: %s" % e)
finally:
os.chdir(curdir)
def generate_doxygen_xml(app):
"""Run the doxygen make commands if we're on the ReadTheDocs server"""
read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True'
if read_the_docs_build:
run_doxygen()
def setup(app):
app.add_css_file('custom.css')
app.connect("builder-inited", generate_doxygen_xml)
app.add_css_file("custom.css")
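The warning above documents the opt-in: the local Doxygen build only runs when the READTHEDOCS environment variable is set. A minimal sketch of opting in, assuming Doxygen, Graphviz, CMake, Ninja, and a C++ compiler are installed:

import os
import subprocess

# conf.py's is_readthedocs_build() checks this flag; the child process inherits it.
os.environ["READTHEDOCS"] = "True"
subprocess.check_call(["make", "-C", "doc", "html"])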

View File

@@ -173,3 +173,13 @@ PySpark API
:members:
:inherited-members:
:show-inheritance:
.. autoclass:: xgboost.spark.SparkXGBRanker
:members:
:inherited-members:
:show-inheritance:
.. autoclass:: xgboost.spark.SparkXGBRankerModel
:members:
:inherited-members:
:show-inheritance:
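A hedged sketch of the ranker API documented above; the column names and the train_df/test_df DataFrames are illustrative assumptions:

from xgboost.spark import SparkXGBRanker

# qid_col groups rows into queries for learning-to-rank.
ranker = SparkXGBRanker(qid_col="qid", label_col="label", features_col="features")
ranker_model = ranker.fit(train_df)
scores_df = ranker_model.transform(test_df)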

View File

@@ -45,7 +45,7 @@ Use ``find_package()`` and ``target_link_libraries()`` in your application's CMa
.. code-block:: cmake
cmake_minimum_required(VERSION 3.13)
cmake_minimum_required(VERSION 3.18)
project(your_project_name LANGUAGES C CXX VERSION your_project_version)
find_package(xgboost REQUIRED)
add_executable(your_project_name /path/to/project_file.c)

View File

@@ -43,10 +43,10 @@ in spark estimator, and some parameters are replaced with pyspark specific param
such as `weight_col`, `validation_indicator_col`, `use_gpu`, for details please see
`SparkXGBRegressor` doc.
The following code snippet shows how to train a spark xgboost regressor model,
first we need to prepare a training dataset as a spark dataframe contains
"label" column and "features" column(s), the "features" column(s) must be `pyspark.ml.linalg.Vector`
type or spark array type or a list of feature column names.
The following code snippet shows how to train a spark xgboost regressor model, first we
need to prepare a training dataset as a spark dataframe contains "label" column and
"features" column(s), the "features" column(s) must be ``pyspark.ml.linalg.Vector`` type
or spark array type or a list of feature column names.
.. code-block:: python
@@ -54,10 +54,10 @@ type or spark array type or a list of feature column names.
xgb_regressor_model = xgb_regressor.fit(train_spark_dataframe)
The following code snippet shows how to predict test data using a spark xgboost regressor model,
first we need to prepare a test dataset as a spark dataframe contains
"features" and "label" column, the "features" column must be `pyspark.ml.linalg.Vector`
type or spark array type.
The following code snippet shows how to predict test data using a spark xgboost regressor
model, first we need to prepare a test dataset as a spark dataframe contains "features"
and "label" column, the "features" column must be ``pyspark.ml.linalg.Vector`` type or
spark array type.
.. code-block:: python
@@ -107,8 +107,8 @@ virtualenv and pip:
python -m venv xgboost_env
source xgboost_env/bin/activate
pip install pyarrow pandas venv-pack xgboost
# https://rapids.ai/pip.html#install
pip install cudf-cu11 --extra-index-url=https://pypi.ngc.nvidia.com
# https://docs.rapids.ai/install#pip-install
pip install cudf-cu11 --extra-index-url=https://pypi.nvidia.com
venv-pack -o xgboost_env.tar.gz
With Conda:
@@ -240,7 +240,7 @@ additional spark configurations and dependencies:
--master spark://<master-ip>:7077 \
--conf spark.executor.resource.gpu.amount=1 \
--conf spark.task.resource.gpu.amount=1 \
--packages com.nvidia:rapids-4-spark_2.12:22.08.0 \
--packages com.nvidia:rapids-4-spark_2.12:23.04.0 \
--conf spark.plugins=com.nvidia.spark.SQLPlugin \
--conf spark.sql.execution.arrow.maxRecordsPerBatch=1000000 \
--archives xgboost_env.tar.gz#environment \
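Putting the tutorial text above together, a hedged end-to-end sketch (assumes an active SparkSession named spark; the data is illustrative):

from pyspark.ml.linalg import Vectors
from xgboost.spark import SparkXGBRegressor

# A "label" column plus a "features" vector column, as the tutorial requires.
train_df = spark.createDataFrame(
    [(1.0, Vectors.dense(1.0, 2.0)), (0.0, Vectors.dense(3.0, 4.0))],
    ["label", "features"],
)
xgb_regressor = SparkXGBRegressor(label_col="label", features_col="features")
xgb_regressor_model = xgb_regressor.fit(train_df)
pred_df = xgb_regressor_model.transform(train_df)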

View File

@@ -508,7 +508,7 @@ class RegTree : public Model {
* \brief drop the trace after fill, must be called after fill.
* \param inst The sparse instance to drop.
*/
void Drop(const SparsePage::Inst& inst);
void Drop();
/*!
* \brief returns the size of the feature vector
* \return the size of the feature vector
@@ -709,13 +709,10 @@ inline void RegTree::FVec::Fill(const SparsePage::Inst& inst) {
has_missing_ = data_.size() != feature_count;
}
inline void RegTree::FVec::Drop(const SparsePage::Inst& inst) {
for (auto const& entry : inst) {
if (entry.index >= data_.size()) {
continue;
}
data_[entry.index].flag = -1;
}
inline void RegTree::FVec::Drop() {
Entry e{};
e.flag = -1;
std::fill_n(data_.data(), data_.size(), e);
has_missing_ = true;
}

View File

@@ -6,6 +6,6 @@
#define XGBOOST_VER_MAJOR 1
#define XGBOOST_VER_MINOR 7
#define XGBOOST_VER_PATCH 4
#define XGBOOST_VER_PATCH 6
#endif // XGBOOST_VERSION_CONFIG_H_

View File

@@ -6,7 +6,7 @@
<groupId>ml.dmlc</groupId>
<artifactId>xgboost-jvm_2.12</artifactId>
<version>1.7.4</version>
<version>1.7.6</version>
<packaging>pom</packaging>
<name>XGBoost JVM Package</name>
<description>JVM Package for XGBoost</description>

View File

@@ -6,10 +6,10 @@
<parent>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost-jvm_2.12</artifactId>
<version>1.7.4</version>
<version>1.7.6</version>
</parent>
<artifactId>xgboost4j-example_2.12</artifactId>
<version>1.7.4</version>
<version>1.7.6</version>
<packaging>jar</packaging>
<build>
<plugins>
@@ -26,7 +26,7 @@
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j-spark_${scala.binary.version}</artifactId>
<version>1.7.4</version>
<version>1.7.6</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
@@ -37,7 +37,7 @@
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j-flink_${scala.binary.version}</artifactId>
<version>1.7.4</version>
<version>1.7.6</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>

View File

@@ -6,10 +6,10 @@
<parent>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost-jvm_2.12</artifactId>
<version>1.7.4</version>
<version>1.7.6</version>
</parent>
<artifactId>xgboost4j-flink_2.12</artifactId>
<version>1.7.4</version>
<version>1.7.6</version>
<build>
<plugins>
<plugin>
@@ -26,7 +26,7 @@
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j_${scala.binary.version}</artifactId>
<version>1.7.4</version>
<version>1.7.6</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>

View File

@@ -6,10 +6,10 @@
<parent>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost-jvm_2.12</artifactId>
<version>1.7.4</version>
<version>1.7.6</version>
</parent>
<artifactId>xgboost4j-gpu_2.12</artifactId>
<version>1.7.4</version>
<version>1.7.6</version>
<packaging>jar</packaging>
<dependencies>

View File

@@ -6,7 +6,7 @@
<parent>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost-jvm_2.12</artifactId>
<version>1.7.4</version>
<version>1.7.6</version>
</parent>
<artifactId>xgboost4j-spark-gpu_2.12</artifactId>
<build>
@@ -24,7 +24,7 @@
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j-gpu_${scala.binary.version}</artifactId>
<version>1.7.4</version>
<version>1.7.6</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>

View File

@@ -6,7 +6,7 @@
<parent>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost-jvm_2.12</artifactId>
<version>1.7.4</version>
<version>1.7.6</version>
</parent>
<artifactId>xgboost4j-spark_2.12</artifactId>
<build>
@@ -24,7 +24,7 @@
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j_${scala.binary.version}</artifactId>
<version>1.7.4</version>
<version>1.7.6</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>

View File

@@ -6,10 +6,10 @@
<parent>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost-jvm_2.12</artifactId>
<version>1.7.4</version>
<version>1.7.6</version>
</parent>
<artifactId>xgboost4j_2.12</artifactId>
<version>1.7.4</version>
<version>1.7.6</version>
<packaging>jar</packaging>
<dependencies>

View File

@@ -15,7 +15,7 @@ if (PLUGIN_UPDATER_ONEAPI)
target_link_libraries(oneapi_plugin PUBLIC -fsycl)
set_target_properties(oneapi_plugin PROPERTIES
COMPILE_FLAGS -fsycl
CXX_STANDARD 14
CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
POSITION_INDEPENDENT_CODE ON)
if (USE_OPENMP)

View File

@@ -1 +1 @@
1.7.4
1.7.6

View File

@@ -1,6 +1,5 @@
# type: ignore
"""PySpark XGBoost integration interface
"""
"""PySpark XGBoost integration interface"""
try:
import pyspark
@@ -11,6 +10,7 @@ from .estimator import (
SparkXGBClassifier,
SparkXGBClassifierModel,
SparkXGBRanker,
SparkXGBRankerModel,
SparkXGBRegressor,
SparkXGBRegressorModel,
)
@@ -21,4 +21,5 @@ __all__ = [
"SparkXGBRegressor",
"SparkXGBRegressorModel",
"SparkXGBRanker",
"SparkXGBRankerModel",
]

View File

@@ -866,7 +866,11 @@ class _SparkXGBEstimator(Estimator, _SparkXGBParams, MLReadable, MLWritable):
result_xgb_model = self._convert_to_sklearn_model(
bytearray(booster, "utf-8"), config
)
return self._copyValues(self._create_pyspark_model(result_xgb_model))
spark_model = self._create_pyspark_model(result_xgb_model)
# According to pyspark ML convention, the model uid should be the same
# with estimator uid.
spark_model._resetUid(self.uid)
return self._copyValues(spark_model)
def write(self):
"""

View File

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.3)
cmake_minimum_required(VERSION 3.18)
find_package(Threads REQUIRED)

View File

@@ -149,10 +149,28 @@ common::ColumnMatrix const &GHistIndexMatrix::Transpose() const {
return *columns_;
}
bst_bin_t GHistIndexMatrix::GetGindex(size_t ridx, size_t fidx) const {
auto begin = RowIdx(ridx);
if (IsDense()) {
return static_cast<bst_bin_t>(index[begin + fidx]);
}
auto end = RowIdx(ridx + 1);
auto const& cut_ptrs = cut.Ptrs();
auto f_begin = cut_ptrs[fidx];
auto f_end = cut_ptrs[fidx + 1];
return BinarySearchBin(begin, end, index, f_begin, f_end);
}
float GHistIndexMatrix::GetFvalue(size_t ridx, size_t fidx, bool is_cat) const {
auto const &values = cut.Values();
auto const &mins = cut.MinValues();
auto const &ptrs = cut.Ptrs();
return this->GetFvalue(ptrs, values, mins, ridx, fidx, is_cat);
}
float GHistIndexMatrix::GetFvalue(std::vector<std::uint32_t> const &ptrs,
std::vector<float> const &values, std::vector<float> const &mins,
bst_row_t ridx, bst_feature_t fidx, bool is_cat) const {
if (is_cat) {
auto f_begin = ptrs[fidx];
auto f_end = ptrs[fidx + 1];
@@ -172,24 +190,27 @@ float GHistIndexMatrix::GetFvalue(size_t ridx, size_t fidx, bool is_cat) const {
}
return common::HistogramCuts::NumericBinValue(ptrs, values, mins, fidx, bin_idx);
};
if (columns_->GetColumnType(fidx) == common::kDenseColumn) {
if (columns_->AnyMissing()) {
switch (columns_->GetColumnType(fidx)) {
case common::kDenseColumn: {
if (columns_->AnyMissing()) {
return common::DispatchBinType(columns_->GetTypeSize(), [&](auto dtype) {
auto column = columns_->DenseColumn<decltype(dtype), true>(fidx);
return get_bin_val(column);
});
} else {
return common::DispatchBinType(columns_->GetTypeSize(), [&](auto dtype) {
auto column = columns_->DenseColumn<decltype(dtype), false>(fidx);
auto bin_idx = column[ridx];
return common::HistogramCuts::NumericBinValue(ptrs, values, mins, fidx, bin_idx);
});
}
}
case common::kSparseColumn: {
return common::DispatchBinType(columns_->GetTypeSize(), [&](auto dtype) {
auto column = columns_->DenseColumn<decltype(dtype), true>(fidx);
return get_bin_val(column);
});
} else {
return common::DispatchBinType(columns_->GetTypeSize(), [&](auto dtype) {
auto column = columns_->DenseColumn<decltype(dtype), false>(fidx);
auto column = columns_->SparseColumn<decltype(dtype)>(fidx, 0);
return get_bin_val(column);
});
}
} else {
return common::DispatchBinType(columns_->GetTypeSize(), [&](auto dtype) {
auto column = columns_->SparseColumn<decltype(dtype)>(fidx, 0);
return get_bin_val(column);
});
}
SPAN_CHECK(false);

View File

@@ -227,7 +227,12 @@ class GHistIndexMatrix {
common::ColumnMatrix const& Transpose() const;
bst_bin_t GetGindex(size_t ridx, size_t fidx) const;
float GetFvalue(size_t ridx, size_t fidx, bool is_cat) const;
float GetFvalue(std::vector<std::uint32_t> const& ptrs, std::vector<float> const& values,
std::vector<float> const& mins, bst_row_t ridx, bst_feature_t fidx,
bool is_cat) const;
private:
std::unique_ptr<common::ColumnMatrix> columns_;

View File

@@ -110,6 +110,7 @@ void IterativeDMatrix::InitFromCPU(DataIterHandle iter_handle, float missing,
size_t n_threads = ctx_.Threads();
size_t n_features = column_sizes.size();
linalg::Tensor<size_t, 2> column_sizes_tloc({n_threads, n_features}, Context::kCpuId);
column_sizes_tloc.Data()->Fill(0);
auto view = column_sizes_tloc.HostView();
common::ParallelFor(value.Size(), n_threads, common::Sched::Static(256), [&](auto i) {
auto const& line = value.GetLine(i);

View File

@@ -63,7 +63,7 @@ bst_float PredValue(const SparsePage::Inst &inst,
psum += (*trees[i])[nidx].LeafValue();
}
}
p_feats->Drop(inst);
p_feats->Drop();
return psum;
}
@@ -116,13 +116,11 @@ void FVecFill(const size_t block_size, const size_t batch_offset, const int num_
}
}
template <typename DataView>
void FVecDrop(const size_t block_size, const size_t batch_offset, DataView* batch,
const size_t fvec_offset, std::vector<RegTree::FVec>* p_feats) {
void FVecDrop(std::size_t const block_size, std::size_t const fvec_offset,
std::vector<RegTree::FVec> *p_feats) {
for (size_t i = 0; i < block_size; ++i) {
RegTree::FVec &feats = (*p_feats)[fvec_offset + i];
const SparsePage::Inst inst = (*batch)[batch_offset + i];
feats.Drop(inst);
feats.Drop();
}
}
@@ -142,11 +140,15 @@ struct SparsePageView {
struct GHistIndexMatrixView {
private:
GHistIndexMatrix const &page_;
uint64_t n_features_;
std::uint64_t const n_features_;
common::Span<FeatureType const> ft_;
common::Span<Entry> workspace_;
std::vector<size_t> current_unroll_;
std::vector<std::uint32_t> const& ptrs_;
std::vector<float> const& mins_;
std::vector<float> const& values_;
public:
size_t base_rowid;
@@ -159,6 +161,9 @@ struct GHistIndexMatrixView {
ft_{ft},
workspace_{workplace},
current_unroll_(n_threads > 0 ? n_threads : 1, 0),
ptrs_{_page.cut.Ptrs()},
mins_{_page.cut.MinValues()},
values_{_page.cut.Values()},
base_rowid{_page.base_rowid} {}
SparsePage::Inst operator[](size_t r) {
@@ -167,7 +172,7 @@ struct GHistIndexMatrixView {
size_t non_missing{beg};
for (bst_feature_t c = 0; c < n_features_; ++c) {
float f = page_.GetFvalue(r, c, common::IsCat(ft_, c));
float f = page_.GetFvalue(ptrs_, values_, mins_, r, c, common::IsCat(ft_, c));
if (!common::CheckNAN(f)) {
workspace_[non_missing] = Entry{c, f};
++non_missing;
@@ -250,10 +255,9 @@ void PredictBatchByBlockOfRowsKernel(
FVecFill(block_size, batch_offset, num_feature, &batch, fvec_offset,
p_thread_temp);
// process block of rows through all trees to keep cache locality
PredictByAllTrees(model, tree_begin, tree_end, out_preds,
batch_offset + batch.base_rowid, num_group, thread_temp,
fvec_offset, block_size);
FVecDrop(block_size, batch_offset, &batch, fvec_offset, p_thread_temp);
PredictByAllTrees(model, tree_begin, tree_end, out_preds, batch_offset + batch.base_rowid,
num_group, thread_temp, fvec_offset, block_size);
FVecDrop(block_size, fvec_offset, p_thread_temp);
});
}
@@ -470,7 +474,7 @@ class CPUPredictor : public Predictor {
bst_node_t tid = GetLeafIndex<true, true>(tree, feats, cats);
preds[ridx * ntree_limit + j] = static_cast<bst_float>(tid);
}
feats.Drop(page[i]);
feats.Drop();
});
}
}
@@ -544,7 +548,7 @@ class CPUPredictor : public Predictor {
(tree_weights == nullptr ? 1 : (*tree_weights)[j]);
}
}
feats.Drop(page[i]);
feats.Drop();
// add base margin to BIAS
if (base_margin.Size() != 0) {
CHECK_EQ(base_margin.Shape(1), ngroup);
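A hedged sketch of the code path this backport speeds up: CPU prediction straight from a QuantileDMatrix, which is backed by the GHistIndexMatrix accessed above (data is illustrative):

import numpy as np
import xgboost as xgb

rng = np.random.default_rng(0)
X, y = rng.random((256, 8)), rng.random(256)
Xy = xgb.QuantileDMatrix(X, label=y)
booster = xgb.train({"tree_method": "hist"}, Xy, num_boost_round=8)
# Prediction over a QuantileDMatrix calls GetFvalue() per feature per row,
# which now reuses cached cut pointers instead of refetching them each time.
preds = booster.predict(Xy)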

View File

@@ -389,6 +389,7 @@ class HistEvaluator {
tree_evaluator_.AddSplit(candidate.nid, left_child, right_child,
tree[candidate.nid].SplitIndex(), left_weight,
right_weight);
evaluator = tree_evaluator_.GetEvaluator();
auto max_node = std::max(left_child, tree[candidate.nid].RightChild());
max_node = std::max(candidate.nid, max_node);

View File

@@ -48,6 +48,8 @@ class TreeEvaluator {
monotone_.HostVector().resize(n_features, 0);
has_constraint_ = false;
} else {
CHECK_LE(p.monotone_constraints.size(), n_features)
<< "The size of monotone constraint should be less or equal to the number of features.";
monotone_.HostVector() = p.monotone_constraints;
monotone_.HostVector().resize(n_features, 0);
// Initialised to some small size, can grow if needed
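A hedged illustration of the parameter the added CHECK_LE guards: the constraint vector may not be longer than the feature count (data is illustrative):

import numpy as np
import xgboost as xgb

rng = np.random.default_rng(0)
X = rng.random((200, 3))
y = X[:, 0] - X[:, 1] + 0.1 * rng.random(200)
# One entry per feature: +1 increasing, -1 decreasing, 0 unconstrained.
model = xgb.XGBRegressor(tree_method="hist", monotone_constraints=(1, -1, 0))
model.fit(X, y)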

View File

@@ -286,7 +286,7 @@ struct GPUHistMakerDevice {
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
matrix.is_dense
matrix.is_dense && !collective::IsDistributed()
};
auto split = this->evaluator_.EvaluateSingleSplit(inputs, shared_inputs);
return split;
@@ -300,12 +300,14 @@ struct GPUHistMakerDevice {
std::vector<bst_node_t> nidx(2 * candidates.size());
auto h_node_inputs = pinned2.GetSpan<EvaluateSplitInputs>(2 * candidates.size());
auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
EvaluateSplitSharedInputs shared_inputs{
GPUTrainingParam{param}, *quantiser, feature_types, matrix.feature_segments,
matrix.gidx_fvalue_map, matrix.min_fvalue,
matrix.is_dense
};
EvaluateSplitSharedInputs shared_inputs{GPUTrainingParam{param}, *quantiser, feature_types,
matrix.feature_segments, matrix.gidx_fvalue_map,
matrix.min_fvalue,
// is_dense represents the local data
matrix.is_dense && !collective::IsDistributed()};
dh::TemporaryArray<GPUExpandEntry> entries(2 * candidates.size());
// Store the feature set ptrs so they dont go out of scope before the kernel is called
std::vector<std::shared_ptr<HostDeviceVector<bst_feature_t>>> feature_sets;
for (size_t i = 0; i < candidates.size(); i++) {
auto candidate = candidates.at(i);
int left_nidx = tree[candidate.nid].LeftChild();
@@ -314,10 +316,12 @@ struct GPUHistMakerDevice {
nidx[i * 2 + 1] = right_nidx;
auto left_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(ctx_->gpu_id);
feature_sets.emplace_back(left_sampled_features);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(), left_nidx);
auto right_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(ctx_->gpu_id);
feature_sets.emplace_back(right_sampled_features);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(),
right_nidx);

View File

@@ -78,7 +78,7 @@ CPUExpandEntry QuantileHistMaker::Builder::InitRoot(
{
GradientPairPrecise grad_stat;
if (p_fmat->IsDense()) {
if (p_fmat->IsDense() && !collective::IsDistributed()) {
/**
* Specialized code for dense data: For dense data (with no missing value), the sum
* of gradient histogram is equal to snode[nid]

View File

@@ -89,7 +89,7 @@ class TreeRefresher : public TreeUpdater {
dmlc::BeginPtr(stemp[tid]) + offset);
offset += tree->param.num_nodes;
}
feats.Drop(inst);
feats.Drop();
});
}
// aggregate the statistics

View File

@@ -23,10 +23,15 @@ case "${container}" in
gpu|rmm)
BUILD_ARGS="$BUILD_ARGS --build-arg CUDA_VERSION_ARG=$CUDA_VERSION"
BUILD_ARGS="$BUILD_ARGS --build-arg RAPIDS_VERSION_ARG=$RAPIDS_VERSION"
if [[ $container == "rmm" ]]
then
BUILD_ARGS="$BUILD_ARGS --build-arg NCCL_VERSION_ARG=$NCCL_VERSION"
fi
;;
gpu_build_centos7|jvm_gpu_build)
BUILD_ARGS="$BUILD_ARGS --build-arg CUDA_VERSION_ARG=$CUDA_VERSION"
BUILD_ARGS="$BUILD_ARGS --build-arg NCCL_VERSION_ARG=$NCCL_VERSION"
;;
*)

View File

@@ -15,7 +15,8 @@ fi
command_wrapper="tests/ci_build/ci_build.sh rmm docker --build-arg "`
`"CUDA_VERSION_ARG=$CUDA_VERSION --build-arg "`
`"RAPIDS_VERSION_ARG=$RAPIDS_VERSION"
`"RAPIDS_VERSION_ARG=$RAPIDS_VERSION --build-arg "`
`"NCCL_VERSION_ARG=$NCCL_VERSION"
echo "--- Build libxgboost from the source"
$command_wrapper tests/ci_build/build_via_cmake.sh --conda-env=gpu_test -DUSE_CUDA=ON \

View File

@@ -16,7 +16,8 @@ else
fi
command_wrapper="tests/ci_build/ci_build.sh gpu_build_centos7 docker --build-arg "`
`"CUDA_VERSION_ARG=$CUDA_VERSION"
`"CUDA_VERSION_ARG=$CUDA_VERSION --build-arg "`
`"NCCL_VERSION_ARG=$NCCL_VERSION"
echo "--- Build libxgboost from the source"
$command_wrapper tests/ci_build/prune_libnccl.sh

View File

@@ -14,5 +14,7 @@ else
fi
tests/ci_build/ci_build.sh jvm_gpu_build nvidia-docker \
--build-arg CUDA_VERSION_ARG=${CUDA_VERSION} tests/ci_build/build_jvm_packages.sh \
--build-arg CUDA_VERSION_ARG=${CUDA_VERSION} \
--build-arg NCCL_VERSION_ARG=${NCCL_VERSION} \
tests/ci_build/build_jvm_packages.sh \
${SPARK_VERSION} -Duse.cuda=ON ${arch_flag}

View File

@@ -12,10 +12,10 @@ if ( $is_release_branch -eq 0 ) {
}
mkdir build
cd build
cmake .. -G"Visual Studio 15 2017 Win64" -DUSE_CUDA=ON -DCMAKE_VERBOSE_MAKEFILE=ON `
-DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON -DCMAKE_UNITY_BUILD=ON ${arch_flag}
cmake .. -G"Visual Studio 17 2022" -A x64 -DUSE_CUDA=ON -DCMAKE_VERBOSE_MAKEFILE=ON `
-DGOOGLE_TEST=ON -DUSE_DMLC_GTEST=ON ${arch_flag}
$msbuild = -join @(
"C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\MSBuild\\15.0"
"C:\\Program Files\\Microsoft Visual Studio\\2022\\Community\\MSBuild\\Current"
"\\Bin\\MSBuild.exe"
)
& $msbuild xgboost.sln /m /p:Configuration=Release /nodeReuse:false

View File

@@ -22,9 +22,10 @@ function set_buildkite_env_vars_in_container {
set -x
CUDA_VERSION=11.0.3
RAPIDS_VERSION=22.10
SPARK_VERSION=3.0.1
CUDA_VERSION=11.8.0
NCCL_VERSION=2.16.5-1
RAPIDS_VERSION=23.02
SPARK_VERSION=3.1.1
JDK_VERSION=8
if [[ -z ${BUILDKITE:-} ]]

View File

@@ -9,5 +9,6 @@ then
echo "--- Deploy JVM packages to xgboost-maven-repo S3 repo"
tests/ci_build/ci_build.sh jvm_gpu_build docker \
--build-arg CUDA_VERSION_ARG=${CUDA_VERSION} \
--build-arg NCCL_VERSION_ARG=${NCCL_VERSION} \
tests/ci_build/deploy_jvm_packages.sh ${SPARK_VERSION}
fi

View File

@@ -2,12 +2,16 @@ import argparse
import copy
import os
import re
import sys
import boto3
import botocore
from metadata import AMI_ID, COMMON_STACK_PARAMS, STACK_PARAMS
current_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(current_dir, ".."))
from common_blocks.utils import create_or_update_stack, wait
TEMPLATE_URL = "https://s3.amazonaws.com/buildkite-aws-stack/latest/aws-stack.yml"
@@ -68,72 +72,7 @@ def get_full_stack_id(stack_id):
return f"buildkite-{stack_id}-autoscaling-group"
def stack_exists(args, *, stack_name):
client = boto3.client("cloudformation", region_name=args.aws_region)
waiter = client.get_waiter("stack_exists")
try:
waiter.wait(StackName=stack_name, WaiterConfig={"MaxAttempts": 1})
return True
except botocore.exceptions.WaiterError as e:
return False
def create_or_update_stack(
args, *, stack_name, template_url=None, template_body=None, params=None
):
kwargs = {
"StackName": stack_name,
"Capabilities": [
"CAPABILITY_IAM",
"CAPABILITY_NAMED_IAM",
"CAPABILITY_AUTO_EXPAND",
],
}
if template_url:
kwargs["TemplateURL"] = template_url
if template_body:
kwargs["TemplateBody"] = template_body
if params:
kwargs["Parameters"] = params
client = boto3.client("cloudformation", region_name=args.aws_region)
if stack_exists(args, stack_name=stack_name):
print(f"Stack {stack_name} already exists. Updating...")
try:
response = client.update_stack(**kwargs)
return {"StackName": stack_name, "Action": "update"}
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "ValidationError" and re.search(
"No updates are to be performed", e.response["Error"]["Message"]
):
print(f"No update was made to {stack_name}")
return {"StackName": stack_name, "Action": "noop"}
else:
raise e
else:
kwargs.update({"OnFailure": "ROLLBACK", "EnableTerminationProtection": False})
response = client.create_stack(**kwargs)
return {"StackName": stack_name, "Action": "create"}
def wait(promise):
client = boto3.client("cloudformation", region_name=args.aws_region)
stack_name = promise["StackName"]
print(f"Waiting for {stack_name}...")
if promise["Action"] == "create":
waiter = client.get_waiter("stack_create_complete")
waiter.wait(StackName=stack_name)
print(f"Finished creating stack {stack_name}")
elif promise["Action"] == "update":
waiter = client.get_waiter("stack_update_complete")
waiter.wait(StackName=stack_name)
print(f"Finished updating stack {stack_name}")
elif promise["Action"] != "noop":
raise ValueError(f"Invalid promise {promise}")
def create_agent_iam_policy(args):
def create_agent_iam_policy(args, *, client):
policy_stack_name = "buildkite-agent-iam-policy"
print(f"Creating stack {policy_stack_name} for agent IAM policy...")
with open(
@@ -142,9 +81,9 @@ def create_agent_iam_policy(args):
) as f:
policy_template = f.read()
promise = create_or_update_stack(
args, stack_name=policy_stack_name, template_body=policy_template
args, client=client, stack_name=policy_stack_name, template_body=policy_template
)
wait(promise)
wait(promise, client=client)
cf = boto3.resource("cloudformation", region_name=args.aws_region)
policy = cf.StackResource(policy_stack_name, "BuildkiteAgentManagedPolicy")
@@ -152,10 +91,10 @@ def create_agent_iam_policy(args):
def main(args):
agent_iam_policy = create_agent_iam_policy(args)
client = boto3.client("cloudformation", region_name=args.aws_region)
agent_iam_policy = create_agent_iam_policy(args, client=client)
promises = []
for stack_id in AMI_ID:
@@ -167,13 +106,17 @@ def main(args):
)
promise = create_or_update_stack(
args, stack_name=stack_id_full, template_url=TEMPLATE_URL, params=params
args,
client=client,
stack_name=stack_id_full,
template_url=TEMPLATE_URL,
params=params,
)
promises.append(promise)
print(f"CI stack {stack_id_full} is in progress in the background")
for promise in promises:
wait(promise)
wait(promise, client=client)
if __name__ == "__main__":

View File

@@ -1,27 +1,27 @@
AMI_ID = {
# Managed by XGBoost team
"linux-amd64-gpu": {
"us-west-2": "ami-00ed92bd37f77bc33",
"us-west-2": "ami-094271bed4788ddb5",
},
"linux-amd64-mgpu": {
"us-west-2": "ami-00ed92bd37f77bc33",
"us-west-2": "ami-094271bed4788ddb5",
},
"windows-gpu": {
"us-west-2": "ami-0a1a2ea551a07ad5f",
"us-west-2": "ami-0839681594a1d7627",
},
"windows-cpu": {
"us-west-2": "ami-0a1a2ea551a07ad5f",
"us-west-2": "ami-0839681594a1d7627",
},
# Managed by BuildKite
# from https://s3.amazonaws.com/buildkite-aws-stack/latest/aws-stack.yml
"linux-amd64-cpu": {
"us-west-2": "ami-075d4c25d5f0c17c1",
"us-west-2": "ami-00f2127550cf03658",
},
"pipeline-loader": {
"us-west-2": "ami-075d4c25d5f0c17c1",
"us-west-2": "ami-00f2127550cf03658",
},
"linux-arm64-cpu": {
"us-west-2": "ami-0952c6fb6db9a9891",
"us-west-2": "ami-0c5789068f4a2d1b5",
},
}

View File

@@ -0,0 +1,97 @@
import re
import boto3
import botocore
def stack_exists(args, *, stack_name):
client = boto3.client("cloudformation", region_name=args.aws_region)
waiter = client.get_waiter("stack_exists")
try:
waiter.wait(StackName=stack_name, WaiterConfig={"MaxAttempts": 1})
return True
except botocore.exceptions.WaiterError as e:
return False
def create_or_update_stack(
args, *, client, stack_name, template_url=None, template_body=None, params=None
):
kwargs = {
"StackName": stack_name,
"Capabilities": [
"CAPABILITY_IAM",
"CAPABILITY_NAMED_IAM",
"CAPABILITY_AUTO_EXPAND",
],
}
if template_url:
kwargs["TemplateURL"] = template_url
if template_body:
kwargs["TemplateBody"] = template_body
if params:
kwargs["Parameters"] = params
if stack_exists(args, stack_name=stack_name):
print(f"Stack {stack_name} already exists. Updating...")
try:
response = client.update_stack(**kwargs)
return {"StackName": stack_name, "Action": "update"}
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "ValidationError" and re.search(
"No updates are to be performed", e.response["Error"]["Message"]
):
print(f"No update was made to {stack_name}")
return {"StackName": stack_name, "Action": "noop"}
else:
raise e
else:
kwargs.update({"OnFailure": "ROLLBACK", "EnableTerminationProtection": False})
response = client.create_stack(**kwargs)
return {"StackName": stack_name, "Action": "create"}
def replace_stack(
args, *, client, stack_name, template_url=None, template_body=None, params=None
):
"""Delete an existing stack and create a new stack with identical name"""
if not stack_exists(args, stack_name=stack_name):
raise ValueError(f"Stack {stack_name} does not exist")
r = client.delete_stack(StackName=stack_name)
delete_waiter = client.get_waiter("stack_delete_complete")
delete_waiter.wait(StackName=stack_name)
kwargs = {
"StackName": stack_name,
"Capabilities": [
"CAPABILITY_IAM",
"CAPABILITY_NAMED_IAM",
"CAPABILITY_AUTO_EXPAND",
],
"OnFailure": "ROLLBACK",
"EnableTerminationProtection": False,
}
if template_url:
kwargs["TemplateURL"] = template_url
if template_body:
kwargs["TemplateBody"] = template_body
if params:
kwargs["Parameters"] = params
response = client.create_stack(**kwargs)
return {"StackName": stack_name, "Action": "create"}
def wait(promise, *, client):
stack_name = promise["StackName"]
print(f"Waiting for {stack_name}...")
if promise["Action"] == "create":
waiter = client.get_waiter("stack_create_complete")
waiter.wait(StackName=stack_name)
print(f"Finished creating stack {stack_name}")
elif promise["Action"] == "update":
waiter = client.get_waiter("stack_update_complete")
waiter.wait(StackName=stack_name)
print(f"Finished updating stack {stack_name}")
elif promise["Action"] != "noop":
raise ValueError(f"Invalid promise {promise}")

View File

@@ -2,6 +2,7 @@ import argparse
import copy
import json
import os
import sys
from urllib.request import urlopen
import boto3
@@ -9,6 +10,9 @@ import cfn_flip
from metadata import IMAGE_PARAMS
current_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(current_dir, ".."))
from common_blocks.utils import replace_stack, wait
BUILDKITE_CF_TEMPLATE_URL = (
"https://s3.amazonaws.com/buildkite-aws-stack/latest/aws-stack.yml"
@@ -47,6 +51,9 @@ def main(args):
ami_mapping = get_ami_mapping()
client = boto3.client("cloudformation", region_name=args.aws_region)
promises = []
for stack_id in IMAGE_PARAMS:
stack_id_full = get_full_stack_id(stack_id)
print(f"Creating EC2 image builder stack {stack_id_full}...")
@@ -55,28 +62,20 @@ def main(args):
stack_id=stack_id, aws_region=args.aws_region, ami_mapping=ami_mapping
)
client = boto3.client("cloudformation", region_name=args.aws_region)
response = client.create_stack(
StackName=stack_id_full,
TemplateBody=ec2_image_pipeline_template,
Capabilities=[
"CAPABILITY_IAM",
"CAPABILITY_NAMED_IAM",
"CAPABILITY_AUTO_EXPAND",
],
OnFailure="ROLLBACK",
EnableTerminationProtection=False,
Parameters=params,
promise = replace_stack(
args,
client=client,
stack_name=stack_id_full,
template_body=ec2_image_pipeline_template,
params=params,
)
promises.append(promise)
print(
f"EC2 image builder stack {stack_id_full} is in progress in the background"
)
for stack_id in IMAGE_PARAMS:
stack_id_full = get_full_stack_id(stack_id)
waiter = client.get_waiter("stack_create_complete")
waiter.wait(StackName=stack_id_full)
print(f"EC2 image builder stack {stack_id_full} is now finished.")
for promise in promises:
wait(promise, client=client)
if __name__ == "__main__":

View File

@@ -58,7 +58,7 @@ Resources:
BootstrapComponent:
Type: AWS::ImageBuilder::Component
Properties:
Name: !Sub "${AWS::StackName}-bootstrap-component"
Name: !Join ["-", [!Ref AWS::StackName, "bootstrap-component", !Select [2, !Split ['/', !Ref AWS::StackId]]]]
Platform: !Ref InstanceOperatingSystem
Version: "1.0.0"
Description: Execute a bootstrap script.
@@ -67,7 +67,7 @@ Resources:
Recipe:
Type: AWS::ImageBuilder::ImageRecipe
Properties:
Name: !Sub "${AWS::StackName}-image"
Name: !Join ["-", [!Ref AWS::StackName, "image", !Select [2, !Split ['/', !Ref AWS::StackId]]]]
Components:
- ComponentArn: !Ref BootstrapComponent
ParentImage: !Ref BaseImageId
@@ -83,7 +83,7 @@ Resources:
Infrastructure:
Type: AWS::ImageBuilder::InfrastructureConfiguration
Properties:
Name: !Sub "${AWS::StackName}-image-pipeline-infrastructure"
Name: !Join ["-", [!Ref AWS::StackName, "image-pipeline-infrastructure", !Select [2, !Split ['/', !Ref AWS::StackId]]]]
InstanceProfileName: !Ref InstanceProfile
InstanceTypes:
- !Ref InstanceType
@@ -93,7 +93,7 @@ Resources:
Distribution:
Type: AWS::ImageBuilder::DistributionConfiguration
Properties:
Name: !Sub "${AWS::StackName}-image-pipeline-distribution-config"
Name: !Join ["-", [!Ref AWS::StackName, "image-pipeline-distribution-config", !Select [2, !Split ['/', !Ref AWS::StackId]]]]
Distributions:
- Region: !Ref AWS::Region
AmiDistributionConfiguration: {}
@@ -102,7 +102,7 @@ Resources:
Pipeline:
Type: AWS::ImageBuilder::ImagePipeline
Properties:
Name: !Sub "${AWS::StackName}-image-pipeline"
Name: !Join ["-", [!Ref AWS::StackName, "image-pipeline", !Select [2, !Split ['/', !Ref AWS::StackId]]]]
DistributionConfigurationArn: !Ref Distribution
ImageRecipeArn: !Ref Recipe
InfrastructureConfigurationArn: !Ref Infrastructure

View File

@@ -13,6 +13,6 @@ IMAGE_PARAMS = {
"BootstrapScript": "windows-gpu-bootstrap.yml",
"InstanceType": "g4dn.2xlarge",
"InstanceOperatingSystem": "Windows",
"VolumeSize": "80", # in GiBs
"VolumeSize": "120", # in GiBs
},
}

View File

@@ -15,9 +15,9 @@ phases:
choco --version
choco feature enable -n=allowGlobalConfirmation
# CMake 3.18
Write-Host '>>> Installing CMake 3.18...'
choco install cmake --version 3.18.0 --installargs "ADD_CMAKE_TO_PATH=System"
# CMake 3.25
Write-Host '>>> Installing CMake 3.25...'
choco install cmake --version 3.25.2 --installargs "ADD_CMAKE_TO_PATH=System"
if ($LASTEXITCODE -ne 0) { throw "Last command failed" }
# Notepad++
@@ -45,18 +45,18 @@ phases:
choco install graphviz
if ($LASTEXITCODE -ne 0) { throw "Last command failed" }
# Install Visual Studio Community 2017 (15.9)
Write-Host '>>> Installing Visual Studio 2017 Community (15.9)...'
choco install visualstudio2017community --version 15.9.23.0 `
# Install Visual Studio 2022 Community
Write-Host '>>> Installing Visual Studio 2022 Community...'
choco install visualstudio2022community `
--params "--wait --passive --norestart"
if ($LASTEXITCODE -ne 0) { throw "Last command failed" }
choco install visualstudio2017-workload-nativedesktop --params `
choco install visualstudio2022-workload-nativedesktop --params `
"--wait --passive --norestart --includeOptional"
if ($LASTEXITCODE -ne 0) { throw "Last command failed" }
# Install CUDA 11.0
Write-Host '>>> Installing CUDA 11.0...'
choco install cuda --version 11.0.3
# Install CUDA 11.8
Write-Host '>>> Installing CUDA 11.8...'
choco install cuda --version=11.8.0.52206
if ($LASTEXITCODE -ne 0) { throw "Last command failed" }
# Install Python packages

View File

@@ -22,11 +22,11 @@ steps:
queue: linux-amd64-cpu
- wait
#### -------- BUILD --------
- label: ":console: Run clang-tidy"
command: "tests/buildkite/run-clang-tidy.sh"
key: run-clang-tidy
agents:
queue: linux-amd64-cpu
# - label: ":console: Run clang-tidy"
# command: "tests/buildkite/run-clang-tidy.sh"
# key: run-clang-tidy
# agents:
# queue: linux-amd64-cpu
- wait
- label: ":console: Build CPU"
command: "tests/buildkite/build-cpu.sh"

View File

@@ -20,4 +20,5 @@ tests/ci_build/ci_build.sh gpu nvidia-docker \
# tests/ci_build/ci_build.sh rmm nvidia-docker \
# --build-arg CUDA_VERSION_ARG=$CUDA_VERSION \
# --build-arg RAPIDS_VERSION_ARG=$RAPIDS_VERSION bash -c \
# --build-arg NCCL_VERSION_ARG=$NCCL_VERSION bash -c \
# "source activate gpu_test && build/testxgboost --use-rmm-pool"

View File

@@ -15,8 +15,8 @@ RUN \
add-apt-repository -u 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-11 main' && \
apt-get update && \
apt-get install -y llvm-11 clang-tidy-11 clang-11 && \
wget -nv -nc https://cmake.org/files/v3.14/cmake-3.14.0-Linux-x86_64.sh --no-check-certificate && \
bash cmake-3.14.0-Linux-x86_64.sh --skip-license --prefix=/usr
wget -nv -nc https://cmake.org/files/v3.18/cmake-3.18.0-Linux-x86_64.sh --no-check-certificate && \
bash cmake-3.18.0-Linux-x86_64.sh --skip-license --prefix=/usr
# Set default clang-tidy version
RUN \

View File

@@ -12,8 +12,8 @@ RUN \
apt-get update && \
apt-get install -y tar unzip wget git build-essential doxygen graphviz llvm libasan2 libidn11 ninja-build gcc-8 g++-8 openjdk-8-jdk-headless && \
# CMake
wget -nv -nc https://cmake.org/files/v3.14/cmake-3.14.0-Linux-x86_64.sh --no-check-certificate && \
bash cmake-3.14.0-Linux-x86_64.sh --skip-license --prefix=/usr && \
wget -nv -nc https://cmake.org/files/v3.18/cmake-3.18.0-Linux-x86_64.sh --no-check-certificate && \
bash cmake-3.18.0-Linux-x86_64.sh --skip-license --prefix=/usr && \
# Python
wget -nv https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-Linux-x86_64.sh && \
bash Mambaforge-Linux-x86_64.sh -b -p /opt/python

View File

@@ -22,10 +22,10 @@ ENV PATH=/opt/python/bin:$PATH
RUN \
conda install -c conda-forge mamba && \
mamba create -n gpu_test -c rapidsai-nightly -c rapidsai -c nvidia -c conda-forge -c defaults \
python=3.9 cudf=$RAPIDS_VERSION_ARG* rmm=$RAPIDS_VERSION_ARG* cudatoolkit=$CUDA_VERSION_ARG \
python=3.10 cudf=$RAPIDS_VERSION_ARG* rmm=$RAPIDS_VERSION_ARG* cudatoolkit=$CUDA_VERSION_ARG \
dask dask-cuda=$RAPIDS_VERSION_ARG* dask-cudf=$RAPIDS_VERSION_ARG* cupy \
numpy pytest pytest-timeout scipy scikit-learn pandas matplotlib wheel python-kubernetes urllib3 graphviz hypothesis \
pyspark cloudpickle cuda-python=11.7.0 && \
pyspark cloudpickle cuda-python && \
mamba clean --all && \
conda run --no-capture-output -n gpu_test pip install buildkite-test-collector

View File

@@ -1,6 +1,7 @@
ARG CUDA_VERSION_ARG
FROM nvidia/cuda:$CUDA_VERSION_ARG-devel-centos7
ARG CUDA_VERSION_ARG
ARG NCCL_VERSION_ARG
# Install all basic requirements
RUN \
@@ -21,7 +22,7 @@ RUN \
# NCCL2 (License: https://docs.nvidia.com/deeplearning/sdk/nccl-sla/index.html)
RUN \
export CUDA_SHORT=`echo $CUDA_VERSION_ARG | grep -o -E '[0-9]+\.[0-9]'` && \
export NCCL_VERSION=2.13.4-1 && \
export NCCL_VERSION=$NCCL_VERSION_ARG && \
wget -nv -nc https://developer.download.nvidia.com/compute/machine-learning/repos/rhel7/x86_64/nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm && \
rpm -i nvidia-machine-learning-repo-rhel7-1.0.0-1.x86_64.rpm && \
yum -y update && \

View File

@@ -36,8 +36,8 @@ RUN \
bash Miniconda3.sh -b -p /opt/python && \
/opt/python/bin/python -m pip install auditwheel awscli && \
# CMake
wget -nv -nc https://cmake.org/files/v3.14/cmake-3.14.0-Linux-x86_64.sh --no-check-certificate && \
bash cmake-3.14.0-Linux-x86_64.sh --skip-license --prefix=/usr
wget -nv -nc https://cmake.org/files/v3.18/cmake-3.18.0-Linux-x86_64.sh --no-check-certificate && \
bash cmake-3.18.0-Linux-x86_64.sh --skip-license --prefix=/usr
ENV GOSU_VERSION 1.10

View File

@@ -12,8 +12,8 @@ RUN \
wget -nv -nc -O Miniconda3.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
bash Miniconda3.sh -b -p /opt/python && \
# CMake
wget -nv -nc https://cmake.org/files/v3.14/cmake-3.14.0-Linux-x86_64.sh --no-check-certificate && \
bash cmake-3.14.0-Linux-x86_64.sh --skip-license --prefix=/usr && \
wget -nv -nc https://cmake.org/files/v3.18/cmake-3.18.0-Linux-x86_64.sh --no-check-certificate && \
bash cmake-3.18.0-Linux-x86_64.sh --skip-license --prefix=/usr && \
# Maven
wget -nv -nc https://archive.apache.org/dist/maven/maven-3/3.6.1/binaries/apache-maven-3.6.1-bin.tar.gz && \
tar xvf apache-maven-3.6.1-bin.tar.gz -C /opt && \

View File

@@ -1,6 +1,7 @@
ARG CUDA_VERSION_ARG
FROM nvidia/cuda:$CUDA_VERSION_ARG-devel-centos7
ARG CUDA_VERSION_ARG
ARG NCCL_VERSION_ARG
# Install all basic requirements
RUN \
@@ -14,8 +15,8 @@ RUN \
wget -nv -nc -O Miniconda3.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
bash Miniconda3.sh -b -p /opt/python && \
# CMake
wget -nv -nc https://cmake.org/files/v3.14/cmake-3.14.0-Linux-x86_64.sh --no-check-certificate && \
bash cmake-3.14.0-Linux-x86_64.sh --skip-license --prefix=/usr && \
wget -nv -nc https://cmake.org/files/v3.18/cmake-3.18.0-Linux-x86_64.sh --no-check-certificate && \
bash cmake-3.18.0-Linux-x86_64.sh --skip-license --prefix=/usr && \
# Maven
wget -nv -nc https://archive.apache.org/dist/maven/maven-3/3.6.1/binaries/apache-maven-3.6.1-bin.tar.gz && \
tar xvf apache-maven-3.6.1-bin.tar.gz -C /opt && \
@@ -24,7 +25,7 @@ RUN \
# NCCL2 (License: https://docs.nvidia.com/deeplearning/sdk/nccl-sla/index.html)
RUN \
export CUDA_SHORT=`echo $CUDA_VERSION_ARG | grep -o -E '[0-9]+\.[0-9]'` && \
export NCCL_VERSION=2.13.4-1 && \
export NCCL_VERSION=$NCCL_VERSION_ARG && \
yum-config-manager --add-repo http://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/cuda-rhel7.repo && \
yum -y update && \
yum install -y libnccl-${NCCL_VERSION}+cuda${CUDA_SHORT} libnccl-devel-${NCCL_VERSION}+cuda${CUDA_SHORT} libnccl-static-${NCCL_VERSION}+cuda${CUDA_SHORT}
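Both CentOS 7 GPU Dockerfiles (and the Ubuntu one below) now take the NCCL version as a build argument instead of hard-coding 2.13.4-1, so CI can bump NCCL together with CUDA without editing the images. A hypothetical invocation; the tag, Dockerfile path, and version values are illustrative assumptions, not taken from the CI scripts:

    import subprocess

    subprocess.run(
        [
            "docker", "build",
            "--build-arg", "CUDA_VERSION_ARG=11.8.0",    # assumed value
            "--build-arg", "NCCL_VERSION_ARG=2.16.2-1",  # assumed value
            "-t", "xgboost-build-centos7",               # assumed tag
            ".",
        ],
        check=True,
    )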

View File

@@ -1,7 +1,8 @@
ARG CUDA_VERSION_ARG
FROM nvidia/cuda:$CUDA_VERSION_ARG-devel-ubuntu18.04
FROM nvidia/cuda:$CUDA_VERSION_ARG-devel-ubuntu20.04
ARG CUDA_VERSION_ARG
ARG RAPIDS_VERSION_ARG
ARG NCCL_VERSION_ARG
# Environment
ENV DEBIAN_FRONTEND noninteractive
@@ -19,7 +20,7 @@ RUN \
# NCCL2 (License: https://docs.nvidia.com/deeplearning/sdk/nccl-sla/index.html)
RUN \
export CUDA_SHORT=`echo $CUDA_VERSION_ARG | grep -o -E '[0-9]+\.[0-9]'` && \
export NCCL_VERSION=2.13.4-1 && \
export NCCL_VERSION=$NCCL_VERSION_ARG && \
apt-get update && \
apt-get install -y --allow-downgrades --allow-change-held-packages libnccl2=${NCCL_VERSION}+cuda${CUDA_SHORT} libnccl-dev=${NCCL_VERSION}+cuda${CUDA_SHORT}
@@ -29,7 +30,7 @@ ENV PATH=/opt/python/bin:$PATH
RUN \
conda install -c conda-forge mamba && \
mamba create -n gpu_test -c rapidsai-nightly -c rapidsai -c nvidia -c conda-forge -c defaults \
python=3.9 rmm=$RAPIDS_VERSION_ARG* cudatoolkit=$CUDA_VERSION_ARG cmake && \
python=3.10 rmm=$RAPIDS_VERSION_ARG* cudatoolkit=$CUDA_VERSION_ARG cmake && \
mamba clean --all
ENV GOSU_VERSION 1.10

View File

@@ -15,7 +15,7 @@ mv xgboost/ xgboost_rpack/
mkdir build
cd build
cmake .. -G"Visual Studio 15 2017 Win64" -DUSE_CUDA=ON -DR_LIB=ON -DLIBR_HOME="c:\\Program Files\\R\\R-3.6.3"
cmake .. -G"Visual Studio 17 2022" -A x64 -DUSE_CUDA=ON -DR_LIB=ON -DLIBR_HOME="c:\\Program Files\\R\\R-3.6.3"
cmake --build . --config Release --parallel
cd ..
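The generator switch here is more than a version bump: starting with the Visual Studio 2019 generators, CMake dropped the architecture-suffixed names like "Visual Studio 15 2017 Win64", so the target architecture is now passed separately via -A x64, as the new line does.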

View File

@@ -31,6 +31,5 @@ dependencies:
- pyspark
- cloudpickle
- pip:
- shap
- awscli
- auditwheel

View File

@@ -34,7 +34,6 @@ dependencies:
- pyarrow
- protobuf
- cloudpickle
- shap
- modin
# TODO: Replace it with pyspark>=3.4 once 3.4 released.
# - https://ml-team-public-read.s3.us-west-2.amazonaws.com/pyspark-3.4.0.dev0.tar.gz

View File

@@ -1,19 +1,102 @@
import sys
import re
import zipfile
import argparse
import base64
import glob
import hashlib
import os
import pathlib
import re
import shutil
import tempfile
if len(sys.argv) != 2:
print('Usage: {} [wheel]'.format(sys.argv[0]))
sys.exit(1)
VCOMP140_PATH = "C:\\Windows\\System32\\vcomp140.dll"
vcomp140_path = 'C:\\Windows\\System32\\vcomp140.dll'
for wheel_path in sorted(glob.glob(sys.argv[1])):
m = re.search(r'xgboost-(.*)-py3', wheel_path)
assert m, f'wheel_path = {wheel_path}'
version = m.group(1)
def get_sha256sum(path):
return (
base64.urlsafe_b64encode(hashlib.sha256(open(path, "rb").read()).digest())
.decode("latin1")
.rstrip("=")
)
print(f"Inserting vcomp140.dll into {wheel_path}...")
with zipfile.ZipFile(wheel_path, 'a') as f:
f.write(vcomp140_path, 'xgboost-{}.data/data/xgboost/vcomp140.dll'.format(version))
def update_record(*, wheel_content_dir, xgboost_version):
vcomp140_size = os.path.getsize(VCOMP140_PATH)
vcomp140_hash = get_sha256sum(VCOMP140_PATH)
record_path = wheel_content_dir / pathlib.Path(
f"xgboost-{xgboost_version}.dist-info/RECORD"
)
with open(record_path, "r") as f:
record_content = f.read()
record_content += f"xgboost-{xgboost_version}.data/data/xgboost/vcomp140.dll,"
record_content += f"sha256={vcomp140_hash},{vcomp140_size}\n"
with open(record_path, "w") as f:
f.write(record_content)
def main(args):
candidates = list(sorted(glob.glob(args.wheel_path)))
for wheel_path in candidates:
print(f"Processing wheel {wheel_path}")
m = re.search(r"xgboost-(.*)\+.*-py3", wheel_path)
if not m:
raise ValueError(f"Wheel {wheel_path} has unexpected name")
version = m.group(1)
print(f" Detected version for {wheel_path}: {version}")
print(f" Inserting vcomp140.dll into {wheel_path}...")
with tempfile.TemporaryDirectory() as tempdir:
wheel_content_dir = pathlib.Path(tempdir) / "wheel_content"
print(f" Extract {wheel_path} into {wheel_content_dir}")
shutil.unpack_archive(
wheel_path, extract_dir=wheel_content_dir, format="zip"
)
data_dir = wheel_content_dir / pathlib.Path(
f"xgboost-{version}.data/data/xgboost"
)
data_dir.mkdir(parents=True, exist_ok=True)
print(f" Copy {VCOMP140_PATH} -> {data_dir}")
shutil.copy(VCOMP140_PATH, data_dir)
print(f" Update RECORD")
update_record(wheel_content_dir=wheel_content_dir, xgboost_version=version)
print(f" Content of {wheel_content_dir}:")
for e in sorted(wheel_content_dir.rglob("*")):
if e.is_file():
r = e.relative_to(wheel_content_dir)
print(f" {r}")
print(f" Create new wheel...")
new_wheel_tmp_path = pathlib.Path(tempdir) / "new_wheel"
shutil.make_archive(
str(new_wheel_tmp_path.resolve()),
format="zip",
root_dir=wheel_content_dir,
)
new_wheel_tmp_path = new_wheel_tmp_path.resolve().with_suffix(".zip")
new_wheel_tmp_path = new_wheel_tmp_path.rename(
new_wheel_tmp_path.with_suffix(".whl")
)
print(f" Created new wheel {new_wheel_tmp_path}")
# Rename the old wheel with suffix .bak
# The new wheel takes the name of the old wheel
wheel_path_obj = pathlib.Path(wheel_path).resolve()
backup_path = wheel_path_obj.with_suffix(".whl.bak")
print(f" Rename {wheel_path_obj} -> {backup_path}")
wheel_path_obj.replace(backup_path)
print(f" Rename {new_wheel_tmp_path} -> {wheel_path_obj}")
new_wheel_tmp_path.replace(wheel_path_obj)
shutil.rmtree(wheel_content_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"wheel_path", type=str, help="Path to wheel (wildcard permitted)"
)
args = parser.parse_args()
main(args)
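The rewrite exists because the old approach, appending vcomp140.dll to the zip in place, left the wheel's RECORD file stale, and strict installers such as Poetry verify RECORD on install. Each RECORD line has the form path,sha256=<digest>,<size>, where the digest is an unpadded urlsafe base64 SHA-256 (per the PEP 376/427 wheel conventions), which is exactly what get_sha256sum and update_record compose above. A standalone sketch of building such a line, with illustrative file names:

    import base64
    import hashlib
    import os

    def record_line(path: str, arcname: str) -> str:
        # path,sha256=<unpadded urlsafe b64 digest>,<size in bytes>
        with open(path, "rb") as f:
            raw = hashlib.sha256(f.read()).digest()
        digest = base64.urlsafe_b64encode(raw).decode("latin1").rstrip("=")
        return f"{arcname},sha256={digest},{os.path.getsize(path)}"

    # e.g. record_line("C:\\Windows\\System32\\vcomp140.dll",
    #                  "xgboost-1.7.5.data/data/xgboost/vcomp140.dll")

Invocation is unchanged in spirit, e.g. python insert_vcomp140.py "python-package/dist/xgboost-*.whl" (the script name is assumed here); the wildcard should be quoted so that the script's glob, not the shell, expands it.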

View File

@@ -6,6 +6,9 @@
#include <string>
#include "../../../src/tree/constraints.h"
#include "../../../src/tree/hist/evaluate_splits.h"
#include "../../../src/tree/hist/expand_entry.h"
#include "../helpers.h"
namespace xgboost {
namespace tree {
@@ -56,5 +59,38 @@ TEST(CPUFeatureInteractionConstraint, Basic) {
ASSERT_FALSE(constraints.Query(1, 5));
}
TEST(CPUMonoConstraint, Basic) {
std::size_t kRows{64}, kCols{16};
Context ctx;
TrainParam param;
std::vector<std::int32_t> mono(kCols, 1);
I32Array arr;
for (std::size_t i = 0; i < kCols; ++i) {
arr.GetArray().push_back(mono[i]);
}
Json jarr{std::move(arr)};
std::string str_mono;
Json::Dump(jarr, &str_mono);
str_mono.front() = '(';
str_mono.back() = ')';
param.UpdateAllowUnknown(Args{{"monotone_constraints", str_mono}});
auto Xy = RandomDataGenerator{kRows, kCols, 0.0}.GenerateDMatrix(true);
auto sampler = std::make_shared<common::ColumnSampler>();
HistEvaluator<CPUExpandEntry> evaluator{param, Xy->Info(), ctx.Threads(), sampler};
evaluator.InitRoot(GradStats{2.0, 2.0});
SplitEntry split;
split.Update(1.0f, 0, 3.0, false, false, GradStats{1.0, 1.0}, GradStats{1.0, 1.0});
CPUExpandEntry entry{0, 0, split};
RegTree tree;
tree.param.UpdateAllowUnknown(Args{{"num_feature", std::to_string(kCols)}});
evaluator.ApplyTreeSplit(entry, &tree);
ASSERT_TRUE(evaluator.Evaluator().has_constraint);
}
} // namespace tree
} // namespace xgboost
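For context on what the new CPUMonoConstraint test exercises: monotone_constraints is the user-facing parameter that forces each feature's effect on the prediction to be non-decreasing (+1) or non-increasing (-1), and the backported fix (#9122) repairs its handling in the CPU hist evaluator. A minimal Python sketch of the feature itself, on synthetic data:

    import numpy as np
    import xgboost as xgb

    rng = np.random.default_rng(0)
    X = rng.random((256, 4))
    y = X.sum(axis=1)  # the target genuinely increases in every feature
    dtrain = xgb.DMatrix(X, label=y)
    # "(1,1,1,1)" requires a non-decreasing relationship for all four features,
    # mirroring the all-ones constraint vector built in the C++ test above.
    booster = xgb.train(
        {"tree_method": "hist", "monotone_constraints": "(1,1,1,1)"},
        dtrain,
        num_boost_round=10,
    )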

View File

@@ -216,6 +216,7 @@ class TestGPUPredict:
def test_inplace_predict_cupy(self):
self.run_inplace_predict_cupy(0)
@pytest.mark.xfail
@pytest.mark.skipif(**tm.no_cupy())
@pytest.mark.mgpu
def test_inplace_predict_cupy_specified_device(self):

View File

@@ -215,3 +215,34 @@ class TestGPUUpdaters:
@pytest.mark.parametrize("weighted", [True, False])
def test_adaptive(self, weighted) -> None:
self.cputest.run_adaptive("gpu_hist", weighted)
@pytest.mark.skipif(**tm.no_pandas())
def test_issue8824(self):
# column sampling by node crashes because shared pointers go out of scope
import pandas as pd
data = pd.DataFrame(np.random.rand(1024, 8))
data.columns = "x" + data.columns.astype(str)
features = data.columns
data["y"] = data.sum(axis=1) < 4
dtrain = xgb.DMatrix(data[features], label=data["y"])
model = xgb.train(
dtrain=dtrain,
params={
"max_depth": 5,
"learning_rate": 0.05,
"objective": "binary:logistic",
"tree_method": "gpu_hist",
"colsample_bytree": 0.5,
"colsample_bylevel": 0.5,
"colsample_bynode": 0.5, # Causes issues
"reg_alpha": 0.05,
"reg_lambda": 0.005,
"seed": 66,
"subsample": 0.5,
"gamma": 0.2,
"predictor": "auto",
"eval_metric": "auc",
},
num_boost_round=150,
)
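This regression test pins the backported #8850 fix: with per-node column sampling enabled, the feature-set shared pointers went out of scope mid-training and crashed gpu_hist, so simply training to completion is the assertion.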

View File

@@ -578,7 +578,7 @@ class TestModels:
y = rng.randn(rows)
feature_names = ["test_feature_" + str(i) for i in range(cols)]
X_pd = pd.DataFrame(X, columns=feature_names)
X_pd.iloc[:, 3] = X_pd.iloc[:, 3].astype(np.int32)
X_pd[f"test_feature_{3}"] = X_pd.iloc[:, 3].astype(np.int32)
Xy = xgb.DMatrix(X_pd, y)
assert Xy.feature_types[3] == "int"

View File

@@ -75,7 +75,10 @@ class TestPandas:
np.testing.assert_array_equal(result, exp)
dm = xgb.DMatrix(dummies)
assert dm.feature_names == ['B', 'A_X', 'A_Y', 'A_Z']
assert dm.feature_types == ['int', 'int', 'int', 'int']
if int(pd.__version__[0]) >= 2:
assert dm.feature_types == ['int', 'i', 'i', 'i']
else:
assert dm.feature_types == ['int', 'int', 'int', 'int']
assert dm.num_row() == 3
assert dm.num_col() == 4
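The version gate reflects a pandas 2.0 behavior change: get_dummies now returns boolean columns instead of uint8, so XGBoost records the one-hot features as indicator type 'i' rather than 'int'. A quick sketch of the difference:

    import pandas as pd

    df = pd.DataFrame({"A": ["X", "Y", "Z"], "B": [1, 2, 3]})
    print(pd.get_dummies(df).dtypes)
    # pandas >= 2.0: A_X/A_Y/A_Z are bool  -> DMatrix feature type 'i'
    # pandas <  2.0: A_X/A_Y/A_Z are uint8 -> DMatrix feature type 'int'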