Mark CUDA 10.1 as unsupported. (#4265)

This commit is contained in:
Jiaming Yuan 2019-03-17 16:59:15 +08:00 committed by GitHub
parent fdcae024e7
commit cf8d5b9b76
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 9 additions and 7 deletions

View File

@ -28,7 +28,7 @@ This page gives instructions on how to build and install XGBoost from scratch on
.. note:: Use of Git submodules
XGBoost uses Git submodules to manage dependencies. So when you clone the repo, remember to specify ``--recursive`` option:
.. code-block:: bash
git clone --recursive https://github.com/dmlc/xgboost
@ -185,7 +185,9 @@ Building with GPU support
=========================
XGBoost can be built with GPU support for both Linux and Windows using CMake. GPU support works with the Python package as well as the CLI version. See `Installing R package with GPU support`_ for special instructions for R.
An up-to-date version of the CUDA toolkit is required.
An up-to-date version of the CUDA toolkit is required. Please note that we
have disabled support for compiling XGBoost with NVCC 10.1 due to a small bug
in its splitter; see `#4264 <https://github.com/dmlc/xgboost/issues/4264>`_.
From the command line on Linux starting from the XGBoost directory:
@ -448,4 +450,3 @@ Trouble Shooting
.. code-block:: bash
git clone https://github.com/dmlc/xgboost --recursive

View File

@ -26,8 +26,9 @@
#include "../common/io.h"
#endif
// Uncomment to enable
#define TIMERS
#if __CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ == 1
#error "CUDA 10.1 is not supported, see #4264."
#endif
namespace dh {
@ -893,14 +894,14 @@ class AllReducer {
int nccl_nranks = std::accumulate(device_counts.begin(),
device_counts.end(), 0);
nccl_rank += nccl_rank_offset;
GroupStart();
for (size_t i = 0; i < device_ordinals.size(); i++) {
int dev = device_ordinals.at(i);
dh::safe_cuda(cudaSetDevice(dev));
dh::safe_nccl(ncclCommInitRank(
&comms.at(i),
nccl_nranks, id,
nccl_nranks, id,
nccl_rank));
nccl_rank++;