diff --git a/appveyor.yml b/appveyor.yml index 3f3405e64..0f1008568 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -54,6 +54,8 @@ install: Bootstrap $DEPS = "c('data.table','magrittr','stringi','ggplot2','DiagrammeR','Ckmeans.1d.dp','vcd','testthat','lintr','igraph','knitr','rmarkdown')" cmd.exe /c "R.exe -q -e ""install.packages($DEPS, repos='$CRAN', type='both')"" 2>&1" + $BINARY_DEPS = "c('XML')" + cmd.exe /c "R.exe -q -e ""install.packages($BINARY_DEPS, repos='$CRAN', type='win.binary')"" 2>&1" } build_script: diff --git a/doc/build.md b/doc/build.md index 28ade8639..f61a2fb2b 100644 --- a/doc/build.md +++ b/doc/build.md @@ -167,7 +167,12 @@ Other versions of Visual Studio may work but are untested. ### Building with GPU support -XGBoost can be built with GPU support for both Linux and Windows using cmake. GPU support works with the Python package as well as the CLI version. See [Installing R package with GPU support](#installing-r-package-with-gpu-support) for special instructions for R. +Linux users may simply install prebuilt python binaries: +```bash +pip install xgboost +``` + +XGBoost can be built from source with GPU support for both Linux and Windows using cmake. GPU support works with the Python package as well as the CLI version. See [Installing R package with GPU support](#installing-r-package-with-gpu-support) for special instructions for R. An up-to-date version of the CUDA toolkit is required. diff --git a/src/c_api/c_api.cc b/src/c_api/c_api.cc index 988f43225..c4c293846 100644 --- a/src/c_api/c_api.cc +++ b/src/c_api/c_api.cc @@ -194,9 +194,9 @@ struct XGBAPIThreadLocalEntry { /*! \brief result holder for returning string pointers */ std::vector<const char *> ret_vec_charp; /*! \brief returning float vector. */ - HostDeviceVector<bst_float> ret_vec_float; + std::vector<bst_float> ret_vec_float; /*! \brief temp variable of gradient pairs. */ - HostDeviceVector<GradientPair> tmp_gpair; + std::vector<GradientPair> tmp_gpair; }; // define the threadlocal store. 
@@ -861,7 +861,7 @@ XGB_DLL int XGBoosterBoostOneIter(BoosterHandle handle, bst_float *grad, bst_float *hess, xgboost::bst_ulong len) { - HostDeviceVector<GradientPair>& tmp_gpair = XGBAPIThreadLocalStore::Get()->tmp_gpair; + HostDeviceVector<GradientPair> tmp_gpair; API_BEGIN(); CHECK_HANDLE(); auto* bst = static_cast<Booster*>(handle); @@ -908,22 +908,24 @@ XGB_DLL int XGBoosterPredict(BoosterHandle handle, unsigned ntree_limit, xgboost::bst_ulong *len, const bst_float **out_result) { - HostDeviceVector<bst_float>& preds = + std::vector<bst_float>& preds = XGBAPIThreadLocalStore::Get()->ret_vec_float; API_BEGIN(); CHECK_HANDLE(); auto *bst = static_cast<Booster*>(handle); bst->LazyInit(); + HostDeviceVector<bst_float> tmp_preds; bst->learner()->Predict( static_cast<std::shared_ptr<DMatrix>*>(dmat)->get(), (option_mask & 1) != 0, - &preds, ntree_limit, + &tmp_preds, ntree_limit, (option_mask & 2) != 0, (option_mask & 4) != 0, (option_mask & 8) != 0, (option_mask & 16) != 0); - *out_result = dmlc::BeginPtr(preds.HostVector()); - *len = static_cast<xgboost::bst_ulong>(preds.Size()); + preds = tmp_preds.HostVector(); + *out_result = dmlc::BeginPtr(preds); + *len = static_cast<xgboost::bst_ulong>(preds.size()); API_END(); } diff --git a/src/common/column_matrix.h b/src/common/column_matrix.h index c8363054c..47e6d3310 100644 --- a/src/common/column_matrix.h +++ b/src/common/column_matrix.h @@ -122,6 +122,7 @@ class ColumnMatrix { boundary_[fid].row_ind_begin = accum_row_ind_; if (type_[fid] == kDenseColumn) { accum_index_ += static_cast<size_t>(nrow); + accum_row_ind_ += static_cast<size_t>(nrow); } else { accum_index_ += feature_counts_[fid]; accum_row_ind_ += feature_counts_[fid]; diff --git a/tests/ci_build/test_gpu.sh b/tests/ci_build/test_gpu.sh index ee8c98109..dbef1bdde 100755 --- a/tests/ci_build/test_gpu.sh +++ b/tests/ci_build/test_gpu.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +set -e cd python-package python setup.py install --user diff --git a/tests/python-gpu/test_gpu_linear.py b/tests/python-gpu/test_gpu_linear.py index ad727afeb..a56230463 100644 --- a/tests/python-gpu/test_gpu_linear.py +++ 
b/tests/python-gpu/test_gpu_linear.py @@ -9,6 +9,10 @@ import unittest class TestGPULinear(unittest.TestCase): def test_gpu_coordinate(self): tm._skip_if_no_sklearn() - variable_param = {'alpha': [.005, .1], 'lambda': [0.005], - 'coordinate_selection': ['cyclic', 'random', 'greedy'], 'n_gpus': [-1, 1]} - test_linear.assert_updater_accuracy('gpu_coord_descent', variable_param) + variable_param = {'booster': ['gblinear'], 'updater': ['coord_descent'], 'eta': [0.5], + 'top_k': [10], 'tolerance': [1e-5], 'nthread': [2], 'alpha': [.005, .1], 'lambda': [0.005], + 'coordinate_selection': ['cyclic', 'random', 'greedy'], 'n_gpus': [-1]} + for param in test_linear.parameter_combinations(variable_param): + results = test_linear.run_suite(param, 200, None, scale_features=True) + test_linear.assert_regression_result(results, 1e-2) + test_linear.assert_classification_result(results) diff --git a/tests/python-gpu/test_gpu_updaters.py b/tests/python-gpu/test_gpu_updaters.py index 670a223a0..df0fa9958 100644 --- a/tests/python-gpu/test_gpu_updaters.py +++ b/tests/python-gpu/test_gpu_updaters.py @@ -29,9 +29,9 @@ class TestGPU(unittest.TestCase): assert_gpu_results(cpu_results, gpu_results) def test_gpu_hist(self): - variable_param = {'n_gpus': [1, -1], 'max_depth': [2, 6], 'max_leaves': [255, 4], - 'max_bin': [2, 16, 1024], - 'grow_policy': ['depthwise', 'lossguide']} + variable_param = {'n_gpus': [-1], 'max_depth': [2, 10], 'max_leaves': [255, 4], + 'max_bin': [2, 256], + 'grow_policy': ['lossguide']} for param in parameter_combinations(variable_param): param['tree_method'] = 'gpu_hist' gpu_results = run_suite(param, select_datasets=datasets)