Updates for GPU CI tests (#3467)
* Fail GPU CI after test failure
* Fix GPU linear tests
* Reduced number of GPU tests to speed up CI
* Remove static allocations of device memory
* Resolve illegal memory access for updater_fast_hist.cc
* Fix broken r tests dependency
* Update python install documentation for GPU
This commit is contained in:
parent a13e29ece1
commit 1b59316444
@@ -54,6 +54,8 @@ install:
       Bootstrap
       $DEPS = "c('data.table','magrittr','stringi','ggplot2','DiagrammeR','Ckmeans.1d.dp','vcd','testthat','lintr','igraph','knitr','rmarkdown')"
       cmd.exe /c "R.exe -q -e ""install.packages($DEPS, repos='$CRAN', type='both')"" 2>&1"
+      $BINARY_DEPS = "c('XML')"
+      cmd.exe /c "R.exe -q -e ""install.packages($BINARY_DEPS, repos='$CRAN', type='win.binary')"" 2>&1"
     }

 build_script:
@@ -167,7 +167,12 @@ Other versions of Visual Studio may work but are untested.

 ### Building with GPU support

-XGBoost can be built with GPU support for both Linux and Windows using cmake. GPU support works with the Python package as well as the CLI version. See [Installing R package with GPU support](#installing-r-package-with-gpu-support) for special instructions for R.
+Linux users may simply install prebuilt python binaries:
+```bash
+pip install xgboost
+```
+
+XGBoost can be built from source with GPU support for both Linux and Windows using cmake. GPU support works with the Python package as well as the CLI version. See [Installing R package with GPU support](#installing-r-package-with-gpu-support) for special instructions for R.

 An up-to-date version of the CUDA toolkit is required.

@@ -194,9 +194,9 @@ struct XGBAPIThreadLocalEntry {
   /*! \brief result holder for returning string pointers */
   std::vector<const char *> ret_vec_charp;
   /*! \brief returning float vector. */
-  HostDeviceVector<bst_float> ret_vec_float;
+  std::vector<bst_float> ret_vec_float;
   /*! \brief temp variable of gradient pairs. */
-  HostDeviceVector<GradientPair> tmp_gpair;
+  std::vector<GradientPair> tmp_gpair;
 };

 // define the threadlocal store.
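The two `HostDeviceVector` members replaced above are what the "Remove static allocations of device memory" bullet refers to: this struct lives in a thread-local static (via `dmlc::ThreadLocalStore`), so any device memory its members hold is allocated per API thread and retained until thread exit. A minimal C++ sketch of the pattern the hunk moves to, with a plain `thread_local` standing in for `dmlc::ThreadLocalStore` and simplified element types:

```cpp
#include <vector>

// Illustrative stand-in for xgboost's XGBAPIThreadLocalEntry after this
// change: only host-side containers remain, so the thread-local static
// allocates no CUDA memory and nothing can outlive the device context.
struct ThreadLocalEntrySketch {
  std::vector<float> ret_vec_float;  // result holder handed back via the C API
  std::vector<float> tmp_gpair;      // simplified; really GradientPair
};

// A static HostDeviceVector here would instead pin device memory for the
// whole life of the thread, which is exactly what the commit removes.
thread_local ThreadLocalEntrySketch xgb_api_entry;

int main() {
  xgb_api_entry.ret_vec_float = {1.0f};  // per-thread scratch reused across calls
  return 0;
}
```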
@@ -861,7 +861,7 @@ XGB_DLL int XGBoosterBoostOneIter(BoosterHandle handle,
                                   bst_float *grad,
                                   bst_float *hess,
                                   xgboost::bst_ulong len) {
-  HostDeviceVector<GradientPair>& tmp_gpair = XGBAPIThreadLocalStore::Get()->tmp_gpair;
+  HostDeviceVector<GradientPair> tmp_gpair;
   API_BEGIN();
   CHECK_HANDLE();
   auto* bst = static_cast<Booster*>(handle);
@@ -908,22 +908,24 @@ XGB_DLL int XGBoosterPredict(BoosterHandle handle,
                              unsigned ntree_limit,
                              xgboost::bst_ulong *len,
                              const bst_float **out_result) {
-  HostDeviceVector<bst_float>& preds =
+  std::vector<bst_float>& preds =
       XGBAPIThreadLocalStore::Get()->ret_vec_float;
   API_BEGIN();
   CHECK_HANDLE();
   auto *bst = static_cast<Booster*>(handle);
   bst->LazyInit();
+  HostDeviceVector<bst_float> tmp_preds;
   bst->learner()->Predict(
       static_cast<std::shared_ptr<DMatrix>*>(dmat)->get(),
       (option_mask & 1) != 0,
-      &preds, ntree_limit,
+      &tmp_preds, ntree_limit,
       (option_mask & 2) != 0,
       (option_mask & 4) != 0,
       (option_mask & 8) != 0,
       (option_mask & 16) != 0);
-  *out_result = dmlc::BeginPtr(preds.HostVector());
-  *len = static_cast<xgboost::bst_ulong>(preds.Size());
+  preds = tmp_preds.HostVector();
+  *out_result = dmlc::BeginPtr(preds);
+  *len = static_cast<xgboost::bst_ulong>(preds.size());
   API_END();
 }

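With the thread-local store now host-only, `XGBoosterPredict` makes the device-backed buffer a local temporary and copies the finished predictions into the thread-local host vector, which must outlive the call because the C API returns a raw pointer into it. A self-contained sketch of that flow, with `HostDeviceVectorSketch` standing in for xgboost's real `HostDeviceVector` (which mirrors data between host and device on demand):

```cpp
#include <cstddef>
#include <vector>

struct HostDeviceVectorSketch {
  std::vector<float> data_;  // device side omitted in this sketch
  std::vector<float>& HostVector() { return data_; }
};

thread_local std::vector<float> ret_vec_float;  // host-only result holder

const float* PredictSketch(std::size_t* len) {
  std::vector<float>& preds = ret_vec_float;
  // The device-backed buffer is local: its GPU memory would be released
  // when the call returns instead of living in the thread-local store.
  HostDeviceVectorSketch tmp_preds;
  tmp_preds.data_ = {0.25f, 0.75f};   // placeholder for learner->Predict(...)
  preds = tmp_preds.HostVector();     // copy into host storage that stays
                                      // valid after this function exits
  *len = preds.size();
  return preds.data();                // pointer handed back through the C API
}

int main() {
  std::size_t len = 0;
  const float* out = PredictSketch(&len);
  return (out != nullptr && len == 2) ? 0 : 1;
}
```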
@@ -122,6 +122,7 @@ class ColumnMatrix {
       boundary_[fid].row_ind_begin = accum_row_ind_;
       if (type_[fid] == kDenseColumn) {
         accum_index_ += static_cast<size_t>(nrow);
+        accum_row_ind_ += static_cast<size_t>(nrow);
       } else {
         accum_index_ += feature_counts_[fid];
         accum_row_ind_ += feature_counts_[fid];
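The hunk header (`-122,6 +122,7`) admits a single added line; read together with the "Resolve illegal memory access" bullet, the dense branch was advancing `accum_index_` but not `accum_row_ind_`, so every feature after a dense column inherited a stale `row_ind_begin`. A hedged sketch of that bookkeeping with illustrative types, not the real `ColumnMatrix` code:

```cpp
#include <cstddef>
#include <vector>

enum ColumnType { kDenseColumn, kSparseColumn };
struct Boundary { std::size_t index_begin; std::size_t row_ind_begin; };

// Each feature's begin offsets come from running cursors, so both cursors
// must advance in every branch or the next feature's slice is misplaced.
std::vector<Boundary> ComputeBoundaries(const std::vector<ColumnType>& type,
                                        const std::vector<std::size_t>& counts,
                                        std::size_t nrow) {
  std::vector<Boundary> boundary(type.size());
  std::size_t accum_index = 0;
  std::size_t accum_row_ind = 0;
  for (std::size_t fid = 0; fid < type.size(); ++fid) {
    boundary[fid] = {accum_index, accum_row_ind};
    if (type[fid] == kDenseColumn) {
      accum_index += nrow;
      accum_row_ind += nrow;  // the added line: without it, later writes
                              // land outside the feature's own slice
    } else {
      accum_index += counts[fid];
      accum_row_ind += counts[fid];
    }
  }
  return boundary;
}

int main() {
  auto b = ComputeBoundaries({kDenseColumn, kSparseColumn}, {0, 3}, 10);
  return b[1].row_ind_begin == 10 ? 0 : 1;  // dense column consumed 10 slots
}
```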
@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+set -e

 cd python-package
 python setup.py install --user
@@ -9,6 +9,10 @@ import unittest
 class TestGPULinear(unittest.TestCase):
     def test_gpu_coordinate(self):
         tm._skip_if_no_sklearn()
-        variable_param = {'alpha': [.005, .1], 'lambda': [0.005],
-                          'coordinate_selection': ['cyclic', 'random', 'greedy'], 'n_gpus': [-1, 1]}
-        test_linear.assert_updater_accuracy('gpu_coord_descent', variable_param)
+        variable_param = {'booster': ['gblinear'], 'updater': ['coord_descent'], 'eta': [0.5],
+                          'top_k': [10], 'tolerance': [1e-5], 'nthread': [2], 'alpha': [.005, .1], 'lambda': [0.005],
+                          'coordinate_selection': ['cyclic', 'random', 'greedy'], 'n_gpus': [-1]}
+        for param in test_linear.parameter_combinations(variable_param):
+            results = test_linear.run_suite(param, 200, None, scale_features=True)
+            test_linear.assert_regression_result(results, 1e-2)
+            test_linear.assert_classification_result(results)
@@ -29,9 +29,9 @@ class TestGPU(unittest.TestCase):
         assert_gpu_results(cpu_results, gpu_results)

     def test_gpu_hist(self):
-        variable_param = {'n_gpus': [1, -1], 'max_depth': [2, 6], 'max_leaves': [255, 4],
-                          'max_bin': [2, 16, 1024],
-                          'grow_policy': ['depthwise', 'lossguide']}
+        variable_param = {'n_gpus': [-1], 'max_depth': [2, 10], 'max_leaves': [255, 4],
+                          'max_bin': [2, 256],
+                          'grow_policy': ['lossguide']}
         for param in parameter_combinations(variable_param):
             param['tree_method'] = 'gpu_hist'
             gpu_results = run_suite(param, select_datasets=datasets)
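The reduced grid is where the "speed up CI" bullet comes from: assuming `parameter_combinations` expands the value lists into their Cartesian product (a hedged reading of the test helper, sketched below in C++), `test_gpu_hist` drops from 2·2·2·3·2 = 48 configurations to 1·2·2·2·1 = 8.

```cpp
#include <cstdio>
#include <map>
#include <string>
#include <utility>
#include <vector>

using Params = std::map<std::string, std::string>;

// Expand a {name: candidate values} grid into every full parameter set,
// mirroring what the Python helper presumably does.
std::vector<Params> ParameterCombinations(
    const std::map<std::string, std::vector<std::string>>& grid) {
  std::vector<Params> out{Params{}};
  for (const auto& kv : grid) {
    std::vector<Params> next;
    for (const auto& partial : out) {
      for (const auto& value : kv.second) {
        Params p = partial;
        p[kv.first] = value;  // extend each partial combination by one axis
        next.push_back(std::move(p));
      }
    }
    out = std::move(next);
  }
  return out;
}

int main() {
  std::map<std::string, std::vector<std::string>> grid = {
      {"n_gpus", {"-1"}},          {"max_depth", {"2", "10"}},
      {"max_leaves", {"255", "4"}}, {"max_bin", {"2", "256"}},
      {"grow_policy", {"lossguide"}}};
  std::printf("%zu configurations\n", ParameterCombinations(grid).size());  // 8
  return 0;
}
```

Each configuration still runs the full `run_suite` datasets, so shrinking the grid cuts CI time roughly sixfold without dropping coverage of the depthwise/lossguide split entirely (depthwise remains exercised by other tests in the suite).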