Added finding quantiles on GPU. (#3393)

* Added finding quantiles on GPU.

- This includes datasets where weights are assigned to data rows (a brief
  sketch of what weighted quantile cuts mean follows below).
- As the quantiles found by the new algorithm are not the same as those found
  by the old one, test thresholds in tests/python-gpu/test_gpu_updaters.py
  have been adjusted.

* Adjustments and improved testing for finding quantiles on the GPU.

- Added C++ tests for the DeviceSketch() function.
- Reduced one of the thresholds in test_gpu_updaters.py.
- Adjusted the cuts found by the find_cuts_k kernel.
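
For readers unfamiliar with the feature: the quantile cuts computed by DeviceSketch are bin boundaries chosen so that each bin covers roughly the same total row weight (for unweighted data every row simply counts as 1). A minimal CPU-side sketch of that idea, using plain NumPy rather than the actual GPU algorithm; all names below are illustrative only:

    import numpy as np

    def weighted_quantile_cuts(values, weights, max_bin):
        # Illustrative only: choose cut points so each bin covers ~equal total weight.
        order = np.argsort(values)
        v, w = values[order], weights[order]
        cum = np.cumsum(w)                             # running total weight
        targets = np.linspace(0, cum[-1], max_bin + 1)[1:-1]
        idx = np.searchsorted(cum, targets)            # rows where each weight quantile falls
        return np.unique(v[idx])

    rng = np.random.RandomState(0)
    x = rng.uniform(0, 100, size=10001)
    w = rng.uniform(1, 10, size=x.size)
    print(weighted_quantile_cuts(x, w, max_bin=20))    # up to 19 interior cut values

The real implementations (both CPU and GPU) use approximate streaming sketches rather than an exact sort, which is why the two can legitimately disagree and why the test thresholds below were relaxed.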
Andy Adinets authored on 2018-07-27 04:03:16 +02:00; committed by Rory Mitchell
parent e2f09db77a
commit cc6a5a3666
14 changed files with 691 additions and 116 deletions

View File

@@ -0,0 +1,60 @@
#include "../../../src/common/device_helpers.cuh"
#include "../../../src/common/hist_util.h"
#include "gtest/gtest.h"
#include "xgboost/c_api.h"
#include <algorithm>
#include <cmath>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
namespace xgboost {
namespace common {
TEST(gpu_hist_util, TestDeviceSketch) {
// create the data
int nrows = 10001;
std::vector<float> test_data(nrows);
auto count_iter = thrust::make_counting_iterator(0);
// fill in reverse order
std::copy(count_iter, count_iter + nrows, test_data.rbegin());
// create the DMatrix
DMatrixHandle dmat_handle;
XGDMatrixCreateFromMat(test_data.data(), nrows, 1, -1,
&dmat_handle);
auto dmat = *static_cast<std::shared_ptr<xgboost::DMatrix> *>(dmat_handle);
// parameters for finding quantiles
tree::TrainParam p;
p.max_bin = 20;
p.gpu_id = 0;
p.n_gpus = 1;
// ensure that the exact quantiles are found
p.gpu_batch_nrows = nrows * 10;
// find quantiles on the CPU
HistCutMatrix hmat_cpu;
hmat_cpu.Init(dmat.get(), p.max_bin);
// find the cuts on the GPU
dmlc::DataIter<SparsePage>* iter = dmat->RowIterator();
iter->BeforeFirst();
CHECK(iter->Next());
const SparsePage& batch = iter->Value();
HistCutMatrix hmat_gpu;
DeviceSketch(batch, dmat->Info(), p, &hmat_gpu);
CHECK(!iter->Next());
// compare the cuts
double eps = 1e-2;
ASSERT_EQ(hmat_gpu.min_val.size(), 1);
ASSERT_EQ(hmat_gpu.row_ptr.size(), 2);
ASSERT_EQ(hmat_gpu.cut.size(), hmat_cpu.cut.size());
ASSERT_LT(fabs(hmat_cpu.min_val[0] - hmat_gpu.min_val[0]), eps * nrows);
for (int i = 0; i < hmat_gpu.cut.size(); ++i) {
ASSERT_LT(fabs(hmat_cpu.cut[i] - hmat_gpu.cut[i]), eps * nrows);
}
}
} // namespace common
} // namespace xgboost
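
For orientation, the path this test exercises is the quantile sketch used by the GPU histogram updater; at the user level it is reached when training with tree_method='gpu_hist'. A small, hedged end-to-end example on synthetic data (parameter names are the standard XGBoost ones of this era, not taken from the diff):

    import numpy as np
    import xgboost as xgb

    rng = np.random.RandomState(42)
    X = rng.randn(10001, 1).astype(np.float32)
    y = X[:, 0] + 0.1 * rng.randn(10001)
    w = rng.uniform(1, 10, size=10001)     # per this commit, row weights now flow through the GPU sketch too

    dtrain = xgb.DMatrix(X, label=y, weight=w)
    params = {'tree_method': 'gpu_hist', 'max_bin': 20, 'objective': 'reg:linear'}
    bst = xgb.train(params, dtrain, num_boost_round=10)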

View File

@@ -30,8 +30,9 @@ TEST(gpu_hist_experimental, TestSparseShard) {
   iter->BeforeFirst();
   CHECK(iter->Next());
   const SparsePage& batch = iter->Value();
-  DeviceShard shard(0, 0, 0, rows, hmat.row_ptr.back(), p);
-  shard.Init(hmat, batch);
+  DeviceShard shard(0, 0, 0, rows, p);
+  shard.InitRowPtrs(batch);
+  shard.InitCompressedData(hmat, batch);
   CHECK(!iter->Next());
   ASSERT_LT(shard.row_stride, columns);

@@ -72,8 +73,9 @@ TEST(gpu_hist_experimental, TestDenseShard) {
   CHECK(iter->Next());
   const SparsePage& batch = iter->Value();
-  DeviceShard shard(0, 0, 0, rows, hmat.row_ptr.back(), p);
-  shard.Init(hmat, batch);
+  DeviceShard shard(0, 0, 0, rows, p);
+  shard.InitRowPtrs(batch);
+  shard.InitCompressedData(hmat, batch);
   CHECK(!iter->Next());
   ASSERT_EQ(shard.row_stride, columns);

View File

@@ -7,12 +7,26 @@ import unittest
 class TestGPULinear(unittest.TestCase):
+    datasets = ["Boston", "Digits", "Cancer", "Sparse regression",
+                "Boston External Memory"]
+
     def test_gpu_coordinate(self):
         tm._skip_if_no_sklearn()
-        variable_param = {'booster': ['gblinear'], 'updater': ['coord_descent'], 'eta': [0.5],
-                          'top_k': [10], 'tolerance': [1e-5], 'nthread': [2], 'alpha': [.005, .1], 'lambda': [0.005],
-                          'coordinate_selection': ['cyclic', 'random', 'greedy'], 'n_gpus': [-1]}
+        variable_param = {
+            'booster': ['gblinear'],
+            'updater': ['coord_descent'],
+            'eta': [0.5],
+            'top_k': [10],
+            'tolerance': [1e-5],
+            'nthread': [2],
+            'alpha': [.005, .1],
+            'lambda': [0.005],
+            'coordinate_selection': ['cyclic', 'random', 'greedy'],
+            'n_gpus': [-1]
+        }
         for param in test_linear.parameter_combinations(variable_param):
-            results = test_linear.run_suite(param, 200, None, scale_features=True)
+            results = test_linear.run_suite(
+                param, 200, self.datasets, scale_features=True)
             test_linear.assert_regression_result(results, 1e-2)
             test_linear.assert_classification_result(results)
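
parameter_combinations comes from the shared test utilities and expands each list-valued entry of variable_param into single-valued parameter dictionaries, so the block above yields 2 x 3 = 6 configurations (two alpha values times three coordinate_selection choices). A hedged stand-in with the behaviour assumed here, in case the helper itself is unfamiliar (not the actual implementation):

    import itertools

    def parameter_combinations(variable_param):
        # Illustrative re-implementation: one dict per element of the Cartesian product.
        names = sorted(variable_param)
        return [dict(zip(names, combo))
                for combo in itertools.product(*(variable_param[n] for n in names))]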

View File

@@ -11,11 +11,10 @@ from regression_test_utilities import run_suite, parameter_combinations, \
 def assert_gpu_results(cpu_results, gpu_results):
     for cpu_res, gpu_res in zip(cpu_results, gpu_results):
         # Check final eval result roughly equivalent
-        assert np.allclose(cpu_res["eval"][-1], gpu_res["eval"][-1], 1e-3, 1e-2)
+        assert np.allclose(cpu_res["eval"][-1], gpu_res["eval"][-1], 1e-2, 1e-2)

-datasets = ["Boston", "Cancer", "Digits", "Sparse regression"]
+datasets = ["Boston", "Cancer", "Digits", "Sparse regression",
+            "Sparse regression with weights"]

 class TestGPU(unittest.TestCase):
     def test_gpu_exact(self):
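
The relaxed rtol matters mainly for metrics much larger than 1, such as RMSE on unscaled regression targets: np.allclose(a, b, rtol, atol) accepts |a - b| <= atol + rtol * |b| elementwise, so raising rtol from 1e-3 to 1e-2 gives the GPU quantiles room to produce slightly different trees. A quick illustration with made-up numbers:

    import numpy as np

    cpu_rmse, gpu_rmse = 25.00, 25.20
    # np.allclose(a, b, rtol, atol) checks |a - b| <= atol + rtol * |b|
    print(np.allclose(cpu_rmse, gpu_rmse, 1e-3, 1e-2))  # False: 0.20 > 0.01 + 0.001 * 25.20
    print(np.allclose(cpu_rmse, gpu_rmse, 1e-2, 1e-2))  # True:  0.20 <= 0.01 + 0.01 * 25.20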

View File

@@ -15,11 +15,16 @@ except ImportError:
 class Dataset:
-    def __init__(self, name, get_dataset, objective, metric, use_external_memory=False):
+    def __init__(self, name, get_dataset, objective, metric,
+                 has_weights=False, use_external_memory=False):
         self.name = name
         self.objective = objective
         self.metric = metric
-        self.X, self.y = get_dataset()
+        if has_weights:
+            self.X, self.y, self.w = get_dataset()
+        else:
+            self.X, self.y = get_dataset()
+            self.w = None
         self.use_external_memory = use_external_memory

@@ -49,6 +54,16 @@ def get_sparse():
     return X, y

+def get_sparse_weights():
+    rng = np.random.RandomState(199)
+    n = 10000
+    sparsity = 0.25
+    X, y = datasets.make_regression(n, random_state=rng)
+    X = np.array([[np.nan if rng.uniform(0, 1) < sparsity else x for x in x_row] for x_row in X])
+    w = np.array([rng.uniform(1, 10) for i in range(n)])
+    return X, y, w
+
 def train_dataset(dataset, param_in, num_rounds=10, scale_features=False):
     param = param_in.copy()
     param["objective"] = dataset.objective

@@ -64,9 +79,10 @@ def train_dataset(dataset, param_in, num_rounds=10, scale_features=False):
     if dataset.use_external_memory:
         np.savetxt('tmptmp_1234.csv', np.hstack((dataset.y.reshape(len(dataset.y), 1), X)),
                    delimiter=',')
-        dtrain = xgb.DMatrix('tmptmp_1234.csv?format=csv&label_column=0#tmptmp_')
+        dtrain = xgb.DMatrix('tmptmp_1234.csv?format=csv&label_column=0#tmptmp_',
+                             weight=dataset.w)
     else:
-        dtrain = xgb.DMatrix(X, dataset.y)
+        dtrain = xgb.DMatrix(X, dataset.y, weight=dataset.w)

     print("Training on dataset: " + dataset.name, file=sys.stderr)
     print("Using parameters: " + str(param), file=sys.stderr)

@@ -112,6 +128,8 @@ def run_suite(param, num_rounds=10, select_datasets=None, scale_features=False):
         Dataset("Digits", get_digits, "multi:softmax", "merror"),
         Dataset("Cancer", get_cancer, "binary:logistic", "error"),
         Dataset("Sparse regression", get_sparse, "reg:linear", "rmse"),
+        Dataset("Sparse regression with weights", get_sparse_weights,
+                "reg:linear", "rmse", has_weights=True),
         Dataset("Boston External Memory", get_boston, "reg:linear", "rmse",
                 use_external_memory=True)
     ]
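
One detail worth noting in the change above: train_dataset now passes weight=dataset.w unconditionally, which is safe because self.w is set to None for unweighted datasets and xgb.DMatrix's weight parameter defaults to None anyway. A short, hedged illustration using the same recipe as get_sparse_weights (smaller n, vectorised NaN masking instead of the list comprehension):

    import numpy as np
    import xgboost as xgb
    from sklearn import datasets

    rng = np.random.RandomState(199)
    n, sparsity = 1000, 0.25
    X, y = datasets.make_regression(n, random_state=rng)
    X[rng.uniform(size=X.shape) < sparsity] = np.nan    # ~25% missing entries
    w = rng.uniform(1, 10, size=n)

    weighted = xgb.DMatrix(X, y, weight=w)
    unweighted = xgb.DMatrix(X, y, weight=None)          # equivalent to omitting the argument
    print(weighted.num_row(), unweighted.num_row())      # both 1000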

View File

@@ -52,6 +52,10 @@ def assert_classification_result(results):
 class TestLinear(unittest.TestCase):
+    datasets = ["Boston", "Digits", "Cancer", "Sparse regression",
+                "Boston External Memory"]
+
     def test_coordinate(self):
         tm._skip_if_no_sklearn()
         variable_param = {'booster': ['gblinear'], 'updater': ['coord_descent'], 'eta': [0.5],

@@ -60,7 +64,7 @@ class TestLinear(unittest.TestCase):
                           'feature_selector': ['cyclic', 'shuffle', 'greedy', 'thrifty']
                           }
         for param in parameter_combinations(variable_param):
-            results = run_suite(param, 200, None, scale_features=True)
+            results = run_suite(param, 200, self.datasets, scale_features=True)
             assert_regression_result(results, 1e-2)
             assert_classification_result(results)

@@ -72,6 +76,6 @@ class TestLinear(unittest.TestCase):
                           'feature_selector': ['cyclic', 'shuffle']
                           }
         for param in parameter_combinations(variable_param):
-            results = run_suite(param, 200, None, True)
+            results = run_suite(param, 200, self.datasets, True)
             assert_regression_result(results, 1e-2)
             assert_classification_result(results)
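
For reference on the None -> self.datasets change in these calls: run_suite presumably filters its built-in dataset list by name when select_datasets is given, so the linear tests keep running on the same five datasets as before rather than picking up the new weighted regression dataset. A hedged sketch of that filtering, not the actual run_suite body:

    def filter_datasets(all_datasets, select_datasets):
        # None keeps everything; otherwise keep only the datasets named in the list.
        if select_datasets is None:
            return all_datasets
        return [d for d in all_datasets if d.name in select_datasets]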