Fix logic in GPU predictor cache lookup (#3217)

* Fix logic in GPU predictor cache lookup

* Add sklearn test for GPU prediction
Rory Mitchell 2018-04-04 15:08:22 +12:00 committed by GitHub
parent a1ec7b1716
commit 443ff746e9
2 changed files with 29 additions and 1 deletion

@@ -267,7 +267,7 @@ class GPUPredictor : public xgboost::Predictor {
     std::shared_ptr<DeviceMatrix> device_matrix;
     // Matrix is not in host cache, create a temporary matrix
-    if (this->cache_.find(dmat) != this->cache_.end()) {
+    if (this->cache_.find(dmat) == this->cache_.end()) {
       device_matrix = std::shared_ptr<DeviceMatrix>(
           new DeviceMatrix(dmat, param.gpu_id, param.silent));
     } else {
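
The fix inverts the comparison: with the original `!=`, a matrix already present in the host cache triggered construction of a redundant temporary DeviceMatrix, while an uncached matrix fell through to the else branch and read a cache entry that does not exist. A minimal Python sketch of the corrected pattern, with `cache` and `make_device_matrix` as illustrative stand-ins rather than the actual C++ members:

# Hypothetical sketch of the fixed lookup logic; these names are
# stand-ins and do not appear in the xgboost sources.
def get_device_matrix(cache, dmat, make_device_matrix):
    if dmat not in cache:
        # Cache miss: build a temporary device-side copy for this call.
        return make_device_matrix(dmat)
    # Cache hit: reuse the device matrix prepared during training.
    return cache[dmat]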

@@ -72,3 +72,31 @@ class TestGPUPredict(unittest.TestCase):
         assert np.allclose(predict0, predict1)
         assert np.allclose(predict0, cpu_predict)
+
+    def test_sklearn(self):
+        m, n = 15000, 14
+        tr_size = 2500
+        X = np.random.rand(m, n)
+        y = 200 * np.matmul(X, np.arange(-3, -3 + n))
+        X_train, y_train = X[:tr_size, :], y[:tr_size]
+        X_test, y_test = X[tr_size:, :], y[tr_size:]
+
+        # First with cpu_predictor
+        params = {'tree_method': 'gpu_hist',
+                  'predictor': 'cpu_predictor',
+                  'n_jobs': -1,
+                  'seed': 123
+                  }
+        m = xgb.XGBRegressor(**params).fit(X_train, y_train)
+        cpu_train_score = m.score(X_train, y_train)
+        cpu_test_score = m.score(X_test, y_test)
+
+        # Now with gpu_predictor
+        params['predictor'] = 'gpu_predictor'
+        m = xgb.XGBRegressor(**params).fit(X_train, y_train)
+        gpu_train_score = m.score(X_train, y_train)
+        gpu_test_score = m.score(X_test, y_test)
+
+        assert np.allclose(cpu_train_score, gpu_train_score)
+        assert np.allclose(cpu_test_score, gpu_test_score)
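
The test trains identical models with the CPU and GPU predictors and asserts that their train and test scores agree. Because the sklearn wrapper builds a fresh DMatrix for each score call, every prediction should take the cache-miss path fixed above. A small sketch of the same scenario through the native API, assuming a CUDA-capable GPU and an xgboost build with GPU support:

import numpy as np
import xgboost as xgb

X_train = np.random.rand(100, 10)
y_train = np.random.rand(100)
dtrain = xgb.DMatrix(X_train, label=y_train)
bst = xgb.train({'tree_method': 'gpu_hist', 'predictor': 'gpu_predictor'},
                dtrain, num_boost_round=10)

# A DMatrix never seen during training is absent from the prediction
# cache, so the predictor must build a temporary DeviceMatrix for it.
dnew = xgb.DMatrix(np.random.rand(50, 10))
preds = bst.predict(dnew)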