Update GPUTreeShap (#6163)

* Reduce shap test duration

* Test interoperability with shap package

* Add feature interactions

* Update GPUTreeShap
This commit is contained in:
Rory Mitchell
2020-09-28 09:43:47 +13:00
committed by GitHub
parent 434a3f35a3
commit dda9e1e487
14 changed files with 176 additions and 87 deletions

View File

@@ -29,6 +29,7 @@ dependencies:
- boto3
- awscli
- pip:
- shap
- guzzle_sphinx_theme
- datatable
- modin[all]

View File

@@ -53,24 +53,28 @@ TEST(CpuPredictor, Basic) {
}
// Test predict contribution
std::vector<float> out_contribution;
cpu_predictor->PredictContribution(dmat.get(), &out_contribution, model);
HostDeviceVector<float> out_contribution_hdv;
auto& out_contribution = out_contribution_hdv.HostVector();
cpu_predictor->PredictContribution(dmat.get(), &out_contribution_hdv, model);
ASSERT_EQ(out_contribution.size(), kRows * (kCols + 1));
for (size_t i = 0; i < out_contribution.size(); ++i) {
auto const& contri = out_contribution[i];
// shift 1 for bias, as test tree is a decision dump, only global bias is filled with LeafValue().
if ((i+1) % (kCols+1) == 0) {
// shift 1 for bias, as test tree is a decision dump, only global bias is
// filled with LeafValue().
if ((i + 1) % (kCols + 1) == 0) {
ASSERT_EQ(out_contribution.back(), 1.5f);
} else {
ASSERT_EQ(contri, 0);
}
}
// Test predict contribution (approximate method)
cpu_predictor->PredictContribution(dmat.get(), &out_contribution, model, 0, nullptr, true);
cpu_predictor->PredictContribution(dmat.get(), &out_contribution_hdv, model,
0, nullptr, true);
for (size_t i = 0; i < out_contribution.size(); ++i) {
auto const& contri = out_contribution[i];
// shift 1 for bias, as test tree is a decision dump, only global bias is filled with LeafValue().
if ((i+1) % (kCols+1) == 0) {
// shift 1 for bias, as test tree is a decision dump, only global bias is
// filled with LeafValue().
if ((i + 1) % (kCols + 1) == 0) {
ASSERT_EQ(out_contribution.back(), 1.5f);
} else {
ASSERT_EQ(contri, 0);
@@ -112,8 +116,9 @@ TEST(CpuPredictor, ExternalMemory) {
}
// Test predict contribution
std::vector<float> out_contribution;
cpu_predictor->PredictContribution(dmat.get(), &out_contribution, model);
HostDeviceVector<float> out_contribution_hdv;
auto& out_contribution = out_contribution_hdv.HostVector();
cpu_predictor->PredictContribution(dmat.get(), &out_contribution_hdv, model);
ASSERT_EQ(out_contribution.size(), dmat->Info().num_row_ * (dmat->Info().num_col_ + 1));
for (size_t i = 0; i < out_contribution.size(); ++i) {
auto const& contri = out_contribution[i];
@@ -126,8 +131,10 @@ TEST(CpuPredictor, ExternalMemory) {
}
// Test predict contribution (approximate method)
std::vector<float> out_contribution_approximate;
cpu_predictor->PredictContribution(dmat.get(), &out_contribution_approximate, model, 0, nullptr, true);
HostDeviceVector<float> out_contribution_approximate_hdv;
auto& out_contribution_approximate = out_contribution_approximate_hdv.HostVector();
cpu_predictor->PredictContribution(
dmat.get(), &out_contribution_approximate_hdv, model, 0, nullptr, true);
ASSERT_EQ(out_contribution_approximate.size(),
dmat->Info().num_row_ * (dmat->Info().num_col_ + 1));
for (size_t i = 0; i < out_contribution.size(); ++i) {

View File

@@ -176,12 +176,13 @@ TEST(GPUPredictor, ShapStump) {
model.CommitModel(std::move(trees), 0);
auto gpu_lparam = CreateEmptyGenericParam(0);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &gpu_lparam));
std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("gpu_predictor", &gpu_lparam));
gpu_predictor->Configure({});
std::vector<float > phis;
auto dmat = RandomDataGenerator(3, 1, 0).GenerateDMatrix();
gpu_predictor->PredictContribution(dmat.get(), &phis, model);
HostDeviceVector<float> predictions;
auto dmat = RandomDataGenerator(3, 1, 0).GenerateDMatrix();
gpu_predictor->PredictContribution(dmat.get(), &predictions, model);
auto& phis = predictions.HostVector();
EXPECT_EQ(phis[0], 0.0);
EXPECT_EQ(phis[1], param.base_score);
EXPECT_EQ(phis[2], 0.0);
@@ -202,19 +203,20 @@ TEST(GPUPredictor, Shap) {
auto gpu_lparam = CreateEmptyGenericParam(0);
auto cpu_lparam = CreateEmptyGenericParam(-1);
std::unique_ptr<Predictor> gpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &gpu_lparam));
std::unique_ptr<Predictor> cpu_predictor =
std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor", &cpu_lparam));
std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("gpu_predictor", &gpu_lparam));
std::unique_ptr<Predictor> cpu_predictor = std::unique_ptr<Predictor>(
Predictor::Create("cpu_predictor", &cpu_lparam));
gpu_predictor->Configure({});
cpu_predictor->Configure({});
std::vector<float > phis;
std::vector<float > cpu_phis;
HostDeviceVector<float> predictions;
HostDeviceVector<float> cpu_predictions;
auto dmat = RandomDataGenerator(3, 1, 0).GenerateDMatrix();
gpu_predictor->PredictContribution(dmat.get(), &phis, model);
cpu_predictor->PredictContribution(dmat.get(), &cpu_phis, model);
for(auto i = 0ull; i < phis.size(); i++)
{
gpu_predictor->PredictContribution(dmat.get(), &predictions, model);
cpu_predictor->PredictContribution(dmat.get(), &cpu_predictions, model);
auto& phis = predictions.HostVector();
auto& cpu_phis = cpu_predictions.HostVector();
for (auto i = 0ull; i < phis.size(); i++) {
EXPECT_NEAR(cpu_phis[i], phis[i], 1e-3);
}
}

View File

@@ -16,7 +16,7 @@ shap_parameter_strategy = strategies.fixed_dictionaries({
'max_depth': strategies.integers(0, 11),
'max_leaves': strategies.integers(0, 256),
'num_parallel_tree': strategies.sampled_from([1, 10]),
})
}).filter(lambda x: x['max_depth'] > 0 or x['max_leaves'] > 0)
class TestGPUPredict(unittest.TestCase):
@@ -194,26 +194,31 @@ class TestGPUPredict(unittest.TestCase):
for i in range(10):
run_threaded_predict(X, rows, predict_df)
@given(strategies.integers(1, 200),
tm.dataset_strategy, shap_parameter_strategy, strategies.booleans())
@given(strategies.integers(1, 10),
tm.dataset_strategy, shap_parameter_strategy)
@settings(deadline=None)
def test_shap(self, num_rounds, dataset, param, all_rows):
if param['max_depth'] == 0 and param['max_leaves'] == 0:
return
def test_shap(self, num_rounds, dataset, param):
param.update({"predictor": "gpu_predictor", "gpu_id": 0})
param = dataset.set_params(param)
dmat = dataset.get_dmat()
bst = xgb.train(param, dmat, num_rounds)
if all_rows:
test_dmat = xgb.DMatrix(dataset.X, dataset.y, dataset.w, dataset.margin)
else:
test_dmat = xgb.DMatrix(dataset.X[0:1, :])
test_dmat = xgb.DMatrix(dataset.X, dataset.y, dataset.w, dataset.margin)
shap = bst.predict(test_dmat, pred_contribs=True)
bst.set_param({"predictor": "cpu_predictor"})
cpu_shap = bst.predict(test_dmat, pred_contribs=True)
margin = bst.predict(test_dmat, output_margin=True)
assert np.allclose(shap, cpu_shap, 1e-3, 1e-3)
# feature contributions should add up to predictions
assume(len(dataset.y) > 0)
assert np.allclose(np.sum(shap, axis=len(shap.shape) - 1), margin, 1e-3, 1e-3)
# Property-based test: SHAP interaction values computed on the GPU must be
# internally consistent with the model's margin predictions.
@given(strategies.integers(1, 10),
tm.dataset_strategy, shap_parameter_strategy)
@settings(deadline=None, max_examples=20)
def test_shap_interactions(self, num_rounds, dataset, param):
# Force prediction onto the GPU predictor before training.
param.update({"predictor": "gpu_predictor", "gpu_id": 0})
param = dataset.set_params(param)
dmat = dataset.get_dmat()
bst = xgb.train(param, dmat, num_rounds)
# Predict interactions on the full dataset, including weights/margin.
test_dmat = xgb.DMatrix(dataset.X, dataset.y, dataset.w, dataset.margin)
shap = bst.predict(test_dmat, pred_interactions=True)
margin = bst.predict(test_dmat, output_margin=True)
# An empty dataset makes the consistency check vacuous; discard the example.
assume(len(dataset.y) > 0)
# Interaction contributions summed over the last two axes must reproduce
# the raw margin prediction (the SHAP additivity property).
assert np.allclose(np.sum(shap, axis=(len(shap.shape) - 1, len(shap.shape) - 2)), margin,
1e-3, 1e-3)

View File

@@ -0,0 +1,25 @@
import numpy as np
import xgboost as xgb
import testing as tm
import pytest
# Gate this whole module on the optional `shap` dependency: `shap` is left
# as None when the package is missing, and the skipif mark below then skips
# every test in the file.
try:
    import shap
except ImportError:
    # Redundant `pass` removed: the assignment alone is a complete handler.
    shap = None

pytestmark = pytest.mark.skipif(shap is None, reason="Requires shap package")

# Check that integration is not broken from the xgboost side.
# Changes in xgboost's binary model format may cause problems here.
def test_with_shap():
    """Verify that shap's TreeExplainer agrees with xgboost's own margins.

    Trains a small regression booster on the Boston dataset and checks the
    SHAP additivity property: per-feature contributions plus the explainer's
    expected value must sum to xgboost's raw margin predictions.
    """
    X, y = shap.datasets.boston()
    dmat = xgb.DMatrix(X, label=y)
    booster = xgb.train({"learning_rate": 0.01}, dmat, 10)
    # Raw (untransformed) predictions to compare the contributions against.
    margin = booster.predict(dmat, output_margin=True)
    explainer = shap.TreeExplainer(booster)
    contribs = explainer.shap_values(X)
    # Sum contributions over the last axis (one value per feature).
    summed = np.sum(contribs, axis=len(contribs.shape) - 1)
    assert np.allclose(summed, margin - explainer.expected_value, 1e-3, 1e-3)