[pyspark] Make Xgboost estimator support using sparse matrix as optimization (#8145)

Signed-off-by: Weichen Xu <weichen.xu@databricks.com>
This commit is contained in:
WeichenXu
2022-08-19 01:57:28 +08:00
committed by GitHub
parent 1703dc330f
commit 53d2a733b0
6 changed files with 318 additions and 27 deletions

View File

@@ -11,7 +11,12 @@ if tm.no_spark()["condition"]:
if sys.platform.startswith("win") or sys.platform.startswith("darwin"):
pytest.skip("Skipping PySpark tests on Windows", allow_module_level=True)
from xgboost.spark.data import alias, create_dmatrix_from_partitions, stack_series
from xgboost.spark.data import (
_read_csr_matrix_from_unwrapped_spark_vec,
alias,
create_dmatrix_from_partitions,
stack_series,
)
def test_stack() -> None:
@@ -62,10 +67,12 @@ def run_dmatrix_ctor(is_dqm: bool) -> None:
kwargs = {"feature_types": feature_types}
if is_dqm:
cols = [f"feat-{i}" for i in range(n_features)]
train_Xy, valid_Xy = create_dmatrix_from_partitions(iter(dfs), cols, 0, kwargs)
train_Xy, valid_Xy = create_dmatrix_from_partitions(
iter(dfs), cols, 0, kwargs, False
)
else:
train_Xy, valid_Xy = create_dmatrix_from_partitions(
iter(dfs), None, None, kwargs
iter(dfs), None, None, kwargs, False
)
assert valid_Xy is not None
@@ -100,3 +107,35 @@ def run_dmatrix_ctor(is_dqm: bool) -> None:
def test_dmatrix_ctor() -> None:
    """Exercise DMatrix construction from partitions in non-quantile mode.

    Delegates to ``run_dmatrix_ctor`` with ``is_dqm=False``; the quantile
    (DeviceQuantileDMatrix) path is presumably covered by a sibling test
    calling ``run_dmatrix_ctor(True)`` — not visible in this chunk.
    """
    run_dmatrix_ctor(False)
def test_read_csr_matrix_from_unwrapped_spark_vec() -> None:
    """Unwrapped Spark vector columns should round-trip into a scipy CSR matrix.

    The input frame mimics four unwrapped Spark vectors over 3 features:
    rows with ``featureVectorType`` 0 carry explicit indices and a size,
    while rows with type 1 carry only a full-length values array (their
    indices/size entries are ``None``).
    """
    from scipy.sparse import csr_matrix

    frame = pd.DataFrame(
        {
            "featureVectorType": [0, 1, 1, 0],
            "featureVectorSize": [3, None, None, 3],
            "featureVectorIndices": [
                np.array([0, 2], dtype=np.int32),
                None,
                None,
                np.array([1, 2], dtype=np.int32),
            ],
            "featureVectorValues": [
                np.array([3.0, 0.0], dtype=np.float64),
                np.array([13.0, 14.0, 0.0], dtype=np.float64),
                np.array([0.0, 24.0, 25.0], dtype=np.float64),
                np.array([0.0, 35.0], dtype=np.float64),
            ],
        }
    )

    matrix = _read_csr_matrix_from_unwrapped_spark_vec(frame)

    assert isinstance(matrix, csr_matrix)
    assert matrix.shape == (4, 3)
    # Dense rows contribute one entry per feature; sparse rows only their
    # explicitly indexed entries (explicit zeros are preserved).
    np.testing.assert_array_equal(
        matrix.data, [3.0, 0.0, 13.0, 14.0, 0.0, 0.0, 24.0, 25.0, 0.0, 35.0]
    )
    np.testing.assert_array_equal(matrix.indptr, [0, 2, 5, 8, 10])
    np.testing.assert_array_equal(matrix.indices, [0, 2, 0, 1, 2, 0, 1, 2, 1, 2])

View File

@@ -381,6 +381,26 @@ class XgboostLocalTest(SparkTestCase):
],
)
self.reg_df_sparse_train = self.session.createDataFrame(
[
(Vectors.dense(1.0, 0.0, 3.0, 0.0, 0.0), 0),
(Vectors.sparse(5, {1: 1.0, 3: 5.5}), 1),
(Vectors.sparse(5, {4: -3.0}), 2),
]
* 10,
["features", "label"],
)
self.cls_df_sparse_train = self.session.createDataFrame(
[
(Vectors.dense(1.0, 0.0, 3.0, 0.0, 0.0), 0),
(Vectors.sparse(5, {1: 1.0, 3: 5.5}), 1),
(Vectors.sparse(5, {4: -3.0}), 0),
]
* 10,
["features", "label"],
)
def get_local_tmp_dir(self):
    """Return a fresh, unique path string rooted at the test's temp directory.

    A random UUID suffix is appended so each call yields a distinct path.
    """
    unique_suffix = uuid.uuid4()
    return f"{self.tempdir}{unique_suffix}"
@@ -972,3 +992,35 @@ class XgboostLocalTest(SparkTestCase):
)
model = classifier.fit(self.cls_df_train)
model.transform(self.cls_df_test).collect()
def test_regressor_with_sparse_optim(self):
    """Regressor predictions must agree with and without sparse-data optimization."""
    baseline = SparkXGBRegressor(missing=0.0)
    baseline_model = baseline.fit(self.reg_df_sparse_train)
    assert baseline_model._xgb_sklearn_model.missing == 0.0
    baseline_preds = baseline_model.transform(self.reg_df_sparse_train).collect()

    # Re-train with sparse data optimization enabled and compare predictions.
    optimized = SparkXGBRegressor(missing=0.0, enable_sparse_data_optim=True)
    optimized_model = optimized.fit(self.reg_df_sparse_train)
    assert optimized_model.getOrDefault(optimized_model.enable_sparse_data_optim)
    assert optimized_model._xgb_sklearn_model.missing == 0.0
    optimized_preds = optimized_model.transform(self.reg_df_sparse_train).collect()

    for expected, actual in zip(baseline_preds, optimized_preds):
        self.assertTrue(np.isclose(expected.prediction, actual.prediction, atol=1e-3))
def test_classifier_with_sparse_optim(self):
    """Classifier probabilities must agree with and without sparse-data optimization."""
    baseline = SparkXGBClassifier(missing=0.0)
    baseline_model = baseline.fit(self.cls_df_sparse_train)
    assert baseline_model._xgb_sklearn_model.missing == 0.0
    baseline_preds = baseline_model.transform(self.cls_df_sparse_train).collect()

    # Re-train with sparse data optimization enabled and compare probabilities.
    optimized = SparkXGBClassifier(missing=0.0, enable_sparse_data_optim=True)
    optimized_model = optimized.fit(self.cls_df_sparse_train)
    assert optimized_model.getOrDefault(optimized_model.enable_sparse_data_optim)
    assert optimized_model._xgb_sklearn_model.missing == 0.0
    optimized_preds = optimized_model.transform(self.cls_df_sparse_train).collect()

    for expected, actual in zip(baseline_preds, optimized_preds):
        self.assertTrue(np.allclose(expected.probability, actual.probability, rtol=1e-3))