Complete cudf support. (#4850)

* Handle missing values.
* Accept all floating point and integer types.
* Move to cudf 9.0 API.
* Remove requirement on `null_count`.
* Support arbitrary column types.
This commit is contained in:
Jiaming Yuan
2019-09-16 23:52:00 -04:00
committed by GitHub
parent 125bcec62e
commit 5374f52531
17 changed files with 702 additions and 339 deletions

View File

@@ -67,17 +67,17 @@ class TestBasic(unittest.TestCase):
def test_np_view(self):
# Sliced Float32 array
y = np.array([12, 34, 56], np.float32)[::2]
from_view = xgb.DMatrix([], label=y).get_label()
from_array = xgb.DMatrix([], label=y + 0).get_label()
from_view = xgb.DMatrix(np.array([[]]), label=y).get_label()
from_array = xgb.DMatrix(np.array([[]]), label=y + 0).get_label()
assert (from_view.shape == from_array.shape)
assert (from_view == from_array).all()
# Sliced UInt array
z = np.array([12, 34, 56], np.uint32)[::2]
dmat = xgb.DMatrix([])
dmat = xgb.DMatrix(np.array([[]]))
dmat.set_uint_info('root_index', z)
from_view = dmat.get_uint_info('root_index')
dmat = xgb.DMatrix([])
dmat = xgb.DMatrix(np.array([[]]))
dmat.set_uint_info('root_index', z + 0)
from_array = dmat.get_uint_info('root_index')
assert (from_view.shape == from_array.shape)
@@ -256,7 +256,7 @@ class TestBasic(unittest.TestCase):
assert dm.num_row() == 5
assert dm.num_col() == 5
data = np.matrix([[1, 2], [3, 4]])
data = np.array([[1, 2], [3, 4]])
dm = xgb.DMatrix(data)
assert dm.num_row() == 2
assert dm.num_col() == 2
@@ -430,4 +430,3 @@ class TestBasicPathLike(unittest.TestCase):
# invalid values raise Type error
self.assertRaises(TypeError, xgb.compat.os_fspath, 123)

View File

@@ -69,8 +69,8 @@ class TestUpdaters(unittest.TestCase):
nan = np.nan
param = {'missing': nan, 'tree_method': 'hist'}
model = xgb.XGBRegressor(**param)
X = [[6.18827160e+05, 1.73000000e+02], [6.37345679e+05, nan],
[6.38888889e+05, nan], [6.28086420e+05, nan]]
X = np.array([[6.18827160e+05, 1.73000000e+02], [6.37345679e+05, nan],
[6.38888889e+05, nan], [6.28086420e+05, nan]])
y = [1000000., 0., 0., 500000.]
w = [0, 0, 1, 0]
model.fit(X, y, sample_weight=w)

View File

@@ -19,7 +19,7 @@ pytestmark = pytest.mark.skipif(**tm.no_dask())
def run_train():
# Contains one label equal to rank
dmat = xgb.DMatrix([[0]], label=[xgb.rabit.get_rank()])
dmat = xgb.DMatrix(np.array([[0]]), label=[xgb.rabit.get_rank()])
bst = xgb.train({"eta": 1.0, "lambda": 0.0}, dmat, 1)
pred = bst.predict(dmat)
expected_result = np.average(range(xgb.rabit.get_world_size()))
@@ -78,7 +78,7 @@ def test_get_local_data(client):
def run_sklearn():
# Contains one label equal to rank
X = [[0]]
X = np.array([[0]])
y = [xgb.rabit.get_rank()]
model = xgb.XGBRegressor(learning_rate=1.0)
model.fit(X, y)

View File

@@ -393,7 +393,8 @@ def test_sklearn_nfolds_cv():
nfolds = 5
skf = StratifiedKFold(n_splits=nfolds, shuffle=True, random_state=seed)
cv1 = xgb.cv(params, dm, num_boost_round=10, nfold=nfolds, seed=seed, as_pandas=True)
cv1 = xgb.cv(params, dm, num_boost_round=10, nfold=nfolds,
seed=seed, as_pandas=True)
cv2 = xgb.cv(params, dm, num_boost_round=10, nfold=nfolds,
folds=skf, seed=seed, as_pandas=True)
cv3 = xgb.cv(params, dm, num_boost_round=10, nfold=nfolds,