- Rewrite GPU demos. The notebook is converted to a script to avoid committing additional PNG plots.
- Add GPU demos to the Sphinx gallery.
- Add RMM demos to the Sphinx gallery.
- Test firing threads with different device ordinals (see the sketch below).
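The thread test itself is not reproduced here; a minimal sketch of the idea, assuming two visible GPUs (the device count, sample sizes, and helper name are placeholders, not the exact test added in this PR), could look like this:

# Sketch only: train from several threads, each pinned to a different CUDA
# device ordinal, with an RMM pool created on every device we plan to use.
import threading

import rmm
import xgboost as xgb
from sklearn.datasets import make_classification

# Create an RMM pool allocator on each device (assumes devices 0 and 1 exist).
rmm.reinitialize(pool_allocator=True, devices=[0, 1])


def train_on_device(ordinal: int) -> None:
    X, y = make_classification(n_samples=1000, n_informative=5, n_classes=3)
    dtrain = xgb.DMatrix(X, label=y)
    params = {
        "objective": "multi:softprob",
        "num_class": 3,
        "tree_method": "hist",
        "device": f"cuda:{ordinal}",  # pin this thread's training to one GPU
    }
    xgb.train(params, dtrain, num_boost_round=10)


threads = [threading.Thread(target=train_on_device, args=(i,)) for i in range(2)]
for t in threads:
    t.start()
for t in threads:
    t.join()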
"""
|
|
Using rmm on a single node device
|
|
=================================
|
|
"""
|
|
import rmm
|
|
from sklearn.datasets import make_classification
|
|
|
|
import xgboost as xgb
|
|
|
|
# Initialize RMM pool allocator
|
|
rmm.reinitialize(pool_allocator=True)
|
|
# Optionally force XGBoost to use RMM for all GPU memory allocation, see ./README.md
|
|
# xgb.set_config(use_rmm=True)
|
|
|
|
X, y = make_classification(n_samples=10000, n_informative=5, n_classes=3)
|
|
dtrain = xgb.DMatrix(X, label=y)
|
|
|
|
params = {
|
|
"max_depth": 8,
|
|
"eta": 0.01,
|
|
"objective": "multi:softprob",
|
|
"num_class": 3,
|
|
"tree_method": "hist",
|
|
"device": "cuda",
|
|
}
|
|
# XGBoost will automatically use the RMM pool allocator
|
|
bst = xgb.train(params, dtrain, num_boost_round=100, evals=[(dtrain, "train")])
|
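As an alternative to the commented-out xgb.set_config call above, the use_rmm flag can also be applied through XGBoost's configuration context manager so it only affects a limited scope. A small sketch, reusing the params and dtrain defined in the demo:

# Scope the RMM setting to a block instead of setting it globally.
with xgb.config_context(use_rmm=True):
    # Within this block XGBoost routes GPU memory allocation through RMM.
    bst = xgb.train(params, dtrain, num_boost_round=100, evals=[(dtrain, "train")])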