Clarify the behavior of use_rmm. (#6808)
* Clarify the `use_rmm` flag in the documentation and the demo.
@@ -4,13 +4,18 @@ from sklearn.datasets import make_classification
 
 # Initialize RMM pool allocator
 rmm.reinitialize(pool_allocator=True)
-# Inform XGBoost that RMM is used for GPU memory allocation
-xgb.set_config(use_rmm=True)
+# Optionally force XGBoost to use RMM for all GPU memory allocation, see ./README.md
+# xgb.set_config(use_rmm=True)
 
 X, y = make_classification(n_samples=10000, n_informative=5, n_classes=3)
 dtrain = xgb.DMatrix(X, label=y)
 
-params = {'max_depth': 8, 'eta': 0.01, 'objective': 'multi:softprob', 'num_class': 3,
-          'tree_method': 'gpu_hist'}
+params = {
+    "max_depth": 8,
+    "eta": 0.01,
+    "objective": "multi:softprob",
+    "num_class": 3,
+    "tree_method": "gpu_hist",
+}
 # XGBoost will automatically use the RMM pool allocator
-bst = xgb.train(params, dtrain, num_boost_round=100, evals=[(dtrain, 'train')])
+bst = xgb.train(params, dtrain, num_boost_round=100, evals=[(dtrain, "train")])
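As a usage note: besides the global `xgb.set_config(use_rmm=True)` call shown above, the flag can also be scoped with `xgboost.config_context`, which restores the previous global configuration on exit. The following is a minimal sketch, not taken from this commit, assuming an XGBoost build with the RMM plugin enabled and a CUDA-capable GPU; the parameter values mirror the demo.

import rmm
import xgboost as xgb
from sklearn.datasets import make_classification

# Route GPU allocations through an RMM memory pool.
rmm.reinitialize(pool_allocator=True)

X, y = make_classification(n_samples=10000, n_informative=5, n_classes=3)
dtrain = xgb.DMatrix(X, label=y)

params = {
    "max_depth": 8,
    "eta": 0.01,
    "objective": "multi:softprob",
    "num_class": 3,
    "tree_method": "gpu_hist",
}

# Scope the flag instead of setting it globally: use_rmm is in effect
# only inside this block, and the prior configuration is restored on exit.
with xgb.config_context(use_rmm=True):
    bst = xgb.train(params, dtrain, num_boost_round=100, evals=[(dtrain, "train")])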