[MT-TREE] Support prediction cache and model slicing. (#8968)
- Fix prediction range.
- Support prediction cache in mt-hist.
- Support model slicing.
- Make the booster a Python iterable by defining `__iter__`.
- Clean up removed/deprecated parameters.
- Add a new field in the output model, `iteration_indptr`, pointing to the ranges of trees for each iteration.
This commit is contained in:
@@ -342,7 +342,6 @@ class DeviceModel {
|
||||
void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) {
|
||||
dh::safe_cuda(cudaSetDevice(gpu_id));
|
||||
|
||||
CHECK_EQ(model.param.size_leaf_vector, 0);
|
||||
// Copy decision trees to device
|
||||
tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id));
|
||||
auto& h_tree_segments = tree_segments.HostVector();
|
||||
|
||||
Reference in New Issue
Block a user