Remove omp_get_max_threads in gbm and linear. (#7537)

* Use ctx in gbm.
* Use ctx threads in gbm and linear.

This commit is contained in:
@@ -32,7 +32,7 @@ class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT
   void Configure(Args const& args) override {
     tparam_.UpdateAllowUnknown(args);
     coord_param_.UpdateAllowUnknown(args);
-    selector_.reset(FeatureSelector::Create(tparam_.feature_selector));
+    selector_.reset(FeatureSelector::Create(tparam_.feature_selector, ctx_->Threads()));
     monitor_.Init("GPUCoordinateUpdater");
   }

@@ -48,7 +48,7 @@ class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT
   }

   void LazyInitDevice(DMatrix *p_fmat, const LearnerModelParam &model_param) {
-    if (learner_param_->gpu_id < 0) return;
+    if (ctx_->gpu_id < 0) return;

     num_row_ = static_cast<size_t>(p_fmat->Info().num_row_);

@@ -60,7 +60,7 @@ class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT
       return;
     }

-    dh::safe_cuda(cudaSetDevice(learner_param_->gpu_id));
+    dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
    // The begin and end indices for the section of each column associated with
    // this device
    std::vector<std::pair<bst_uint, bst_uint>> column_segments;
@@ -103,7 +103,7 @@ class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT
     monitor_.Start("UpdateGpair");
     auto &in_gpair_host = in_gpair->ConstHostVector();
     // Update gpair
-    if (learner_param_->gpu_id >= 0) {
+    if (ctx_->gpu_id >= 0) {
       this->UpdateGpair(in_gpair_host);
     }
     monitor_.Stop("UpdateGpair");
@@ -134,7 +134,7 @@ class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT
          ++group_idx) {
       // Get gradient
       auto grad = GradientPair(0, 0);
-      if (learner_param_->gpu_id >= 0) {
+      if (ctx_->gpu_id >= 0) {
         grad = GetBiasGradient(group_idx, model->learner_model_param->num_output_group);
       }
       auto dbias = static_cast<float>(
@@ -143,7 +143,7 @@ class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT
       model->Bias()[group_idx] += dbias;

       // Update residual
-      if (learner_param_->gpu_id >= 0) {
+      if (ctx_->gpu_id >= 0) {
         UpdateBiasResidual(dbias, group_idx, model->learner_model_param->num_output_group);
       }
     }
@@ -155,7 +155,7 @@ class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT
       bst_float &w = (*model)[fidx][group_idx];
       // Get gradient
       auto grad = GradientPair(0, 0);
-      if (learner_param_->gpu_id >= 0) {
+      if (ctx_->gpu_id >= 0) {
         grad = GetGradient(group_idx, model->learner_model_param->num_output_group, fidx);
       }
       auto dw = static_cast<float>(tparam_.learning_rate *
@@ -164,14 +164,14 @@ class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT
                                        tparam_.reg_lambda_denorm));
       w += dw;

-      if (learner_param_->gpu_id >= 0) {
+      if (ctx_->gpu_id >= 0) {
         UpdateResidual(dw, group_idx, model->learner_model_param->num_output_group, fidx);
       }
     }
   }

   // This needs to be public because of the __device__ lambda.
   GradientPair GetBiasGradient(int group_idx, int num_group) {
-    dh::safe_cuda(cudaSetDevice(learner_param_->gpu_id));
+    dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
     auto counting = thrust::make_counting_iterator(0ull);
     auto f = [=] __device__(size_t idx) {
       return idx * num_group + group_idx;
@@ -195,7 +195,7 @@ class GPUCoordinateUpdater : public LinearUpdater {  // NOLINT

   // This needs to be public because of the __device__ lambda.
   GradientPair GetGradient(int group_idx, int num_group, int fidx) {
-    dh::safe_cuda(cudaSetDevice(learner_param_->gpu_id));
+    dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
     common::Span<xgboost::Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]);
     size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx];
     common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_);
Reference in New Issue
Block a user