Remove omp_get_max_threads in objective. (#7589)

This commit is contained in:
Jiaming Yuan
2022-01-24 04:35:49 +08:00
committed by GitHub
parent 5817840858
commit 6967ef7267
11 changed files with 76 additions and 74 deletions

View File

@@ -1,5 +1,5 @@
/*!
* Copyright 2019-2020 by Contributors
* Copyright 2019-2022 by Contributors
* \file aft_obj.cu
* \brief Definition of AFT loss for survival analysis.
* \author Avinash Barnwal, Hyunsu Cho and Toby Hocking
@@ -65,7 +65,7 @@ class AFTObj : public ObjFunction {
const bst_float w = is_null_weight ? 1.0f : _weights[_idx];
_out_gpair[_idx] = GradientPair(grad * w, hess * w);
},
common::Range{0, static_cast<int64_t>(ndata)}, device).Eval(
common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval(
out_gpair, &preds, &info.labels_lower_bound_, &info.labels_upper_bound_,
&info.weights_);
}
@@ -78,7 +78,7 @@ class AFTObj : public ObjFunction {
CHECK_EQ(info.labels_lower_bound_.Size(), ndata);
CHECK_EQ(info.labels_upper_bound_.Size(), ndata);
out_gpair->Resize(ndata);
const int device = tparam_->gpu_id;
const int device = ctx_->gpu_id;
const float aft_loss_distribution_scale = param_.aft_loss_distribution_scale;
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
@@ -108,10 +108,11 @@ class AFTObj : public ObjFunction {
// Trees give us a prediction in log scale, so exponentiate
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = exp(_preds[_idx]);
}, common::Range{0, static_cast<int64_t>(io_preds->Size())},
_preds[_idx] = exp(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
.Eval(io_preds);
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {

View File

@@ -1,5 +1,5 @@
/*!
* Copyright 2018-2019 by Contributors
* Copyright 2018-2022 by XGBoost Contributors
* \file hinge.cc
* \brief Provides an implementation of the hinge loss function
* \author Henry Gouk
@@ -65,8 +65,8 @@ class HingeObj : public ObjFunction {
}
_out_gpair[_idx] = GradientPair(g, h);
},
common::Range{0, static_cast<int64_t>(ndata)},
tparam_->gpu_id).Eval(
common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(),
ctx_->gpu_id).Eval(
out_gpair, &preds, info.labels.Data(), &info.weights_);
}
@@ -75,7 +75,7 @@ class HingeObj : public ObjFunction {
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = _preds[_idx] > 0.0 ? 1.0 : 0.0;
},
common::Range{0, static_cast<int64_t>(io_preds->Size()), 1},
common::Range{0, static_cast<int64_t>(io_preds->Size()), 1}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}

View File

@@ -1,5 +1,5 @@
/*!
* Copyright 2015-2018 by Contributors
* Copyright 2015-2022 by XGBoost Contributors
* \file multi_class.cc
* \brief Definition of multi-class classification objectives.
* \author Tianqi Chen
@@ -68,7 +68,7 @@ class SoftmaxMultiClassObj : public ObjFunction {
const int nclass = param_.num_class;
const auto ndata = static_cast<int64_t>(preds.Size() / nclass);
auto device = tparam_->gpu_id;
auto device = ctx_->gpu_id;
out_gpair->SetDevice(device);
info.labels.SetDevice(device);
info.weights_.SetDevice(device);
@@ -114,7 +114,7 @@ class SoftmaxMultiClassObj : public ObjFunction {
p = label == k ? p - 1.0f : p;
gpair[idx * nclass + k] = GradientPair(p * wt, h);
}
}, common::Range{0, ndata}, device, false)
}, common::Range{0, ndata}, ctx_->Threads(), device)
.Eval(out_gpair, info.labels.Data(), &preds, &info.weights_, &label_correct_);
std::vector<int>& label_correct_h = label_correct_.HostVector();
@@ -146,8 +146,8 @@ class SoftmaxMultiClassObj : public ObjFunction {
_preds.subspan(_idx * nclass, nclass);
common::Softmax(point.begin(), point.end());
},
common::Range{0, ndata}, device)
.Eval(io_preds);
common::Range{0, ndata}, this->ctx_->Threads(), device)
.Eval(io_preds);
} else {
io_preds->SetDevice(device);
HostDeviceVector<bst_float> max_preds;
@@ -162,7 +162,7 @@ class SoftmaxMultiClassObj : public ObjFunction {
common::FindMaxIndex(point.cbegin(), point.cend()) -
point.cbegin();
},
common::Range{0, ndata}, device, false)
common::Range{0, ndata}, this->ctx_->Threads(), device)
.Eval(io_preds, &max_preds);
io_preds->Resize(max_preds.Size());
io_preds->Copy(max_preds);

View File

@@ -27,7 +27,7 @@ ObjFunction* ObjFunction::Create(const std::string& name, GenericParameter const
<< ss.str();
}
auto pobj = (e->body)();
pobj->tparam_ = tparam;
pobj->ctx_ = tparam;
return pobj;
}

View File

@@ -773,7 +773,7 @@ class LambdaRankObj : public ObjFunction {
#if defined(__CUDACC__)
// Check if we have a GPU assignment; else, revert back to CPU
auto device = tparam_->gpu_id;
auto device = ctx_->gpu_id;
if (device >= 0) {
ComputeGradientsOnGPU(preds, info, iter, out_gpair, gptr);
} else {
@@ -909,7 +909,7 @@ class LambdaRankObj : public ObjFunction {
const std::vector<unsigned> &gptr) {
LOG(DEBUG) << "Computing " << LambdaWeightComputerT::Name() << " gradients on GPU.";
auto device = tparam_->gpu_id;
auto device = ctx_->gpu_id;
dh::safe_cuda(cudaSetDevice(device));
bst_float weight_normalization_factor = ComputeWeightNormalizationFactor(info, gptr);

View File

@@ -1,5 +1,5 @@
/*!
* Copyright 2015-2019 by Contributors
* Copyright 2015-2022 by XGBoost Contributors
* \file regression_obj.cu
* \brief Definition of single-value regression and classification objectives.
* \author Tianqi Chen, Kailong Chen
@@ -70,7 +70,7 @@ class RegLossObj : public ObjFunction {
<< "Loss: " << Loss::Name();
size_t const ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = tparam_->gpu_id;
auto device = ctx_->gpu_id;
additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag
bool is_null_weight = info.weights_.Size() == 0;
@@ -82,7 +82,7 @@ class RegLossObj : public ObjFunction {
additional_input_.HostVector().begin()[1] = scale_pos_weight;
additional_input_.HostVector().begin()[2] = is_null_weight;
const size_t nthreads = tparam_->Threads();
const size_t nthreads = ctx_->Threads();
bool on_device = device >= 0;
// On CPU we run the transformation each thread processing a contiguous block of data
// for better performance.
@@ -121,7 +121,7 @@ class RegLossObj : public ObjFunction {
Loss::SecondOrderGradient(p, label) * w);
}
},
common::Range{0, static_cast<int64_t>(n_data_blocks)}, device)
common::Range{0, static_cast<int64_t>(n_data_blocks)}, nthreads, device)
.Eval(&additional_input_, out_gpair, &preds, info.labels.Data(),
&info.weights_);
@@ -140,7 +140,8 @@ class RegLossObj : public ObjFunction {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) {
_preds[_idx] = Loss::PredTransform(_preds[_idx]);
}, common::Range{0, static_cast<int64_t>(io_preds->Size())},
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
@@ -228,7 +229,7 @@ class PoissonRegression : public ObjFunction {
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
size_t const ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = tparam_->gpu_id;
auto device = ctx_->gpu_id;
label_correct_.Resize(1);
label_correct_.Fill(1);
@@ -254,7 +255,7 @@ class PoissonRegression : public ObjFunction {
_out_gpair[_idx] = GradientPair{(expf(p) - y) * w,
expf(p + max_delta_step) * w};
},
common::Range{0, static_cast<int64_t>(ndata)}, device).Eval(
common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval(
&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
@@ -269,7 +270,7 @@ class PoissonRegression : public ObjFunction {
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
@@ -381,7 +382,7 @@ class CoxRegression : public ObjFunction {
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
std::vector<bst_float> &preds = io_preds->HostVector();
const long ndata = static_cast<long>(preds.size()); // NOLINT(*)
common::ParallelFor(ndata, [&](long j) { // NOLINT(*)
common::ParallelFor(ndata, ctx_->Threads(), [&](long j) { // NOLINT(*)
preds[j] = std::exp(preds[j]);
});
}
@@ -423,7 +424,7 @@ class GammaRegression : public ObjFunction {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
const size_t ndata = preds.Size();
auto device = tparam_->gpu_id;
auto device = ctx_->gpu_id;
out_gpair->Resize(ndata);
label_correct_.Resize(1);
label_correct_.Fill(1);
@@ -448,7 +449,7 @@ class GammaRegression : public ObjFunction {
}
_out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w);
},
common::Range{0, static_cast<int64_t>(ndata)}, device).Eval(
common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval(
&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_);
// copy "label correct" flags back to host
@@ -464,7 +465,7 @@ class GammaRegression : public ObjFunction {
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
@@ -525,7 +526,7 @@ class TweedieRegression : public ObjFunction {
const size_t ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = tparam_->gpu_id;
auto device = ctx_->gpu_id;
label_correct_.Resize(1);
label_correct_.Fill(1);
@@ -555,7 +556,7 @@ class TweedieRegression : public ObjFunction {
std::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p);
_out_gpair[_idx] = GradientPair(grad * w, hess * w);
},
common::Range{0, static_cast<int64_t>(ndata), 1}, device)
common::Range{0, static_cast<int64_t>(ndata), 1}, this->ctx_->Threads(), device)
.Eval(&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_);
// copy "label correct" flags back to host
@@ -571,7 +572,7 @@ class TweedieRegression : public ObjFunction {
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}