Fix compiler warnings. (#8059)

- Remove unused parameters.
- Avoid comparison of different signedness.
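These two bullets correspond to the -Wunused-parameter and -Wsign-compare diagnostics that GCC/Clang emit under -Wall -Wextra. Below is a minimal, self-contained sketch (illustrative only, not code from this repository) of both warning patterns and the shape of the fixes applied in the hunks that follow.

// Illustrative sketch only -- not part of this commit's diff.
// Compile with: g++ -Wall -Wextra -c sketch.cc
#include <cstddef>
#include <vector>

// (1) -Wsign-compare: `int i` is signed, while `v.size()` returns the
// unsigned std::size_t, so `i < v.size()` compares different signedness.
int SumWithWarning(const std::vector<int>& v) {
  int total = 0;
  for (int i = 0; i < v.size(); ++i) {  // warning: comparison of different signedness
    total += v[i];
  }
  return total;
}

// Fix: index with the container's (unsigned) size type.
int SumFixed(const std::vector<int>& v) {
  int total = 0;
  for (std::size_t i = 0; i < v.size(); ++i) {
    total += v[i];
  }
  return total;
}

// (2) -Wunused-parameter: `weight` is accepted but never read.
float GainWithWarning(float base_score, float weight) {  // warning: unused parameter 'weight'
  return base_score * 2.0f;
}

// Fix: drop the parameter and update the call sites to match.
float GainFixed(float base_score) { return base_score * 2.0f; }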
Authored by Jiaming Yuan on 2022-07-14 05:29:56 +08:00; committed by GitHub.
parent 937352c78f
commit abaa593aa0
10 changed files with 29 additions and 37 deletions

View File

@@ -402,7 +402,7 @@ void GPUHistEvaluator<GradientSumT>::EvaluateSplits(
template <typename GradientSumT>
GPUExpandEntry GPUHistEvaluator<GradientSumT>::EvaluateSingleSplit(
EvaluateSplitInputs input, EvaluateSplitSharedInputs shared_inputs, float weight) {
EvaluateSplitInputs input, EvaluateSplitSharedInputs shared_inputs) {
dh::device_vector<EvaluateSplitInputs> inputs = std::vector<EvaluateSplitInputs>{input};
dh::TemporaryArray<GPUExpandEntry> out_entries(1);
this->EvaluateSplits({input.nidx}, input.feature_set.size(), dh::ToSpan(inputs), shared_inputs,

View File

@@ -167,19 +167,20 @@ class GPUHistEvaluator {
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator);
// impl of evaluate splits, contains CUDA kernels so it's public
void LaunchEvaluateSplits(bst_feature_t number_active_features,common::Span<const EvaluateSplitInputs> d_inputs,EvaluateSplitSharedInputs shared_inputs,
void LaunchEvaluateSplits(bst_feature_t number_active_features,common::Span<const EvaluateSplitInputs> d_inputs,EvaluateSplitSharedInputs shared_inputs,
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
common::Span<DeviceSplitCandidate> out_splits);
/**
* \brief Evaluate splits for left and right nodes.
*/
void EvaluateSplits(const std::vector<bst_node_t> &nidx,bst_feature_t number_active_features,common::Span<const EvaluateSplitInputs> d_inputs,
EvaluateSplitSharedInputs shared_inputs,
EvaluateSplitSharedInputs shared_inputs,
common::Span<GPUExpandEntry> out_splits);
/**
* \brief Evaluate splits for root node.
*/
GPUExpandEntry EvaluateSingleSplit(EvaluateSplitInputs input,EvaluateSplitSharedInputs shared_inputs, float weight);
GPUExpandEntry EvaluateSingleSplit(EvaluateSplitInputs input,
EvaluateSplitSharedInputs shared_inputs);
};
} // namespace tree
} // namespace xgboost
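With the trailing `float weight` parameter removed from EvaluateSingleSplit, callers simply drop the last argument, as the GPUHistMakerDevice and test hunks further down show. A hedged mock of the call-site change (stand-in types reusing the same names, not the real CUDA evaluator):

// Mock stand-ins -- not part of this commit's diff and not the real XGBoost types.
struct EvaluateSplitInputs {};
struct EvaluateSplitSharedInputs {};
struct GPUExpandEntry {
  float loss_chg{-1.0f};
};

struct MockGPUHistEvaluator {
  // Old shape (removed):
  //   GPUExpandEntry EvaluateSingleSplit(EvaluateSplitInputs, EvaluateSplitSharedInputs, float);
  GPUExpandEntry EvaluateSingleSplit(EvaluateSplitInputs, EvaluateSplitSharedInputs) {
    return GPUExpandEntry{};
  }
};

int main() {
  MockGPUHistEvaluator evaluator;
  // Before: evaluator.EvaluateSingleSplit(input, shared_inputs, /*weight=*/0.0f);
  auto entry = evaluator.EvaluateSingleSplit(EvaluateSplitInputs{}, EvaluateSplitSharedInputs{});
  return entry.loss_chg < 0.0f ? 0 : 1;
}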

View File

@@ -272,7 +272,7 @@ class RowPartitioner {
dh::TemporaryArray<PerNodeData<OpDataT>> d_batch_info(nidx.size());
std::size_t total_rows = 0;
for (int i = 0; i < nidx.size(); i++) {
for (size_t i = 0; i < nidx.size(); i++) {
h_batch_info[i] = {ridx_segments_.at(nidx.at(i)).segment, op_data.at(i)};
total_rows += ridx_segments_.at(nidx.at(i)).segment.Size();
}
@@ -295,7 +295,7 @@ class RowPartitioner {
dh::safe_cuda(cudaStreamSynchronize(stream_));
// Update segments
for (int i = 0; i < nidx.size(); i++) {
for (size_t i = 0; i < nidx.size(); i++) {
auto segment = ridx_segments_.at(nidx[i]).segment;
auto left_count = h_counts[i];
CHECK_LE(left_count, segment.Size());
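For loops like the two above, there are two common ways to silence -Wsign-compare: cast the container size to int, or give the index the container's unsigned size type. This commit does the latter. A small illustrative sketch (not RowPartitioner itself) of the trade-off:

// Illustrative sketch only -- not part of this commit's diff.
#include <cstddef>
#include <vector>

void VisitAll(const std::vector<int>& nidx) {
  // Option A: cast the size down to int. Quiet, but narrows for very large containers.
  for (int i = 0; i < static_cast<int>(nidx.size()); ++i) {
    (void)nidx[i];
  }
  // Option B (used in this commit): make the index type match size(), i.e. std::size_t.
  for (std::size_t i = 0; i < nidx.size(); ++i) {
    (void)nidx[i];
  }
}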

View File

@@ -436,16 +436,14 @@ class HistEvaluator {
*
* \param p_last_tree The last tree being updated by tree updater
*/
template <typename Partitioner, typename ExpandEntry>
template <typename Partitioner>
void UpdatePredictionCacheImpl(GenericParameter const *ctx, RegTree const *p_last_tree,
std::vector<Partitioner> const &partitioner,
HistEvaluator<ExpandEntry> const &hist_evaluator,
linalg::VectorView<float> out_preds) {
CHECK_GT(out_preds.Size(), 0U);
CHECK(p_last_tree);
auto const &tree = *p_last_tree;
auto evaluator = hist_evaluator.Evaluator();
CHECK_EQ(out_preds.DeviceIdx(), GenericParameter::kCpuId);
size_t n_nodes = p_last_tree->GetNodes().size();
for (auto &part : partitioner) {
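Dropping the unused hist_evaluator argument also removes the ExpandEntry template parameter, since (as the hunk shows) that argument was its only use; the GloablApproxBuilder and QuantileHistMaker hunks below adjust the call sites accordingly. A minimal sketch of the pattern with mock types (not the real HistEvaluator or UpdatePredictionCacheImpl):

// Mock sketch only -- not part of this commit's diff.
#include <vector>

template <typename ExpandEntry>
struct MockHistEvaluator {};

struct MockPartitioner {};

// Before:
//   template <typename Partitioner, typename ExpandEntry>
//   void UpdateCacheImpl(const std::vector<Partitioner>& parts,
//                        const MockHistEvaluator<ExpandEntry>& unused_evaluator);
// After: only the template parameter that is actually used remains.
template <typename Partitioner>
void UpdateCacheImpl(const std::vector<Partitioner>& parts) {
  (void)parts;  // body elided in this sketch
}

int main() {
  std::vector<MockPartitioner> parts;
  UpdateCacheImpl(parts);  // was: UpdateCacheImpl(parts, MockHistEvaluator<int>{});
  return 0;
}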

View File

@@ -116,7 +116,7 @@ class GloablApproxBuilder {
// Caching prediction seems redundant for approx tree method, as sketching takes up
// majority of training time.
CHECK_EQ(out_preds.Size(), data->Info().num_row_);
UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, evaluator_, out_preds);
UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, out_preds);
monitor_->Stop(__func__);
}

View File

@@ -272,7 +272,7 @@ struct GPUHistMakerDevice {
hist.Reset();
}
GPUExpandEntry EvaluateRootSplit(GradientPairPrecise root_sum, float weight) {
GPUExpandEntry EvaluateRootSplit(GradientPairPrecise root_sum) {
int nidx = RegTree::kRoot;
GPUTrainingParam gpu_param(param);
auto sampled_features = column_sampler.GetFeatureSet(0);
@@ -285,7 +285,7 @@ struct GPUHistMakerDevice {
gpu_param, feature_types, matrix.feature_segments, matrix.gidx_fvalue_map,
matrix.min_fvalue,
};
auto split = this->evaluator_.EvaluateSingleSplit(inputs, shared_inputs, weight);
auto split = this->evaluator_.EvaluateSingleSplit(inputs, shared_inputs);
return split;
}
@@ -298,11 +298,11 @@ struct GPUHistMakerDevice {
auto h_node_inputs = pinned2.GetSpan<EvaluateSplitInputs>(2 * candidates.size());
auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
EvaluateSplitSharedInputs shared_inputs{
GPUTrainingParam(param), feature_types, matrix.feature_segments,
GPUTrainingParam{param}, feature_types, matrix.feature_segments,
matrix.gidx_fvalue_map, matrix.min_fvalue,
};
dh::TemporaryArray<GPUExpandEntry> entries(2 * candidates.size());
for (int i = 0; i < candidates.size(); i++) {
for (size_t i = 0; i < candidates.size(); i++) {
auto candidate = candidates.at(i);
int left_nidx = tree[candidate.nid].LeftChild();
int right_nidx = tree[candidate.nid].RightChild();
@@ -378,7 +378,7 @@ struct GPUHistMakerDevice {
std::vector<int> left_nidx(candidates.size());
std::vector<int> right_nidx(candidates.size());
std::vector<NodeSplitData> split_data(candidates.size());
for (int i = 0; i < candidates.size(); i++) {
for (size_t i = 0; i < candidates.size(); i++) {
auto& e = candidates[i];
RegTree::Node split_node = (*p_tree)[e.nid];
auto split_type = p_tree->NodeSplitType(e.nid);
@@ -658,7 +658,7 @@ struct GPUHistMakerDevice {
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Generate first split
auto root_entry = this->EvaluateRootSplit(root_sum, weight);
auto root_entry = this->EvaluateRootSplit(root_sum);
return root_entry;
}

View File

@@ -257,7 +257,7 @@ bool QuantileHistMaker::Builder::UpdatePredictionCache(DMatrix const *data,
}
monitor_->Start(__func__);
CHECK_EQ(out_preds.Size(), data->Info().num_row_);
UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, *evaluator_, out_preds);
UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, out_preds);
monitor_->Stop(__func__);
return true;
}

View File

@@ -65,8 +65,7 @@ void TestEvaluateSingleSplit(bool is_categorical) {
GPUHistEvaluator<GradientPairPrecise> evaluator{
tparam, static_cast<bst_feature_t>(feature_set.size()), 0};
evaluator.Reset(cuts, dh::ToSpan(feature_types), feature_set.size(), tparam, 0);
DeviceSplitCandidate result =
evaluator.EvaluateSingleSplit(input, shared_inputs,0).split;
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
EXPECT_EQ(result.findex, 1);
EXPECT_EQ(result.fvalue, 11.0);
@@ -111,7 +110,7 @@ TEST(GpuHist, EvaluateSingleSplitMissing) {
};
GPUHistEvaluator<GradientPairPrecise> evaluator(tparam, feature_set.size(), 0);
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs,0).split;
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
EXPECT_EQ(result.findex, 0);
EXPECT_EQ(result.fvalue, 1.0);
@@ -124,7 +123,7 @@ TEST(GpuHist, EvaluateSingleSplitEmpty) {
TrainParam tparam = ZeroParam();
GPUHistEvaluator<GradientPairPrecise> evaluator(tparam, 1, 0);
DeviceSplitCandidate result =
evaluator.EvaluateSingleSplit(EvaluateSplitInputs{}, EvaluateSplitSharedInputs{}, 0).split;
evaluator.EvaluateSingleSplit(EvaluateSplitInputs{}, EvaluateSplitSharedInputs{}).split;
EXPECT_EQ(result.findex, -1);
EXPECT_LT(result.loss_chg, 0.0f);
}
@@ -161,7 +160,7 @@ TEST(GpuHist, EvaluateSingleSplitFeatureSampling) {
};
GPUHistEvaluator<GradientPairPrecise> evaluator(tparam, feature_min_values.size(), 0);
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input,shared_inputs, 0).split;
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
EXPECT_EQ(result.findex, 1);
EXPECT_EQ(result.fvalue, 11.0);
@@ -201,7 +200,7 @@ TEST(GpuHist, EvaluateSingleSplitBreakTies) {
};
GPUHistEvaluator<GradientPairPrecise> evaluator(tparam, feature_min_values.size(), 0);
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input,shared_inputs, 0).split;
DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input,shared_inputs).split;
EXPECT_EQ(result.findex, 0);
EXPECT_EQ(result.fvalue, 1.0);
@@ -279,18 +278,13 @@ TEST_F(TestPartitionBasedSplit, GpuHist) {
cudaMemcpyHostToDevice));
dh::device_vector<bst_feature_t> feature_set{std::vector<bst_feature_t>{0}};
EvaluateSplitInputs input{0,0,
total_gpair_,
dh::ToSpan(feature_set),
dh::ToSpan(d_hist)};
EvaluateSplitInputs input{0, 0, total_gpair_, dh::ToSpan(feature_set), dh::ToSpan(d_hist)};
EvaluateSplitSharedInputs shared_inputs{
GPUTrainingParam{ param_},
dh::ToSpan(ft),
cuts_.cut_ptrs_.ConstDeviceSpan(),
cuts_.cut_values_.ConstDeviceSpan(),
cuts_.min_vals_.ConstDeviceSpan(),
GPUTrainingParam{param_}, dh::ToSpan(ft),
cuts_.cut_ptrs_.ConstDeviceSpan(), cuts_.cut_values_.ConstDeviceSpan(),
cuts_.min_vals_.ConstDeviceSpan(),
};
auto split = evaluator.EvaluateSingleSplit(input, shared_inputs, 0).split;
auto split = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
ASSERT_NEAR(split.loss_chg, best_score_, 1e-16);
}
} // namespace tree

View File

@@ -63,7 +63,7 @@ void TestSortPositionBatch(const std::vector<int>& ridx_in, const std::vector<Se
dh::TemporaryArray<PerNodeData<int>> d_batch_info(segments.size());
std::size_t total_rows = 0;
for (int i = 0; i < segments.size(); i++) {
for (size_t i = 0; i < segments.size(); i++) {
h_batch_info[i] = {segments.at(i), 0};
total_rows += segments.at(i).Size();
}
@@ -76,7 +76,7 @@ void TestSortPositionBatch(const std::vector<int>& ridx_in, const std::vector<Se
total_rows, op, &tmp, nullptr);
auto op_without_data = [=] __device__(auto ridx) { return ridx % 2 == 0; };
for (int i = 0; i < segments.size(); i++) {
for (size_t i = 0; i < segments.size(); i++) {
auto begin = ridx.begin() + segments[i].begin;
auto end = ridx.begin() + segments[i].end;
bst_uint count = counts[i];

View File

@@ -228,8 +227,7 @@ TEST(GpuHist, EvaluateRootSplit) {
info.num_row_ = kNRows;
info.num_col_ = kNCols;
DeviceSplitCandidate res =
maker.EvaluateRootSplit({6.4f, 12.8f}, 0).split;
DeviceSplitCandidate res = maker.EvaluateRootSplit({6.4f, 12.8f}).split;
ASSERT_EQ(res.findex, 7);
ASSERT_NEAR(res.fvalue, 0.26, xgboost::kRtEps);