From abaa593aa0707a2bfeb72c313e3c1c52c86009c0 Mon Sep 17 00:00:00 2001
From: Jiaming Yuan
Date: Thu, 14 Jul 2022 05:29:56 +0800
Subject: [PATCH] Fix compiler warnings. (#8059)

- Remove unused parameters.
- Avoid comparison of different signedness.
---
 src/tree/gpu_hist/evaluate_splits.cu          |  2 +-
 src/tree/gpu_hist/evaluate_splits.cuh         |  7 ++---
 src/tree/gpu_hist/row_partitioner.cuh         |  4 +--
 src/tree/hist/evaluate_splits.h               |  4 +--
 src/tree/updater_approx.cc                    |  2 +-
 src/tree/updater_gpu_hist.cu                  | 12 ++++-----
 src/tree/updater_quantile_hist.cc             |  2 +-
 .../cpp/tree/gpu_hist/test_evaluate_splits.cu | 26 +++++++------------
 .../cpp/tree/gpu_hist/test_row_partitioner.cu |  4 +--
 tests/cpp/tree/test_gpu_hist.cu               |  3 +--
 10 files changed, 29 insertions(+), 37 deletions(-)

diff --git a/src/tree/gpu_hist/evaluate_splits.cu b/src/tree/gpu_hist/evaluate_splits.cu
index 249a2aca7..c79fab0c2 100644
--- a/src/tree/gpu_hist/evaluate_splits.cu
+++ b/src/tree/gpu_hist/evaluate_splits.cu
@@ -402,7 +402,7 @@ void GPUHistEvaluator::EvaluateSplits(
 
 template
 GPUExpandEntry GPUHistEvaluator::EvaluateSingleSplit(
-    EvaluateSplitInputs input, EvaluateSplitSharedInputs shared_inputs, float weight) {
+    EvaluateSplitInputs input, EvaluateSplitSharedInputs shared_inputs) {
   dh::device_vector inputs = std::vector{input};
   dh::TemporaryArray out_entries(1);
   this->EvaluateSplits({input.nidx}, input.feature_set.size(), dh::ToSpan(inputs), shared_inputs,
diff --git a/src/tree/gpu_hist/evaluate_splits.cuh b/src/tree/gpu_hist/evaluate_splits.cuh
index e5105183c..a4c2e271f 100644
--- a/src/tree/gpu_hist/evaluate_splits.cuh
+++ b/src/tree/gpu_hist/evaluate_splits.cuh
@@ -167,19 +167,20 @@ class GPUHistEvaluator {
                 TreeEvaluator::SplitEvaluator evaluator);
   // impl of evaluate splits, contains CUDA kernels so it's public
-  void LaunchEvaluateSplits(bst_feature_t number_active_features,common::Span d_inputs,EvaluateSplitSharedInputs shared_inputs,
+  void LaunchEvaluateSplits(bst_feature_t number_active_features,common::Span d_inputs,EvaluateSplitSharedInputs shared_inputs,
                             TreeEvaluator::SplitEvaluator evaluator, common::Span out_splits);
 
   /**
    * \brief Evaluate splits for left and right nodes.
    */
   void EvaluateSplits(const std::vector &nidx,bst_feature_t number_active_features,common::Span d_inputs,
-                      EvaluateSplitSharedInputs shared_inputs,
+                      EvaluateSplitSharedInputs shared_inputs,
                       common::Span out_splits);
 
   /**
    * \brief Evaluate splits for root node.
    */
-  GPUExpandEntry EvaluateSingleSplit(EvaluateSplitInputs input,EvaluateSplitSharedInputs shared_inputs, float weight);
+  GPUExpandEntry EvaluateSingleSplit(EvaluateSplitInputs input,
+                                     EvaluateSplitSharedInputs shared_inputs);
 };
 }  // namespace tree
 }  // namespace xgboost
diff --git a/src/tree/gpu_hist/row_partitioner.cuh b/src/tree/gpu_hist/row_partitioner.cuh
index 4ba0bd27f..4f0a4142e 100644
--- a/src/tree/gpu_hist/row_partitioner.cuh
+++ b/src/tree/gpu_hist/row_partitioner.cuh
@@ -272,7 +272,7 @@ class RowPartitioner {
     dh::TemporaryArray> d_batch_info(nidx.size());
 
     std::size_t total_rows = 0;
-    for (int i = 0; i < nidx.size(); i++) {
+    for (size_t i = 0; i < nidx.size(); i++) {
      h_batch_info[i] = {ridx_segments_.at(nidx.at(i)).segment, op_data.at(i)};
       total_rows += ridx_segments_.at(nidx.at(i)).segment.Size();
     }
@@ -295,7 +295,7 @@ class RowPartitioner {
     dh::safe_cuda(cudaStreamSynchronize(stream_));
 
     // Update segments
-    for (int i = 0; i < nidx.size(); i++) {
+    for (size_t i = 0; i < nidx.size(); i++) {
       auto segment = ridx_segments_.at(nidx[i]).segment;
       auto left_count = h_counts[i];
       CHECK_LE(left_count, segment.Size());
diff --git a/src/tree/hist/evaluate_splits.h b/src/tree/hist/evaluate_splits.h
index c84b58d0b..002728be3 100644
--- a/src/tree/hist/evaluate_splits.h
+++ b/src/tree/hist/evaluate_splits.h
@@ -436,16 +436,14 @@ class HistEvaluator {
 *
 * \param p_last_tree    The last tree being updated by tree updater
 */
-template
+template
 void UpdatePredictionCacheImpl(GenericParameter const *ctx, RegTree const *p_last_tree,
                                std::vector const &partitioner,
-                               HistEvaluator const &hist_evaluator,
                                linalg::VectorView out_preds) {
   CHECK_GT(out_preds.Size(), 0U);
   CHECK(p_last_tree);
 
   auto const &tree = *p_last_tree;
-  auto evaluator = hist_evaluator.Evaluator();
   CHECK_EQ(out_preds.DeviceIdx(), GenericParameter::kCpuId);
   size_t n_nodes = p_last_tree->GetNodes().size();
   for (auto &part : partitioner) {
diff --git a/src/tree/updater_approx.cc b/src/tree/updater_approx.cc
index ff0412c66..8bb787d7d 100644
--- a/src/tree/updater_approx.cc
+++ b/src/tree/updater_approx.cc
@@ -116,7 +116,7 @@ class GloablApproxBuilder {
     // Caching prediction seems redundant for approx tree method, as sketching takes up
     // majority of training time.
     CHECK_EQ(out_preds.Size(), data->Info().num_row_);
-    UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, evaluator_, out_preds);
+    UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, out_preds);
     monitor_->Stop(__func__);
   }
 
diff --git a/src/tree/updater_gpu_hist.cu b/src/tree/updater_gpu_hist.cu
index aba4ca1dd..79aad1708 100644
--- a/src/tree/updater_gpu_hist.cu
+++ b/src/tree/updater_gpu_hist.cu
@@ -272,7 +272,7 @@ struct GPUHistMakerDevice {
     hist.Reset();
   }
 
-  GPUExpandEntry EvaluateRootSplit(GradientPairPrecise root_sum, float weight) {
+  GPUExpandEntry EvaluateRootSplit(GradientPairPrecise root_sum) {
     int nidx = RegTree::kRoot;
     GPUTrainingParam gpu_param(param);
     auto sampled_features = column_sampler.GetFeatureSet(0);
@@ -285,7 +285,7 @@ struct GPUHistMakerDevice {
         gpu_param, feature_types, matrix.feature_segments,
         matrix.gidx_fvalue_map, matrix.min_fvalue,
     };
-    auto split = this->evaluator_.EvaluateSingleSplit(inputs, shared_inputs, weight);
+    auto split = this->evaluator_.EvaluateSingleSplit(inputs, shared_inputs);
     return split;
   }
 
@@ -298,11 +298,11 @@ struct GPUHistMakerDevice {
     auto h_node_inputs = pinned2.GetSpan(2 * candidates.size());
     auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
     EvaluateSplitSharedInputs shared_inputs{
-        GPUTrainingParam(param), feature_types, matrix.feature_segments,
+        GPUTrainingParam{param}, feature_types, matrix.feature_segments,
         matrix.gidx_fvalue_map, matrix.min_fvalue,
     };
     dh::TemporaryArray entries(2 * candidates.size());
-    for (int i = 0; i < candidates.size(); i++) {
+    for (size_t i = 0; i < candidates.size(); i++) {
       auto candidate = candidates.at(i);
       int left_nidx = tree[candidate.nid].LeftChild();
       int right_nidx = tree[candidate.nid].RightChild();
@@ -378,7 +378,7 @@ struct GPUHistMakerDevice {
     std::vector left_nidx(candidates.size());
     std::vector right_nidx(candidates.size());
     std::vector split_data(candidates.size());
-    for (int i = 0; i < candidates.size(); i++) {
+    for (size_t i = 0; i < candidates.size(); i++) {
       auto& e = candidates[i];
       RegTree::Node split_node = (*p_tree)[e.nid];
       auto split_type = p_tree->NodeSplitType(e.nid);
@@ -658,7 +658,7 @@ struct GPUHistMakerDevice {
     (*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
 
     // Generate first split
-    auto root_entry = this->EvaluateRootSplit(root_sum, weight);
+    auto root_entry = this->EvaluateRootSplit(root_sum);
     return root_entry;
   }
 
diff --git a/src/tree/updater_quantile_hist.cc b/src/tree/updater_quantile_hist.cc
index bd023af13..2ccc426cf 100644
--- a/src/tree/updater_quantile_hist.cc
+++ b/src/tree/updater_quantile_hist.cc
@@ -257,7 +257,7 @@ bool QuantileHistMaker::Builder::UpdatePredictionCache(DMatrix const *data,
   }
   monitor_->Start(__func__);
   CHECK_EQ(out_preds.Size(), data->Info().num_row_);
-  UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, *evaluator_, out_preds);
+  UpdatePredictionCacheImpl(ctx_, p_last_tree_, partitioner_, out_preds);
   monitor_->Stop(__func__);
   return true;
 }
diff --git a/tests/cpp/tree/gpu_hist/test_evaluate_splits.cu b/tests/cpp/tree/gpu_hist/test_evaluate_splits.cu
index f2750ed67..eec029c92 100644
--- a/tests/cpp/tree/gpu_hist/test_evaluate_splits.cu
+++ b/tests/cpp/tree/gpu_hist/test_evaluate_splits.cu
@@ -65,8 +65,7 @@ void TestEvaluateSingleSplit(bool is_categorical) {
   GPUHistEvaluator evaluator{
       tparam, static_cast(feature_set.size()), 0};
   evaluator.Reset(cuts, dh::ToSpan(feature_types), feature_set.size(), tparam, 0);
-  DeviceSplitCandidate result =
-      evaluator.EvaluateSingleSplit(input, shared_inputs,0).split;
+  DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
 
   EXPECT_EQ(result.findex, 1);
   EXPECT_EQ(result.fvalue, 11.0);
@@ -111,7 +110,7 @@ TEST(GpuHist, EvaluateSingleSplitMissing) {
   };
 
   GPUHistEvaluator evaluator(tparam, feature_set.size(), 0);
-  DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs,0).split;
+  DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
 
   EXPECT_EQ(result.findex, 0);
   EXPECT_EQ(result.fvalue, 1.0);
@@ -124,7 +123,7 @@ TEST(GpuHist, EvaluateSingleSplitEmpty) {
   TrainParam tparam = ZeroParam();
   GPUHistEvaluator evaluator(tparam, 1, 0);
   DeviceSplitCandidate result =
-      evaluator.EvaluateSingleSplit(EvaluateSplitInputs{}, EvaluateSplitSharedInputs{}, 0).split;
+      evaluator.EvaluateSingleSplit(EvaluateSplitInputs{}, EvaluateSplitSharedInputs{}).split;
   EXPECT_EQ(result.findex, -1);
   EXPECT_LT(result.loss_chg, 0.0f);
 }
@@ -161,7 +160,7 @@ TEST(GpuHist, EvaluateSingleSplitFeatureSampling) {
   };
 
   GPUHistEvaluator evaluator(tparam, feature_min_values.size(), 0);
-  DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input,shared_inputs, 0).split;
+  DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
 
   EXPECT_EQ(result.findex, 1);
   EXPECT_EQ(result.fvalue, 11.0);
@@ -201,7 +200,7 @@ TEST(GpuHist, EvaluateSingleSplitBreakTies) {
   };
 
   GPUHistEvaluator evaluator(tparam, feature_min_values.size(), 0);
-  DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input,shared_inputs, 0).split;
+  DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input,shared_inputs).split;
 
   EXPECT_EQ(result.findex, 0);
   EXPECT_EQ(result.fvalue, 1.0);
@@ -279,18 +278,13 @@ TEST_F(TestPartitionBasedSplit, GpuHist) {
                            cudaMemcpyHostToDevice));
   dh::device_vector feature_set{std::vector{0}};
 
-  EvaluateSplitInputs input{0,0,
-                            total_gpair_,
-                            dh::ToSpan(feature_set),
-                            dh::ToSpan(d_hist)};
+  EvaluateSplitInputs input{0, 0, total_gpair_, dh::ToSpan(feature_set), dh::ToSpan(d_hist)};
   EvaluateSplitSharedInputs shared_inputs{
-      GPUTrainingParam{ param_},
-      dh::ToSpan(ft),
-      cuts_.cut_ptrs_.ConstDeviceSpan(),
-      cuts_.cut_values_.ConstDeviceSpan(),
-      cuts_.min_vals_.ConstDeviceSpan(),
+      GPUTrainingParam{param_}, dh::ToSpan(ft),
+      cuts_.cut_ptrs_.ConstDeviceSpan(), cuts_.cut_values_.ConstDeviceSpan(),
+      cuts_.min_vals_.ConstDeviceSpan(),
   };
-  auto split = evaluator.EvaluateSingleSplit(input, shared_inputs, 0).split;
+  auto split = evaluator.EvaluateSingleSplit(input, shared_inputs).split;
   ASSERT_NEAR(split.loss_chg, best_score_, 1e-16);
 }
 }  // namespace tree
diff --git a/tests/cpp/tree/gpu_hist/test_row_partitioner.cu b/tests/cpp/tree/gpu_hist/test_row_partitioner.cu
index 520cc3cd0..fb9e03c35 100644
--- a/tests/cpp/tree/gpu_hist/test_row_partitioner.cu
+++ b/tests/cpp/tree/gpu_hist/test_row_partitioner.cu
@@ -63,7 +63,7 @@ void TestSortPositionBatch(const std::vector& ridx_in, const std::vector
 > d_batch_info(segments.size());
 
   std::size_t total_rows = 0;
-  for (int i = 0; i < segments.size(); i++) {
+  for (size_t i = 0; i < segments.size(); i++) {
     h_batch_info[i] = {segments.at(i), 0};
     total_rows += segments.at(i).Size();
   }
@@ -76,7 +76,7 @@ void TestSortPositionBatch(const std::vector& ridx_in, const std::vector
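
Note (not part of the patch above): the two warning classes this commit silences are -Wunused-parameter and -Wsign-compare. The sketch below is a minimal, standalone illustration of the shape of both fixes; the function and variable names are made up for the example and do not come from XGBoost.

#include <cstddef>
#include <iostream>
#include <vector>

// Before (simplified): `weight` is never read, which triggers
// -Wunused-parameter, and the `int` loop index is compared against the
// unsigned value returned by size(), which triggers -Wsign-compare:
//
//   long SumCounts(const std::vector<long>& counts, float weight) {
//     long total = 0;
//     for (int i = 0; i < counts.size(); i++) total += counts[i];
//     return total;
//   }

// After: the unused parameter is dropped and the loop index is std::size_t,
// mirroring the `float weight` removals and the `int i` -> `size_t i`
// changes in the patch.
long SumCounts(const std::vector<long>& counts) {
  long total = 0;
  for (std::size_t i = 0; i < counts.size(); i++) {
    total += counts[i];
  }
  return total;
}

int main() {
  std::vector<long> counts{1, 2, 3};
  std::cout << SumCounts(counts) << "\n";  // prints 6
  return 0;
}

Compiling the commented-out "before" version with `g++ -Wall -Wextra` reports both warnings; the "after" version compiles cleanly and behaves identically.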