From 7d96758382e33c737e73b73663bb9ab6881e1c25 Mon Sep 17 00:00:00 2001
From: amdsc21 <96135754+amdsc21@users.noreply.github.com>
Date: Sat, 11 Mar 2023 06:57:24 +0100
Subject: [PATCH] macro format

---
 src/context.cc                        | 2 +-
 src/tree/fit_stump.cc                 | 2 +-
 src/tree/gpu_hist/row_partitioner.cuh | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/context.cc b/src/context.cc
index 6d4eb6d8a..74de5b834 100644
--- a/src/context.cc
+++ b/src/context.cc
@@ -47,7 +47,7 @@ void Context::ConfigureGpuId(bool require_gpu) {
   // Just set it to CPU, don't think about it.
   this->UpdateAllowUnknown(Args{{"gpu_id", std::to_string(kCpuId)}});
   (void)(require_gpu);
-#endif  // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_
+#endif  // defined(XGBOOST_USE_CUDA) || defined(XGBOOST_USE_HIP)
 
   common::SetDevice(this->gpu_id);
 }
diff --git a/src/tree/fit_stump.cc b/src/tree/fit_stump.cc
index 1a35da374..4213e74ad 100644
--- a/src/tree/fit_stump.cc
+++ b/src/tree/fit_stump.cc
@@ -61,7 +61,7 @@ inline void FitStump(Context const*, linalg::TensorView<GradientPair const, 2>,
                       linalg::VectorView<float>) {
   common::AssertGPUSupport();
 }
-#endif  // !defined(XGBOOST_USE_CUDA) && !defined(XGBOOST_USE_C
+#endif  // !defined(XGBOOST_USE_CUDA) && !defined(XGBOOST_USE_HIP)
 }  // namespace cuda_impl
 
 void FitStump(Context const* ctx, HostDeviceVector<GradientPair> const& gpair,
diff --git a/src/tree/gpu_hist/row_partitioner.cuh b/src/tree/gpu_hist/row_partitioner.cuh
index acacc40e8..5732ad0fe 100644
--- a/src/tree/gpu_hist/row_partitioner.cuh
+++ b/src/tree/gpu_hist/row_partitioner.cuh
@@ -124,7 +124,7 @@ void SortPositionBatch(common::Span<const PerNodeData<OpDataT>> d_batch_info,
                         dh::device_vector<int8_t>* tmp,
 #if defined(XGBOOST_USE_HIP)
                         hipStream_t stream
-#else
+#elif defined(XGBOOST_USE_CUDA)
                         cudaStream_t stream
 #endif
                         ) {