diff --git a/src/common/linalg_op.h b/src/common/linalg_op.h
index 7e908135c..dae2112c0 100644
--- a/src/common/linalg_op.h
+++ b/src/common/linalg_op.h
@@ -60,7 +60,7 @@ void ElementWiseKernel(Context const* ctx, linalg::TensorView<T, D> t, Fn&& fn)
   }
   ElementWiseKernelHost(t, ctx->Threads(), fn);
 }
-#endif  // !defined(XGBOOST_USE_CUDA) && !defined(XGBOOST_USE_
+#endif  // !defined(XGBOOST_USE_CUDA) && !defined(XGBOOST_USE_HIP)
 
 template <typename T, std::int32_t D>
 auto cbegin(TensorView<T, D> const& v) {  // NOLINT
diff --git a/src/common/transform.h b/src/common/transform.h
index 389ff7f6e..fd6f82817 100644
--- a/src/common/transform.h
+++ b/src/common/transform.h
@@ -145,7 +145,7 @@ class Transform {
 
 #if defined(XGBOOST_USE_HIP)
     dh::safe_cuda(hipSetDevice(device_));
-#else
+#elif defined(XGBOOST_USE_CUDA)
     dh::safe_cuda(cudaSetDevice(device_));
 #endif
 
diff --git a/src/data/array_interface.h b/src/data/array_interface.h
index d62936e90..53d4ae266 100644
--- a/src/data/array_interface.h
+++ b/src/data/array_interface.h
@@ -603,7 +603,7 @@ void DispatchDType(ArrayInterface<1> const array, std::int32_t device, Fn fn) {
   };
   switch (array.type) {
     case ArrayInterfaceHandler::kF2: {
-#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600
+#if (defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600) || defined(__HIP_PLATFORM_AMD__)
       dispatch(__half{});
 #endif
       break;
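
Note (not part of the patch): the transform.h hunk replaces a bare #else with #elif defined(XGBOOST_USE_CUDA). With #else, a CPU-only build, where neither XGBOOST_USE_HIP nor XGBOOST_USE_CUDA is defined, would still fall into the cudaSetDevice() branch and fail to compile against missing CUDA headers; with #elif, that branch is compiled only when CUDA is actually enabled. A minimal stand-alone sketch of the pattern, using the hypothetical function name SetDeviceForBuild and stream output in place of the real dh::safe_cuda calls:

    // port_guard_demo.cc -- illustrative sketch only, not XGBoost code.
    #include <iostream>

    void SetDeviceForBuild(int device) {  // hypothetical name
    #if defined(XGBOOST_USE_HIP)
      // dh::safe_cuda(hipSetDevice(device));   // what the AMD build runs
      std::cout << "HIP device " << device << "\n";
    #elif defined(XGBOOST_USE_CUDA)
      // dh::safe_cuda(cudaSetDevice(device));  // what the NVIDIA build runs
      std::cout << "CUDA device " << device << "\n";
    #else
      (void)device;  // CPU-only build: selecting a device is a no-op
      std::cout << "CPU-only build, no device to set\n";
    #endif
    }

    int main() { SetDeviceForBuild(0); }

Compiling with -DXGBOOST_USE_HIP, -DXGBOOST_USE_CUDA, or neither selects exactly one branch, so the CPU-only build now compiles the function to a harmless no-op instead of referencing an unavailable CUDA symbol.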
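Note (not part of the patch): the array_interface.h hunk widens the guard around the float16 case. dispatch(__half{}) is only legal where a native half type exists: in NVIDIA device code for compute capability 6.0 and up (__CUDA_ARCH__ >= 600) and, with this change, in AMD builds as well (__HIP_PLATFORM_AMD__); everywhere else the case must fall through to break without mentioning __half at all. A hedged sketch of the same guard, with the hypothetical helper DispatchF16 standing in for the real DispatchDType (a real GPU build would additionally need cuda_fp16.h or the HIP equivalent in scope):

    // half_guard_demo.cc -- illustrative sketch only, not XGBoost code.
    #include <cstdio>

    template <typename Fn>
    void DispatchF16(Fn fn) {  // hypothetical stand-in for DispatchDType
    #if (defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600) || defined(__HIP_PLATFORM_AMD__)
      fn(__half{});  // native float16 is available on these targets
    #else
      (void)fn;  // host/CPU path: the float16 case is compiled out entirely
      std::printf("float16 unsupported on this target; skipping dispatch\n");
    #endif
    }

    int main() {
      DispatchF16([](auto v) { std::printf("dispatched a %zu-byte value\n", sizeof(v)); });
      return 0;
    }

On a plain host compile neither macro is defined, so the sketch builds and runs without any GPU toolchain, mirroring how the guarded kF2 case in DispatchDType degrades to a silent break.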