CUDAHooksInterface.h

#pragma once

#include <c10/core/Allocator.h>
#include <c10/util/Exception.h>
#include <c10/util/Optional.h>
#include <c10/util/Registry.h>

#include <cstddef>
#include <functional>
#include <memory>
#include <string>

// Forward-declares at::cuda::NVRTC
namespace at {
class Context;
struct Generator;
namespace cuda {
struct NVRTC;
} // namespace cuda
} // namespace at
// NB: Class must live in `at` due to limitations of Registry.h.
namespace at {

#ifdef _MSC_VER
constexpr const char* CUDA_HELP =
    "PyTorch splits its backend into two shared libraries: a CPU library "
    "and a CUDA library; this error has occurred because you are trying "
    "to use some CUDA functionality, but the CUDA library has not been "
    "loaded by the dynamic linker for some reason. The CUDA library MUST "
    "be loaded, EVEN IF you don't directly use any symbols from the CUDA library! "
    "One common culprit is a lack of -INCLUDE:?warp_size@cuda@at@@YAHXZ "
    "in your link arguments; many dynamic linkers will delete dynamic library "
    "dependencies if you don't depend on any of their symbols. You can check "
    "if this has occurred by using link on your binary to see if there is a "
    "dependency on *_cuda.dll library.";
#else
constexpr const char* CUDA_HELP =
    "PyTorch splits its backend into two shared libraries: a CPU library "
    "and a CUDA library; this error has occurred because you are trying "
    "to use some CUDA functionality, but the CUDA library has not been "
    "loaded by the dynamic linker for some reason. The CUDA library MUST "
    "be loaded, EVEN IF you don't directly use any symbols from the CUDA library! "
    "One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many "
    "dynamic linkers will delete dynamic library dependencies if you don't "
    "depend on any of their symbols. You can check if this has occurred by "
    "using ldd on your binary to see if there is a dependency on *_cuda.so "
    "library.";
#endif
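
// For illustration (the library names below are assumptions, not part of this
// header): a Linux link line that keeps the CUDA library as a dependency even
// though no symbol from it is referenced directly might look like
//
//   g++ main.o -Wl,--no-as-needed -ltorch_cuda -Wl,--as-needed -ltorch_cpu
//
// The key point is that --no-as-needed must be in effect at the point where
// the *_cuda library is named on the command line.
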
// The CUDAHooksInterface is an omnibus interface for any CUDA functionality
// which we may want to call into from CPU code (and thus must be dynamically
// dispatched, to allow for separate compilation of CUDA code). How do I
// decide if a function should live in this class? There are two tests:
//
// 1. Does the *implementation* of this function require linking against
//    CUDA libraries?
//
// 2. Is this function *called* from non-CUDA ATen code?
//
// (2) should filter out many ostensible use-cases, since many times a CUDA
// function provided by ATen is only really ever used by actual CUDA code.
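//
// For example, versionCuDNN() below satisfies both tests: its implementation
// has to link against cuDNN, and it is called from CPU-side ATen code, which
// is exactly why it must be routed through this dynamically dispatched
// interface.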
//
// TODO: Consider putting the stub definitions in another class, so that one
// never forgets to implement each virtual function in the real implementation
// in CUDAHooks. This probably doesn't buy us much though.
struct TORCH_API CUDAHooksInterface {
  // This should never actually be implemented, but it is used to
  // squelch -Werror=non-virtual-dtor
  virtual ~CUDAHooksInterface() = default;

  // Initialize THCState and, transitively, the CUDA state
  virtual void initCUDA() const {
    TORCH_CHECK(false, "Cannot initialize CUDA without ATen_cuda library. ", CUDA_HELP);
  }

  virtual const Generator& getDefaultCUDAGenerator(DeviceIndex device_index = -1) const {
    (void)device_index; // Suppress unused variable warning
    TORCH_CHECK(false, "Cannot get default CUDA generator without ATen_cuda library. ", CUDA_HELP);
  }

  virtual Device getDeviceFromPtr(void* /*data*/) const {
    TORCH_CHECK(false, "Cannot get device of pointer on CUDA without ATen_cuda library. ", CUDA_HELP);
  }
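
  // Fallback for builds without the CUDA backend: no pointer can be pinned,
  // so this safely reports false instead of throwing.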
  virtual bool isPinnedPtr(void* /*data*/) const {
    return false;
  }

  virtual bool hasCUDA() const {
    return false;
  }

  virtual bool hasCUDART() const {
    return false;
  }

  virtual bool hasMAGMA() const {
    return false;
  }

  virtual bool hasCuDNN() const {
    return false;
  }

  virtual bool hasCuSOLVER() const {
    return false;
  }

  virtual bool hasROCM() const {
    return false;
  }

  virtual const at::cuda::NVRTC& nvrtc() const {
    TORCH_CHECK(false, "NVRTC requires CUDA. ", CUDA_HELP);
  }

  virtual bool hasPrimaryContext(int64_t device_index) const {
    TORCH_CHECK(false, "Cannot call hasPrimaryContext(", device_index, ") without ATen_cuda library. ", CUDA_HELP);
  }
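
  // -1 is the sentinel for "no current CUDA device"; it is returned here so
  // that CPU-only builds can query the current device without throwing.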
  virtual int64_t current_device() const {
    return -1;
  }

  virtual Allocator* getPinnedMemoryAllocator() const {
    TORCH_CHECK(false, "Pinned memory requires CUDA. ", CUDA_HELP);
  }

  virtual Allocator* getCUDADeviceAllocator() const {
    TORCH_CHECK(false, "CUDADeviceAllocator requires CUDA. ", CUDA_HELP);
  }

  virtual bool compiledWithCuDNN() const {
    return false;
  }

  virtual bool compiledWithMIOpen() const {
    return false;
  }

  virtual bool supportsDilatedConvolutionWithCuDNN() const {
    return false;
  }

  virtual bool supportsDepthwiseConvolutionWithCuDNN() const {
    return false;
  }

  virtual bool supportsBFloat16ConvolutionWithCuDNNv8() const {
    return false;
  }

  virtual long versionCuDNN() const {
    TORCH_CHECK(false, "Cannot query cuDNN version without ATen_cuda library. ", CUDA_HELP);
  }

  virtual long versionCUDART() const {
    TORCH_CHECK(false, "Cannot query CUDART version without ATen_cuda library. ", CUDA_HELP);
  }

  virtual std::string showConfig() const {
    TORCH_CHECK(false, "Cannot query detailed CUDA version without ATen_cuda library. ", CUDA_HELP);
  }

  virtual double batchnormMinEpsilonCuDNN() const {
    TORCH_CHECK(false,
        "Cannot query batchnormMinEpsilonCuDNN() without ATen_cuda library. ", CUDA_HELP);
  }

  virtual int64_t cuFFTGetPlanCacheMaxSize(int64_t /*device_index*/) const {
    TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
  }

  virtual void cuFFTSetPlanCacheMaxSize(int64_t /*device_index*/, int64_t /*max_size*/) const {
    TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
  }

  virtual int64_t cuFFTGetPlanCacheSize(int64_t /*device_index*/) const {
    TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
  }

  virtual void cuFFTClearPlanCache(int64_t /*device_index*/) const {
    TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
  }
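
  // With no ATen_cuda library loaded, zero devices are visible; returning 0
  // lets device-count queries succeed on CPU-only builds.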
  virtual int getNumGPUs() const {
    return 0;
  }

  virtual void deviceSynchronize(int64_t /*device_index*/) const {
    TORCH_CHECK(false, "Cannot synchronize CUDA device without ATen_cuda library. ", CUDA_HELP);
  }
};

// NB: dummy argument to suppress "ISO C++11 requires at least one argument
// for the "..." in a variadic macro"
struct TORCH_API CUDAHooksArgs {};

C10_DECLARE_REGISTRY(CUDAHooksRegistry, CUDAHooksInterface, CUDAHooksArgs);
#define REGISTER_CUDA_HOOKS(clsname) \
  C10_REGISTER_CLASS(CUDAHooksRegistry, clsname, clsname)
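
// A minimal sketch of how a CUDA-side implementation would hook itself in
// (the class name below is illustrative, not the actual implementation):
//
//   struct MyCUDAHooks : public at::CUDAHooksInterface {
//     explicit MyCUDAHooks(at::CUDAHooksArgs) {}
//     bool hasCUDA() const override { return true; }
//     // ... override whichever virtuals the backend supports ...
//   };
//   REGISTER_CUDA_HOOKS(MyCUDAHooks);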

namespace detail {
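// Called from CPU-side ATen code to reach whichever hooks implementation (if
// any) was registered; for example:
//
//   if (at::detail::getCUDAHooks().hasCUDA()) {
//     // safe to take CUDA-dependent code paths
//   }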
TORCH_API const CUDAHooksInterface& getCUDAHooks();
} // namespace detail
} // namespace at