CUDAException.h

#pragma once

#include <c10/cuda/CUDADeviceAssertionHost.h>
#include <c10/cuda/CUDAMacros.h>
#include <c10/cuda/CUDAMiscFunctions.h>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <c10/util/irange.h>

#include <cuda.h>
// Note [CHECK macro]
// ~~~~~~~~~~~~~~~~~~
// This is a macro so that AT_ERROR can get accurate __LINE__
// and __FILE__ information. We could split this into a short
// macro and a function implementation if we pass along __LINE__
// and __FILE__, but no one has found this worth doing.
//
// Used to denote errors from the CUDA framework.
// This needs to be declared here instead of in util/Exception.h for proper
// conversion during hipify.
namespace c10 {
class C10_CUDA_API CUDAError : public c10::Error {
  using Error::Error;
};
} // namespace c10
#define C10_CUDA_CHECK(EXPR) \
  do { \
    const cudaError_t __err = EXPR; \
    c10::cuda::c10_cuda_check_implementation( \
        static_cast<int32_t>(__err), \
        __FILE__, \
        __func__, /* Line number data type not well-defined between \
                     compilers, so we perform an explicit cast */ \
        static_cast<uint32_t>(__LINE__), \
        true); \
  } while (0)
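
// Illustrative usage sketch (not part of the original header): wrap any CUDA
// runtime call that returns cudaError_t so a failure raises a c10::CUDAError
// with file/line information attached. The buffer name below is hypothetical.
//
//   float* d_buf = nullptr;
//   C10_CUDA_CHECK(cudaMalloc(&d_buf, 1024 * sizeof(float)));
//   C10_CUDA_CHECK(cudaMemset(d_buf, 0, 1024 * sizeof(float)));
//   C10_CUDA_CHECK(cudaFree(d_buf));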
#define C10_CUDA_CHECK_WARN(EXPR) \
  do { \
    const cudaError_t __err = EXPR; \
    if (C10_UNLIKELY(__err != cudaSuccess)) { \
      auto error_unused C10_UNUSED = cudaGetLastError(); \
      (void)error_unused; \
      TORCH_WARN("CUDA warning: ", cudaGetErrorString(__err)); \
    } \
  } while (0)
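
// Illustrative usage sketch (not part of the original header): use the WARN
// variant where a failure should be reported but must not throw, e.g. in
// cleanup paths or destructors. The stream variable is hypothetical.
//
//   C10_CUDA_CHECK_WARN(cudaStreamDestroy(stream));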
// Indicates that a CUDA error is handled in a non-standard way
#define C10_CUDA_ERROR_HANDLED(EXPR) EXPR

// Intentionally ignore a CUDA error
#define C10_CUDA_IGNORE_ERROR(EXPR) \
  do { \
    const cudaError_t __err = EXPR; \
    if (C10_UNLIKELY(__err != cudaSuccess)) { \
      cudaError_t error_unused C10_UNUSED = cudaGetLastError(); \
      (void)error_unused; \
    } \
  } while (0)
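
// Illustrative usage sketch (not part of the original header): ignore an
// error that is expected and benign, e.g. freeing device memory during
// process shutdown when the driver may already be torn down. Note that the
// macro also calls cudaGetLastError() so the sticky error state is cleared.
// The pointer name is hypothetical.
//
//   C10_CUDA_IGNORE_ERROR(cudaFree(d_scratch));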
// Clear the last CUDA error
#define C10_CUDA_CLEAR_ERROR() \
  do { \
    cudaError_t error_unused C10_UNUSED = cudaGetLastError(); \
    (void)error_unused; \
  } while (0)
// This should be used directly after every kernel launch to ensure
// the launch happened correctly and provide an early, close-to-source
// diagnostic if it didn't.
#define C10_CUDA_KERNEL_LAUNCH_CHECK() C10_CUDA_CHECK(cudaGetLastError())
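
// Illustrative usage sketch (not part of the original header): pair every
// raw <<<...>>> launch with the check so configuration errors (bad grid
// dimensions, missing device, etc.) surface at the launch site rather than
// at a later synchronization point. Kernel and argument names are
// hypothetical.
//
//   my_kernel<<<grid, block, 0, stream>>>(d_in, d_out, n);
//   C10_CUDA_KERNEL_LAUNCH_CHECK();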
/// Launches a CUDA kernel, appending to it all the information needed to
/// handle device-side assertion failures. Checks that the launch was
/// successful.
#define TORCH_DSA_KERNEL_LAUNCH( \
    kernel, blocks, threads, shared_mem, stream, ...) \
  do { \
    auto& launch_registry = \
        c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref(); \
    kernel<<<blocks, threads, shared_mem, stream>>>( \
        __VA_ARGS__, \
        launch_registry.get_uvm_assertions_ptr_for_current_device(), \
        launch_registry.insert( \
            __FILE__, __FUNCTION__, __LINE__, #kernel, stream.id())); \
    C10_CUDA_KERNEL_LAUNCH_CHECK(); \
  } while (0)
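
// Illustrative usage sketch (not part of the original header): because the
// macro appends two extra trailing arguments (the device-side assertion
// buffer and the launch-registry index) and calls stream.id(), the kernel
// must declare two matching trailing parameters and the stream is assumed to
// be a c10::cuda::CUDAStream. The kernel, its arguments, the parameter
// spelling, and the launch configuration below are hypothetical.
//
//   __global__ void fill_kernel(
//       float* out,
//       int n,
//       c10::cuda::DeviceAssertionsData* const assertions_data,
//       uint32_t assertion_caller_id) {
//     // ... kernel body using CUDA_KERNEL_ASSERT-style checks ...
//   }
//
//   TORCH_DSA_KERNEL_LAUNCH(fill_kernel, grid, block, 0, stream, d_out, n);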
namespace c10 {
namespace cuda {

/// In the event of a CUDA failure, formats a nice error message about that
/// failure and also checks for device-side assertion failures.
C10_CUDA_API void c10_cuda_check_implementation(
    const int32_t err,
    const char* filename,
    const char* function_name,
    const int line_number,
    const bool include_device_assertions);

} // namespace cuda
} // namespace c10