Export.h

#ifndef C10_MACROS_EXPORT_H_
#define C10_MACROS_EXPORT_H_

/* Header file to define the common scaffolding for exported symbols.
 *
 * Exporting symbols is by itself quite a tricky situation to deal with, and if
 * you are hitting this file, make sure you start with the background here:
 * - Linux: https://gcc.gnu.org/wiki/Visibility
 * - Windows:
 *   https://docs.microsoft.com/en-us/cpp/cpp/dllexport-dllimport?view=vs-2017
 *
 * Do NOT include this file directly. Instead, use c10/macros/Macros.h
 */
// You do not need to edit this part of the file unless you are changing the
// core pytorch export abstractions.
//
// This part defines the C10 core export and import macros. This is controlled
// by whether we are building shared libraries or not, which is determined
// during build time and codified in c10/macros/cmake_macros.h.
// When the library is built as a shared lib, EXPORT and IMPORT will contain
// visibility attributes. If it is being built as a static lib, then EXPORT
// and IMPORT basically have no effect.
//
// As a rule of thumb, you should almost NEVER mix static and shared builds for
// libraries that depend on c10. That is, if c10 is built as a static library,
// we recommend that everything depending on c10 also be built statically. If
// c10 is built as a shared library, everything depending on it should be built
// as shared. In the PyTorch project, all native libraries shall use the macro
// C10_BUILD_SHARED_LIB to check whether pytorch is building shared or static
// libraries.
//
// For build systems that do not directly depend on CMake and directly build
// from the source directory (such as Buck), one may not have a cmake_macros.h
// file at all. In this case, the build system is responsible for providing
// correct macro definitions corresponding to the cmake_macros.h.in file.
//
// In such scenarios, one should define the macro
// C10_USING_CUSTOM_GENERATED_MACROS
// to inform this header that it does not need to include the cmake_macros.h
// file.
#ifndef C10_USING_CUSTOM_GENERATED_MACROS
#include <c10/macros/cmake_macros.h>
#endif // C10_USING_CUSTOM_GENERATED_MACROS
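
// For illustration only (a sketch, not an endorsed configuration): a build
// system that bypasses CMake could skip generating cmake_macros.h entirely and
// pass the equivalent definitions on the compiler command line, e.g.
// -DC10_USING_CUSTOM_GENERATED_MACROS together with -DC10_BUILD_SHARED_LIB
// when c10 is being built and consumed as a shared library. Both macro names
// come from this header; the exact flag set depends on your build.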

#ifdef _WIN32
#define C10_HIDDEN
#if defined(C10_BUILD_SHARED_LIBS)
#define C10_EXPORT __declspec(dllexport)
#define C10_IMPORT __declspec(dllimport)
#else
#define C10_EXPORT
#define C10_IMPORT
#endif
#else // _WIN32
#if defined(__GNUC__)
#define C10_EXPORT __attribute__((__visibility__("default")))
#define C10_HIDDEN __attribute__((__visibility__("hidden")))
#else // defined(__GNUC__)
#define C10_EXPORT
#define C10_HIDDEN
#endif // defined(__GNUC__)
#define C10_IMPORT C10_EXPORT
#endif // _WIN32
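
// A minimal sketch of what these macros expand to on a GCC/Clang shared-library
// build on Linux (the function names below are hypothetical):
//
//   C10_EXPORT void my_function();
//     // -> __attribute__((__visibility__("default"))) void my_function();
//   C10_HIDDEN void my_helper();
//     // -> __attribute__((__visibility__("hidden"))) void my_helper();
//
// On Windows, the same C10_EXPORT annotation routes to __declspec(dllexport)
// when building shared, and to nothing at all in a static build.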

#ifdef NO_EXPORT
#undef C10_EXPORT
#define C10_EXPORT
#endif

// Definition of an adaptive XX_API macro, that depends on whether you are
// building the library itself or not, routes to XX_EXPORT and XX_IMPORT.
// Basically, you will need to do this for each shared library that you are
// building, and the instructions are as follows: assuming that you are
// building a library called libawesome.so, you should:
// (1) for your cmake target (usually created by "add_library(awesome ...)"),
//     define a macro called AWESOME_BUILD_MAIN_LIB using
//     target_compile_definitions;
// (2) define the AWESOME_API macro similar to the one below (see the sketch
//     that follows).
// Then, in the source files of your awesome library, use AWESOME_API to
// annotate public symbols.
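//
// For example (a sketch only; "awesome" and the AWESOME_* names are
// hypothetical and used purely for illustration):
//
//   #ifdef AWESOME_BUILD_MAIN_LIB
//   #define AWESOME_API C10_EXPORT
//   #else
//   #define AWESOME_API C10_IMPORT
//   #endif
//
//   // in a public header of libawesome:
//   AWESOME_API void awesome_function();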
//
// Here, for the C10 library, we will define the macro C10_API for both import
// and export.

// This one is being used by libc10.so
#ifdef C10_BUILD_MAIN_LIB
#define C10_API C10_EXPORT
#else
#define C10_API C10_IMPORT
#endif

// This one is being used by libtorch.so
#ifdef CAFFE2_BUILD_MAIN_LIB
#define TORCH_API C10_EXPORT
#else
#define TORCH_API C10_IMPORT
#endif
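
// A minimal usage sketch (the declaration is hypothetical): a public symbol in
// a c10 header is annotated with C10_API, so it is exported (default
// visibility / dllexport) while libc10 itself is being built, i.e. when the
// build defines C10_BUILD_MAIN_LIB for that target, and imported by every
// other library and application that includes the header.
//
//   C10_API int some_public_function(int x);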

// You may be wondering: Whose brilliant idea was it to split torch_cuda into
// two pieces with confusing names?
//
// Once upon a time, there _was_ only TORCH_CUDA_API. All was happy until we
// tried to compile PyTorch for CUDA 11.1, which ran into relocation marker
// issues when linking big binaries.
// (https://github.com/pytorch/pytorch/issues/39968)
// We had two choices:
// (1) Stop supporting so many GPU architectures
// (2) Do something else
// We chose #2 and decided to split the behemoth that was torch_cuda into two
// smaller libraries, one with most of the core kernel functions
// (torch_cuda_cu) and the other that had, well, everything else
// (torch_cuda_cpp). The idea was this: instead of linking our static libraries
// (like the hefty libcudnn_static.a) with another huge library, torch_cuda,
// and running into pesky relocation marker issues, we could link our static
// libraries to a smaller part of torch_cuda (torch_cuda_cpp) and avoid the
// issues.

// libtorch_cuda_cu.so
#ifdef TORCH_CUDA_CU_BUILD_MAIN_LIB
#define TORCH_CUDA_CU_API C10_EXPORT
#elif defined(BUILD_SPLIT_CUDA)
#define TORCH_CUDA_CU_API C10_IMPORT
#endif

// libtorch_cuda_cpp.so
#ifdef TORCH_CUDA_CPP_BUILD_MAIN_LIB
#define TORCH_CUDA_CPP_API C10_EXPORT
#elif defined(BUILD_SPLIT_CUDA)
#define TORCH_CUDA_CPP_API C10_IMPORT
#endif

// libtorch_cuda.so (where torch_cuda_cu and torch_cuda_cpp are a part of the
// same api)
#ifdef TORCH_CUDA_BUILD_MAIN_LIB
#define TORCH_CUDA_CPP_API C10_EXPORT
#define TORCH_CUDA_CU_API C10_EXPORT
#elif !defined(BUILD_SPLIT_CUDA)
#define TORCH_CUDA_CPP_API C10_IMPORT
#define TORCH_CUDA_CU_API C10_IMPORT
#endif
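
// How the blocks above resolve, as a sketch (the macro definitions themselves
// are supplied by the build system, not by this header):
// - Split build: BUILD_SPLIT_CUDA is defined everywhere; torch_cuda_cu defines
//   TORCH_CUDA_CU_BUILD_MAIN_LIB and torch_cuda_cpp defines
//   TORCH_CUDA_CPP_BUILD_MAIN_LIB, so each library exports its own API macro
//   and imports the other's.
// - Non-split build: torch_cuda defines TORCH_CUDA_BUILD_MAIN_LIB, so both API
//   macros export; consumers define none of these macros and both import.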

#if defined(TORCH_HIP_BUILD_MAIN_LIB)
#define TORCH_HIP_API C10_EXPORT
#else
#define TORCH_HIP_API C10_IMPORT
#endif

// Enums only need to be exported on Windows for non-CUDA files
#if defined(_WIN32) && defined(__CUDACC__)
#define C10_API_ENUM C10_API
#else
#define C10_API_ENUM
#endif
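
// Usage sketch (the enum name and values are hypothetical): the macro sits
// between the enum-key and the enum name, where an export attribute goes.
//
//   enum class C10_API_ENUM MyScalarKind : int8_t { Float, Int };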

#endif // C10_MACROS_EXPORT_H_