// ATen Parallel.h — public API for intra-op and inter-op parallelism.
  1. #pragma once
  2. #include <ATen/Config.h>
  3. #include <c10/macros/Macros.h>
  4. #include <functional>
  5. #include <string>
  6. namespace at {
  7. inline int64_t divup(int64_t x, int64_t y) {
  8. return (x + y - 1) / y;
  9. }
  10. // Called during new thread initialization
  11. TORCH_API void init_num_threads();
  12. // Sets the number of threads to be used in parallel region
  13. TORCH_API void set_num_threads(int);
  14. // Returns the maximum number of threads that may be used in a parallel region
  15. TORCH_API int get_num_threads();
  16. // Returns the current thread number (starting from 0)
  17. // in the current parallel region, or 0 in the sequential region
  18. TORCH_API int get_thread_num();
  19. // Checks whether the code runs in parallel region
  20. TORCH_API bool in_parallel_region();
  21. namespace internal {
  22. // Initialise num_threads lazily at first parallel call
  23. inline void lazy_init_num_threads() {
  24. thread_local bool init = false;
  25. if (C10_UNLIKELY(!init)) {
  26. at::init_num_threads();
  27. init = true;
  28. }
  29. }
  30. TORCH_API void set_thread_num(int);
  31. class TORCH_API ThreadIdGuard {
  32. public:
  33. ThreadIdGuard(int new_id) : old_id_(at::get_thread_num()) {
  34. set_thread_num(new_id);
  35. }
  36. ~ThreadIdGuard() {
  37. set_thread_num(old_id_);
  38. }
  39. private:
  40. int old_id_;
  41. };
  42. } // namespace internal
  43. /*
  44. parallel_for
  45. begin: index at which to start applying user function
  46. end: index at which to stop applying user function
  47. grain_size: number of elements per chunk. impacts the degree of parallelization
  48. f: user function applied in parallel to the chunks, signature:
  49. void f(int64_t begin, int64_t end)
  50. Warning: parallel_for does NOT copy thread local
  51. states from the current thread to the worker threads.
  52. This means for example that Tensor operations CANNOT be used in the
  53. body of your function, only data pointers.
  54. */
  55. template <class F>
  56. inline void parallel_for(
  57. const int64_t begin,
  58. const int64_t end,
  59. const int64_t grain_size,
  60. const F& f);
  61. /*
  62. parallel_reduce
  63. begin: index at which to start applying reduction
  64. end: index at which to stop applying reduction
  65. grain_size: number of elements per chunk. impacts number of elements in
  66. intermediate results tensor and degree of parallelization.
  67. ident: identity for binary combination function sf. sf(ident, x) needs to return
  68. x.
  69. f: function for reduction over a chunk. f needs to be of signature scalar_t
f(int64_t partial_begin, int64_t partial_end, scalar_t identity)
  71. sf: function to combine two partial results. sf needs to be of signature
  72. scalar_t sf(scalar_t x, scalar_t y)
For example, you might have a tensor of 10000 entries and want to sum together
  74. all the elements. Parallel_reduce with a grain_size of 2500 will then allocate
  75. an intermediate result tensor with 4 elements. Then it will execute the function
  76. "f" you provide and pass the beginning and end index of these chunks, so
  77. 0-2499, 2500-4999, etc. and the combination identity. It will then write out
  78. the result from each of these chunks into the intermediate result tensor. After
  79. that it'll reduce the partial results from each chunk into a single number using
  80. the combination function sf and the identity ident. For a total summation this
  81. would be "+" and 0 respectively. This is similar to tbb's approach [1], where
  82. you need to provide a function to accumulate a subrange, a function to combine
  83. two partial results and an identity.
  84. Warning: parallel_reduce does NOT copy thread local
  85. states from the current thread to the worker threads.
  86. This means for example that Tensor operations CANNOT be used in the
  87. body of your function, only data pointers.
  88. [1] https://software.intel.com/en-us/node/506154
  89. */
  90. template <class scalar_t, class F, class SF>
  91. inline scalar_t parallel_reduce(
  92. const int64_t begin,
  93. const int64_t end,
  94. const int64_t grain_size,
  95. const scalar_t ident,
  96. const F& f,
  97. const SF& sf);
  98. // Returns a detailed string describing parallelization settings
  99. TORCH_API std::string get_parallel_info();
  100. // Sets number of threads used for inter-op parallelism
  101. TORCH_API void set_num_interop_threads(int);
  102. // Returns the number of threads used for inter-op parallelism
  103. TORCH_API int get_num_interop_threads();
  104. // Launches inter-op parallel task
  105. TORCH_API void launch(std::function<void()> func);
  106. namespace internal {
  107. void launch_no_thread_state(std::function<void()> fn);
  108. } // namespace internal
  109. // Launches intra-op parallel task
  110. TORCH_API void intraop_launch(std::function<void()> func);
  111. // Returns number of intra-op threads used by default
  112. TORCH_API int intraop_default_num_threads();
  113. } // namespace at
  114. #if AT_PARALLEL_OPENMP
  115. #include <ATen/ParallelOpenMP.h> // IWYU pragma: keep
  116. #elif AT_PARALLEL_NATIVE
  117. #include <ATen/ParallelNative.h> // IWYU pragma: keep
  118. #elif AT_PARALLEL_NATIVE_TBB
  119. #include <ATen/ParallelNativeTBB.h> // IWYU pragma: keep
  120. #endif
  121. #include <ATen/Parallel-inl.h> // IWYU pragma: keep