#pragma once

#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/core/TensorBase.h>
#include <ATen/ceil_div.h>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <c10/macros/Macros.h>
#include <ATen/native/Copy.h>

#include <math.h>
//
// This file contains pointwise operation functions and kernels that
// work on both contiguous and non-contiguous tensor arguments of
// arbitrary dimensionality (up to MAX_CUTORCH_DIMS) without
// copying or temporary storage.
//
/*
  NOTE [ CUDA_tensor_applyN helpers ]

  The following CUDA_tensor_applyN (where N currently can be 1, 2, 3, or 4)
  functions apply a pointwise operator to N tensor(s).

  The calling convention is

  1. The template arguments should be, sequentially,
    - First N typename args specify the scalar types of each of the N tensors.
    - (Optional) `int step` arg specifies the number of elements processed
      together at the same time.
      Default is 1.
    - A usually omitted (i.e., inferred) typename arg specifies the type of the
      function/functor applied on `N * step` values in each iteration of each
      CUDA thread.
  2. The arguments should be, sequentially,
    - N tensors
    - op: a function/functor that processes `N * step` values at the same time.
      - If `step == 1`, it must have signature
        `void(*)(scalar1_t&, scalar2_t&, ..., scalarN_t&)`, where
        `scalar*_t`s are the first N typename template args, and the inputs
        are the `N` values from the `N` tensors retrieved at a common index.
      - Otherwise, it must have signature
          void(*)(int n, scalar1_t&, scalar1_t&, ..., scalar1_t&,  // repeat `step` times
                         scalar2_t&, scalar2_t&, ..., scalar2_t&,  // repeat `step` times
                         ...,
                         scalarN_t&, scalarN_t&, ..., scalarN_t&)  // repeat `step` times
        Different from the `step == 1` case, it processes `N * step` values taken
        from `step` common indices. Moreover, the first input `n` represents the
        number of valid indices (it will always have `0 < n <= step`). It will
        almost always be `step`, but at the boundary we may not have full `step`
        elements and `n` can be a lesser value.

        E.g., if `step == 4` and `N == 2`, `op` could be

          [](int n, scalar1_t &u1, scalar1_t &u2, scalar1_t &u3, scalar1_t &u4,
                    scalar2_t &v1, scalar2_t &v2, scalar2_t &v3, scalar2_t &v4) {
            // Only process u1, ..., un and v1, ..., vn.
            // So if `n == 3`, `u4` and `v4` need not be considered.
          }

      In both cases, the references can actually be const, but at least one of
      them should be non-const in order to write the output.
    - (Optional, but recommended) N TensorArgType args that specify for each
      tensor whether `op` reads AND writes (i.e., TensorArgType::ReadWrite),
      or only reads (i.e., TensorArgType::ReadOnly).
      Default is TensorArgType::ReadWrite for the first tensor, and
      TensorArgType::ReadOnly for the rest.

  E.g.,

  to compute a = b^2 for a and b of the same dtype, we can call

  CUDA_tensor_apply2<scalar, scalar>(
    a, b,
    [] __device__ (scalar &a_val, const scalar &b_val) { a_val = b_val * b_val; }
  );

  to work on 2 values at the same time, we can call

  CUDA_tensor_apply2<scalar1, scalar2, 2>(
    a, b,
    [] __device__ (int n, scalar1 &a_val1, scalar1 &a_val2,
                          const scalar2 &b_val1, const scalar2 &b_val2) {
      // call special vectorized op here, or just do elementwise and enjoy unrolling...
      // if n == 1, only process a_val1 and b_val1
    }
  );
*/
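/*
  A fuller call sketch (illustrative only; `a` and `b` stand for hypothetical
  CUDA float tensors with the same number of elements), passing the optional
  TensorArgType arguments explicitly:

    bool ok = CUDA_tensor_apply2<float, float>(
      a, b,
      [] __device__ (float &a_val, const float &b_val) { a_val = b_val * b_val; },
      TensorArgType::ReadWrite,  // `a` is written
      TensorArgType::ReadOnly);  // `b` is only read

  The helpers return false when the inputs cannot be handled here (e.g.,
  mismatched numel or too many dims), so callers typically fall back to
  another path in that case.
*/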
namespace at {
namespace cuda {

// TODO: combine with TensorArg? So far that's been for debugging, and this is functional...
enum class TensorArgType { ReadWrite, ReadOnly };

namespace {
// Rearrange dimensions for pointwise operations so that strides are in
// decreasing order as much as possible, so that kernels have better memory
// access patterns.
//
// For example, consider a binary operation on two "transposed" 2-dim tensors:
//    sizes:          256 512
//    aInfo->strides:   1 256
//    bInfo->strides:   1 256
//
// Given this, each concurrent memory access inside kernelPointwiseApply2() is
// exactly 256 elements apart, resulting in poor performance.
//
// This function exchanges dimensions so that memory access is contiguous:
//    sizes:          512 256
//    aInfo->strides: 256   1
//    bInfo->strides: 256   1
//
// (Actually, it becomes even better because now collapseDims() can turn each
// input into one contiguous array.)
//
// In general, given M (<=4) TensorInfo's with N dimensions, we can view each
// strides[i] (0 <= i < N) as an M-tuple. Given each pair i < j, we exchange
// strides[i] and [j] if
//    (1) strides[i][k] < strides[j][k] for some k (0 <= k < M)
//        (exchanging them will benefit input #k), and
//    (2) strides[i][k] <= strides[j][k] for all k
//        (exchanging them will not make any input worse).
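//
// Illustrative example (not from the code below): with M = 2 inputs and a
// pair of dims i < j where strides[i] = (1, 1) and strides[j] = (256, 512),
// condition (1) holds (1 < 256 at k = 0) and condition (2) holds
// (1 <= 256 and 1 <= 512), so dims i and j are swapped. If instead
// strides[i] = (1, 512) and strides[j] = (256, 1), swapping would make
// input #1 worse (512 > 1 at k = 1), condition (2) fails, and no swap
// happens.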
template <typename T1, typename IndexType,
          typename T2 = void, typename T3 = void, typename T4 = void>
inline void rearrangeDims(detail::TensorInfo<T1, IndexType>* aInfo,
                          detail::TensorInfo<T2, IndexType>* bInfo = nullptr,
                          detail::TensorInfo<T3, IndexType>* cInfo = nullptr,
                          detail::TensorInfo<T4, IndexType>* dInfo = nullptr) {
  int numInfos = 1;
  int dims = aInfo->dims;
  IndexType *sizes[4] = { aInfo->sizes, };
  IndexType *strides[4] = { aInfo->strides, };

  if (bInfo != nullptr) {
    ++numInfos;
    if (bInfo->dims != dims) return;
    sizes[1] = bInfo->sizes;
    strides[1] = bInfo->strides;
  }

  if (cInfo != nullptr) {
    ++numInfos;
    if (cInfo->dims != dims) return;
    sizes[2] = cInfo->sizes;
    strides[2] = cInfo->strides;
  }

  if (dInfo != nullptr) {
    ++numInfos;
    if (dInfo->dims != dims) return;
    sizes[3] = dInfo->sizes;
    strides[3] = dInfo->strides;
  }

  // Bail out if sizes do not match: we are using "deprecated pointwise
  // behavior" among tensors of different shapes but same number of elements.
  for (int i = 1; i < numInfos; ++i) {
    for (int j = 0; j < dims; ++j) {
      if (sizes[i][j] != sizes[0][j]) return;
    }
  }

  for (int i = 0; i < dims - 1; ++i) {
    // No need to consider dimensions of size 1.
    if (sizes[0][i] == 1) continue;

    for (int j = i + 1; j < dims; ++j) {
      if (sizes[0][j] == 1) continue;

      // Compare the relative sizes of strides between dim #i and dim #j.
      bool hasIncreasingStrides = false;
      bool hasDecreasingStrides = false;

      for (int k = 0; k < numInfos; k++) {
        IndexType stride_i = strides[k][i];
        IndexType stride_j = strides[k][j];
        if (stride_i < stride_j) {
          hasIncreasingStrides = true;
        } else if (stride_i > stride_j) {
          hasDecreasingStrides = true;
        }
      }

      if (hasIncreasingStrides && !hasDecreasingStrides) {
        for (int k = 0; k < numInfos; k++) {
          IndexType size = sizes[k][i];
          sizes[k][i] = sizes[k][j];
          sizes[k][j] = size;

          IndexType stride = strides[k][i];
          strides[k][i] = strides[k][j];
          strides[k][j] = stride;
        }
      }
    }
  }
}
// The `remaining_steps` argument is used to support Op that operates on
// multiple elements at the same time. Generally, the strategy of ApplyOpN is to
//  1. Initialize `remaining_steps = step`, where `step` is the template arg of
//     CUDA_tensor_applyN helpers. The input arg `n` to `apply()` represents the
//     number of elements in bound for this call. It will almost always equal
//     `step` except at boundaries.
//  2. If `remaining_steps > 0`, convert the current linearIndex to an offset
//     (if in bound), and recursively call `ApplyOpN` with `remaining_steps - 1`.
//  3. At `remaining_steps = 0`,
//       if `step = 1`, call `op(tensor1_val, tensor2_val, ...)`;
//       if `step > 1`, call `op(n, tensor1_val1, tensor1_val2, ..., tensor1_valstep,
//                                  tensor2_val1, tensor2_val2, ..., tensor2_valstep,
//                                  ...,
//                                  tensorN_val1, tensorN_val2, ..., tensorN_valstep);`
//
// See NOTE [ CUDA_tensor_applyN helpers ] above for how Op may look like.
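//
// Illustrative trace (not part of the code): for N = 1 and step = 2, a call
// ApplyOp1<..., 2>::apply(a, op, n, i) computes aOffset0 from linearIndex i,
// recurses into ApplyOp1<..., 1>::apply(a, op, n, i + 1, aOffset0), which
// computes aOffset1 from i + 1 (only if 1 < n), and finally the
// remaining_steps = 0 specialization runs op(n, a.data[aOffset0], a.data[aOffset1]).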
template <typename Op,
          typename scalar,
          typename IndexType,
          int ADims,
          int remaining_steps,
          typename... Offsets>
struct ApplyOp1 {
__device__ __forceinline__
static void apply(detail::TensorInfo<scalar, IndexType> &a, const Op &op, int n,
                  IndexType linearIndex, Offsets... aOffsets) {
  // Convert `linearIndex` into an offset of `a`
  const IndexType aOffset = sizeof...(Offsets) < n ?
    detail::IndexToOffset<scalar, IndexType, ADims>::get(linearIndex, a) : 0;

  ApplyOp1<Op, scalar, IndexType, ADims, remaining_steps - 1, const IndexType, Offsets...>::apply(
    a, op, n, linearIndex + 1, aOffsets..., aOffset
  );
}
};
// Specialize the `step=1` case (i.e., `remaining_steps=0` and `len(Offsets)=1`).
// We don't need to pass in how many elements need to be processed in this case.
template <typename Op,
          typename scalar,
          typename IndexType,
          int ADims,
          typename Offset>
struct ApplyOp1<Op, scalar, IndexType, ADims, 0, Offset> {
__device__ __forceinline__
static void apply(detail::TensorInfo<scalar, IndexType> &a, const Op &op,
                  int n, IndexType linearIndex, Offset offset) {
  op(a.data[offset]);
}
};
template <typename Op,
          typename scalar,
          typename IndexType,
          int ADims,
          typename... Offsets>
struct ApplyOp1<Op, scalar, IndexType, ADims, 0, Offsets...> {
__device__ __forceinline__
static void apply(detail::TensorInfo<scalar, IndexType> &a, const Op &op, int n,
                  IndexType linearIndex, Offsets... offsets) {
  op(n, a.data[offsets]...);
}
};
template <typename Op,
          typename scalar,
          typename IndexType,
          int ADims,
          int step>
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
C10_LAUNCH_BOUNDS_2(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
#endif
__global__ void kernelPointwiseApply1(detail::TensorInfo<scalar, IndexType> a,
                                      IndexType totalElements, const Op op) {
  for (IndexType linearIndex = (blockIdx.x * blockDim.x + threadIdx.x) * step;
       linearIndex < totalElements;
       linearIndex += gridDim.x * blockDim.x * step) {
    ApplyOp1<Op, scalar, IndexType, ADims, step>::apply(
      a, op, ::min(step, static_cast<int>(totalElements - linearIndex)), linearIndex);
  }
}
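
// Illustrative index mapping (not part of the code): with step = 2, a grid of
// G blocks and B threads per block, global thread t first handles linear
// indices 2*t and 2*t + 1, then jumps ahead by G * B * 2 and repeats. The
// ::min(...) clamp passes an n smaller than step to op only for the final,
// partial chunk of elements.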
template <typename Op,
          typename scalar1,
          typename scalar2,
          typename IndexType,
          int ADims,
          int BDims,
          int remaining_steps,
          typename... Offsets>
struct ApplyOp2 {
__device__ __forceinline__
static void apply(detail::TensorInfo<scalar1, IndexType> &a,
                  detail::TensorInfo<scalar2, IndexType> &b,
                  const Op &op, int64_t n, IndexType linearIndex,
                  Offsets... aOffsets, Offsets... bOffsets) {
  // Convert `linearIndex` into an offset of `a`
  const IndexType aOffset = static_cast<int64_t>(sizeof...(Offsets)) < n ?
    detail::IndexToOffset<scalar1, IndexType, ADims>::get(linearIndex, a) : 0;

  // Convert `linearIndex` into an offset of `b`
  const IndexType bOffset = static_cast<int64_t>(sizeof...(Offsets)) < n ?
    detail::IndexToOffset<scalar2, IndexType, BDims>::get(linearIndex, b) : 0;

  ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, remaining_steps - 1, const IndexType, Offsets...>::apply(
    a, b, op, n, linearIndex + 1, aOffsets..., aOffset, bOffsets..., bOffset
  );
}
};
// Specialize the `step=1` case (i.e., `remaining_steps=0` and `len(Offsets)=1`).
// We don't need to pass in how many elements need to be processed in this case.
template <typename Op,
          typename scalar1,
          typename scalar2,
          typename IndexType,
          int ADims,
          int BDims,
          typename Offset>
struct ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, 0, Offset> {
__device__ __forceinline__
static void apply(detail::TensorInfo<scalar1, IndexType> &a,
                  detail::TensorInfo<scalar2, IndexType> &b,
                  const Op &op, int /*n*/, IndexType /*linearIndex*/,
                  Offset aOffset, Offset bOffset) {
  op(a.data[aOffset], b.data[bOffset]);
}
};
template <typename Op,
          typename scalar1,
          typename scalar2,
          typename IndexType,
          int ADims,
          int BDims,
          typename... Offsets>
struct ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, 0, Offsets...> {
__device__ __forceinline__
static void apply(detail::TensorInfo<scalar1, IndexType> &a,
                  detail::TensorInfo<scalar2, IndexType> &b,
                  const Op &op, int n, IndexType linearIndex,
                  Offsets... aOffsets, Offsets... bOffsets) {
  op(n, a.data[aOffsets]..., b.data[bOffsets]...);
}
};
template <typename Op,
          typename scalar1,
          typename scalar2,
          typename IndexType,
          int ADims, int BDims,
          int step,
          int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK,
          int min_blocks_per_sm=AT_APPLY_BLOCKS_PER_SM>
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
C10_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm)
#endif
__global__ void
kernelPointwiseApply2(detail::TensorInfo<scalar1, IndexType> a,
                      detail::TensorInfo<scalar2, IndexType> b,
                      IndexType totalElements,
                      const Op op) {
  for (IndexType linearIndex = (blockIdx.x * blockDim.x + threadIdx.x) * step;
       linearIndex < totalElements;
       linearIndex += gridDim.x * blockDim.x * step) {
    ApplyOp2<Op, scalar1, scalar2, IndexType, ADims, BDims, step>::apply(
      a, b, op, ::min(step, static_cast<int>(totalElements - linearIndex)),
      linearIndex);
  }
}

} // anonymous namespace
template <typename scalar1, typename scalar2, int step, typename Op,
          int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK,
          int min_blocks_per_sm=AT_APPLY_BLOCKS_PER_SM>
inline bool CUDA_tensor_apply2(at::TensorBase a,
                               at::TensorBase b,
                               const Op op,
                               TensorArgType aType = TensorArgType::ReadWrite,
                               TensorArgType bType = TensorArgType::ReadOnly) {
  TORCH_CHECK(a.device().is_cuda() && b.device().is_cuda(),
              "CUDA_tensor_apply2: Expected tensors to have CUDA DeviceType, but got "
              "tensors with type ", a.device().type(), " and ", b.device().type());
  int64_t totalElements = a.numel();

  if (totalElements != b.numel()) {
    return false;
  }

  if (a.dim() > MAX_TENSORINFO_DIMS ||
      b.dim() > MAX_TENSORINFO_DIMS) {
    return false;
  }

  if (a.numel() == 0) {
    // Empty tensor; do nothing
    return true;
  }

  const dim3 block = getApplyBlock(max_threads_per_block);

  dim3 grid;
  int64_t curDevice = current_device();
  if (curDevice == -1) return false;
  if (!getApplyGrid<step>(totalElements, grid, curDevice, max_threads_per_block)) {
    return false;
  }

  /*
  Expands readable/writable tensors whose indices may be "overlapped."
  This ensures that each element of the tensor is operated on once and only
  once.
  */
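  // Illustrative example (not from the code): a tensor produced by expand()
  // has stride 0 in the expanded dimension, so many linear indices map to the
  // same memory location; without the contiguous copy below, a ReadWrite `op`
  // could be applied to that element several times.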
  TensorBase oldA;
  TensorBase oldB;

  if (aType == TensorArgType::ReadWrite && detail::maybeOverlappingIndices(a)) {
    // Must perform in contiguous space
    oldA = std::exchange(a, a.contiguous());
  }
  if (bType == TensorArgType::ReadWrite && detail::maybeOverlappingIndices(b)) {
    // Must perform in contiguous space
    oldB = std::exchange(b, b.contiguous());
  }

  // It is possible that the tensor dimensions are able to be collapsed,
  // and thus we can reduce the actual code complexity of the copy by
  // exploiting this knowledge statically, since the div/mod is the
  // most expensive part of the operation, more so than memory accesses.
  // For instance, when copying a non-contiguous to a contiguous tensor
  // (or vice versa), the contiguous tensor can be collapsed to one
  // dimension, and the loop to translate the linear index to the array
  // index can be similarly collapsed. That is what this unrolling is for.
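  //
  // Illustrative example (not from the code): for a contiguous 8x4x2 tensor,
  // collapseDims() yields a single dim of size 64 with stride 1, so the
  // A == 1 / B == 1 instantiations below apply; a genuinely non-contiguous
  // operand may keep more dims and fall through to the generic -1 case.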
#define HANDLE_CASE(TYPE, A, B)                                     \
  kernelPointwiseApply2<Op,                                         \
                        scalar1,                                    \
                        scalar2,                                    \
                        TYPE, A, B, step,                           \
                        max_threads_per_block,                      \
                        min_blocks_per_sm>                          \
   <<<grid, block, 0, at::cuda::getCurrentCUDAStream(curDevice)>>>( \
       aInfo, bInfo, static_cast<TYPE>(totalElements), op);         \
  C10_CUDA_KERNEL_LAUNCH_CHECK();

#define HANDLE_B_CASE(TYPE, A, B) {         \
  switch (B) {                              \
    case 1:                                 \
      HANDLE_CASE(TYPE, A, 1);              \
      break;                                \
    case 2:                                 \
      HANDLE_CASE(TYPE, A, 2);              \
      break;                                \
    default:                                \
      HANDLE_CASE(TYPE, A, -1);             \
      break;                                \
  }                                         \
}

#define HANDLE_A_CASE(TYPE, A, B) {         \
  switch (A) {                              \
    case 1:                                 \
      HANDLE_B_CASE(TYPE, 1, B);            \
      break;                                \
    case 2:                                 \
      HANDLE_B_CASE(TYPE, 2, B);            \
      break;                                \
    default:                                \
      HANDLE_B_CASE(TYPE, -1, B);           \
      break;                                \
  }                                         \
}
  if (detail::canUse32BitIndexMath(a) &&
      detail::canUse32BitIndexMath(b)) {
    detail::TensorInfo<scalar1, unsigned int> aInfo =
      detail::getTensorInfo<scalar1, unsigned int>(a);

    detail::TensorInfo<scalar2, unsigned int> bInfo =
      detail::getTensorInfo<scalar2, unsigned int>(b);
    rearrangeDims(&aInfo, &bInfo);
    aInfo.collapseDims();
    bInfo.collapseDims();

    HANDLE_A_CASE(unsigned int, aInfo.dims, bInfo.dims);
  } else {
    detail::TensorInfo<scalar1, uint64_t> aInfo =
      detail::getTensorInfo<scalar1, uint64_t>(a);

    detail::TensorInfo<scalar2, uint64_t> bInfo =
      detail::getTensorInfo<scalar2, uint64_t>(b);
    rearrangeDims(&aInfo, &bInfo);
    aInfo.collapseDims();
    bInfo.collapseDims();

    /*
    Only instantiates the all-1D special case and the fallback all-nD case for
    large (64-bit indexed) tensors to reduce compilation time.
    */
    if (aInfo.dims == 1 && bInfo.dims == 1) {
      HANDLE_CASE(uint64_t, 1, 1);
    } else {
      HANDLE_CASE(uint64_t, -1, -1);
    }
  }
#undef HANDLE_CASE
#undef HANDLE_B_CASE
#undef HANDLE_A_CASE

  if (oldA.defined()) {
    at::native::copy_ignoring_overlaps(oldA, a);
  }

  if (oldB.defined()) {
    at::native::copy_ignoring_overlaps(oldB, b);
  }

  return true;
}

/* Provides default step = 1 to CUDA_tensor_apply2. */
template <typename scalar1, typename scalar2, typename Op,
          int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK,
          int min_blocks_per_sm=AT_APPLY_BLOCKS_PER_SM>
inline bool CUDA_tensor_apply2(const at::TensorBase &a,
                               const at::TensorBase &b,
                               const Op op,
                               TensorArgType aType = TensorArgType::ReadWrite,
                               TensorArgType bType = TensorArgType::ReadOnly) {
  return CUDA_tensor_apply2<scalar1, scalar2, 1, Op,
                            max_threads_per_block, min_blocks_per_sm>(a, b, op, aType, bType);
}
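
// Usage sketch (illustrative only; `self` and `other` are hypothetical CUDA
// tensors of equal numel, and AT_DISPATCH_FLOATING_TYPES comes from
// ATen/Dispatch.h, which the caller would include):
//
//   AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "my_mul_", [&] {
//     at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
//         self, other,
//         [] __device__ (scalar_t &x, const scalar_t &y) { x *= y; });
//   });
//
// The default TensorArgType values apply here: the first tensor is treated as
// ReadWrite and the second as ReadOnly.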
} // namespace cuda
} // namespace at