TopKImpl.h

#pragma once

#include <ATen/core/TensorAccessor.h>
#include <ATen/NumericUtils.h>
#include <c10/util/irange.h>

#include <algorithm>
#include <utility>
#include <vector>

namespace at {
namespace native {

#ifdef CPU_CAPABILITY
inline namespace CPU_CAPABILITY {
#else
inline namespace DEFAULT {
#endif
// Core topk loop, shared between CPU and QuantizedCPU
template <typename scalar_t, typename accscalar_t>
void topk_impl_loop(
    const int64_t mode_values_stride,
    const int64_t mode_indices_stride,
    const int64_t tmp_values_stride,
    const int64_t k,
    const int64_t dim_size,
    const bool largest,
    const bool sorted,
    char** data, const int64_t* strides, const int64_t n) {
  using elem_t = std::pair<accscalar_t, int64_t>;
  std::vector<elem_t> queue(dim_size);
  for (const auto i : c10::irange(n)) {
    TensorAccessor<scalar_t, 1> mode_values(
        reinterpret_cast<scalar_t*>(data[0] + i * strides[0]),
        &k, &mode_values_stride);
    TensorAccessor<int64_t, 1> mode_indices(
        reinterpret_cast<int64_t*>(data[1] + i * strides[1]),
        &k, &mode_indices_stride);
    TensorAccessor<scalar_t, 1> tmp_values(
        reinterpret_cast<scalar_t*>(data[2] + i * strides[2]),
        &dim_size, &tmp_values_stride);

    // Length of the reduced dimension; shadows the outer `n` (number of slices)
    // within this iteration.
    auto n = dim_size;
    // Heuristic: partial_sort only pays off when k is much smaller than n.
    auto use_partial_sort = k * 64 <= n;

    // Load the current slice into (value, original index) pairs.
    for (const auto j : c10::irange(n)) {
      queue[j].first = tmp_values[j];
      queue[j].second = j;
    }
    // We want NaN to be sorted as the largest value, for numpy compatibility.
    if (use_partial_sort) {
      if (largest) {
        std::partial_sort(queue.begin(), queue.begin() + k, queue.end(),
          [](const elem_t& x, const elem_t& y) -> bool {
            return ((_isnan<accscalar_t>(x.first) && !_isnan<accscalar_t>(y.first)) || (x.first > y.first));
          });
      } else {
        std::partial_sort(queue.begin(), queue.begin() + k, queue.end(),
          [](const elem_t& x, const elem_t& y) -> bool {
            return ((!_isnan<accscalar_t>(x.first) && _isnan<accscalar_t>(y.first)) || (x.first < y.first));
          });
      }
    } else {
      if (largest) {
        std::nth_element(queue.begin(), queue.begin() + k - 1, queue.end(),
          [](const elem_t& x, const elem_t& y) -> bool {
            return ((_isnan<accscalar_t>(x.first) && !_isnan<accscalar_t>(y.first)) || (x.first > y.first));
          });
        if (sorted) {
          // nth_element already placed the k-th element; only the first k-1 need sorting.
          std::sort(queue.begin(), queue.begin() + k - 1,
            [](const elem_t& x, const elem_t& y) -> bool {
              return ((_isnan<accscalar_t>(x.first) && !_isnan<accscalar_t>(y.first)) || (x.first > y.first));
            });
        }
      } else {
        std::nth_element(queue.begin(), queue.begin() + k - 1, queue.end(),
          [](const elem_t& x, const elem_t& y) -> bool {
            return ((!_isnan<accscalar_t>(x.first) && _isnan<accscalar_t>(y.first)) || (x.first < y.first));
          });
        if (sorted) {
          std::sort(queue.begin(), queue.begin() + k - 1,
            [](const elem_t& x, const elem_t& y) -> bool {
              return ((!_isnan<accscalar_t>(x.first) && _isnan<accscalar_t>(y.first)) || (x.first < y.first));
            });
        }
      }
    }
    // Write the top-k values and their original indices to the outputs.
    for (const auto j : c10::irange(k)) {
      mode_values[j] = queue[j].first;
      mode_indices[j] = queue[j].second;
    }
  }
}
} // namespace CPU_CAPABILITY
} // namespace native
} // namespace at
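
// Usage sketch (illustrative only, not part of the original header):
// topk_impl_loop matches the 1-D TensorIterator loop signature
// (char** data, const int64_t* strides, int64_t n), so a CPU topk kernel
// would typically bind it inside an AT_DISPATCH_* lambda and hand it to
// iter.for_each(). The snippet below is a minimal sketch under that
// assumption; `iter`, `values`, `indices`, `self`, `dim`, `k`, `largest`
// and `sorted` are hypothetical names assumed to be set up by the caller.
//
//   AT_DISPATCH_ALL_TYPES(self.scalar_type(), "topk_cpu", [&] {
//     auto loop = [&](char** data, const int64_t* strides, int64_t n) {
//       topk_impl_loop<scalar_t, scalar_t>(
//           values.strides()[dim], indices.strides()[dim], self.strides()[dim],
//           k, self.sizes()[dim], largest, sorted, data, strides, n);
//     };
//     iter.for_each(loop);
//   });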