// DistributionTemplates.h
  1. #pragma once
  2. #include <ATen/core/Tensor.h>
  3. #include <ATen/Dispatch.h>
  4. #include <ATen/Generator.h>
  5. #include <ATen/ExpandUtils.h>
  6. #include <ATen/Tensor.h>
  7. #include <ATen/MemoryOverlap.h>
  8. #include <ATen/NamedTensorUtils.h>
  9. #include <ATen/native/Resize.h>
  10. #include <ATen/native/TensorIterator.h>
  11. #include <c10/util/Optional.h>
  12. #include <limits>
  13. #include <cmath>
  14. #ifndef AT_PER_OPERATOR_HEADERS
  15. #include <ATen/Functions.h>
  16. #else
  17. #include <ATen/ops/empty_like.h>
  18. #include <ATen/ops/empty.h>
  19. #include <ATen/ops/full.h>
  20. #include <ATen/ops/view_as_real.h>
  21. #endif
  22. namespace at {
  23. namespace native {
  24. namespace templates {
// ==================================================== Random ========================================================

// The purpose of `update_from` and `update_to` is to find the closest valid int64_t number that can be used as actual `from`.
// The current implementation of `random_` uses uint64_t arithmetics and casts the result to the target dtype(scalar_t).
// This casting can result in generating numbers that happen to be greater or equal to `to` value. For instance:
//
//   auto actual = torch::empty({3, 3}, torch::half);
//   actual.random_(0, 65504);
//
// If random's uint64_t arithmetics produces 65503 as a random value after casting to torch::half it becomes 65504
// and violates the requirement that random value must be less than `to`. To resolve this issue `update_from` and `update_to`
// moves `from` to the right and `to` to the left to the next closest value that won't go outside [from, to) after casting to
// the target dtype. For `to` = 65504 it moves left for (1 << (log2(to) - 11 + 1)) = 32 and becomes 65472, which is the previous
// available number for torch::half dtype.
  38. template<typename scalar_t>
  39. int64_t update_from(int64_t from) {
  40. static_assert(
  41. std::is_floating_point<scalar_t>::value ||
  42. std::is_same<scalar_t, at::Half>::value ||
  43. std::is_same<scalar_t, at::BFloat16>::value, "scalar_t must be floating-point type");
  44. const auto from_plus_1 = static_cast<int64_t>(static_cast<scalar_t>(from + 1));
  45. if (from_plus_1 < from) {
  46. int64_t from_ = std::abs(from + 1);
  47. int n = 0;
  48. while (from_ >>= 1) ++n;
  49. // NOLINTNEXTLINE(clang-analyzer-core.UndefinedBinaryOperatorResult)
  50. from = from_plus_1 + (1LL << (n - std::numeric_limits<scalar_t>::digits + 1));
  51. }
  52. return from;
  53. }
  54. template<typename scalar_t>
  55. int64_t update_to(int64_t to) {
  56. static_assert(
  57. std::is_floating_point<scalar_t>::value ||
  58. std::is_same<scalar_t, at::Half>::value ||
  59. std::is_same<scalar_t, at::BFloat16>::value, "scalar_t must be floating-point type");
  60. const auto to_minus_1 = static_cast<int64_t>(static_cast<scalar_t>(to - 1));
  61. if (to_minus_1 >= to) {
  62. int64_t to_ = std::abs(to - 1);
  63. int n = 0;
  64. while (to_ >>= 1) ++n;
  65. // NOLINTNEXTLINE(clang-analyzer-core.UndefinedBinaryOperatorResult)
  66. to = to_minus_1 - (1LL << (n - std::numeric_limits<scalar_t>::digits + 1));
  67. }
  68. return to;
  69. }
  70. template<template<typename> class random_kernel, typename RNG>
  71. at::Tensor& random_impl(at::Tensor& self, c10::optional<Generator> generator) {
  72. auto iter = at::TensorIterator::borrowing_nullary_op(self);
  73. random_kernel<RNG>()(iter, generator);
  74. return self;
  75. }
  76. #define CHECK_OUT_OF_BOUNDS(var, name, min, max, dtype) \
  77. TORCH_CHECK(var >= min && var <= max, name , " is out of bounds for ", dtype); \
  78. #define WARN_OUT_OF_BOUNDS(var, name, digits, dtype) \
  79. if (var < -(1LL << digits) || var > (1LL << digits)) { \
  80. TORCH_WARN(name , " is out of bounds [-(2^", digits, "), 2^", digits, "]. ", \
  81. "Due to precision limitations ", dtype, " can support discrete uniform distribution only within this range. ", \
  82. "This warning will become an error in version 1.7 release, please fix the code in advance"); \
  83. }
  84. static void check_from_to_in_range(int64_t from, int64_t to_inc, caffe2::TypeMeta dtype) {
  85. const auto scalar_type = typeMetaToScalarType(dtype);
  86. if (isFloatingType(scalar_type)) {
  87. AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "check_random_fp_bounds", [&] {
  88. const auto min = static_cast<double>(std::numeric_limits<scalar_t>::lowest());
  89. const auto max = static_cast<double>(std::numeric_limits<scalar_t>::max());
  90. CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype);
  91. CHECK_OUT_OF_BOUNDS(to_inc, "to - 1", min, max, dtype);
  92. constexpr auto digits = std::numeric_limits<scalar_t>::digits;
  93. WARN_OUT_OF_BOUNDS(from, "from", digits, dtype);
  94. WARN_OUT_OF_BOUNDS(to_inc, "to - 1", digits, dtype);
  95. });
  96. } else if (isIntegralType(scalar_type, /*includeBool=*/true)) {
  97. AT_DISPATCH_INTEGRAL_TYPES_AND(at::ScalarType::Bool, scalar_type, "check_random_integral_bounds", [&]() {
  98. const auto min = static_cast<int64_t>(std::numeric_limits<scalar_t>::lowest());
  99. const auto max = static_cast<int64_t>(std::numeric_limits<scalar_t>::max());
  100. CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype);
  101. CHECK_OUT_OF_BOUNDS(to_inc, "to - 1", min, max, dtype);
  102. });
  103. } else {
  104. TORCH_CHECK(false, "check_random_bounds handles only integral, floating-point and boolean types");
  105. }
  106. }
  107. template<template<typename> class random_from_to_kernel, typename RNG>
  108. at::Tensor& random_from_to_impl(at::Tensor& self, int64_t from, c10::optional<int64_t> to_opt, c10::optional<Generator> generator) {
  109. uint64_t range = 0;
  110. auto iter = at::TensorIterator::borrowing_nullary_op(self);
  111. if (to_opt.has_value()) {
  112. // [from, to)
  113. int64_t to = *to_opt;
  114. TORCH_CHECK(from < to, "random_ expects 'from' to be less than 'to', but got from=", from, " >= to=", to);
  115. if (isFloatingType(iter.dtype())) {
  116. AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "random_update_from_to", [&] {
  117. from = update_from<scalar_t>(from);
  118. to = update_to<scalar_t>(to);
  119. TORCH_CHECK(from < to, "random_ expects 'from' casted to dtype to be less than 'to' casted to dtype, but got from=", from, " >= to=", to);
  120. });
  121. }
  122. check_from_to_in_range(from, to - 1, self.dtype());
  123. range = static_cast<uint64_t>(to) - static_cast<uint64_t>(from);
  124. random_from_to_kernel<RNG>()(iter, range, from, generator);
  125. } else if (from != std::numeric_limits<int64_t>::lowest()) {
  126. // [from, std::numeric_limits<int64_t>::max()]
  127. int64_t to_inc = 0;
  128. if (isFloatingType(iter.dtype())) {
  129. AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "random_from_to_range_calc", [&] {
  130. constexpr int64_t scalar_t_max = static_cast<int64_t>(1) << std::numeric_limits<scalar_t>::digits;
  131. to_inc = scalar_t_max > std::numeric_limits<int64_t>::max() ? std::numeric_limits<int64_t>::max() : static_cast<int64_t>(scalar_t_max);
  132. from = update_from<scalar_t>(from);
  133. TORCH_CHECK(from < to_inc, "random_ expects 'from' casted to dtype to be less than or equal to 'to_inc' casted to dtype, but got from=", from, " > to_inc=", to_inc);
  134. });
  135. } else if (isIntegralType(iter.dtype(), /*includeBool=*/true)) {
  136. AT_DISPATCH_INTEGRAL_TYPES_AND(at::ScalarType::Bool, self.scalar_type(), "random_from_to_range_calc", [&] {
  137. if (std::is_same<scalar_t, bool>::value) {
  138. to_inc = static_cast<int64_t>(true);
  139. } else {
  140. to_inc = static_cast<int64_t>(std::numeric_limits<scalar_t>::max());
  141. }
  142. });
  143. } else {
  144. TORCH_CHECK(false, "random_from_to_impl handles only integral, floating-point and boolean types");
  145. }
  146. check_from_to_in_range(from, to_inc, self.dtype());
  147. range = static_cast<uint64_t>(to_inc) - static_cast<uint64_t>(from) + 1;
  148. random_from_to_kernel<RNG>()(iter, range, from, generator);
  149. } else {
  150. // [std::numeric_limits<int64_t>::lowest(), std::numeric_limits<int64_t>::max()]
  151. // range = 2^64
  152. random_from_to_kernel<RNG>()(iter, generator);
  153. }
  154. return self;
  155. }
// ==================================================== Normal ========================================================
  157. #define CHECK_NORMAL_TENSOR_STD(std) \
  158. do { \
  159. TORCH_CHECK( \
  160. !std.is_complex(), \
  161. "normal expects standard deviation to be non-complex"); \
  162. TORCH_CHECK( \
  163. std.numel() == 0 || std.is_meta() || std.min().ge(0).item<bool>(), \
  164. "normal expects all elements of std >= 0.0"); \
  165. } while (0)
  166. #define CHECK_NORMAL_STD(std) \
  167. TORCH_CHECK(std >= 0.0, "normal expects std >= 0.0, but found std ", std);
  168. template<template<typename> class normal_kernel, typename RNG>
  169. Tensor& normal_impl_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
  170. CHECK_NORMAL_STD(std);
  171. if (self.is_complex()) {
  172. auto float_tensor = at::view_as_real(self);
  173. // variance for normal distribution of the real and imaginary values
  174. // is half of the input variance
  175. normal_kernel<RNG>()(float_tensor, mean, std/(std::sqrt(2)), gen);
  176. } else {
  177. normal_kernel<RNG>()(self, mean, std, gen);
  178. }
  179. return self;
  180. }
  181. template<template<typename> class normal_kernel, typename RNG>
  182. Tensor& normal_out_impl(Tensor& output, const Tensor& mean, double std, c10::optional<Generator> gen) {
  183. CHECK_NORMAL_STD(std);
  184. auto std_tensor = at::empty_like(output, MemoryFormat::Contiguous);
  185. auto shape = at::infer_size(mean.sizes(), std_tensor.sizes());
  186. at::native::resize_output(output, shape);
  187. normal_impl_<normal_kernel, RNG>(output, 0, std, gen);
  188. output.add_(mean);
  189. return output;
  190. }
  191. template<template<typename> class normal_kernel, typename RNG>
  192. Tensor& normal_out_impl(Tensor& output, double mean, const Tensor& std, c10::optional<Generator> gen) {
  193. CHECK_NORMAL_TENSOR_STD(std);
  194. auto mean_tensor = at::full({}, mean, output.options());
  195. auto shape = at::infer_size(mean_tensor.sizes(), std.sizes());
  196. at::native::resize_output(output, shape);
  197. normal_impl_<normal_kernel, RNG>(output, 0, 1, gen);
  198. // CUDA NB: addcmul_out copies the tensor to be added into the output.
  199. // The previous function here was addcmul_out(output, mean_tensor, output, std, 1);
  200. // The third argument is not a constant reference and hence the samples in output are overwritten.
  201. // Consequently, the computation performed is mean_tensor + mean_tensor * std instead of mean_tensor + output * std
  202. output.mul_(std).add_(mean_tensor);
  203. return output;
  204. }
  205. template<template<typename> class normal_kernel, typename RNG>
  206. Tensor& normal_out_impl(Tensor& output, const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
  207. CHECK_NORMAL_TENSOR_STD(std);
  208. auto shape = at::infer_size(mean.sizes(), std.sizes());
  209. at::native::resize_output(output, shape);
  210. normal_impl_<normal_kernel, RNG>(output, 0, 1, gen);
  211. // CUDA NB: addcmul_out copies the tensor to be added into the output.
  212. // The previous function here was addcmul_out(output, mean, output, std, 1);
  213. // The third argument is not a constant reference and hence the samples in output are overwritten.
  214. // Consequently, the computation performed is mean + mean * std instead of mean + output * std
  215. output.mul_(std).add_(mean);
  216. return output;
  217. }
  218. template<template<typename> class normal_kernel, typename RNG>
  219. Tensor normal_impl(const Tensor& mean, double std, c10::optional<Generator> gen) {
  220. CHECK_NORMAL_STD(std);
  221. Tensor ret = at::empty_like(mean, MemoryFormat::Contiguous);
  222. normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
  223. return ret;
  224. }
  225. template<template<typename> class normal_kernel, typename RNG>
  226. Tensor normal_impl(double mean, const Tensor& std, c10::optional<Generator> gen) {
  227. CHECK_NORMAL_TENSOR_STD(std);
  228. Tensor ret = at::empty_like(std, MemoryFormat::Contiguous);
  229. normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
  230. return ret;
  231. }
  232. template<template<typename> class normal_kernel, typename RNG>
  233. Tensor normal_impl(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
  234. CHECK_NORMAL_TENSOR_STD(std);
  235. auto shape = at::infer_size(mean.sizes(), std.sizes());
  236. Tensor ret = at::empty(shape, mean.options(), MemoryFormat::Contiguous);
  237. normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
  238. return ret;
  239. }
// ==================================================== Uniform =======================================================
  241. template<template<typename> class uniform_kernel, typename RNG>
  242. at::Tensor& uniform_impl_(at::Tensor& self, double from, double to, c10::optional<Generator> generator) {
  243. if (self.is_complex()) {
  244. auto float_tensor = at::view_as_real(self);
  245. uniform_impl_<uniform_kernel, RNG>(float_tensor, from, to, generator);
  246. } else {
  247. AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "check_uniform_bounds", [&] {
  248. const auto dtype = self.dtype();
  249. const auto min = static_cast<double>(std::numeric_limits<scalar_t>::lowest());
  250. const auto max = static_cast<double>(std::numeric_limits<scalar_t>::max());
  251. CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype);
  252. CHECK_OUT_OF_BOUNDS(to, "to", min, max, dtype);
  253. TORCH_CHECK(from <= to, "uniform_ expects to return a [from, to) range, but found from=", from, " > to=", to);
  254. TORCH_CHECK((to - from) <= std::numeric_limits<scalar_t>::max(),
  255. "uniform_ expects to-from <= std::numeric_limits<", toString(self.scalar_type()),
  256. ">::max(), but found to=", to, " and from=", from,
  257. " which result in to-from to exceed the limit");
  258. from = std::min(std::max(from, min), max);
  259. to = std::max(std::min(to, max), min);
  260. });
  261. auto iter = at::TensorIterator::borrowing_nullary_op(self);
  262. uniform_kernel<RNG>()(iter, from, to, generator);
  263. }
  264. return self;
  265. }
// ================================================== LogNormal =======================================================
  267. template<template<typename> class log_normal_kernel, typename RNG>
  268. at::Tensor& log_normal_impl_(at::Tensor& self, double mean, double std, c10::optional<Generator> gen) {
  269. TORCH_CHECK(std > 0.0, "log_normal_ expects std > 0.0, but found std=", std);
  270. auto iter = TensorIterator::borrowing_nullary_op(self);
  271. log_normal_kernel<RNG>()(iter, mean, std, gen);
  272. return self;
  273. }
// =================================================== Geometric ======================================================
  275. template<template<typename> class geometric_kernel, typename RNG>
  276. Tensor& geometric_impl_(Tensor& self, double p, c10::optional<Generator> gen) {
  277. TORCH_CHECK(0 < p && p < 1, "geometric_ expects p to be in (0, 1), but got p=", p);
  278. auto iter = TensorIterator::borrowing_nullary_op(self);
  279. geometric_kernel<RNG>()(iter, p, gen);
  280. return self;
  281. }
// ================================================== Exponential =====================================================
  283. template<template<typename> class exponential_kernel, typename RNG>
  284. Tensor& exponential_impl_(Tensor& self, double lambda, c10::optional<Generator> gen) {
  285. TORCH_CHECK(lambda > 0.0, "exponential_ expects lambda > 0.0, but found lambda=", lambda);
  286. auto iter = TensorIterator::borrowing_nullary_op(self);
  287. exponential_kernel<RNG>()(iter, lambda, gen);
  288. return self;
  289. }
// ==================================================== Cauchy ========================================================
  291. template<template<typename> class cauchy_kernel, typename RNG>
  292. Tensor& cauchy_impl_(Tensor& self, double median, double sigma, c10::optional<Generator> gen) {
  293. // TODO: instead of variable name 'sigma', use 'gamma' or 'scale'
  294. // the variance, squared sigma, is undefined for cauchy distribution
  295. TORCH_CHECK(sigma > 0.0, "cauchy_ expects sigma > 0.0, but found sigma=", sigma);
  296. TORCH_CHECK(at::isFloatingType(self.scalar_type()), "Cauchy distribution is a continuous probability distribution. dtype must be a floating point but you specified ", self.dtype());
  297. auto iter = TensorIterator::borrowing_nullary_op(self);
  298. cauchy_kernel<RNG>()(iter, median, sigma, gen);
  299. return self;
  300. }
// ==================================================== Bernoulli =====================================================
  302. template<template<typename> class bernoulli_tensor_kernel, typename RNG>
  303. Tensor& bernoulli_impl_(Tensor& self, const Tensor& p_, c10::optional<Generator> gen) {
  304. NoNamesGuard guard;
  305. at::assert_no_internal_overlap(self);
  306. bernoulli_tensor_kernel<RNG>()(self, p_, gen);
  307. return self;
  308. }
  309. template<template<typename> class bernoulli_scalar_kernel, typename RNG>
  310. Tensor& bernoulli_impl_(Tensor& self, double p, c10::optional<Generator> gen) {
  311. TORCH_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p);
  312. at::assert_no_internal_overlap(self);
  313. bernoulli_scalar_kernel<RNG>()(self, p, gen);
  314. return self;
  315. }
  316. template<template<typename> class bernoulli_tensor_kernel, typename RNG>
  317. Tensor& bernoulli_out_impl(Tensor& result, const Tensor& self, c10::optional<Generator> gen) {
  318. // result.resize_as_(self) requires self to have same dtype as result, so we
  319. // use resize_ instead.
  320. // TODO: Fix resize_as_. See pytorch/pytorch#11665.
  321. result.resize_(self.sizes());
  322. bernoulli_impl_<bernoulli_tensor_kernel, RNG>(result, self, gen);
  323. namedinference::propagate_names(result, self);
  324. return result;
  325. }
  326. #undef CHECK_OUT_OF_BOUNDS
  327. #undef WARN_OUT_OF_BOUNDS
  328. }}}