// Lerp.h — shared linear-interpolation helpers and dispatch declarations.
#pragma once

#include <cmath>

#include <ATen/OpMathType.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/DispatchStub.h>
#include <c10/core/Scalar.h>
  6. namespace at {
  7. namespace native {
  8. template <typename scalar_t>
  9. C10_HOST_DEVICE C10_ALWAYS_INLINE bool is_lerp_weight_small(scalar_t weight) {
  10. return std::abs(weight) < scalar_t(0.5);
  11. }
  12. template <typename scalar_t>
  13. C10_HOST_DEVICE C10_ALWAYS_INLINE bool is_lerp_weight_small(c10::complex<scalar_t> weight) {
  14. // Avoid the sqrt in abs(weight)
  15. return (weight.real() * weight.real() + weight.imag() * weight.imag()) < scalar_t(0.25);
  16. }
  17. template <typename scalar_t, typename weight_t>
  18. C10_HOST_DEVICE C10_ALWAYS_INLINE scalar_t lerp(scalar_t self_, scalar_t end_, weight_t weight_) {
  19. using opmath_t = at::opmath_type<scalar_t>;
  20. using opmath_weight_t = at::opmath_type<weight_t>;
  21. opmath_t self = self_;
  22. opmath_t end = end_;
  23. opmath_weight_t weight = weight_;
  24. // Conditional for better numeric. This has been discussed in
  25. // https://github.com/pytorch/pytorch/pull/18871
  26. return is_lerp_weight_small(weight)
  27. ? self + weight * (end - self)
  28. : end - (end - self) * (opmath_t(1) - weight);
  29. }
  30. using lerp_fn_scalar = void (*)(
  31. at::TensorIteratorBase& iter,
  32. const Scalar& weight);
  33. using lerp_fn_tensor = void (*)(
  34. at::TensorIteratorBase& iter);
  35. DECLARE_DISPATCH(lerp_fn_scalar, lerp_kernel_scalar_weight);
  36. DECLARE_DISPATCH(lerp_fn_tensor, lerp_kernel_tensor_weight);
  37. } // namespace native
  38. } // namespace at