mathops.h

/*
 * simple math operations
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef AVCODEC_X86_MATHOPS_H
#define AVCODEC_X86_MATHOPS_H

#include "config.h"

#include "libavutil/common.h"
#include "libavutil/x86/asm.h"

#if HAVE_INLINE_ASM

#if ARCH_X86_32
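
/* MULL(a, b, shift) = (int)(((int64_t)a * b) >> shift): imull leaves
 * the full 64-bit product in edx:eax, and shrdl shifts the selected
 * 32-bit window down into eax. */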
#define MULL MULL
static av_always_inline av_const int MULL(int a, int b, unsigned shift)
{
    int rt, dummy;
    __asm__ (
        "imull %3               \n\t"
        "shrdl %4, %%edx, %%eax \n\t"
        :"=a"(rt), "=d"(dummy)
        :"a"(a), "rm"(b), "ci"((uint8_t)shift)
    );
    return rt;
}
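
/* MULH(a, b) = (int)(((int64_t)a * b) >> 32): the high half of the
 * 64-bit product, which imull leaves in edx. */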
#define MULH MULH
static av_always_inline av_const int MULH(int a, int b)
{
    int rt, dummy;
    __asm__ (
        "imull %3"
        :"=d"(rt), "=a"(dummy)
        :"a"(a), "rm"(b)
    );
    return rt;
}
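
/* MUL64(a, b) = (int64_t)a * b: the "A" constraint binds the 64-bit
 * result to the edx:eax register pair. */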
#define MUL64 MUL64
static av_always_inline av_const int64_t MUL64(int a, int b)
{
    int64_t rt;
    __asm__ (
        "imull %2"
        :"=A"(rt)
        :"a"(a), "rm"(b)
    );
    return rt;
}
#endif /* ARCH_X86_32 */

#if HAVE_I686
/* median of 3 */
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
    int i = b;
    __asm__ (
        "cmp    %2, %1 \n\t"
        "cmovg  %1, %0 \n\t"
        "cmovg  %2, %1 \n\t"
        "cmp    %3, %1 \n\t"
        "cmovl  %3, %1 \n\t"
        "cmp    %1, %0 \n\t"
        "cmovg  %1, %0 \n\t"
        :"+&r"(i), "+&r"(a)
        :"r"(b), "r"(c)
    );
    return i;
}

#if HAVE_6REGS
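/* COPY3_IF_LT(x, y, a, b, c, d): if (y < x) { x = y; a = b; c = d; },
 * done branchlessly with one compare and three conditional moves. */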
#define COPY3_IF_LT(x, y, a, b, c, d)\
__asm__ volatile(\
    "cmpl  %0, %3 \n\t"\
    "cmovl %3, %0 \n\t"\
    "cmovl %4, %1 \n\t"\
    "cmovl %5, %2 \n\t"\
    : "+&r" (x), "+&r" (a), "+r" (c)\
    : "r" (y), "r" (b), "r" (d)\
);
#endif /* HAVE_6REGS */

#endif /* HAVE_I686 */
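
/* MASK_ABS(mask, level): cdq sign-extends eax into edx, so mask
 * becomes level >> 31 (all ones if negative); the xor/sub pair then
 * turns level into its absolute value while the sign mask is kept. */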
#define MASK_ABS(mask, level)                   \
    __asm__ ("cdq                    \n\t"      \
             "xorl %1, %0            \n\t"      \
             "subl %1, %0            \n\t"      \
             : "+a"(level), "=&d"(mask))

// NEG_SSR32(a, s) = a >> (32 - s). x86 masks shift counts to 5 bits,
// so shifting by -s equals shifting by 32 - s; this avoids computing
// the +32 explicitly (gcc should do that optimization itself ...)
#define NEG_SSR32 NEG_SSR32
static inline int32_t NEG_SSR32(int32_t a, int8_t s)
{
    __asm__ ("sarl %1, %0\n\t"
             : "+r" (a)
             : "ic" ((uint8_t)(-s))
            );
    return a;
}
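
/* Logical-shift counterpart: NEG_USR32(a, s) = a >> (32 - s) on an
 * unsigned value, using the same negated-count trick. */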
#define NEG_USR32 NEG_USR32
static inline uint32_t NEG_USR32(uint32_t a, int8_t s)
{
    __asm__ ("shrl %1, %0\n\t"
             : "+r" (a)
             : "ic" ((uint8_t)(-s))
            );
    return a;
}

#endif /* HAVE_INLINE_ASM */

#endif /* AVCODEC_X86_MATHOPS_H */