asm.h 7.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153
/*
 * Alpha optimized DSP utils
 * Copyright (c) 2002 Falk Hueffner <falk@debian.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef AVCODEC_ALPHA_ASM_H
#define AVCODEC_ALPHA_ASM_H

#include <inttypes.h>

#include "libavutil/common.h"

/* Branch-prediction hints: map to __builtin_expect on GCC >= 2.96,
 * and to plain pass-through on older compilers. */
#if AV_GCC_VERSION_AT_LEAST(2,96)
# define likely(x) __builtin_expect((x) != 0, 1)
# define unlikely(x) __builtin_expect((x) != 0, 0)
#else
# define likely(x) (x)
# define unlikely(x) (x)
#endif

/* Feature bits as reported by the Alpha "amask" instruction (see the
 * amask macro below): byte/word extension, FP extension, count
 * extension and the motion-video (MVI) extension. */
#define AMASK_BWX (1 << 0)
#define AMASK_FIX (1 << 1)
#define AMASK_CIX (1 << 2)
#define AMASK_MVI (1 << 8)
  36. static inline uint64_t BYTE_VEC(uint64_t x)
  37. {
  38. x |= x << 8;
  39. x |= x << 16;
  40. x |= x << 32;
  41. return x;
  42. }
  43. static inline uint64_t WORD_VEC(uint64_t x)
  44. {
  45. x |= x << 16;
  46. x |= x << 32;
  47. return x;
  48. }
/* Sign-extend the low 16 bits of x (mirrors the Alpha "sextw"
 * instruction: the int16_t cast makes the compiler emit the
 * sign extension). */
#define sextw(x) ((int16_t) (x))

#ifdef __GNUC__
  51. #define ldq(p) \
  52. (((const union { \
  53. uint64_t __l; \
  54. __typeof__(*(p)) __s[sizeof (uint64_t) / sizeof *(p)]; \
  55. } *) (p))->__l)
  56. #define ldl(p) \
  57. (((const union { \
  58. int32_t __l; \
  59. __typeof__(*(p)) __s[sizeof (int32_t) / sizeof *(p)]; \
  60. } *) (p))->__l)
  61. #define stq(l, p) \
  62. do { \
  63. (((union { \
  64. uint64_t __l; \
  65. __typeof__(*(p)) __s[sizeof (uint64_t) / sizeof *(p)]; \
  66. } *) (p))->__l) = l; \
  67. } while (0)
  68. #define stl(l, p) \
  69. do { \
  70. (((union { \
  71. int32_t __l; \
  72. __typeof__(*(p)) __s[sizeof (int32_t) / sizeof *(p)]; \
  73. } *) (p))->__l) = l; \
  74. } while (0)
/* Wrapper for unaligned 64-bit accesses: the packed attribute tells
 * the compiler it may not assume natural alignment for the member. */
struct unaligned_long { uint64_t l; } __attribute__((packed));
/* Aligned load of the quadword containing address p (low three address
 * bits are cleared), mirroring the Alpha "ldq_u" instruction. */
#define ldq_u(p) (*(const uint64_t *) (((uint64_t) (p)) & ~7ul))
/* Unaligned 64-bit load via the packed struct above. */
#define uldq(a) (((const struct unaligned_long *) (a))->l)

#if AV_GCC_VERSION_AT_LEAST(3,3)
/* GCC >= 3.3: use compiler builtins.  The four prefetch variants
 * differ in the rw and locality arguments of __builtin_prefetch;
 * the asm fallbacks below suggest read vs. modify intent and
 * normal vs. evict-next behavior — names presumably encode that
 * (m = modify, en = evict next); confirm against callers. */
#define prefetch(p) __builtin_prefetch((p), 0, 1)
#define prefetch_en(p) __builtin_prefetch((p), 0, 0)
#define prefetch_m(p) __builtin_prefetch((p), 1, 1)
#define prefetch_men(p) __builtin_prefetch((p), 1, 0)
#define cmpbge __builtin_alpha_cmpbge
/* Avoid warnings. */
#define extql(a, b) __builtin_alpha_extql(a, (uint64_t) (b))
#define extwl(a, b) __builtin_alpha_extwl(a, (uint64_t) (b))
#define extqh(a, b) __builtin_alpha_extqh(a, (uint64_t) (b))
#define zap __builtin_alpha_zap
#define zapnot __builtin_alpha_zapnot
#define amask __builtin_alpha_amask
#define implver __builtin_alpha_implver
#define rpcc __builtin_alpha_rpcc
#else
/* Older GCC: emit the instructions directly.  Prefetches are loads
 * into the zero registers ($31 / $f31), which the CPU treats as
 * prefetch hints.  In the operand constraints, "rJ" allows the zero
 * register for a zero operand and "rI" allows a literal. */
#define prefetch(p) __asm__ volatile("ldl $31,%0" : : "m"(*(const char *) (p)) : "memory")
#define prefetch_en(p) __asm__ volatile("ldq $31,%0" : : "m"(*(const char *) (p)) : "memory")
#define prefetch_m(p) __asm__ volatile("lds $f31,%0" : : "m"(*(const char *) (p)) : "memory")
#define prefetch_men(p) __asm__ volatile("ldt $f31,%0" : : "m"(*(const char *) (p)) : "memory")
#define cmpbge(a, b) ({ uint64_t __r; __asm__ ("cmpbge %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
#define extql(a, b) ({ uint64_t __r; __asm__ ("extql %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
#define extwl(a, b) ({ uint64_t __r; __asm__ ("extwl %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
#define extqh(a, b) ({ uint64_t __r; __asm__ ("extqh %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
#define zap(a, b) ({ uint64_t __r; __asm__ ("zap %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
#define zapnot(a, b) ({ uint64_t __r; __asm__ ("zapnot %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
#define amask(a) ({ uint64_t __r; __asm__ ("amask %1,%0" : "=r" (__r) : "rI" (a)); __r; })
#define implver() ({ uint64_t __r; __asm__ ("implver %0" : "=r" (__r)); __r; })
/* rpcc reads the cycle counter; volatile because the result changes
 * between otherwise identical calls. */
#define rpcc() ({ uint64_t __r; __asm__ volatile ("rpcc %0" : "=r" (__r)); __r; })
#endif
/* Write hint: declares the 64-byte block at p about to be entirely
 * overwritten, so the CPU need not fetch its current contents. */
#define wh64(p) __asm__ volatile("wh64 (%0)" : : "r"(p) : "memory")

#if AV_GCC_VERSION_AT_LEAST(3,3) && defined(__alpha_max__)
/* MVI (motion video) instructions via compiler builtins when GCC is
 * new enough and compiling for a CPU with the MAX extension. */
#define minub8 __builtin_alpha_minub8
#define minsb8 __builtin_alpha_minsb8
#define minuw4 __builtin_alpha_minuw4
#define minsw4 __builtin_alpha_minsw4
#define maxub8 __builtin_alpha_maxub8
#define maxsb8 __builtin_alpha_maxsb8
#define maxuw4 __builtin_alpha_maxuw4
#define maxsw4 __builtin_alpha_maxsw4
#define perr __builtin_alpha_perr
#define pklb __builtin_alpha_pklb
#define pkwb __builtin_alpha_pkwb
#define unpkbl __builtin_alpha_unpkbl
#define unpkbw __builtin_alpha_unpkbw
#else
/* Same MVI instructions via inline assembly; the ".arch ev6" prefix
 * makes the assembler accept them even when the base target is an
 * older CPU.  The "%" commutative modifier lets the compiler swap the
 * operands of the symmetric min/max/perr operations. */
#define minub8(a, b) ({ uint64_t __r; __asm__ (".arch ev6; minub8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define minsb8(a, b) ({ uint64_t __r; __asm__ (".arch ev6; minsb8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define minuw4(a, b) ({ uint64_t __r; __asm__ (".arch ev6; minuw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define minsw4(a, b) ({ uint64_t __r; __asm__ (".arch ev6; minsw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define maxub8(a, b) ({ uint64_t __r; __asm__ (".arch ev6; maxub8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define maxsb8(a, b) ({ uint64_t __r; __asm__ (".arch ev6; maxsb8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define maxuw4(a, b) ({ uint64_t __r; __asm__ (".arch ev6; maxuw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define maxsw4(a, b) ({ uint64_t __r; __asm__ (".arch ev6; maxsw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
#define perr(a, b) ({ uint64_t __r; __asm__ (".arch ev6; perr %r1,%r2,%0" : "=r" (__r) : "%rJ" (a), "rJ" (b)); __r; })
#define pklb(a) ({ uint64_t __r; __asm__ (".arch ev6; pklb %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
#define pkwb(a) ({ uint64_t __r; __asm__ (".arch ev6; pkwb %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
#define unpkbl(a) ({ uint64_t __r; __asm__ (".arch ev6; unpkbl %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
#define unpkbw(a) ({ uint64_t __r; __asm__ (".arch ev6; unpkbw %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
#endif

#else
#error "Unknown compiler!"
#endif

#endif /* AVCODEC_ALPHA_ASM_H */