
#ifndef AVCODEC_PPC_FFT_VSX_H
#define AVCODEC_PPC_FFT_VSX_H
/*
 * FFT transform, optimized with VSX built-in functions
 * Copyright (c) 2014 Rong Yan
 * Copyright (c) 2009 Loren Merritt
 *
 * This algorithm (though not any of the implementation details) is
 * based on libdjbfft by D. J. Bernstein, and fft_altivec_s.S.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "libavutil/cpu.h"
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/fft.h"
#include "libavcodec/fft-internal.h"

#if HAVE_VSX
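/* Entry points, defined outside this header: the "interleave" variant works
 * on the usual interleaved re/im FFTComplex layout, while ff_fft_calc_vsx()
 * matches the non-interleaved kernels further below. */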
void ff_fft_calc_interleave_vsx(FFTContext *s, FFTComplex *z);
void ff_fft_calc_vsx(FFTContext *s, FFTComplex *z);
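
/* Byte offsets of 2, 4, ..., 14 FFTComplex elements, used as displacements
 * for vec_ld()/vec_st() below. */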
#define byte_2complex  (2*sizeof(FFTComplex))
#define byte_4complex  (4*sizeof(FFTComplex))
#define byte_6complex  (6*sizeof(FFTComplex))
#define byte_8complex  (8*sizeof(FFTComplex))
#define byte_10complex (10*sizeof(FFTComplex))
#define byte_12complex (12*sizeof(FFTComplex))
#define byte_14complex (14*sizeof(FFTComplex))
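
/* Split-radix combination pass over interleaved data: merges the partial
 * transforms at z, z+o1, z+o2 and z+o3 using the twiddle factors in wre/wim,
 * working on vector-sized chunks of interleaved re/im pairs. */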
inline static void pass_vsx_interleave(FFTComplex *z, const FFTSample *wre, unsigned int n)
{
    int o1 = n<<1;
    int o2 = n<<2;
    int o3 = o1+o2;
    int i1, i2, i3;
    FFTSample* out = (FFTSample*)z;
    const FFTSample *wim = wre+o1;
    vec_f vz0, vzo1, vzo2, vzo3;
    vec_f x0, x1, x2, x3;
    vec_f x4, x5, x6, x7;
    vec_f x8, x9, x10, x11;
    vec_f x12, x13, x14, x15;
    vec_f x16, x17, x18, x19;
    vec_f x20, x21, x22, x23;
    vec_f vz0plus1, vzo1plus1, vzo2plus1, vzo3plus1;
    vec_f y0, y1, y2, y3;
    vec_f y4, y5, y8, y9;
    vec_f y10, y13, y14, y15;
    vec_f y16, y17, y18, y19;
    vec_f y20, y21, y22, y23;
    vec_f wr1, wi1, wr0, wi0;
    vec_f wr2, wi2, wr3, wi3;
    vec_f xmulwi0, xmulwi1, ymulwi2, ymulwi3;

    n = n-2;
    i1 = o1*sizeof(FFTComplex);
    i2 = o2*sizeof(FFTComplex);
    i3 = o3*sizeof(FFTComplex);

    vzo2 = vec_ld(i2, &(out[0]));  // zo2.r  zo2.i  z(o2+1).r  z(o2+1).i
    vzo2plus1 = vec_ld(i2+16, &(out[0]));
    vzo3 = vec_ld(i3, &(out[0]));  // zo3.r  zo3.i  z(o3+1).r  z(o3+1).i
    vzo3plus1 = vec_ld(i3+16, &(out[0]));
    vz0 = vec_ld(0, &(out[0]));    // z0.r  z0.i  z1.r  z1.i
    vz0plus1 = vec_ld(16, &(out[0]));
    vzo1 = vec_ld(i1, &(out[0]));  // zo1.r  zo1.i  z(o1+1).r  z(o1+1).i
    vzo1plus1 = vec_ld(i1+16, &(out[0]));

    x0 = vec_add(vzo2, vzo3);
    x1 = vec_sub(vzo2, vzo3);
    y0 = vec_add(vzo2plus1, vzo3plus1);
    y1 = vec_sub(vzo2plus1, vzo3plus1);

    wr1 = vec_splats(wre[1]);
    wi1 = vec_splats(wim[-1]);
    wi2 = vec_splats(wim[-2]);
    wi3 = vec_splats(wim[-3]);
    wr2 = vec_splats(wre[2]);
    wr3 = vec_splats(wre[3]);

    x2 = vec_perm(x0, x1, vcprm(2,s2,3,s3));
    x3 = vec_perm(x0, x1, vcprm(s3,3,s2,2));

    y4 = vec_perm(y0, y1, vcprm(s1,1,s0,0));
    y5 = vec_perm(y0, y1, vcprm(s3,3,s2,2));
    y2 = vec_perm(y0, y1, vcprm(0,s0,1,s1));
    y3 = vec_perm(y0, y1, vcprm(2,s2,3,s3));

    ymulwi2 = vec_mul(y4, wi2);
    ymulwi3 = vec_mul(y5, wi3);
    x4 = vec_mul(x2, wr1);
    x5 = vec_mul(x3, wi1);
    y8 = vec_madd(y2, wr2, ymulwi2);
    y9 = vec_msub(y2, wr2, ymulwi2);
    x6 = vec_add(x4, x5);
    x7 = vec_sub(x4, x5);
    y13 = vec_madd(y3, wr3, ymulwi3);
    y14 = vec_msub(y3, wr3, ymulwi3);

    x8 = vec_perm(x6, x7, vcprm(0,1,s2,s3));
    y10 = vec_perm(y8, y9, vcprm(0,1,s2,s3));
    y15 = vec_perm(y13, y14, vcprm(0,1,s2,s3));

    x9 = vec_perm(x0, x8, vcprm(0,1,s0,s2));
    x10 = vec_perm(x1, x8, vcprm(1,0,s3,s1));

    y16 = vec_perm(y10, y15, vcprm(0,2,s0,s2));
    y17 = vec_perm(y10, y15, vcprm(3,1,s3,s1));

    x11 = vec_add(vz0, x9);
    x12 = vec_sub(vz0, x9);
    x13 = vec_add(vzo1, x10);
    x14 = vec_sub(vzo1, x10);

    y18 = vec_add(vz0plus1, y16);
    y19 = vec_sub(vz0plus1, y16);
    y20 = vec_add(vzo1plus1, y17);
    y21 = vec_sub(vzo1plus1, y17);

    x15 = vec_perm(x13, x14, vcprm(0,s1,2,s3));
    x16 = vec_perm(x13, x14, vcprm(s0,1,s2,3));
    y22 = vec_perm(y20, y21, vcprm(0,s1,2,s3));
    y23 = vec_perm(y20, y21, vcprm(s0,1,s2,3));

    vec_st(x11, 0, &(out[0]));
    vec_st(y18, 16, &(out[0]));
    vec_st(x15, i1, &(out[0]));
    vec_st(y22, i1+16, &(out[0]));
    vec_st(x12, i2, &(out[0]));
    vec_st(y19, i2+16, &(out[0]));
    vec_st(x16, i3, &(out[0]));
    vec_st(y23, i3+16, &(out[0]));

    do {
        out += 8;
        wre += 4;
        wim -= 4;

        wr0 = vec_splats(wre[0]);
        wr1 = vec_splats(wre[1]);
        wi0 = vec_splats(wim[0]);
        wi1 = vec_splats(wim[-1]);

        wr2 = vec_splats(wre[2]);
        wr3 = vec_splats(wre[3]);
        wi2 = vec_splats(wim[-2]);
        wi3 = vec_splats(wim[-3]);

        vzo2 = vec_ld(i2, &(out[0]));  // zo2.r  zo2.i  z(o2+1).r  z(o2+1).i
        vzo2plus1 = vec_ld(i2+16, &(out[0]));
        vzo3 = vec_ld(i3, &(out[0]));  // zo3.r  zo3.i  z(o3+1).r  z(o3+1).i
        vzo3plus1 = vec_ld(i3+16, &(out[0]));
        vz0 = vec_ld(0, &(out[0]));    // z0.r  z0.i  z1.r  z1.i
        vz0plus1 = vec_ld(16, &(out[0]));
        vzo1 = vec_ld(i1, &(out[0]));  // zo1.r  zo1.i  z(o1+1).r  z(o1+1).i
        vzo1plus1 = vec_ld(i1+16, &(out[0]));

        x0 = vec_add(vzo2, vzo3);
        x1 = vec_sub(vzo2, vzo3);
        y0 = vec_add(vzo2plus1, vzo3plus1);
        y1 = vec_sub(vzo2plus1, vzo3plus1);

        x4 = vec_perm(x0, x1, vcprm(s1,1,s0,0));
        x5 = vec_perm(x0, x1, vcprm(s3,3,s2,2));
        x2 = vec_perm(x0, x1, vcprm(0,s0,1,s1));
        x3 = vec_perm(x0, x1, vcprm(2,s2,3,s3));

        y2 = vec_perm(y0, y1, vcprm(0,s0,1,s1));
        y3 = vec_perm(y0, y1, vcprm(2,s2,3,s3));
        xmulwi0 = vec_mul(x4, wi0);
        xmulwi1 = vec_mul(x5, wi1);

        y4 = vec_perm(y0, y1, vcprm(s1,1,s0,0));
        y5 = vec_perm(y0, y1, vcprm(s3,3,s2,2));

        x8 = vec_madd(x2, wr0, xmulwi0);
        x9 = vec_msub(x2, wr0, xmulwi0);
        ymulwi2 = vec_mul(y4, wi2);
        ymulwi3 = vec_mul(y5, wi3);

        x13 = vec_madd(x3, wr1, xmulwi1);
        x14 = vec_msub(x3, wr1, xmulwi1);

        y8 = vec_madd(y2, wr2, ymulwi2);
        y9 = vec_msub(y2, wr2, ymulwi2);
        y13 = vec_madd(y3, wr3, ymulwi3);
        y14 = vec_msub(y3, wr3, ymulwi3);

        x10 = vec_perm(x8, x9, vcprm(0,1,s2,s3));
        x15 = vec_perm(x13, x14, vcprm(0,1,s2,s3));

        y10 = vec_perm(y8, y9, vcprm(0,1,s2,s3));
        y15 = vec_perm(y13, y14, vcprm(0,1,s2,s3));

        x16 = vec_perm(x10, x15, vcprm(0,2,s0,s2));
        x17 = vec_perm(x10, x15, vcprm(3,1,s3,s1));

        y16 = vec_perm(y10, y15, vcprm(0,2,s0,s2));
        y17 = vec_perm(y10, y15, vcprm(3,1,s3,s1));

        x18 = vec_add(vz0, x16);
        x19 = vec_sub(vz0, x16);
        x20 = vec_add(vzo1, x17);
        x21 = vec_sub(vzo1, x17);

        y18 = vec_add(vz0plus1, y16);
        y19 = vec_sub(vz0plus1, y16);
        y20 = vec_add(vzo1plus1, y17);
        y21 = vec_sub(vzo1plus1, y17);

        x22 = vec_perm(x20, x21, vcprm(0,s1,2,s3));
        x23 = vec_perm(x20, x21, vcprm(s0,1,s2,3));

        y22 = vec_perm(y20, y21, vcprm(0,s1,2,s3));
        y23 = vec_perm(y20, y21, vcprm(s0,1,s2,3));

        vec_st(x18, 0, &(out[0]));
        vec_st(y18, 16, &(out[0]));
        vec_st(x22, i1, &(out[0]));
        vec_st(y22, i1+16, &(out[0]));
        vec_st(x19, i2, &(out[0]));
        vec_st(y19, i2+16, &(out[0]));
        vec_st(x23, i3, &(out[0]));
        vec_st(y23, i3+16, &(out[0]));
    } while (n-=2);
}
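
/* 2-point FFT: a single scalar butterfly, too small to benefit from
 * vectorisation. */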
inline static void fft2_vsx_interleave(FFTComplex *z)
{
    FFTSample r1, i1;

    r1 = z[0].re - z[1].re;
    z[0].re += z[1].re;
    z[1].re = r1;

    i1 = z[0].im - z[1].im;
    z[0].im += z[1].im;
    z[1].im = i1;
}
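
/* 4-point FFT on interleaved data: two butterfly stages expressed as vector
 * permutes plus adds/subs over a single block of four complex values. */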
inline static void fft4_vsx_interleave(FFTComplex *z)
{
    vec_f a, b, c, d;
    float* out = (float*)z;
    a = vec_ld(0, &(out[0]));
    b = vec_ld(byte_2complex, &(out[0]));

    c = vec_perm(a, b, vcprm(0,1,s2,s1));
    d = vec_perm(a, b, vcprm(2,3,s0,s3));
    a = vec_add(c, d);
    b = vec_sub(c, d);

    c = vec_perm(a, b, vcprm(0,1,s0,s1));
    d = vec_perm(a, b, vcprm(2,3,s3,s2));
    a = vec_add(c, d);
    b = vec_sub(c, d);

    vec_st(a, 0, &(out[0]));
    vec_st(b, byte_2complex, &(out[0]));
}
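
/* 8-point FFT on interleaved data; vc1 supplies the sqrt(1/2) factors used by
 * the odd-index twiddles. */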
inline static void fft8_vsx_interleave(FFTComplex *z)
{
    vec_f vz0, vz1, vz2, vz3;
    vec_f x0, x1, x2, x3;
    vec_f x4, x5, x6, x7;
    vec_f x8, x9, x10, x11;
    vec_f x12, x13, x14, x15;
    vec_f x16, x17, x18, x19;
    vec_f x20, x21, x22, x23;
    vec_f x24, x25, x26, x27;
    vec_f x28, x29, x30, x31;
    vec_f x32, x33, x34;

    float* out = (float*)z;
    vec_f vc1 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};

    vz0 = vec_ld(0, &(out[0]));
    vz1 = vec_ld(byte_2complex, &(out[0]));
    vz2 = vec_ld(byte_4complex, &(out[0]));
    vz3 = vec_ld(byte_6complex, &(out[0]));

    x0 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
    x1 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
    x2 = vec_perm(vz2, vz3, vcprm(2,1,s0,s1));
    x3 = vec_perm(vz2, vz3, vcprm(0,3,s2,s3));

    x4 = vec_add(x0, x1);
    x5 = vec_sub(x0, x1);
    x6 = vec_add(x2, x3);
    x7 = vec_sub(x2, x3);

    x8 = vec_perm(x4, x5, vcprm(0,1,s0,s1));
    x9 = vec_perm(x4, x5, vcprm(2,3,s3,s2));
    x10 = vec_perm(x6, x7, vcprm(2,1,s2,s1));
    x11 = vec_perm(x6, x7, vcprm(0,3,s0,s3));

    x12 = vec_add(x8, x9);
    x13 = vec_sub(x8, x9);
    x14 = vec_add(x10, x11);
    x15 = vec_sub(x10, x11);

    x16 = vec_perm(x12, x13, vcprm(0,s0,1,s1));
    x17 = vec_perm(x14, x15, vcprm(0,s0,1,s1));
    x18 = vec_perm(x16, x17, vcprm(s0,s3,s2,s1));
    x19 = vec_add(x16, x18); // z0.r  z2.r  z0.i  z2.i
    x20 = vec_sub(x16, x18); // z4.r  z6.r  z4.i  z6.i

    x21 = vec_perm(x12, x13, vcprm(2,s2,3,s3));
    x22 = vec_perm(x14, x15, vcprm(2,3,s2,s3));
    x23 = vec_perm(x14, x15, vcprm(3,2,s3,s2));
    x24 = vec_add(x22, x23);
    x25 = vec_sub(x22, x23);
    x26 = vec_mul(vec_perm(x24, x25, vcprm(2,s2,0,s0)), vc1);

    x27 = vec_add(x21, x26); // z1.r  z7.r  z1.i  z3.i
    x28 = vec_sub(x21, x26); // z5.r  z3.r  z5.i  z7.i

    x29 = vec_perm(x19, x27, vcprm(0,2,s0,s2));  // z0.r  z0.i  z1.r  z1.i
    x30 = vec_perm(x19, x27, vcprm(1,3,s1,s3));  // z2.r  z2.i  z7.r  z3.i
    x31 = vec_perm(x20, x28, vcprm(0,2,s0,s2));  // z4.r  z4.i  z5.r  z5.i
    x32 = vec_perm(x20, x28, vcprm(1,3,s1,s3));  // z6.r  z6.i  z3.r  z7.i
    x33 = vec_perm(x30, x32, vcprm(0,1,s2,3));   // z2.r  z2.i  z3.r  z3.i
    x34 = vec_perm(x30, x32, vcprm(s0,s1,2,s3)); // z6.r  z6.i  z7.r  z7.i

    vec_st(x29, 0, &(out[0]));
    vec_st(x33, byte_2complex, &(out[0]));
    vec_st(x31, byte_4complex, &(out[0]));
    vec_st(x34, byte_6complex, &(out[0]));
}
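
/* 16-point FFT on interleaved data; besides sqrt(1/2), the twiddles
 * cos(pi/8) and cos(3*pi/8) are taken from the ff_cos_16 table. */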
inline static void fft16_vsx_interleave(FFTComplex *z)
{
    float* out = (float*)z;
    vec_f vc0 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};
    vec_f vc1 = {ff_cos_16[1], ff_cos_16[1], ff_cos_16[1], ff_cos_16[1]};
    vec_f vc2 = {ff_cos_16[3], ff_cos_16[3], ff_cos_16[3], ff_cos_16[3]};
    vec_f vz0, vz1, vz2, vz3;
    vec_f vz4, vz5, vz6, vz7;
    vec_f x0, x1, x2, x3;
    vec_f x4, x5, x6, x7;
    vec_f x8, x9, x10, x11;
    vec_f x12, x13, x14, x15;
    vec_f x16, x17, x18, x19;
    vec_f x20, x21, x22, x23;
    vec_f x24, x25, x26, x27;
    vec_f x28, x29, x30, x31;
    vec_f x32, x33, x34, x35;
    vec_f x36, x37, x38, x39;
    vec_f x40, x41, x42, x43;
    vec_f x44, x45, x46, x47;
    vec_f x48, x49, x50, x51;
    vec_f x52, x53, x54, x55;
    vec_f x56, x57, x58, x59;
    vec_f x60, x61, x62, x63;
    vec_f x64, x65, x66, x67;
    vec_f x68, x69, x70, x71;
    vec_f x72, x73, x74, x75;
    vec_f x76, x77, x78, x79;
    vec_f x80, x81, x82, x83;
    vec_f x84, x85, x86;

    vz0 = vec_ld(0, &(out[0]));
    vz1 = vec_ld(byte_2complex, &(out[0]));
    vz2 = vec_ld(byte_4complex, &(out[0]));
    vz3 = vec_ld(byte_6complex, &(out[0]));
    vz4 = vec_ld(byte_8complex, &(out[0]));
    vz5 = vec_ld(byte_10complex, &(out[0]));
    vz6 = vec_ld(byte_12complex, &(out[0]));
    vz7 = vec_ld(byte_14complex, &(out[0]));

    x0 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
    x1 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
    x2 = vec_perm(vz2, vz3, vcprm(0,1,s0,s1));
    x3 = vec_perm(vz2, vz3, vcprm(2,3,s2,s3));
    x4 = vec_perm(vz4, vz5, vcprm(0,1,s2,s1));
    x5 = vec_perm(vz4, vz5, vcprm(2,3,s0,s3));
    x6 = vec_perm(vz6, vz7, vcprm(0,1,s2,s1));
    x7 = vec_perm(vz6, vz7, vcprm(2,3,s0,s3));

    x8 = vec_add(x0, x1);
    x9 = vec_sub(x0, x1);
    x10 = vec_add(x2, x3);
    x11 = vec_sub(x2, x3);
    x12 = vec_add(x4, x5);
    x13 = vec_sub(x4, x5);
    x14 = vec_add(x6, x7);
    x15 = vec_sub(x6, x7);

    x16 = vec_perm(x8, x9, vcprm(0,1,s0,s1));
    x17 = vec_perm(x8, x9, vcprm(2,3,s3,s2));
    x18 = vec_perm(x10, x11, vcprm(2,1,s1,s2));
    x19 = vec_perm(x10, x11, vcprm(0,3,s0,s3));
    x20 = vec_perm(x12, x14, vcprm(0,1,s0,s1));
    x21 = vec_perm(x12, x14, vcprm(2,3,s2,s3));
    x22 = vec_perm(x13, x15, vcprm(0,1,s0,s1));
    x23 = vec_perm(x13, x15, vcprm(3,2,s3,s2));

    x24 = vec_add(x16, x17);
    x25 = vec_sub(x16, x17);
    x26 = vec_add(x18, x19);
    x27 = vec_sub(x18, x19);
    x28 = vec_add(x20, x21);
    x29 = vec_sub(x20, x21);
    x30 = vec_add(x22, x23);
    x31 = vec_sub(x22, x23);

    x32 = vec_add(x24, x26);
    x33 = vec_sub(x24, x26);
    x34 = vec_perm(x32, x33, vcprm(0,1,s0,s1));

    x35 = vec_perm(x28, x29, vcprm(2,1,s1,s2));
    x36 = vec_perm(x28, x29, vcprm(0,3,s0,s3));
    x37 = vec_add(x35, x36);
    x38 = vec_sub(x35, x36);
    x39 = vec_perm(x37, x38, vcprm(0,1,s1,s0));

    x40 = vec_perm(x27, x38, vcprm(3,2,s2,s3));
    x41 = vec_perm(x26, x37, vcprm(2,3,s3,s2));
    x42 = vec_add(x40, x41);
    x43 = vec_sub(x40, x41);
    x44 = vec_mul(x42, vc0);
    x45 = vec_mul(x43, vc0);

    x46 = vec_add(x34, x39); // z0.r  z0.i  z4.r  z4.i
    x47 = vec_sub(x34, x39); // z8.r  z8.i  z12.r  z12.i

    x48 = vec_perm(x30, x31, vcprm(2,1,s1,s2));
    x49 = vec_perm(x30, x31, vcprm(0,3,s3,s0));
    x50 = vec_add(x48, x49);
    x51 = vec_sub(x48, x49);
    x52 = vec_mul(x50, vc1);
    x53 = vec_mul(x50, vc2);
    x54 = vec_mul(x51, vc1);
    x55 = vec_mul(x51, vc2);

    x56 = vec_perm(x24, x25, vcprm(2,3,s2,s3));
    x57 = vec_perm(x44, x45, vcprm(0,1,s1,s0));
    x58 = vec_add(x56, x57);
    x59 = vec_sub(x56, x57);

    x60 = vec_perm(x54, x55, vcprm(1,0,3,2));
    x61 = vec_perm(x54, x55, vcprm(s1,s0,s3,s2));
    x62 = vec_add(x52, x61);
    x63 = vec_sub(x52, x61);
    x64 = vec_add(x60, x53);
    x65 = vec_sub(x60, x53);
    x66 = vec_perm(x62, x64, vcprm(0,1,s3,s2));
    x67 = vec_perm(x63, x65, vcprm(s0,s1,3,2));

    x68 = vec_add(x58, x66); // z1.r  z1.i  z3.r  z3.i
    x69 = vec_sub(x58, x66); // z9.r  z9.i  z11.r  z11.i
    x70 = vec_add(x59, x67); // z5.r  z5.i  z15.r  z15.i
    x71 = vec_sub(x59, x67); // z13.r  z13.i  z7.r  z7.i

    x72 = vec_perm(x25, x27, vcprm(s1,s0,s2,s3));
    x73 = vec_add(x25, x72);
    x74 = vec_sub(x25, x72);
    x75 = vec_perm(x73, x74, vcprm(0,1,s0,s1));
    x76 = vec_perm(x44, x45, vcprm(3,2,s2,s3));
    x77 = vec_add(x75, x76); // z2.r  z2.i  z6.r  z6.i
    x78 = vec_sub(x75, x76); // z10.r  z10.i  z14.r  z14.i

    x79 = vec_perm(x46, x68, vcprm(0,1,s0,s1)); // z0.r  z0.i  z1.r  z1.i
    x80 = vec_perm(x77, x68, vcprm(0,1,s2,s3)); // z2.r  z2.i  z3.r  z3.i
    x81 = vec_perm(x46, x70, vcprm(2,3,s0,s1)); // z4.r  z4.i  z5.r  z5.i
    x82 = vec_perm(x71, x77, vcprm(s2,s3,2,3)); // z6.r  z6.i  z7.r  z7.i
    vec_st(x79, 0, &(out[0]));
    vec_st(x80, byte_2complex, &(out[0]));
    vec_st(x81, byte_4complex, &(out[0]));
    vec_st(x82, byte_6complex, &(out[0]));

    x83 = vec_perm(x47, x69, vcprm(0,1,s0,s1)); // z8.r  z8.i  z9.r  z9.i
    x84 = vec_perm(x78, x69, vcprm(0,1,s2,s3)); // z10.r  z10.i  z11.r  z11.i
    x85 = vec_perm(x47, x71, vcprm(2,3,s0,s1)); // z12.r  z12.i  z13.r  z13.i
    x86 = vec_perm(x70, x78, vcprm(s2,s3,2,3)); // z14.r  z14.i  z15.r  z15.i
    vec_st(x83, byte_8complex, &(out[0]));
    vec_st(x84, byte_10complex, &(out[0]));
    vec_st(x85, byte_12complex, &(out[0]));
    vec_st(x86, byte_14complex, &(out[0]));
}
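
/* The remaining kernels form the non-interleaved code path.  Judging from the
 * comments in pass_vsx() below, it works on a blocked layout where each group
 * of four complex values is stored as four real parts followed by four
 * imaginary parts. */

/* 4-point FFT, non-interleaved code path. */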
inline static void fft4_vsx(FFTComplex *z)
{
    vec_f a, b, c, d;
    float* out = (float*)z;
    a = vec_ld(0, &(out[0]));
    b = vec_ld(byte_2complex, &(out[0]));

    c = vec_perm(a, b, vcprm(0,1,s2,s1));
    d = vec_perm(a, b, vcprm(2,3,s0,s3));
    a = vec_add(c, d);
    b = vec_sub(c, d);

    c = vec_perm(a, b, vcprm(0,s0,1,s1));
    d = vec_perm(a, b, vcprm(2,s3,3,s2));
    a = vec_add(c, d);
    b = vec_sub(c, d);

    c = vec_perm(a, b, vcprm(0,1,s0,s1));
    d = vec_perm(a, b, vcprm(2,3,s2,s3));

    vec_st(c, 0, &(out[0]));
    vec_st(d, byte_2complex, &(out[0]));
    return;
}
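
/* 8-point FFT, non-interleaved code path; vc1/vc2 apply the +-sqrt(1/2)
 * twiddle factors. */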
inline static void fft8_vsx(FFTComplex *z)
{
    vec_f vz0, vz1, vz2, vz3;
    vec_f vz4, vz5, vz6, vz7, vz8;

    float* out = (float*)z;
    vec_f vc0 = {0.0, 0.0, 0.0, 0.0};
    vec_f vc1 = {-sqrthalf, sqrthalf, sqrthalf, -sqrthalf};
    vec_f vc2 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};

    vz0 = vec_ld(0, &(out[0]));
    vz1 = vec_ld(byte_2complex, &(out[0]));
    vz2 = vec_ld(byte_4complex, &(out[0]));
    vz3 = vec_ld(byte_6complex, &(out[0]));

    vz6 = vec_perm(vz2, vz3, vcprm(0,s0,1,s1));
    vz7 = vec_perm(vz2, vz3, vcprm(2,s2,3,s3));
    vz4 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
    vz5 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));

    vz2 = vec_add(vz6, vz7);
    vz3 = vec_sub(vz6, vz7);
    vz8 = vec_perm(vz3, vz3, vcprm(2,3,0,1));

    vz0 = vec_add(vz4, vz5);
    vz1 = vec_sub(vz4, vz5);

    vz3 = vec_madd(vz3, vc1, vc0);
    vz3 = vec_madd(vz8, vc2, vz3);

    vz4 = vec_perm(vz0, vz1, vcprm(0,s0,1,s1));
    vz5 = vec_perm(vz0, vz1, vcprm(2,s3,3,s2));
    vz6 = vec_perm(vz2, vz3, vcprm(1,2,s3,s0));
    vz7 = vec_perm(vz2, vz3, vcprm(0,3,s2,s1));

    vz0 = vec_add(vz4, vz5);
    vz1 = vec_sub(vz4, vz5);
    vz2 = vec_add(vz6, vz7);
    vz3 = vec_sub(vz6, vz7);

    vz4 = vec_perm(vz0, vz1, vcprm(0,1,s0,s1));
    vz5 = vec_perm(vz0, vz1, vcprm(2,3,s2,s3));
    vz6 = vec_perm(vz2, vz3, vcprm(0,2,s1,s3));
    vz7 = vec_perm(vz2, vz3, vcprm(1,3,s0,s2));

    vz2 = vec_sub(vz4, vz6);
    vz3 = vec_sub(vz5, vz7);
    vz0 = vec_add(vz4, vz6);
    vz1 = vec_add(vz5, vz7);

    vec_st(vz0, 0, &(out[0]));
    vec_st(vz1, byte_2complex, &(out[0]));
    vec_st(vz2, byte_4complex, &(out[0]));
    vec_st(vz3, byte_6complex, &(out[0]));
    return;
}
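
/* 16-point FFT, non-interleaved code path.  vc3/vc4/vc5 hold the cos/sin
 * twiddles of the 16-point stage (0.92387953 = cos(pi/8),
 * 0.38268343 = sin(pi/8)). */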
inline static void fft16_vsx(FFTComplex *z)
{
    float* out = (float*)z;
    vec_f vc0 = {0.0, 0.0, 0.0, 0.0};
    vec_f vc1 = {-sqrthalf, sqrthalf, sqrthalf, -sqrthalf};
    vec_f vc2 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};
    vec_f vc3 = {1.0, 0.92387953, sqrthalf, 0.38268343};
    vec_f vc4 = {0.0, 0.38268343, sqrthalf, 0.92387953};
    vec_f vc5 = {-0.0, -0.38268343, -sqrthalf, -0.92387953};
    vec_f vz0, vz1, vz2, vz3;
    vec_f vz4, vz5, vz6, vz7;
    vec_f vz8, vz9, vz10, vz11;
    vec_f vz12, vz13;

    vz0 = vec_ld(byte_8complex, &(out[0]));
    vz1 = vec_ld(byte_10complex, &(out[0]));
    vz2 = vec_ld(byte_12complex, &(out[0]));
    vz3 = vec_ld(byte_14complex, &(out[0]));

    vz4 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
    vz5 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
    vz6 = vec_perm(vz2, vz3, vcprm(0,1,s2,s1));
    vz7 = vec_perm(vz2, vz3, vcprm(2,3,s0,s3));

    vz0 = vec_add(vz4, vz5);
    vz1 = vec_sub(vz4, vz5);
    vz2 = vec_add(vz6, vz7);
    vz3 = vec_sub(vz6, vz7);

    vz4 = vec_perm(vz0, vz1, vcprm(0,s0,1,s1));
    vz5 = vec_perm(vz0, vz1, vcprm(2,s3,3,s2));
    vz6 = vec_perm(vz2, vz3, vcprm(0,s0,1,s1));
    vz7 = vec_perm(vz2, vz3, vcprm(2,s3,3,s2));

    vz0 = vec_add(vz4, vz5);
    vz1 = vec_sub(vz4, vz5);
    vz2 = vec_add(vz6, vz7);
    vz3 = vec_sub(vz6, vz7);

    vz4 = vec_perm(vz0, vz1, vcprm(0,1,s0,s1));
    vz5 = vec_perm(vz0, vz1, vcprm(2,3,s2,s3));
    vz6 = vec_perm(vz2, vz3, vcprm(0,1,s0,s1));
    vz7 = vec_perm(vz2, vz3, vcprm(2,3,s2,s3));

    vz0 = vec_ld(0, &(out[0]));
    vz1 = vec_ld(byte_2complex, &(out[0]));
    vz2 = vec_ld(byte_4complex, &(out[0]));
    vz3 = vec_ld(byte_6complex, &(out[0]));

    vz10 = vec_perm(vz2, vz3, vcprm(0,s0,1,s1));
    vz11 = vec_perm(vz2, vz3, vcprm(2,s2,3,s3));
    vz8 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
    vz9 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));

    vz2 = vec_add(vz10, vz11);
    vz3 = vec_sub(vz10, vz11);
    vz12 = vec_perm(vz3, vz3, vcprm(2,3,0,1));
    vz0 = vec_add(vz8, vz9);
    vz1 = vec_sub(vz8, vz9);

    vz3 = vec_madd(vz3, vc1, vc0);
    vz3 = vec_madd(vz12, vc2, vz3);

    vz8 = vec_perm(vz0, vz1, vcprm(0,s0,1,s1));
    vz9 = vec_perm(vz0, vz1, vcprm(2,s3,3,s2));
    vz10 = vec_perm(vz2, vz3, vcprm(1,2,s3,s0));
    vz11 = vec_perm(vz2, vz3, vcprm(0,3,s2,s1));

    vz0 = vec_add(vz8, vz9);
    vz1 = vec_sub(vz8, vz9);
    vz2 = vec_add(vz10, vz11);
    vz3 = vec_sub(vz10, vz11);

    vz8 = vec_perm(vz0, vz1, vcprm(0,1,s0,s1));
    vz9 = vec_perm(vz0, vz1, vcprm(2,3,s2,s3));
    vz10 = vec_perm(vz2, vz3, vcprm(0,2,s1,s3));
    vz11 = vec_perm(vz2, vz3, vcprm(1,3,s0,s2));

    vz2 = vec_sub(vz8, vz10);
    vz3 = vec_sub(vz9, vz11);
    vz0 = vec_add(vz8, vz10);
    vz1 = vec_add(vz9, vz11);

    vz8 = vec_madd(vz4, vc3, vc0);
    vz9 = vec_madd(vz5, vc3, vc0);
    vz10 = vec_madd(vz6, vc3, vc0);
    vz11 = vec_madd(vz7, vc3, vc0);

    vz8 = vec_madd(vz5, vc4, vz8);
    vz9 = vec_madd(vz4, vc5, vz9);
    vz10 = vec_madd(vz7, vc5, vz10);
    vz11 = vec_madd(vz6, vc4, vz11);

    vz12 = vec_sub(vz10, vz8);
    vz10 = vec_add(vz10, vz8);
    vz13 = vec_sub(vz9, vz11);
    vz11 = vec_add(vz9, vz11);

    vz4 = vec_sub(vz0, vz10);
    vz0 = vec_add(vz0, vz10);
    vz7 = vec_sub(vz3, vz12);
    vz3 = vec_add(vz3, vz12);
    vz5 = vec_sub(vz1, vz11);
    vz1 = vec_add(vz1, vz11);
    vz6 = vec_sub(vz2, vz13);
    vz2 = vec_add(vz2, vz13);

    vec_st(vz0, 0, &(out[0]));
    vec_st(vz1, byte_2complex, &(out[0]));
    vec_st(vz2, byte_4complex, &(out[0]));
    vec_st(vz3, byte_6complex, &(out[0]));
    vec_st(vz4, byte_8complex, &(out[0]));
    vec_st(vz5, byte_10complex, &(out[0]));
    vec_st(vz6, byte_12complex, &(out[0]));
    vec_st(vz7, byte_14complex, &(out[0]));
    return;
}
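
/* Combination pass for the non-interleaved code path.  The comments on the
 * loads/stores (r0, i0, r1, i1, ...) spell out the blocked layout: four real
 * parts followed by four imaginary parts per group, combined with the twiddle
 * factors from wre/wim. */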
inline static void pass_vsx(FFTComplex *z, const FFTSample *wre, unsigned int n)
{
    int o1 = n<<1;
    int o2 = n<<2;
    int o3 = o1+o2;
    int i1, i2, i3;
    FFTSample* out = (FFTSample*)z;
    const FFTSample *wim = wre+o1;
    vec_f v0, v1, v2, v3;
    vec_f v4, v5, v6, v7;
    vec_f v8, v9, v10, v11;
    vec_f v12, v13;

    n = n-2;
    i1 = o1*sizeof(FFTComplex);
    i2 = o2*sizeof(FFTComplex);
    i3 = o3*sizeof(FFTComplex);

    v8 = vec_ld(0, &(wre[0]));
    v10 = vec_ld(0, &(wim[0]));
    v9 = vec_ld(0, &(wim[-4]));
    v9 = vec_perm(v9, v10, vcprm(s0,3,2,1));

    v4 = vec_ld(i2, &(out[0]));
    v5 = vec_ld(i2+16, &(out[0]));
    v6 = vec_ld(i3, &(out[0]));
    v7 = vec_ld(i3+16, &(out[0]));
    v10 = vec_mul(v4, v8);  // r2*wre
    v11 = vec_mul(v5, v8);  // i2*wre
    v12 = vec_mul(v6, v8);  // r3*wre
    v13 = vec_mul(v7, v8);  // i3*wre

    v0 = vec_ld(0, &(out[0]));     // r0
    v3 = vec_ld(i1+16, &(out[0])); // i1

    v10 = vec_madd(v5, v9, v10);  // r2*wim
    v11 = vec_nmsub(v4, v9, v11); // i2*wim
    v12 = vec_nmsub(v7, v9, v12); // r3*wim
    v13 = vec_madd(v6, v9, v13);  // i3*wim

    v1 = vec_ld(16, &(out[0])); // i0
    v2 = vec_ld(i1, &(out[0])); // r1

    v8 = vec_sub(v12, v10);
    v12 = vec_add(v12, v10);
    v9 = vec_sub(v11, v13);
    v13 = vec_add(v11, v13);
    v4 = vec_sub(v0, v12);
    v0 = vec_add(v0, v12);
    v7 = vec_sub(v3, v8);
    v3 = vec_add(v3, v8);

    vec_st(v0, 0, &(out[0]));      // r0
    vec_st(v3, i1+16, &(out[0])); // i1
    vec_st(v4, i2, &(out[0]));    // r2
    vec_st(v7, i3+16, &(out[0])); // i3

    v5 = vec_sub(v1, v13);
    v1 = vec_add(v1, v13);
    v6 = vec_sub(v2, v9);
    v2 = vec_add(v2, v9);

    vec_st(v1, 16, &(out[0]));    // i0
    vec_st(v2, i1, &(out[0]));    // r1
    vec_st(v5, i2+16, &(out[0])); // i2
    vec_st(v6, i3, &(out[0]));    // r3

    do {
        out += 8;
        wre += 4;
        wim -= 4;

        v8 = vec_ld(0, &(wre[0]));
        v10 = vec_ld(0, &(wim[0]));
        v9 = vec_ld(0, &(wim[-4]));
        v9 = vec_perm(v9, v10, vcprm(s0,3,2,1));

        v4 = vec_ld(i2, &(out[0]));    // r2
        v5 = vec_ld(i2+16, &(out[0])); // i2
        v6 = vec_ld(i3, &(out[0]));    // r3
        v7 = vec_ld(i3+16, &(out[0])); // i3
        v10 = vec_mul(v4, v8);  // r2*wre
        v11 = vec_mul(v5, v8);  // i2*wre
        v12 = vec_mul(v6, v8);  // r3*wre
        v13 = vec_mul(v7, v8);  // i3*wre

        v0 = vec_ld(0, &(out[0]));     // r0
        v3 = vec_ld(i1+16, &(out[0])); // i1

        v10 = vec_madd(v5, v9, v10);  // r2*wim
        v11 = vec_nmsub(v4, v9, v11); // i2*wim
        v12 = vec_nmsub(v7, v9, v12); // r3*wim
        v13 = vec_madd(v6, v9, v13);  // i3*wim

        v1 = vec_ld(16, &(out[0])); // i0
        v2 = vec_ld(i1, &(out[0])); // r1

        v8 = vec_sub(v12, v10);
        v12 = vec_add(v12, v10);
        v9 = vec_sub(v11, v13);
        v13 = vec_add(v11, v13);
        v4 = vec_sub(v0, v12);
        v0 = vec_add(v0, v12);
        v7 = vec_sub(v3, v8);
        v3 = vec_add(v3, v8);

        vec_st(v0, 0, &(out[0]));     // r0
        vec_st(v3, i1+16, &(out[0])); // i1
        vec_st(v4, i2, &(out[0]));    // r2
        vec_st(v7, i3+16, &(out[0])); // i3

        v5 = vec_sub(v1, v13);
        v1 = vec_add(v1, v13);
        v6 = vec_sub(v2, v9);
        v2 = vec_add(v2, v9);

        vec_st(v1, 16, &(out[0]));    // i0
        vec_st(v2, i1, &(out[0]));    // r1
        vec_st(v5, i2+16, &(out[0])); // i2
        vec_st(v6, i3, &(out[0]));    // r3
    } while (n-=2);
}
#endif /* HAVE_VSX */

#endif /* AVCODEC_PPC_FFT_VSX_H */