avx_mathfun.h

#pragma once
/*
AVX implementation of sin, cos, sincos, exp and log

Based on "sse_mathfun.h", by Julien Pommier
http://gruntthepeon.free.fr/ssemath/

Copyright (C) 2012 Giovanni Garberoglio
Interdisciplinary Laboratory for Computational Science (LISC)
Fondazione Bruno Kessler and University of Trento
via Sommarive, 18
I-38123 Trento (Italy)

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
   claim that you wrote the original software. If you use this software
   in a product, an acknowledgment in the product documentation would be
   appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
   misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

(this is the zlib license)
*/
#include <ATen/native/cpu/Intrinsics.h>

/* The original source of this file has been modified. */
#if defined(CPU_CAPABILITY_AVX2)

#if defined(__GNUC__)
# define ALIGN32_BEG __attribute__((aligned(32)))
#elif defined(_WIN32)
# define ALIGN32_BEG __declspec(align(32))
#endif

typedef __m256  v8sf; // vector of 8 float (avx2)
typedef __m256i v8si; // vector of 8 int (avx2)

/* declare some AVX constants -- why can't I figure a better way to do that? */
#define _PS256_CONST(Name, Val) \
  static const ALIGN32_BEG float _ps256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val }
#define _PI32_CONST256(Name, Val) \
  static const ALIGN32_BEG int _pi32_256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val }
#define _PS256_CONST_TYPE(Name, Type, Val) \
  static const ALIGN32_BEG Type _ps256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val }

_PS256_CONST(1 , 1.0f);
_PS256_CONST(0p5, 0.5f);
/* the smallest non denormalized float number */
_PS256_CONST_TYPE(min_norm_pos, int, 0x00800000);
_PS256_CONST_TYPE(mant_mask, int, 0x7f800000);
_PS256_CONST_TYPE(inv_mant_mask, int, ~0x7f800000);
_PS256_CONST_TYPE(sign_mask, int, (int)0x80000000);
_PS256_CONST_TYPE(inv_sign_mask, int, ~0x80000000);

_PI32_CONST256(0, 0);
_PI32_CONST256(1, 1);
_PI32_CONST256(inv1, ~1);
_PI32_CONST256(2, 2);
_PI32_CONST256(4, 4);
_PI32_CONST256(0x7f, 0x7f);
_PS256_CONST(cephes_SQRTHF, 0.707106781186547524);
_PS256_CONST(cephes_log_p0, 7.0376836292E-2);
_PS256_CONST(cephes_log_p1, - 1.1514610310E-1);
_PS256_CONST(cephes_log_p2, 1.1676998740E-1);
_PS256_CONST(cephes_log_p3, - 1.2420140846E-1);
_PS256_CONST(cephes_log_p4, + 1.4249322787E-1);
_PS256_CONST(cephes_log_p5, - 1.6668057665E-1);
_PS256_CONST(cephes_log_p6, + 2.0000714765E-1);
_PS256_CONST(cephes_log_p7, - 2.4999993993E-1);
_PS256_CONST(cephes_log_p8, + 3.3333331174E-1);
_PS256_CONST(cephes_log_q1, -2.12194440e-4);
_PS256_CONST(cephes_log_q2, 0.693359375);
/* natural logarithm computed for 8 simultaneous floats;
   returns NaN for x <= 0
*/
inline v8sf log256_ps(v8sf x) {
  v8si imm0;
  v8sf one = *(v8sf*)_ps256_1;

  //v8sf invalid_mask = _mm256_cmple_ps(x, _mm256_setzero_ps());
  v8sf invalid_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_LE_OS);

  x = _mm256_max_ps(x, *(v8sf*)_ps256_min_norm_pos); /* cut off denormalized stuff */

  // can be done with AVX2
  imm0 = _mm256_srli_epi32(_mm256_castps_si256(x), 23);

  /* keep only the fractional part */
  x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_mant_mask);
  x = _mm256_or_ps(x, *(v8sf*)_ps256_0p5);

  // this is again another AVX2 instruction
  imm0 = _mm256_sub_epi32(imm0, *(v8si*)_pi32_256_0x7f);
  v8sf e = _mm256_cvtepi32_ps(imm0);

  e = _mm256_add_ps(e, one);

  /* part2:
     if( x < SQRTHF ) {
       e -= 1;
       x = x + x - 1.0;
     } else { x = x - 1.0; }
  */
  //v8sf mask = _mm256_cmplt_ps(x, *(v8sf*)_ps256_cephes_SQRTHF);
  v8sf mask = _mm256_cmp_ps(x, *(v8sf*)_ps256_cephes_SQRTHF, _CMP_LT_OS);
  v8sf tmp = _mm256_and_ps(x, mask);
  x = _mm256_sub_ps(x, one);
  e = _mm256_sub_ps(e, _mm256_and_ps(one, mask));
  x = _mm256_add_ps(x, tmp);

  v8sf z = _mm256_mul_ps(x,x);

  v8sf y = *(v8sf*)_ps256_cephes_log_p0;
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p1);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p2);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p3);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p4);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p5);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p6);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p7);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p8);
  y = _mm256_mul_ps(y, x);

  y = _mm256_mul_ps(y, z);

  tmp = _mm256_mul_ps(e, *(v8sf*)_ps256_cephes_log_q1);
  y = _mm256_add_ps(y, tmp);

  tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
  y = _mm256_sub_ps(y, tmp);

  tmp = _mm256_mul_ps(e, *(v8sf*)_ps256_cephes_log_q2);
  x = _mm256_add_ps(x, y);
  x = _mm256_add_ps(x, tmp);
  x = _mm256_or_ps(x, invalid_mask); // negative arg will be NAN
  return x;
}
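/* Usage sketch (editorial addition, not part of the original header): log256_ps
   works on 8 packed floats at a time. Assuming an AVX2 build, a call might look
   like the following; the buffer names are hypothetical.

     float in[8] = {0.5f, 1.0f, 2.0f, 4.0f, 8.0f, 16.0f, 32.0f, 64.0f};
     float out[8];
     v8sf v = _mm256_loadu_ps(in);   // unaligned load of 8 floats
     v8sf r = log256_ps(v);          // natural log, lane by lane
     _mm256_storeu_ps(out, r);       // out[i] ~= logf(in[i]); NaN where in[i] <= 0
*/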
_PS256_CONST(exp_hi, 88.3762626647949f);
_PS256_CONST(exp_lo, -88.3762626647949f);

_PS256_CONST(cephes_LOG2EF, 1.44269504088896341);
_PS256_CONST(cephes_exp_C1, 0.693359375);
_PS256_CONST(cephes_exp_C2, -2.12194440e-4);

_PS256_CONST(cephes_exp_p0, 1.9875691500E-4);
_PS256_CONST(cephes_exp_p1, 1.3981999507E-3);
_PS256_CONST(cephes_exp_p2, 8.3334519073E-3);
_PS256_CONST(cephes_exp_p3, 4.1665795894E-2);
_PS256_CONST(cephes_exp_p4, 1.6666665459E-1);
_PS256_CONST(cephes_exp_p5, 5.0000001201E-1);
inline v8sf exp256_ps(v8sf x) {
  v8sf tmp = _mm256_setzero_ps(), fx;
  v8si imm0;
  v8sf one = *(v8sf*)_ps256_1;

  x = _mm256_min_ps(x, *(v8sf*)_ps256_exp_hi);
  x = _mm256_max_ps(x, *(v8sf*)_ps256_exp_lo);

  /* express exp(x) as exp(g + n*log(2)) */
  fx = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_LOG2EF);
  fx = _mm256_add_ps(fx, *(v8sf*)_ps256_0p5);

  /* how to perform a floorf with SSE: just below */
  //imm0 = _mm256_cvttps_epi32(fx);
  //tmp = _mm256_cvtepi32_ps(imm0);

  tmp = _mm256_floor_ps(fx);

  /* if greater, subtract 1 */
  //v8sf mask = _mm256_cmpgt_ps(tmp, fx);
  v8sf mask = _mm256_cmp_ps(tmp, fx, _CMP_GT_OS);
  mask = _mm256_and_ps(mask, one);
  fx = _mm256_sub_ps(tmp, mask);

  tmp = _mm256_mul_ps(fx, *(v8sf*)_ps256_cephes_exp_C1);
  v8sf z = _mm256_mul_ps(fx, *(v8sf*)_ps256_cephes_exp_C2);
  x = _mm256_sub_ps(x, tmp);
  x = _mm256_sub_ps(x, z);

  z = _mm256_mul_ps(x,x);

  v8sf y = *(v8sf*)_ps256_cephes_exp_p0;
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p1);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p2);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p3);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p4);
  y = _mm256_mul_ps(y, x);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p5);
  y = _mm256_mul_ps(y, z);
  y = _mm256_add_ps(y, x);
  y = _mm256_add_ps(y, one);

  /* build 2^n */
  imm0 = _mm256_cvttps_epi32(fx);
  // another two AVX2 instructions
  imm0 = _mm256_add_epi32(imm0, *(v8si*)_pi32_256_0x7f);
  imm0 = _mm256_slli_epi32(imm0, 23);
  v8sf pow2n = _mm256_castsi256_ps(imm0);
  y = _mm256_mul_ps(y, pow2n);
  return y;
}
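/* Editorial note (added, not in the original header): exp256_ps follows the
   Cephes scheme exp(x) = 2^n * exp(g), where n = round(x * log2(e)) and
   g = x - n*ln(2) stays near [-ln(2)/2, ln(2)/2]. exp(g) comes from the
   p0..p5 polynomial above, and 2^n is assembled directly in the float
   exponent field (n + 0x7f shifted into bits 23..30). A hypothetical
   one-lane sanity check:

     v8sf v = _mm256_set1_ps(1.5f);
     v8sf r = exp256_ps(v);   // every lane ~= expf(1.5f) = 4.4816890703f
*/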
_PS256_CONST(minus_cephes_DP1, -0.78515625);
_PS256_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
_PS256_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
_PS256_CONST(sincof_p0, -1.9515295891E-4);
_PS256_CONST(sincof_p1, 8.3321608736E-3);
_PS256_CONST(sincof_p2, -1.6666654611E-1);
_PS256_CONST(coscof_p0, 2.443315711809948E-005);
_PS256_CONST(coscof_p1, -1.388731625493765E-003);
_PS256_CONST(coscof_p2, 4.166664568298827E-002);
_PS256_CONST(cephes_FOPI, 1.27323954473516); // 4 / M_PI

/* evaluation of 8 sines at once using AVX intrinsics

   The code is an exact rewriting of the cephes sinf function.
   Precision is excellent as long as x < 8192 (I did not bother to
   take into account the special handling they have for greater values
   -- it does not return garbage for arguments over 8192, but the
   extra precision is missing).

   Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
   surprising but correct result.
*/
inline v8sf sin256_ps(v8sf x) { // any x
  v8sf xmm1, xmm2 = _mm256_setzero_ps(), xmm3, sign_bit, y;
  v8si imm0, imm2;

  sign_bit = x;
  /* take the absolute value */
  x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);
  /* extract the sign bit (upper one) */
  sign_bit = _mm256_and_ps(sign_bit, *(v8sf*)_ps256_sign_mask);

  /* scale by 4/Pi */
  y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);

  /*
    Here we start a series of integer operations, which are in the
    realm of AVX2.
    If we don't have AVX2, let's perform them using SSE2 directives.
  */

  /* store the integer part of y in mm0 */
  imm2 = _mm256_cvttps_epi32(y);
  /* j=(j+1) & (~1) (see the cephes sources) */
  // another two AVX2 instructions
  imm2 = _mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
  imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);
  y = _mm256_cvtepi32_ps(imm2);

  /* get the swap sign flag */
  imm0 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_4);
  imm0 = _mm256_slli_epi32(imm0, 29);
  /* get the polynomial selection mask:
     there is one polynomial for 0 <= x <= Pi/4
     and another one for Pi/4 < x <= Pi/2.

     Both branches will be computed.
  */
  imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
  imm2 = _mm256_cmpeq_epi32(imm2, *(v8si*)_pi32_256_0);

  v8sf swap_sign_bit = _mm256_castsi256_ps(imm0);
  v8sf poly_mask = _mm256_castsi256_ps(imm2);
  sign_bit = _mm256_xor_ps(sign_bit, swap_sign_bit);

  /* The magic pass: "Extended precision modular arithmetic"
     x = ((x - y * DP1) - y * DP2) - y * DP3; */
  xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
  xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
  xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
  xmm1 = _mm256_mul_ps(y, xmm1);
  xmm2 = _mm256_mul_ps(y, xmm2);
  xmm3 = _mm256_mul_ps(y, xmm3);
  x = _mm256_add_ps(x, xmm1);
  x = _mm256_add_ps(x, xmm2);
  x = _mm256_add_ps(x, xmm3);

  /* Evaluate the first polynomial (0 <= x <= Pi/4) */
  y = *(v8sf*)_ps256_coscof_p0;
  v8sf z = _mm256_mul_ps(x,x);

  y = _mm256_mul_ps(y, z);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
  y = _mm256_mul_ps(y, z);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
  y = _mm256_mul_ps(y, z);
  y = _mm256_mul_ps(y, z);
  v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
  y = _mm256_sub_ps(y, tmp);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_1);

  /* Evaluate the second polynomial (Pi/4 < x <= Pi/2) */
  v8sf y2 = *(v8sf*)_ps256_sincof_p0;
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_mul_ps(y2, x);
  y2 = _mm256_add_ps(y2, x);

  /* select the correct result from the two polynomials */
  xmm3 = poly_mask;
  y2 = _mm256_and_ps(xmm3, y2); //, xmm3);
  y = _mm256_andnot_ps(xmm3, y);
  y = _mm256_add_ps(y, y2);
  /* update the sign */
  y = _mm256_xor_ps(y, sign_bit);
  return y;
}
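/* Usage sketch (editorial addition, not part of the original header; assumes an
   AVX2 build and M_PI from <cmath>), reproducing the property noted above:

     v8sf v = _mm256_set1_ps((float)M_PI);
     v8sf s = sin256_ps(v);   // every lane ~= 8.74e-8, matching sinf((float)M_PI)
*/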
/* almost the same as sin_ps */
inline v8sf cos256_ps(v8sf x) { // any x
  v8sf xmm1, xmm2 = _mm256_setzero_ps(), xmm3, y;
  v8si imm0, imm2;

  /* take the absolute value */
  x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);

  /* scale by 4/Pi */
  y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);

  /* store the integer part of y in mm0 */
  imm2 = _mm256_cvttps_epi32(y);
  /* j=(j+1) & (~1) (see the cephes sources) */
  imm2 = _mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
  imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);
  y = _mm256_cvtepi32_ps(imm2);
  imm2 = _mm256_sub_epi32(imm2, *(v8si*)_pi32_256_2);

  /* get the swap sign flag */
  imm0 = _mm256_andnot_si256(imm2, *(v8si*)_pi32_256_4);
  imm0 = _mm256_slli_epi32(imm0, 29);
  /* get the polynomial selection mask */
  imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
  imm2 = _mm256_cmpeq_epi32(imm2, *(v8si*)_pi32_256_0);

  v8sf sign_bit = _mm256_castsi256_ps(imm0);
  v8sf poly_mask = _mm256_castsi256_ps(imm2);

  /* The magic pass: "Extended precision modular arithmetic"
     x = ((x - y * DP1) - y * DP2) - y * DP3; */
  xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
  xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
  xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
  xmm1 = _mm256_mul_ps(y, xmm1);
  xmm2 = _mm256_mul_ps(y, xmm2);
  xmm3 = _mm256_mul_ps(y, xmm3);
  x = _mm256_add_ps(x, xmm1);
  x = _mm256_add_ps(x, xmm2);
  x = _mm256_add_ps(x, xmm3);

  /* Evaluate the first polynomial (0 <= x <= Pi/4) */
  y = *(v8sf*)_ps256_coscof_p0;
  v8sf z = _mm256_mul_ps(x,x);

  y = _mm256_mul_ps(y, z);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
  y = _mm256_mul_ps(y, z);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
  y = _mm256_mul_ps(y, z);
  y = _mm256_mul_ps(y, z);
  v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
  y = _mm256_sub_ps(y, tmp);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_1);

  /* Evaluate the second polynomial (Pi/4 < x <= Pi/2) */
  v8sf y2 = *(v8sf*)_ps256_sincof_p0;
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_mul_ps(y2, x);
  y2 = _mm256_add_ps(y2, x);

  /* select the correct result from the two polynomials */
  xmm3 = poly_mask;
  y2 = _mm256_and_ps(xmm3, y2); //, xmm3);
  y = _mm256_andnot_ps(xmm3, y);
  y = _mm256_add_ps(y, y2);
  /* update the sign */
  y = _mm256_xor_ps(y, sign_bit);
  return y;
}
/* since sin256_ps and cos256_ps are almost identical, sincos256_ps could replace both of them.
   It is almost as fast, and gives you a free cosine with your sine. */
inline void sincos256_ps(v8sf x, v8sf *s, v8sf *c) {
  v8sf xmm1, xmm2, xmm3 = _mm256_setzero_ps(), sign_bit_sin, y;
  v8si imm0, imm2, imm4;

  sign_bit_sin = x;
  /* take the absolute value */
  x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);
  /* extract the sign bit (upper one) */
  sign_bit_sin = _mm256_and_ps(sign_bit_sin, *(v8sf*)_ps256_sign_mask);

  /* scale by 4/Pi */
  y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);

  /* store the integer part of y in imm2 */
  imm2 = _mm256_cvttps_epi32(y);

  /* j=(j+1) & (~1) (see the cephes sources) */
  imm2 = _mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
  imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);

  y = _mm256_cvtepi32_ps(imm2);
  imm4 = imm2;

  /* get the swap sign flag for the sine */
  imm0 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_4);
  imm0 = _mm256_slli_epi32(imm0, 29);
  //v8sf swap_sign_bit_sin = _mm256_castsi256_ps(imm0);

  /* get the polynomial selection mask for the sine */
  imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
  imm2 = _mm256_cmpeq_epi32(imm2, *(v8si*)_pi32_256_0);
  //v8sf poly_mask = _mm256_castsi256_ps(imm2);

  v8sf swap_sign_bit_sin = _mm256_castsi256_ps(imm0);
  v8sf poly_mask = _mm256_castsi256_ps(imm2);

  /* The magic pass: "Extended precision modular arithmetic"
     x = ((x - y * DP1) - y * DP2) - y * DP3; */
  xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
  xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
  xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
  xmm1 = _mm256_mul_ps(y, xmm1);
  xmm2 = _mm256_mul_ps(y, xmm2);
  xmm3 = _mm256_mul_ps(y, xmm3);
  x = _mm256_add_ps(x, xmm1);
  x = _mm256_add_ps(x, xmm2);
  x = _mm256_add_ps(x, xmm3);

  imm4 = _mm256_sub_epi32(imm4, *(v8si*)_pi32_256_2);
  imm4 = _mm256_andnot_si256(imm4, *(v8si*)_pi32_256_4);
  imm4 = _mm256_slli_epi32(imm4, 29);

  v8sf sign_bit_cos = _mm256_castsi256_ps(imm4);

  sign_bit_sin = _mm256_xor_ps(sign_bit_sin, swap_sign_bit_sin);

  /* Evaluate the first polynomial (0 <= x <= Pi/4) */
  v8sf z = _mm256_mul_ps(x,x);
  y = *(v8sf*)_ps256_coscof_p0;

  y = _mm256_mul_ps(y, z);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
  y = _mm256_mul_ps(y, z);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
  y = _mm256_mul_ps(y, z);
  y = _mm256_mul_ps(y, z);
  v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
  y = _mm256_sub_ps(y, tmp);
  y = _mm256_add_ps(y, *(v8sf*)_ps256_1);

  /* Evaluate the second polynomial (Pi/4 < x <= Pi/2) */
  v8sf y2 = *(v8sf*)_ps256_sincof_p0;
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
  y2 = _mm256_mul_ps(y2, z);
  y2 = _mm256_mul_ps(y2, x);
  y2 = _mm256_add_ps(y2, x);

  /* select the correct result from the two polynomials */
  xmm3 = poly_mask;
  v8sf ysin2 = _mm256_and_ps(xmm3, y2);
  v8sf ysin1 = _mm256_andnot_ps(xmm3, y);
  y2 = _mm256_sub_ps(y2, ysin2);
  y = _mm256_sub_ps(y, ysin1);

  xmm1 = _mm256_add_ps(ysin1, ysin2);
  xmm2 = _mm256_add_ps(y, y2);

  /* update the sign */
  *s = _mm256_xor_ps(xmm1, sign_bit_sin);
  *c = _mm256_xor_ps(xmm2, sign_bit_cos);
}
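/* Usage sketch (editorial addition, not part of the original header): sincos256_ps
   produces both results in a single pass. The angle buffer below is hypothetical.

     float angles[8] = {0.0f, 0.25f, 0.5f, 0.75f, 1.0f, 1.25f, 1.5f, 1.75f};
     v8sf vs, vc;
     sincos256_ps(_mm256_loadu_ps(angles), &vs, &vc);
     // vs lane i ~= sinf(angles[i]), vc lane i ~= cosf(angles[i])
*/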
#endif // CPU_CAPABILITY_AVX2