- #pragma once
- /*
- AVX implementation of sin, cos, sincos, exp and log
- Based on "sse_mathfun.h", by Julien Pommier
- http://gruntthepeon.free.fr/ssemath/
- Copyright (C) 2012 Giovanni Garberoglio
- Interdisciplinary Laboratory for Computational Science (LISC)
- Fondazione Bruno Kessler and University of Trento
- via Sommarive, 18
- I-38123 Trento (Italy)
- This software is provided 'as-is', without any express or implied
- warranty. In no event will the authors be held liable for any damages
- arising from the use of this software.
- Permission is granted to anyone to use this software for any purpose,
- including commercial applications, and to alter it and redistribute it
- freely, subject to the following restrictions:
- 1. The origin of this software must not be misrepresented; you must not
- claim that you wrote the original software. If you use this software
- in a product, an acknowledgment in the product documentation would be
- appreciated but is not required.
- 2. Altered source versions must be plainly marked as such, and must not be
- misrepresented as being the original software.
- 3. This notice may not be removed or altered from any source distribution.
- (this is the zlib license)
- */
- #include <ATen/native/cpu/Intrinsics.h>
- /* The original source of this file has been modified. */
- #if defined(CPU_CAPABILITY_AVX2)
- #if defined(__GNUC__)
- # define ALIGN32_BEG __attribute__((aligned(32)))
- #elif defined(_WIN32)
- # define ALIGN32_BEG __declspec(align(32))
- #endif
- typedef __m256 v8sf; // vector of 8 float (avx2)
- typedef __m256i v8si; // vector of 8 int (avx2)
- /* declare some AVX constants -- why can't I figure out a better way to do that? */
- #define _PS256_CONST(Name, Val) \
- static const ALIGN32_BEG float _ps256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val }
- #define _PI32_CONST256(Name, Val) \
- static const ALIGN32_BEG int _pi32_256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val }
- #define _PS256_CONST_TYPE(Name, Type, Val) \
- static const ALIGN32_BEG Type _ps256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val }
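- /* For illustration, under GCC the line `_PS256_CONST(0p5, 0.5f);` below
-    expands to a 32-byte-aligned broadcast constant:
-      static const __attribute__((aligned(32))) float _ps256_0p5[8] =
-          { 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f };
-    which the functions below reload as a vector via `*(v8sf*)_ps256_0p5`. */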
- _PS256_CONST(1 , 1.0f);
- _PS256_CONST(0p5, 0.5f);
- /* the smallest normalized (non-denormal) positive float */
- _PS256_CONST_TYPE(min_norm_pos, int, 0x00800000);
- _PS256_CONST_TYPE(mant_mask, int, 0x7f800000);      /* note: this is the exponent-field mask */
- _PS256_CONST_TYPE(inv_mant_mask, int, ~0x7f800000); /* keeps the sign and mantissa bits */
- _PS256_CONST_TYPE(sign_mask, int, (int)0x80000000);
- _PS256_CONST_TYPE(inv_sign_mask, int, ~0x80000000);
- _PI32_CONST256(0, 0);
- _PI32_CONST256(1, 1);
- _PI32_CONST256(inv1, ~1);
- _PI32_CONST256(2, 2);
- _PI32_CONST256(4, 4);
- _PI32_CONST256(0x7f, 0x7f);
- _PS256_CONST(cephes_SQRTHF, 0.707106781186547524);
- _PS256_CONST(cephes_log_p0, 7.0376836292E-2);
- _PS256_CONST(cephes_log_p1, - 1.1514610310E-1);
- _PS256_CONST(cephes_log_p2, 1.1676998740E-1);
- _PS256_CONST(cephes_log_p3, - 1.2420140846E-1);
- _PS256_CONST(cephes_log_p4, + 1.4249322787E-1);
- _PS256_CONST(cephes_log_p5, - 1.6668057665E-1);
- _PS256_CONST(cephes_log_p6, + 2.0000714765E-1);
- _PS256_CONST(cephes_log_p7, - 2.4999993993E-1);
- _PS256_CONST(cephes_log_p8, + 3.3333331174E-1);
- _PS256_CONST(cephes_log_q1, -2.12194440e-4);
- _PS256_CONST(cephes_log_q2, 0.693359375);
- /* natural logarithm computed for 8 floats simultaneously;
-    returns NaN for x <= 0
- */
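- /* Algorithm sketch (cephes logf): decompose x = m * 2^e with m in [0.5, 1),
-    so that log(x) = e*log(2) + log(m).  m is folded into [sqrt(1/2), sqrt(2))
-    to halve the polynomial's range, and with u = m - 1,
-      log(1 + u) ~= u - u^2/2 + u^3 * P(u),
-    where P is the degree-8 polynomial evaluated in Horner form below;
-    log(2) is split into q2 + q1 so that e*log(2) is added with extra precision. */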
- inline v8sf log256_ps(v8sf x) {
- v8si imm0;
- v8sf one = *(v8sf*)_ps256_1;
- //v8sf invalid_mask = _mm256_cmple_ps(x, _mm256_setzero_ps());
- v8sf invalid_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_LE_OS);
- x = _mm256_max_ps(x, *(v8sf*)_ps256_min_norm_pos); /* cut off denormalized stuff */
- // can be done with AVX2
- imm0 = _mm256_srli_epi32(_mm256_castps_si256(x), 23);
- /* keep only the fractional part */
- x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_mant_mask);
- x = _mm256_or_ps(x, *(v8sf*)_ps256_0p5);
- // again an AVX2 instruction
- imm0 = _mm256_sub_epi32(imm0, *(v8si*)_pi32_256_0x7f);
- v8sf e = _mm256_cvtepi32_ps(imm0);
- e = _mm256_add_ps(e, one);
- /* part2:
- if( x < SQRTHF ) {
- e -= 1;
- x = x + x - 1.0;
- } else { x = x - 1.0; }
- */
- //v8sf mask = _mm256_cmplt_ps(x, *(v8sf*)_ps256_cephes_SQRTHF);
- v8sf mask = _mm256_cmp_ps(x, *(v8sf*)_ps256_cephes_SQRTHF, _CMP_LT_OS);
- v8sf tmp = _mm256_and_ps(x, mask);
- x = _mm256_sub_ps(x, one);
- e = _mm256_sub_ps(e, _mm256_and_ps(one, mask));
- x = _mm256_add_ps(x, tmp);
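- // branchless form of the if/else above: `mask` is all-ones in lanes where
- // x < SQRTHF, so only those lanes get e -= 1 and x = (x - 1) + x;
- // the remaining lanes get x = x - 1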
- v8sf z = _mm256_mul_ps(x,x);
- v8sf y = *(v8sf*)_ps256_cephes_log_p0;
- y = _mm256_mul_ps(y, x);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p1);
- y = _mm256_mul_ps(y, x);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p2);
- y = _mm256_mul_ps(y, x);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p3);
- y = _mm256_mul_ps(y, x);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p4);
- y = _mm256_mul_ps(y, x);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p5);
- y = _mm256_mul_ps(y, x);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p6);
- y = _mm256_mul_ps(y, x);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p7);
- y = _mm256_mul_ps(y, x);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p8);
- y = _mm256_mul_ps(y, x);
- y = _mm256_mul_ps(y, z);
- tmp = _mm256_mul_ps(e, *(v8sf*)_ps256_cephes_log_q1);
- y = _mm256_add_ps(y, tmp);
- tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
- y = _mm256_sub_ps(y, tmp);
- tmp = _mm256_mul_ps(e, *(v8sf*)_ps256_cephes_log_q2);
- x = _mm256_add_ps(x, y);
- x = _mm256_add_ps(x, tmp);
- x = _mm256_or_ps(x, invalid_mask); // negative arg will be NAN
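- // (the mask is all-ones in lanes where x <= 0, and a float with all bits
- //  set is a NaN, so those lanes become NaN)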
- return x;
- }
- _PS256_CONST(exp_hi, 88.3762626647949f);
- _PS256_CONST(exp_lo, -88.3762626647949f);
- _PS256_CONST(cephes_LOG2EF, 1.44269504088896341);
- _PS256_CONST(cephes_exp_C1, 0.693359375);
- _PS256_CONST(cephes_exp_C2, -2.12194440e-4);
- _PS256_CONST(cephes_exp_p0, 1.9875691500E-4);
- _PS256_CONST(cephes_exp_p1, 1.3981999507E-3);
- _PS256_CONST(cephes_exp_p2, 8.3334519073E-3);
- _PS256_CONST(cephes_exp_p3, 4.1665795894E-2);
- _PS256_CONST(cephes_exp_p4, 1.6666665459E-1);
- _PS256_CONST(cephes_exp_p5, 5.0000001201E-1);
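- /* Algorithm sketch (cephes expf): write exp(x) = 2^n * exp(g), where
-    n ~= round(x / log(2)) and g = x - n*log(2); log(2) is split into C1 + C2
-    so that g is computed with extra precision, exp(g) is approximated by
-    1 + g + g^2 * P(g) with P the degree-5 polynomial below, and 2^n is built
-    by writing n + 127 into the exponent field of an IEEE-754 float. */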
- inline v8sf exp256_ps(v8sf x) {
- v8sf tmp = _mm256_setzero_ps(), fx;
- v8si imm0;
- v8sf one = *(v8sf*)_ps256_1;
- x = _mm256_min_ps(x, *(v8sf*)_ps256_exp_hi);
- x = _mm256_max_ps(x, *(v8sf*)_ps256_exp_lo);
- /* express exp(x) as exp(g + n*log(2)) */
- fx = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_LOG2EF);
- fx = _mm256_add_ps(fx, *(v8sf*)_ps256_0p5);
- /* floor fx (the commented-out lines below show the SSE2 way of doing it) */
- //imm0 = _mm256_cvttps_epi32(fx);
- //tmp = _mm256_cvtepi32_ps(imm0);
- tmp = _mm256_floor_ps(fx);
- /* if greater, subtract 1 */
- //v8sf mask = _mm256_cmpgt_ps(tmp, fx);
- v8sf mask = _mm256_cmp_ps(tmp, fx, _CMP_GT_OS);
- mask = _mm256_and_ps(mask, one);
- fx = _mm256_sub_ps(tmp, mask);
- tmp = _mm256_mul_ps(fx, *(v8sf*)_ps256_cephes_exp_C1);
- v8sf z = _mm256_mul_ps(fx, *(v8sf*)_ps256_cephes_exp_C2);
- x = _mm256_sub_ps(x, tmp);
- x = _mm256_sub_ps(x, z);
- z = _mm256_mul_ps(x,x);
- v8sf y = *(v8sf*)_ps256_cephes_exp_p0;
- y = _mm256_mul_ps(y, x);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p1);
- y = _mm256_mul_ps(y, x);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p2);
- y = _mm256_mul_ps(y, x);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p3);
- y = _mm256_mul_ps(y, x);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p4);
- y = _mm256_mul_ps(y, x);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p5);
- y = _mm256_mul_ps(y, z);
- y = _mm256_add_ps(y, x);
- y = _mm256_add_ps(y, one);
- /* build 2^n */
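- // (n + 127) << 23 places n in the IEEE-754 exponent field, so reinterpreting
- // the integer as a float yields exactly 2^n (for n within the exponent range)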
- imm0 = _mm256_cvttps_epi32(fx);
- // another two AVX2 instructions
- imm0 = _mm256_add_epi32(imm0, *(v8si*)_pi32_256_0x7f);
- imm0 = _mm256_slli_epi32(imm0, 23);
- v8sf pow2n = _mm256_castsi256_ps(imm0);
- y = _mm256_mul_ps(y, pow2n);
- return y;
- }
- _PS256_CONST(minus_cephes_DP1, -0.78515625);
- _PS256_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
- _PS256_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
- _PS256_CONST(sincof_p0, -1.9515295891E-4);
- _PS256_CONST(sincof_p1, 8.3321608736E-3);
- _PS256_CONST(sincof_p2, -1.6666654611E-1);
- _PS256_CONST(coscof_p0, 2.443315711809948E-005);
- _PS256_CONST(coscof_p1, -1.388731625493765E-003);
- _PS256_CONST(coscof_p2, 4.166664568298827E-002);
- _PS256_CONST(cephes_FOPI, 1.27323954473516); // 4 / M_PI
- /* evaluation of 8 sines at once using AVX intrinsics
-    The code is an exact rewriting of the cephes sinf function.
-    Precision is excellent as long as x < 8192 (the special handling cephes
-    applies to larger arguments is not reproduced here; results for arguments
-    over 8192 are not garbage, they simply lack that extra precision).
-    Note that sinf((float)M_PI) is about 8.74e-8 in magnitude rather than 0,
-    which is surprising but correct: (float)M_PI differs from pi by roughly
-    that amount.
- */
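- /* Reduction sketch: j = (int)(|x| * 4/pi), rounded up to the next even
-    integer; the argument is brought into [-pi/4, pi/4] by subtracting j*pi/4
-    (the DP1/DP2/DP3 split below); bit 1 of j then selects, per lane, the sine
-    or cosine polynomial, and bit 2 of j (xor'ed with the input sign) gives
-    the sign of the result. */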
- inline v8sf sin256_ps(v8sf x) { // any x
- v8sf xmm1, xmm2 = _mm256_setzero_ps(), xmm3, sign_bit, y;
- v8si imm0, imm2;
- sign_bit = x;
- /* take the absolute value */
- x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);
- /* extract the sign bit (upper one) */
- sign_bit = _mm256_and_ps(sign_bit, *(v8sf*)_ps256_sign_mask);
- /* scale by 4/Pi */
- y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);
- /*
-    Here we start a series of integer operations, which require AVX2
-    (this file is only compiled when CPU_CAPABILITY_AVX2 is defined).
- */
- /* store the integer part of y in mm0 */
- imm2 = _mm256_cvttps_epi32(y);
- /* j=(j+1) & (~1) (see the cephes sources) */
- // another two AVX2 instructions
- imm2 = _mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
- imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);
- y = _mm256_cvtepi32_ps(imm2);
- /* get the swap sign flag */
- imm0 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_4);
- imm0 = _mm256_slli_epi32(imm0, 29);
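- // shifting (j & 4) left by 29 moves bit 2 into bit 31, the float sign-bit
- // position, giving a mask that can be xor'ed with the sign below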
- /* get the polynomial selection mask:
-    there is one polynomial for 0 <= x <= Pi/4
-    and another one for Pi/4 < x <= Pi/2.
-    Both branches are always computed; the mask selects per lane.
- */
- imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
- imm2 = _mm256_cmpeq_epi32(imm2,*(v8si*)_pi32_256_0);
- v8sf swap_sign_bit = _mm256_castsi256_ps(imm0);
- v8sf poly_mask = _mm256_castsi256_ps(imm2);
- sign_bit = _mm256_xor_ps(sign_bit, swap_sign_bit);
- /* The magic pass: "Extended precision modular arithmetic"
- x = ((x - y * DP1) - y * DP2) - y * DP3; */
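- // minus_cephes_DP1 + DP2 + DP3 sums to -pi/4 split across three floats of
- // decreasing magnitude, so y*pi/4 is subtracted almost exactly even for
- // fairly large y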
- xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
- xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
- xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
- xmm1 = _mm256_mul_ps(y, xmm1);
- xmm2 = _mm256_mul_ps(y, xmm2);
- xmm3 = _mm256_mul_ps(y, xmm3);
- x = _mm256_add_ps(x, xmm1);
- x = _mm256_add_ps(x, xmm2);
- x = _mm256_add_ps(x, xmm3);
- /* Evaluate the first polynomial (0 <= x <= Pi/4) */
- y = *(v8sf*)_ps256_coscof_p0;
- v8sf z = _mm256_mul_ps(x,x);
- y = _mm256_mul_ps(y, z);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
- y = _mm256_mul_ps(y, z);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
- y = _mm256_mul_ps(y, z);
- y = _mm256_mul_ps(y, z);
- v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
- y = _mm256_sub_ps(y, tmp);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_1);
- /* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */
- v8sf y2 = *(v8sf*)_ps256_sincof_p0;
- y2 = _mm256_mul_ps(y2, z);
- y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
- y2 = _mm256_mul_ps(y2, z);
- y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
- y2 = _mm256_mul_ps(y2, z);
- y2 = _mm256_mul_ps(y2, x);
- y2 = _mm256_add_ps(y2, x);
- /* select the correct result from the two polynomials */
- xmm3 = poly_mask;
- y2 = _mm256_and_ps(xmm3, y2);
- y = _mm256_andnot_ps(xmm3, y);
- y = _mm256_add_ps(y,y2);
- /* update the sign */
- y = _mm256_xor_ps(y, sign_bit);
- return y;
- }
- /* almost the same as sin_ps */
- inline v8sf cos256_ps(v8sf x) { // any x
- v8sf xmm1, xmm2 = _mm256_setzero_ps(), xmm3, y;
- v8si imm0, imm2;
- /* take the absolute value */
- x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);
- /* scale by 4/Pi */
- y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);
- /* store the integer part of y in mm0 */
- imm2 = _mm256_cvttps_epi32(y);
- /* j=(j+1) & (~1) (see the cephes sources) */
- imm2 = _mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
- imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);
- y = _mm256_cvtepi32_ps(imm2);
- imm2 = _mm256_sub_epi32(imm2, *(v8si*)_pi32_256_2);
- /* get the swap sign flag */
- imm0 = _mm256_andnot_si256(imm2, *(v8si*)_pi32_256_4);
- imm0 = _mm256_slli_epi32(imm0, 29);
- /* get the polynomial selection mask */
- imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
- imm2 = _mm256_cmpeq_epi32(imm2, *(v8si*)_pi32_256_0);
- v8sf sign_bit = _mm256_castsi256_ps(imm0);
- v8sf poly_mask = _mm256_castsi256_ps(imm2);
- /* The magic pass: "Extended precision modular arithmetic"
- x = ((x - y * DP1) - y * DP2) - y * DP3; */
- xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
- xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
- xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
- xmm1 = _mm256_mul_ps(y, xmm1);
- xmm2 = _mm256_mul_ps(y, xmm2);
- xmm3 = _mm256_mul_ps(y, xmm3);
- x = _mm256_add_ps(x, xmm1);
- x = _mm256_add_ps(x, xmm2);
- x = _mm256_add_ps(x, xmm3);
- /* Evaluate the first polynomial (0 <= x <= Pi/4) */
- y = *(v8sf*)_ps256_coscof_p0;
- v8sf z = _mm256_mul_ps(x,x);
- y = _mm256_mul_ps(y, z);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
- y = _mm256_mul_ps(y, z);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
- y = _mm256_mul_ps(y, z);
- y = _mm256_mul_ps(y, z);
- v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
- y = _mm256_sub_ps(y, tmp);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_1);
- /* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */
- v8sf y2 = *(v8sf*)_ps256_sincof_p0;
- y2 = _mm256_mul_ps(y2, z);
- y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
- y2 = _mm256_mul_ps(y2, z);
- y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
- y2 = _mm256_mul_ps(y2, z);
- y2 = _mm256_mul_ps(y2, x);
- y2 = _mm256_add_ps(y2, x);
- /* select the correct result from the two polynomials */
- xmm3 = poly_mask;
- y2 = _mm256_and_ps(xmm3, y2);
- y = _mm256_andnot_ps(xmm3, y);
- y = _mm256_add_ps(y,y2);
- /* update the sign */
- y = _mm256_xor_ps(y, sign_bit);
- return y;
- }
- /* since sin256_ps and cos256_ps are almost identical, sincos256_ps could replace both of them...
-    it is almost as fast, and gives you a free cosine with your sine */
- inline void sincos256_ps(v8sf x, v8sf *s, v8sf *c) {
- v8sf xmm1, xmm2, xmm3 = _mm256_setzero_ps(), sign_bit_sin, y;
- v8si imm0, imm2, imm4;
- sign_bit_sin = x;
- /* take the absolute value */
- x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask);
- /* extract the sign bit (upper one) */
- sign_bit_sin = _mm256_and_ps(sign_bit_sin, *(v8sf*)_ps256_sign_mask);
- /* scale by 4/Pi */
- y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI);
- /* store the integer part of y in imm2 */
- imm2 = _mm256_cvttps_epi32(y);
- /* j=(j+1) & (~1) (see the cephes sources) */
- imm2 = _mm256_add_epi32(imm2, *(v8si*)_pi32_256_1);
- imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1);
- y = _mm256_cvtepi32_ps(imm2);
- imm4 = imm2;
- /* get the swap sign flag for the sine */
- imm0 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_4);
- imm0 = _mm256_slli_epi32(imm0, 29);
- //v8sf swap_sign_bit_sin = _mm256_castsi256_ps(imm0);
- /* get the polynomial selection mask for the sine */
- imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_2);
- imm2 = _mm256_cmpeq_epi32(imm2, *(v8si*)_pi32_256_0);
- //v8sf poly_mask = _mm256_castsi256_ps(imm2);
- v8sf swap_sign_bit_sin = _mm256_castsi256_ps(imm0);
- v8sf poly_mask = _mm256_castsi256_ps(imm2);
- /* The magic pass: "Extended precision modular arithmetic"
- x = ((x - y * DP1) - y * DP2) - y * DP3; */
- xmm1 = *(v8sf*)_ps256_minus_cephes_DP1;
- xmm2 = *(v8sf*)_ps256_minus_cephes_DP2;
- xmm3 = *(v8sf*)_ps256_minus_cephes_DP3;
- xmm1 = _mm256_mul_ps(y, xmm1);
- xmm2 = _mm256_mul_ps(y, xmm2);
- xmm3 = _mm256_mul_ps(y, xmm3);
- x = _mm256_add_ps(x, xmm1);
- x = _mm256_add_ps(x, xmm2);
- x = _mm256_add_ps(x, xmm3);
- imm4 = _mm256_sub_epi32(imm4, *(v8si*)_pi32_256_2);
- imm4 = _mm256_andnot_si256(imm4, *(v8si*)_pi32_256_4);
- imm4 = _mm256_slli_epi32(imm4, 29);
- v8sf sign_bit_cos = _mm256_castsi256_ps(imm4);
- sign_bit_sin = _mm256_xor_ps(sign_bit_sin, swap_sign_bit_sin);
- /* Evaluate the first polynomial (0 <= x <= Pi/4) */
- v8sf z = _mm256_mul_ps(x,x);
- y = *(v8sf*)_ps256_coscof_p0;
- y = _mm256_mul_ps(y, z);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p1);
- y = _mm256_mul_ps(y, z);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_coscof_p2);
- y = _mm256_mul_ps(y, z);
- y = _mm256_mul_ps(y, z);
- v8sf tmp = _mm256_mul_ps(z, *(v8sf*)_ps256_0p5);
- y = _mm256_sub_ps(y, tmp);
- y = _mm256_add_ps(y, *(v8sf*)_ps256_1);
- /* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */
- v8sf y2 = *(v8sf*)_ps256_sincof_p0;
- y2 = _mm256_mul_ps(y2, z);
- y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p1);
- y2 = _mm256_mul_ps(y2, z);
- y2 = _mm256_add_ps(y2, *(v8sf*)_ps256_sincof_p2);
- y2 = _mm256_mul_ps(y2, z);
- y2 = _mm256_mul_ps(y2, x);
- y2 = _mm256_add_ps(y2, x);
- /* select the correct result from the two polynomials */
- xmm3 = poly_mask;
- v8sf ysin2 = _mm256_and_ps(xmm3, y2);
- v8sf ysin1 = _mm256_andnot_ps(xmm3, y);
- y2 = _mm256_sub_ps(y2,ysin2);
- y = _mm256_sub_ps(y, ysin1);
- xmm1 = _mm256_add_ps(ysin1,ysin2);
- xmm2 = _mm256_add_ps(y,y2);
- /* update the sign */
- *s = _mm256_xor_ps(xmm1, sign_bit_sin);
- *c = _mm256_xor_ps(xmm2, sign_bit_cos);
- }
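- /* Illustrative usage sketch (not part of the original mathfun code; the
-    helper name and the assumption that n is a multiple of 8 are made up for
-    this example): computes sine and cosine of a buffer 8 floats at a time. */
- inline void sincos256_buffer_example(const float* in, float* out_sin, float* out_cos, int n) {
-   for (int i = 0; i < n; i += 8) {
-     v8sf x = _mm256_loadu_ps(in + i);   // unaligned load of 8 floats
-     v8sf s, c;
-     sincos256_ps(x, &s, &c);            // one reduction, two results
-     _mm256_storeu_ps(out_sin + i, s);
-     _mm256_storeu_ps(out_cos + i, c);
-   }
- }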
- #endif // CPU_CAPABILITY_AVX2
|