mirror of https://github.com/cookiengineer/audacity
synced 2025-09-18 17:10:55 +02:00

Andrew Hallendorff's multithreaded Equalization effect.

This commit is contained in:
parent 22019c47dc
commit 3da033cdb7
@ -105,7 +105,6 @@ HFFT InitializeFFT(int fftlen)
   for(i=0;i<32;i++)
      if((1<<i)&fftlen)
         h->pow2Bits=i;
   InitializeFFT1x(fftlen);
#endif

   return h;
2690 src/RealFFTf48x.cpp
File diff suppressed because it is too large.
@ -3,21 +3,98 @@
#define fft_type float

HFFT InitializeFFT1x(int);
void EndFFT1x(HFFT);
HFFT GetFFT1x(int);
void ReleaseFFT1x(HFFT);
void CleanupFFT1x();
void RealFFTf1x(fft_type *,HFFT);
void InverseRealFFTf1x(fft_type *,HFFT);
void ReorderToTime1x(HFFT hFFT, fft_type *buffer, fft_type *TimeOut);
void ReorderToFreq1x(HFFT hFFT, fft_type *buffer, fft_type *RealOut, fft_type *ImagOut);
int SmallReverseBits(int bits, int numberBits);
void RealFFTf4x(fft_type *,HFFT);
void InverseRealFFTf4x(fft_type *,HFFT);
void ReorderToTime4x(HFFT hFFT, fft_type *buffer, fft_type *TimeOut);
void ReorderToFreq4x(HFFT hFFT, fft_type *buffer, fft_type *RealOut, fft_type *ImagOut);
int SmallRB(int bits, int numberBits);

enum {
   FFT_SinCosBRTable,
   FFT_SinCosTableVBR16,
   FFT_SinCosTableBR16,
   FFT_FastMathBR16,
   FFT_FastMathBR24
};

/* wrapper functions. If passed -1, the function choice will be made locally */
void RealFFTf1x(fft_type *,HFFT, int functionType=-1);
void InverseRealFFTf1x(fft_type *,HFFT, int functionType=-1);
void ReorderToTime1x(HFFT hFFT, fft_type *buffer, fft_type *TimeOut, int functionType=-1);
void ReorderToFreq1x(HFFT hFFT, fft_type *buffer, fft_type *RealOut, fft_type *ImagOut, int functionType=-1);
void RealFFTf4x(fft_type *,HFFT, int functionType=-1);
void InverseRealFFTf4x(fft_type *,HFFT, int functionType=-1);
void ReorderToTime4x(HFFT hFFT, fft_type *buffer, fft_type *TimeOut, int functionType=-1);
void ReorderToFreq4x(HFFT hFFT, fft_type *buffer, fft_type *RealOut, fft_type *ImagOut, int functionType=-1);
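As a reading aid, a hedged sketch of how these wrappers might be called; the buffer setup, the 2048-point size, and the variable names are illustrative, not from this commit:

// Hypothetical illustration only.
HFFT hFFT = GetFFT1x(2048);                  // look up / build tables for a 2048-point FFT
float buffer[2048];                          // time-domain samples, filled elsewhere
RealFFTf1x(buffer, hFFT, FFT_FastMathBR16);  // pin one implementation explicitly
RealFFTf1x(buffer, hFFT);                    // functionType defaults to -1: chosen locally
ReleaseFFT1x(hFFT);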
/* SinCosBRTable versions */
void RealFFTf1xSinCosBRTable(fft_type *,HFFT);
void InverseRealFFTf1xSinCosBRTable(fft_type *,HFFT);
void ReorderToTime1xSinCosBRTable(HFFT hFFT, fft_type *buffer, fft_type *TimeOut);
void ReorderToFreq1xSinCosBRTable(HFFT hFFT, fft_type *buffer, fft_type *RealOut, fft_type *ImagOut);
void RealFFTf4xSinCosBRTable(fft_type *,HFFT);
void InverseRealFFTf4xSinCosBRTable(fft_type *,HFFT);
void ReorderToTime4xSinCosBRTable(HFFT hFFT, fft_type *buffer, fft_type *TimeOut);
void ReorderToFreq4xSinCosBRTable(HFFT hFFT, fft_type *buffer, fft_type *RealOut, fft_type *ImagOut);

/* Fast Math BR16 versions */
void RealFFTf1xFastMathBR16(fft_type *,HFFT);
void InverseRealFFTf1xFastMathBR16(fft_type *,HFFT);
void ReorderToTime1xFastMathBR16(HFFT hFFT, fft_type *buffer, fft_type *TimeOut);
void ReorderToFreq1xFastMathBR16(HFFT hFFT, fft_type *buffer, fft_type *RealOut, fft_type *ImagOut);
void RealFFTf4xFastMathBR16(fft_type *,HFFT);
void InverseRealFFTf4xFastMathBR16(fft_type *,HFFT);
void ReorderToTime4xFastMathBR16(HFFT hFFT, fft_type *buffer, fft_type *TimeOut);
void ReorderToFreq4xFastMathBR16(HFFT hFFT, fft_type *buffer, fft_type *RealOut, fft_type *ImagOut);

/* Fast Math BR24 versions */
void RealFFTf1xFastMathBR24(fft_type *,HFFT);
void InverseRealFFTf1xFastMathBR24(fft_type *,HFFT);
void ReorderToTime1xFastMathBR24(HFFT hFFT, fft_type *buffer, fft_type *TimeOut);
void ReorderToFreq1xFastMathBR24(HFFT hFFT, fft_type *buffer, fft_type *RealOut, fft_type *ImagOut);
void RealFFTf4xFastMathBR24(fft_type *,HFFT);
void InverseRealFFTf4xFastMathBR24(fft_type *,HFFT);
void ReorderToTime4xFastMathBR24(HFFT hFFT, fft_type *buffer, fft_type *TimeOut);
void ReorderToFreq4xFastMathBR24(HFFT hFFT, fft_type *buffer, fft_type *RealOut, fft_type *ImagOut);

/* SinCosTable virtual BR versions */
void RealFFTf1xSinCosTableVBR16(fft_type *,HFFT);
void InverseRealFFTf1xSinCosTableVBR16(fft_type *,HFFT);
void ReorderToTime1xSinCosTableVBR16(HFFT hFFT, fft_type *buffer, fft_type *TimeOut);
void ReorderToFreq1xSinCosTableVBR16(HFFT hFFT, fft_type *buffer, fft_type *RealOut, fft_type *ImagOut);
void RealFFTf4xSinCosTableVBR16(fft_type *,HFFT);
void InverseRealFFTf4xSinCosTableVBR16(fft_type *,HFFT);
void ReorderToTime4xSinCosTableVBR16(HFFT hFFT, fft_type *buffer, fft_type *TimeOut);
void ReorderToFreq4xSinCosTableVBR16(HFFT hFFT, fft_type *buffer, fft_type *RealOut, fft_type *ImagOut);

/* SinCosTable BR16 versions */
void RealFFTf1xSinCosTableBR16(fft_type *,HFFT);
void InverseRealFFTf1xSinCosTableBR16(fft_type *,HFFT);
void ReorderToTime1xSinCosTableBR16(HFFT hFFT, fft_type *buffer, fft_type *TimeOut);
void ReorderToFreq1xSinCosTableBR16(HFFT hFFT, fft_type *buffer, fft_type *RealOut, fft_type *ImagOut);
void RealFFTf4xSinCosTableBR16(fft_type *,HFFT);
void InverseRealFFTf4xSinCosTableBR16(fft_type *,HFFT);
void ReorderToTime4xSinCosTableBR16(HFFT hFFT, fft_type *buffer, fft_type *TimeOut);
void ReorderToFreq4xSinCosTableBR16(HFFT hFFT, fft_type *buffer, fft_type *RealOut, fft_type *ImagOut);

void TableUsage(int iMask);

#ifndef M_PI
#define M_PI 3.14159265358979323846 /* pi */
#endif

typedef struct {
   float mSin;
   float mCos;
} SinCosStruct;

class SinCosTable {
public:
   int mSinCosTablePow;
   SinCosStruct *mSinCosTable;
   SinCosTable();
   ~SinCosTable(){ delete [] mSinCosTable; };
};

int SmallRB(int bits, int numberBits);
int (*SmallVRB[])(int bits);

#endif
@ -1,704 +0,0 @@
/**********************************************************************

   Audacity: A Digital Audio Editor

   SseMathFuncs.cpp

   Stephen Moshier (wrote original C version, The Cephes Library)
   Julien Pommier (converted to use SSE)
   Andrew Hallendorff (adapted for Audacity)

*******************************************************************//**

\file SseMathFuncs.cpp
\brief SSE maths functions (for FFTs)

*//****************************************************************/

#include "Experimental.h"
#ifdef EXPERIMENTAL_EQ_SSE_THREADED

#include "SseMathFuncs.h"

/* JKC: The trig functions use Taylor's series on the range 0 to Pi/4,
 * computing both Sin and Cos and using one or the other (in the
 * solo functions), or both in the sincos function, which is the form
 * most useful for FFTs.
 * The constants minus_cephes_DP1 to minus_cephes_DP3 are used in the
 * angle-reduction modulo function.
 * 4 sincos are done at a time.
 * If we wanted to do just sin or just cos, we could speed things up
 * by queuing up the Sines and Cosines into batches of 4 separately. */
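Restated as formulas (a sketch inferred from the code below, not part of the source): with z = x'^2, s_i = sincof_p_i, and c_i = coscof_p_i,

j = (\lfloor 4x/\pi \rfloor + 1) \wedge \lnot 1, \qquad
x' = \big((x - j\,\mathrm{DP}_1) - j\,\mathrm{DP}_2\big) - j\,\mathrm{DP}_3, \qquad
\mathrm{DP}_1 + \mathrm{DP}_2 + \mathrm{DP}_3 \approx \tfrac{\pi}{4}

\sin x' \approx x' + x'^3\,(s_2 + s_1 z + s_0 z^2), \qquad
\cos x' \approx 1 - \tfrac{z}{2} + z^2\,(c_2 + c_1 z + c_0 z^2)

The low bits of j then pick which polynomial and which sign each lane uses.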
#ifndef USE_SSE2 // sorry, this is all SSE2 now
#define USE_SSE2
#endif

/* declare some SSE constants -- why can't I figure a better way to do that? */
#define _PS_CONST(Name, Val) \
   static const ALIGN16_BEG float _ps_##Name[4] ALIGN16_END = { (float)Val, (float)Val, (float)Val, (float)Val }
#define _PI32_CONST(Name, Val) \
   static const ALIGN16_BEG int _pi32_##Name[4] ALIGN16_END = { Val, Val, Val, Val }
#define _PS_CONST_TYPE(Name, Type, Val) \
   static const ALIGN16_BEG Type _ps_##Name[4] ALIGN16_END = { Val, Val, Val, Val }
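For orientation, a sketch of what one invocation expands to (ALIGN16_BEG/ALIGN16_END are the compiler-specific alignment markers defined in SseMathFuncs.h):

// _PS_CONST(0p5, 0.5f); expands to roughly:
static const ALIGN16_BEG float _ps_0p5[4] ALIGN16_END = { 0.5f, 0.5f, 0.5f, 0.5f };
// so *(__m128*)_ps_0p5 loads a 16-byte-aligned vector of four 0.5f lanes.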
_PS_CONST(1 , 1.0f);
_PS_CONST(0p5, 0.5f);
/* the smallest non denormalized float number */
_PS_CONST_TYPE(min_norm_pos, int, 0x00800000);
_PS_CONST_TYPE(mant_mask, int, 0x7f800000);
_PS_CONST_TYPE(inv_mant_mask, int, ~0x7f800000);

_PS_CONST_TYPE(sign_mask, int, (int)0x80000000);
_PS_CONST_TYPE(inv_sign_mask, int, ~0x80000000);

_PI32_CONST(1, 1);
_PI32_CONST(inv1, ~1);
_PI32_CONST(2, 2);
_PI32_CONST(4, 4);
_PI32_CONST(0x7f, 0x7f);

_PS_CONST(cephes_SQRTHF, 0.707106781186547524);
_PS_CONST(cephes_log_p0, 7.0376836292E-2);
_PS_CONST(cephes_log_p1, - 1.1514610310E-1);
_PS_CONST(cephes_log_p2, 1.1676998740E-1);
_PS_CONST(cephes_log_p3, - 1.2420140846E-1);
_PS_CONST(cephes_log_p4, + 1.4249322787E-1);
_PS_CONST(cephes_log_p5, - 1.6668057665E-1);
_PS_CONST(cephes_log_p6, + 2.0000714765E-1);
_PS_CONST(cephes_log_p7, - 2.4999993993E-1);
_PS_CONST(cephes_log_p8, + 3.3333331174E-1);
_PS_CONST(cephes_log_q1, -2.12194440e-4);
_PS_CONST(cephes_log_q2, 0.693359375);

#ifndef USE_SSE2
typedef union xmm_mm_union {
   __m128 xmm;
   __m64 mm[2];
} xmm_mm_union;

#define COPY_XMM_TO_MM(xmm_, mm0_, mm1_) { \
   xmm_mm_union u; u.xmm = xmm_; \
   mm0_ = u.mm[0]; \
   mm1_ = u.mm[1]; \
}

#define COPY_MM_TO_XMM(mm0_, mm1_, xmm_) { \
   xmm_mm_union u; u.mm[0]=mm0_; u.mm[1]=mm1_; xmm_ = u.xmm; \
}

#endif // USE_SSE2

/* natural logarithm computed for 4 simultaneous float
   return NaN for x <= 0
*/
__m128 log_ps(v4sfu *xPtr) {
   __m128 x=*((__m128 *)xPtr);
#ifdef USE_SSE2
   __m128i emm0;
#else
   __m64 mm0, mm1;
#endif
   __m128 one = *(__m128*)_ps_1;

   __m128 invalid_mask = _mm_cmple_ps(x, _mm_setzero_ps());

   x = _mm_max_ps(x, *(__m128*)_ps_min_norm_pos); /* cut off denormalized stuff */

#ifndef USE_SSE2
   /* part 1: x = frexpf(x, &e); */
   COPY_XMM_TO_MM(x, mm0, mm1);
   mm0 = _mm_srli_pi32(mm0, 23);
   mm1 = _mm_srli_pi32(mm1, 23);
#else
   emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23);
#endif
   /* keep only the fractional part */
   x = _mm_and_ps(x, *(__m128*)_ps_inv_mant_mask);
   x = _mm_or_ps(x, *(__m128*)_ps_0p5);

#ifndef USE_SSE2
   /* now e=mm0:mm1 contain the real base-2 exponent */
   mm0 = _mm_sub_pi32(mm0, *(__m64*)_pi32_0x7f);
   mm1 = _mm_sub_pi32(mm1, *(__m64*)_pi32_0x7f);
   __m128 e = _mm_cvtpi32x2_ps(mm0, mm1);
   _mm_empty(); /* bye bye mmx */
#else
   emm0 = _mm_sub_epi32(emm0, *(__m128i*)_pi32_0x7f);
   __m128 e = _mm_cvtepi32_ps(emm0);
#endif

   e = _mm_add_ps(e, one);

   /* part2:
      if( x < SQRTHF ) {
        e -= 1;
        x = x + x - 1.0;
      } else { x = x - 1.0; }
   */
   __m128 mask = _mm_cmplt_ps(x, *(__m128*)_ps_cephes_SQRTHF);
   __m128 tmp = _mm_and_ps(x, mask);
   x = _mm_sub_ps(x, one);
   e = _mm_sub_ps(e, _mm_and_ps(one, mask));
   x = _mm_add_ps(x, tmp);

   __m128 z = _mm_mul_ps(x,x);

   __m128 y = *(__m128*)_ps_cephes_log_p0;
   y = _mm_mul_ps(y, x);
   y = _mm_add_ps(y, *(__m128*)_ps_cephes_log_p1);
   y = _mm_mul_ps(y, x);
   y = _mm_add_ps(y, *(__m128*)_ps_cephes_log_p2);
   y = _mm_mul_ps(y, x);
   y = _mm_add_ps(y, *(__m128*)_ps_cephes_log_p3);
   y = _mm_mul_ps(y, x);
   y = _mm_add_ps(y, *(__m128*)_ps_cephes_log_p4);
   y = _mm_mul_ps(y, x);
   y = _mm_add_ps(y, *(__m128*)_ps_cephes_log_p5);
   y = _mm_mul_ps(y, x);
   y = _mm_add_ps(y, *(__m128*)_ps_cephes_log_p6);
   y = _mm_mul_ps(y, x);
   y = _mm_add_ps(y, *(__m128*)_ps_cephes_log_p7);
   y = _mm_mul_ps(y, x);
   y = _mm_add_ps(y, *(__m128*)_ps_cephes_log_p8);
   y = _mm_mul_ps(y, x);

   y = _mm_mul_ps(y, z);

   tmp = _mm_mul_ps(e, *(__m128*)_ps_cephes_log_q1);
   y = _mm_add_ps(y, tmp);

   tmp = _mm_mul_ps(z, *(__m128*)_ps_0p5);
   y = _mm_sub_ps(y, tmp);

   tmp = _mm_mul_ps(e, *(__m128*)_ps_cephes_log_q2);
   x = _mm_add_ps(x, y);
   x = _mm_add_ps(x, tmp);
   x = _mm_or_ps(x, invalid_mask); // negative arg will be NAN
   return x;
}

_PS_CONST(exp_hi, 88.3762626647949f);
_PS_CONST(exp_lo, -88.3762626647949f);

_PS_CONST(cephes_LOG2EF, 1.44269504088896341);
_PS_CONST(cephes_exp_C1, 0.693359375);
_PS_CONST(cephes_exp_C2, -2.12194440e-4);

_PS_CONST(cephes_exp_p0, 1.9875691500E-4);
_PS_CONST(cephes_exp_p1, 1.3981999507E-3);
_PS_CONST(cephes_exp_p2, 8.3334519073E-3);
_PS_CONST(cephes_exp_p3, 4.1665795894E-2);
_PS_CONST(cephes_exp_p4, 1.6666665459E-1);
_PS_CONST(cephes_exp_p5, 5.0000001201E-1);

__m128 exp_ps(v4sfu *xPtr) {
   __m128 x=*((__m128 *)xPtr);
   __m128 tmp = _mm_setzero_ps(), fx;
#ifdef USE_SSE2
   __m128i emm0;
#else
   __m64 mm0, mm1;
#endif
   __m128 one = *(__m128*)_ps_1;

   x = _mm_min_ps(x, *(__m128*)_ps_exp_hi);
   x = _mm_max_ps(x, *(__m128*)_ps_exp_lo);

   /* express exp(x) as exp(g + n*log(2)) */
   fx = _mm_mul_ps(x, *(__m128*)_ps_cephes_LOG2EF);
   fx = _mm_add_ps(fx, *(__m128*)_ps_0p5);

   /* how to perform a floorf with SSE: just below */
#ifndef USE_SSE2
   /* step 1 : cast to int */
   tmp = _mm_movehl_ps(tmp, fx);
   mm0 = _mm_cvttps_pi32(fx);
   mm1 = _mm_cvttps_pi32(tmp);
   /* step 2 : cast back to float */
   tmp = _mm_cvtpi32x2_ps(mm0, mm1);
#else
   emm0 = _mm_cvttps_epi32(fx);
   tmp = _mm_cvtepi32_ps(emm0);
#endif
   /* if greater, subtract 1 */
   __m128 mask = _mm_cmpgt_ps(tmp, fx);
   mask = _mm_and_ps(mask, one);
   fx = _mm_sub_ps(tmp, mask);

   tmp = _mm_mul_ps(fx, *(__m128*)_ps_cephes_exp_C1);
   __m128 z = _mm_mul_ps(fx, *(__m128*)_ps_cephes_exp_C2);
   x = _mm_sub_ps(x, tmp);
   x = _mm_sub_ps(x, z);

   z = _mm_mul_ps(x,x);

   __m128 y = *(__m128*)_ps_cephes_exp_p0;
   y = _mm_mul_ps(y, x);
   y = _mm_add_ps(y, *(__m128*)_ps_cephes_exp_p1);
   y = _mm_mul_ps(y, x);
   y = _mm_add_ps(y, *(__m128*)_ps_cephes_exp_p2);
   y = _mm_mul_ps(y, x);
   y = _mm_add_ps(y, *(__m128*)_ps_cephes_exp_p3);
   y = _mm_mul_ps(y, x);
   y = _mm_add_ps(y, *(__m128*)_ps_cephes_exp_p4);
   y = _mm_mul_ps(y, x);
   y = _mm_add_ps(y, *(__m128*)_ps_cephes_exp_p5);
   y = _mm_mul_ps(y, z);
   y = _mm_add_ps(y, x);
   y = _mm_add_ps(y, one);

   /* build 2^n */
#ifndef USE_SSE2
   z = _mm_movehl_ps(z, fx);
   mm0 = _mm_cvttps_pi32(fx);
   mm1 = _mm_cvttps_pi32(z);
   mm0 = _mm_add_pi32(mm0, *(__m64*)_pi32_0x7f);
   mm1 = _mm_add_pi32(mm1, *(__m64*)_pi32_0x7f);
   mm0 = _mm_slli_pi32(mm0, 23);
   mm1 = _mm_slli_pi32(mm1, 23);

   __m128 pow2n;
   COPY_MM_TO_XMM(mm0, mm1, pow2n);
   _mm_empty();
#else
   emm0 = _mm_cvttps_epi32(fx);
   emm0 = _mm_add_epi32(emm0, *(__m128i*)_pi32_0x7f);
   emm0 = _mm_slli_epi32(emm0, 23);
   __m128 pow2n = _mm_castsi128_ps(emm0);
#endif
   y = _mm_mul_ps(y, pow2n);
   return y;
}
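A restatement of the decomposition exp_ps implements (inferred from the code above; the last step is the standard IEEE-754 exponent construction):

e^{x} = 2^{\,n}\, e^{g}, \qquad
n = \operatorname{round}(x \log_2 e), \qquad
g = x - n\,C_1 - n\,C_2, \qquad C_1 + C_2 \approx \ln 2

with e^g evaluated by the degree-5 polynomial in cephes_exp_p0..p5, and 2^n built directly in the float exponent field as (n + 0x7f) << 23.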
_PS_CONST(minus_cephes_DP1, -0.78515625);
_PS_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
_PS_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
_PS_CONST(sincof_p0, -1.9515295891E-4);
_PS_CONST(sincof_p1, 8.3321608736E-3);
_PS_CONST(sincof_p2, -1.6666654611E-1);
_PS_CONST(coscof_p0, 2.443315711809948E-005);
_PS_CONST(coscof_p1, -1.388731625493765E-003);
_PS_CONST(coscof_p2, 4.166664568298827E-002);
_PS_CONST(cephes_FOPI, 1.27323954473516); // 4 / M_PI

/* evaluation of 4 sines at once, using only SSE1+MMX intrinsics so
   it also runs on old Athlon XPs and your grandmother's Pentium III.

   The code is an exact rewriting of the cephes sinf function.
   Precision is excellent as long as x < 8192 (I did not bother to
   take into account the special handling they have for greater values
   -- it does not return garbage for arguments over 8192, though, but
   the extra precision is missing).

   Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
   surprising but correct result.

   Performance is also surprisingly good, 1.33 times faster than the
   macos vsinf SSE2 function, and 1.5 times faster than the
   __vrs4_sinf of AMD's ACML (which is only available in 64-bit). Not
   too bad for an SSE1 function (with no special tuning)!
   However the latter libraries probably have a much better handling of NaN,
   Inf, denormalized and other special arguments.

   On my core 1 duo, the execution of this function takes approximately 95 cycles.

   From what I have observed on the experiments with the Intel AMath lib, switching to an
   SSE2 version would improve the perf by only 10%.

   Since it is based on SSE intrinsics, it has to be compiled at -O2 to
   deliver full speed.
*/
__m128 sin_ps(v4sfu *xPtr) { // any x
   __m128 x=*((__m128 *)xPtr);
   __m128 xmm1, xmm2 = _mm_setzero_ps(), xmm3, sign_bit, y;

#ifdef USE_SSE2
   __m128i emm0, emm2;
#else
   __m64 mm0, mm1, mm2, mm3;
#endif
   sign_bit = x;
   /* take the absolute value */
   x = _mm_and_ps(x, *(__m128*)_ps_inv_sign_mask);
   /* extract the sign bit (upper one) */
   sign_bit = _mm_and_ps(sign_bit, *(__m128*)_ps_sign_mask);

   /* scale by 4/Pi */
   y = _mm_mul_ps(x, *(__m128*)_ps_cephes_FOPI);

#ifdef USE_SSE2
   /* store the integer part of y in mm0 */
   emm2 = _mm_cvttps_epi32(y);
   /* j=(j+1) & (~1) (see the cephes sources) */
   emm2 = _mm_add_epi32(emm2, *(__m128i*)_pi32_1);
   emm2 = _mm_and_si128(emm2, *(__m128i*)_pi32_inv1);
   y = _mm_cvtepi32_ps(emm2);

   /* get the swap sign flag */
   emm0 = _mm_and_si128(emm2, *(__m128i*)_pi32_4);
   emm0 = _mm_slli_epi32(emm0, 29);
   /* get the polynom selection mask
      there is one polynom for 0 <= x <= Pi/4
      and another one for Pi/4<x<=Pi/2

      Both branches will be computed.
   */
   emm2 = _mm_and_si128(emm2, *(__m128i*)_pi32_2);
   emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());

   __m128 swap_sign_bit = _mm_castsi128_ps(emm0);
   __m128 poly_mask = _mm_castsi128_ps(emm2);
   sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);

#else
   /* store the integer part of y in mm0:mm1 */
   xmm2 = _mm_movehl_ps(xmm2, y);
   mm2 = _mm_cvttps_pi32(y);
   mm3 = _mm_cvttps_pi32(xmm2);
   /* j=(j+1) & (~1) (see the cephes sources) */
   mm2 = _mm_add_pi32(mm2, *(__m64*)_pi32_1);
   mm3 = _mm_add_pi32(mm3, *(__m64*)_pi32_1);
   mm2 = _mm_and_si64(mm2, *(__m64*)_pi32_inv1);
   mm3 = _mm_and_si64(mm3, *(__m64*)_pi32_inv1);
   y = _mm_cvtpi32x2_ps(mm2, mm3);
   /* get the swap sign flag */
   mm0 = _mm_and_si64(mm2, *(__m64*)_pi32_4);
   mm1 = _mm_and_si64(mm3, *(__m64*)_pi32_4);
   mm0 = _mm_slli_pi32(mm0, 29);
   mm1 = _mm_slli_pi32(mm1, 29);
   /* get the polynom selection mask */
   mm2 = _mm_and_si64(mm2, *(__m64*)_pi32_2);
   mm3 = _mm_and_si64(mm3, *(__m64*)_pi32_2);
   mm2 = _mm_cmpeq_pi32(mm2, _mm_setzero_si64());
   mm3 = _mm_cmpeq_pi32(mm3, _mm_setzero_si64());
   __m128 swap_sign_bit, poly_mask;
   COPY_MM_TO_XMM(mm0, mm1, swap_sign_bit);
   COPY_MM_TO_XMM(mm2, mm3, poly_mask);
   sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
   _mm_empty(); /* good-bye mmx */
#endif

   /* The magic pass: "Extended precision modular arithmetic"
      x = ((x - y * DP1) - y * DP2) - y * DP3; */
   xmm1 = *(__m128*)_ps_minus_cephes_DP1;
   xmm2 = *(__m128*)_ps_minus_cephes_DP2;
   xmm3 = *(__m128*)_ps_minus_cephes_DP3;
   xmm1 = _mm_mul_ps(y, xmm1);
   xmm2 = _mm_mul_ps(y, xmm2);
   xmm3 = _mm_mul_ps(y, xmm3);
   x = _mm_add_ps(x, xmm1);
   x = _mm_add_ps(x, xmm2);
   x = _mm_add_ps(x, xmm3);

   /* Evaluate the first polynom (0 <= x <= Pi/4) */
   y = *(__m128*)_ps_coscof_p0;
   __m128 z = _mm_mul_ps(x,x);

   y = _mm_mul_ps(y, z);
   y = _mm_add_ps(y, *(__m128*)_ps_coscof_p1);
   y = _mm_mul_ps(y, z);
   y = _mm_add_ps(y, *(__m128*)_ps_coscof_p2);
   y = _mm_mul_ps(y, z);
   y = _mm_mul_ps(y, z);
   __m128 tmp = _mm_mul_ps(z, *(__m128*)_ps_0p5);
   y = _mm_sub_ps(y, tmp);
   y = _mm_add_ps(y, *(__m128*)_ps_1);

   /* Evaluate the second polynom (Pi/4 <= x <= 0) */

   __m128 y2 = *(__m128*)_ps_sincof_p0;
   y2 = _mm_mul_ps(y2, z);
   y2 = _mm_add_ps(y2, *(__m128*)_ps_sincof_p1);
   y2 = _mm_mul_ps(y2, z);
   y2 = _mm_add_ps(y2, *(__m128*)_ps_sincof_p2);
   y2 = _mm_mul_ps(y2, z);
   y2 = _mm_mul_ps(y2, x);
   y2 = _mm_add_ps(y2, x);

   /* select the correct result from the two polynoms */
   xmm3 = poly_mask;
   y2 = _mm_and_ps(xmm3, y2); //, xmm3);
   y = _mm_andnot_ps(xmm3, y);
   y = _mm_add_ps(y,y2);
   /* update the sign */
   y = _mm_xor_ps(y, sign_bit);
   return y;
}

/* almost the same as sin_ps */
__m128 cos_ps(v4sfu *xPtr) { // any x
   __m128 x=*((__m128 *)xPtr);
   __m128 xmm1, xmm2 = _mm_setzero_ps(), xmm3, y;
#ifdef USE_SSE2
   __m128i emm0, emm2;
#else
   __m64 mm0, mm1, mm2, mm3;
#endif
   /* take the absolute value */
   x = _mm_and_ps(x, *(__m128*)_ps_inv_sign_mask);

   /* scale by 4/Pi */
   y = _mm_mul_ps(x, *(__m128*)_ps_cephes_FOPI);

#ifdef USE_SSE2
   /* store the integer part of y in mm0 */
   emm2 = _mm_cvttps_epi32(y);
   /* j=(j+1) & (~1) (see the cephes sources) */
   emm2 = _mm_add_epi32(emm2, *(__m128i*)_pi32_1);
   emm2 = _mm_and_si128(emm2, *(__m128i*)_pi32_inv1);
   y = _mm_cvtepi32_ps(emm2);

   emm2 = _mm_sub_epi32(emm2, *(__m128i*)_pi32_2);

   /* get the swap sign flag */
   emm0 = _mm_andnot_si128(emm2, *(__m128i*)_pi32_4);
   emm0 = _mm_slli_epi32(emm0, 29);
   /* get the polynom selection mask */
   emm2 = _mm_and_si128(emm2, *(__m128i*)_pi32_2);
   emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());

   __m128 sign_bit = _mm_castsi128_ps(emm0);
   __m128 poly_mask = _mm_castsi128_ps(emm2);
#else
   /* store the integer part of y in mm0:mm1 */
   xmm2 = _mm_movehl_ps(xmm2, y);
   mm2 = _mm_cvttps_pi32(y);
   mm3 = _mm_cvttps_pi32(xmm2);

   /* j=(j+1) & (~1) (see the cephes sources) */
   mm2 = _mm_add_pi32(mm2, *(__m64*)_pi32_1);
   mm3 = _mm_add_pi32(mm3, *(__m64*)_pi32_1);
   mm2 = _mm_and_si64(mm2, *(__m64*)_pi32_inv1);
   mm3 = _mm_and_si64(mm3, *(__m64*)_pi32_inv1);

   y = _mm_cvtpi32x2_ps(mm2, mm3);

   mm2 = _mm_sub_pi32(mm2, *(__m64*)_pi32_2);
   mm3 = _mm_sub_pi32(mm3, *(__m64*)_pi32_2);

   /* get the swap sign flag in mm0:mm1 and the
      polynom selection mask in mm2:mm3 */

   mm0 = _mm_andnot_si64(mm2, *(__m64*)_pi32_4);
   mm1 = _mm_andnot_si64(mm3, *(__m64*)_pi32_4);
   mm0 = _mm_slli_pi32(mm0, 29);
   mm1 = _mm_slli_pi32(mm1, 29);

   mm2 = _mm_and_si64(mm2, *(__m64*)_pi32_2);
   mm3 = _mm_and_si64(mm3, *(__m64*)_pi32_2);

   mm2 = _mm_cmpeq_pi32(mm2, _mm_setzero_si64());
   mm3 = _mm_cmpeq_pi32(mm3, _mm_setzero_si64());

   __m128 sign_bit, poly_mask;
   COPY_MM_TO_XMM(mm0, mm1, sign_bit);
   COPY_MM_TO_XMM(mm2, mm3, poly_mask);
   _mm_empty(); /* good-bye mmx */
#endif
   /* The magic pass: "Extended precision modular arithmetic"
      x = ((x - y * DP1) - y * DP2) - y * DP3; */
   xmm1 = *(__m128*)_ps_minus_cephes_DP1;
   xmm2 = *(__m128*)_ps_minus_cephes_DP2;
   xmm3 = *(__m128*)_ps_minus_cephes_DP3;
   xmm1 = _mm_mul_ps(y, xmm1);
   xmm2 = _mm_mul_ps(y, xmm2);
   xmm3 = _mm_mul_ps(y, xmm3);
   x = _mm_add_ps(x, xmm1);
   x = _mm_add_ps(x, xmm2);
   x = _mm_add_ps(x, xmm3);

   /* Evaluate the first polynom (0 <= x <= Pi/4) */
   y = *(__m128*)_ps_coscof_p0;
   __m128 z = _mm_mul_ps(x,x);

   y = _mm_mul_ps(y, z);
   y = _mm_add_ps(y, *(__m128*)_ps_coscof_p1);
   y = _mm_mul_ps(y, z);
   y = _mm_add_ps(y, *(__m128*)_ps_coscof_p2);
   y = _mm_mul_ps(y, z);
   y = _mm_mul_ps(y, z);
   __m128 tmp = _mm_mul_ps(z, *(__m128*)_ps_0p5);
   y = _mm_sub_ps(y, tmp);
   y = _mm_add_ps(y, *(__m128*)_ps_1);

   /* Evaluate the second polynom (Pi/4 <= x <= 0) */

   __m128 y2 = *(__m128*)_ps_sincof_p0;
   y2 = _mm_mul_ps(y2, z);
   y2 = _mm_add_ps(y2, *(__m128*)_ps_sincof_p1);
   y2 = _mm_mul_ps(y2, z);
   y2 = _mm_add_ps(y2, *(__m128*)_ps_sincof_p2);
   y2 = _mm_mul_ps(y2, z);
   y2 = _mm_mul_ps(y2, x);
   y2 = _mm_add_ps(y2, x);

   /* select the correct result from the two polynoms */
   xmm3 = poly_mask;
   y2 = _mm_and_ps(xmm3, y2); //, xmm3);
   y = _mm_andnot_ps(xmm3, y);
   y = _mm_add_ps(y,y2);
   /* update the sign */
   y = _mm_xor_ps(y, sign_bit);

   return y;
}

/* since sin_ps and cos_ps are almost identical, sincos_ps could replace both of them..
   it is almost as fast, and gives you a free cosine with your sine */
void sincos_ps(v4sfu *xptr, v4sfu *sptr, v4sfu *cptr) {
   __m128 x=*((__m128 *)xptr), *s=(__m128 *)sptr, *c=(__m128 *)cptr, xmm1, xmm2, xmm3 = _mm_setzero_ps(), sign_bit_sin, y;
#ifdef USE_SSE2
   __m128i emm0, emm2, emm4;
#else
   __m64 mm0, mm1, mm2, mm3, mm4, mm5;
#endif
   sign_bit_sin = x;
   /* take the absolute value */
   x = _mm_and_ps(x, *(__m128*)_ps_inv_sign_mask);
   /* extract the sign bit (upper one) */
   sign_bit_sin = _mm_and_ps(sign_bit_sin, *(__m128*)_ps_sign_mask);

   /* scale by 4/Pi */
   y = _mm_mul_ps(x, *(__m128*)_ps_cephes_FOPI);

#ifdef USE_SSE2
   /* store the integer part of y in emm2 */
   emm2 = _mm_cvttps_epi32(y);

   /* j=(j+1) & (~1) (see the cephes sources) */
   emm2 = _mm_add_epi32(emm2, *(__m128i*)_pi32_1);
   emm2 = _mm_and_si128(emm2, *(__m128i*)_pi32_inv1);
   y = _mm_cvtepi32_ps(emm2);

   emm4 = emm2;

   /* get the swap sign flag for the sine */
   emm0 = _mm_and_si128(emm2, *(__m128i*)_pi32_4);
   emm0 = _mm_slli_epi32(emm0, 29);
   __m128 swap_sign_bit_sin = _mm_castsi128_ps(emm0);

   /* get the polynom selection mask for the sine */
   emm2 = _mm_and_si128(emm2, *(__m128i*)_pi32_2);
   emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
   __m128 poly_mask = _mm_castsi128_ps(emm2);
#else
   /* store the integer part of y in mm2:mm3 */
   xmm3 = _mm_movehl_ps(xmm3, y);
   mm2 = _mm_cvttps_pi32(y);
   mm3 = _mm_cvttps_pi32(xmm3);

   /* j=(j+1) & (~1) (see the cephes sources) */
   mm2 = _mm_add_pi32(mm2, *(__m64*)_pi32_1);
   mm3 = _mm_add_pi32(mm3, *(__m64*)_pi32_1);
   mm2 = _mm_and_si64(mm2, *(__m64*)_pi32_inv1);
   mm3 = _mm_and_si64(mm3, *(__m64*)_pi32_inv1);

   y = _mm_cvtpi32x2_ps(mm2, mm3);

   mm4 = mm2;
   mm5 = mm3;

   /* get the swap sign flag for the sine */
   mm0 = _mm_and_si64(mm2, *(__m64*)_pi32_4);
   mm1 = _mm_and_si64(mm3, *(__m64*)_pi32_4);
   mm0 = _mm_slli_pi32(mm0, 29);
   mm1 = _mm_slli_pi32(mm1, 29);
   __m128 swap_sign_bit_sin;
   COPY_MM_TO_XMM(mm0, mm1, swap_sign_bit_sin);

   /* get the polynom selection mask for the sine */

   mm2 = _mm_and_si64(mm2, *(__m64*)_pi32_2);
   mm3 = _mm_and_si64(mm3, *(__m64*)_pi32_2);
   mm2 = _mm_cmpeq_pi32(mm2, _mm_setzero_si64());
   mm3 = _mm_cmpeq_pi32(mm3, _mm_setzero_si64());
   __m128 poly_mask;
   COPY_MM_TO_XMM(mm2, mm3, poly_mask);
#endif

   /* The magic pass: "Extended precision modular arithmetic"
      x = ((x - y * DP1) - y * DP2) - y * DP3; */
   xmm1 = *(__m128*)_ps_minus_cephes_DP1;
   xmm2 = *(__m128*)_ps_minus_cephes_DP2;
   xmm3 = *(__m128*)_ps_minus_cephes_DP3;
   xmm1 = _mm_mul_ps(y, xmm1);
   xmm2 = _mm_mul_ps(y, xmm2);
   xmm3 = _mm_mul_ps(y, xmm3);
   x = _mm_add_ps(x, xmm1);
   x = _mm_add_ps(x, xmm2);
   x = _mm_add_ps(x, xmm3);

#ifdef USE_SSE2
   emm4 = _mm_sub_epi32(emm4, *(__m128i*)_pi32_2);
   emm4 = _mm_andnot_si128(emm4, *(__m128i*)_pi32_4);
   emm4 = _mm_slli_epi32(emm4, 29);
   __m128 sign_bit_cos = _mm_castsi128_ps(emm4);
#else
   /* get the sign flag for the cosine */
   mm4 = _mm_sub_pi32(mm4, *(__m64*)_pi32_2);
   mm5 = _mm_sub_pi32(mm5, *(__m64*)_pi32_2);
   mm4 = _mm_andnot_si64(mm4, *(__m64*)_pi32_4);
   mm5 = _mm_andnot_si64(mm5, *(__m64*)_pi32_4);
   mm4 = _mm_slli_pi32(mm4, 29);
   mm5 = _mm_slli_pi32(mm5, 29);
   __m128 sign_bit_cos;
   COPY_MM_TO_XMM(mm4, mm5, sign_bit_cos);
   _mm_empty(); /* good-bye mmx */
#endif

   sign_bit_sin = _mm_xor_ps(sign_bit_sin, swap_sign_bit_sin);

   /* Evaluate the first polynom (0 <= x <= Pi/4) */
   __m128 z = _mm_mul_ps(x,x);
   y = *(__m128*)_ps_coscof_p0;

   y = _mm_mul_ps(y, z);
   y = _mm_add_ps(y, *(__m128*)_ps_coscof_p1);
   y = _mm_mul_ps(y, z);
   y = _mm_add_ps(y, *(__m128*)_ps_coscof_p2);
   y = _mm_mul_ps(y, z);
   y = _mm_mul_ps(y, z);
   __m128 tmp = _mm_mul_ps(z, *(__m128*)_ps_0p5);
   y = _mm_sub_ps(y, tmp);
   y = _mm_add_ps(y, *(__m128*)_ps_1);

   /* Evaluate the second polynom (Pi/4 <= x <= 0) */

   __m128 y2 = *(__m128*)_ps_sincof_p0;
   y2 = _mm_mul_ps(y2, z);
   y2 = _mm_add_ps(y2, *(__m128*)_ps_sincof_p1);
   y2 = _mm_mul_ps(y2, z);
   y2 = _mm_add_ps(y2, *(__m128*)_ps_sincof_p2);
   y2 = _mm_mul_ps(y2, z);
   y2 = _mm_mul_ps(y2, x);
   y2 = _mm_add_ps(y2, x);

   /* select the correct result from the two polynoms */
   xmm3 = poly_mask;
   __m128 ysin2 = _mm_and_ps(xmm3, y2);
   __m128 ysin1 = _mm_andnot_ps(xmm3, y);
   y2 = _mm_sub_ps(y2,ysin2);
   y = _mm_sub_ps(y, ysin1);

   xmm1 = _mm_add_ps(ysin1,ysin2);
   xmm2 = _mm_add_ps(y,y2);

   /* update the sign */
   *s = _mm_xor_ps(xmm1, sign_bit_sin);
   *c = _mm_xor_ps(xmm2, sign_bit_cos);
}

#endif
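A hedged usage sketch of the v4sfu call structure above; the lane values and variable names are illustrative, not from the commit:

v4sfu angles, sines, cosines;           // 16-byte aligned by the typedef
for (int i = 0; i < 4; i++)
   angles.m128_f32[i] = 0.5f * i;       // four arguments evaluated at once
sincos_ps(&angles, &sines, &cosines);   // one call fills both outputs
// each lane reads back through the union,
// e.g. sines.m128_f32[1] approximates sinf(0.5f)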
@ -1,3 +1,34 @@
/**********************************************************************

   Audacity: A Digital Audio Editor

   SseMathFuncs.h

   Stephen Moshier (wrote original C version, The Cephes Library)
   Julien Pommier (converted to use SSE)
   Andrew Hallendorff (adapted for Audacity)

*******************************************************************//**

\file SseMathFuncs.h
\brief SSE maths functions (for FFTs)

*//****************************************************************/

/* JKC: The trig functions use Taylor's series on the range 0 to Pi/4,
 * computing both Sin and Cos and using one or the other (in the
 * solo functions), or both in the sincos function, which is the form
 * most useful for FFTs.
 * The constants minus_cephes_DP1 to minus_cephes_DP3 are used in the
 * angle-reduction modulo function.
 * 4 sincos are done at a time.
 * If we wanted to do just sin or just cos, we could speed things up
 * by queuing up the Sines and Cosines into batches of 4 separately. */

/* SIMD (SSE1+MMX or SSE2) implementation of sin, cos, exp and log

   Inspired by Intel Approximate Math library, and based on the
@ -30,8 +61,37 @@ misrepresented as being the original software.
*/
#ifndef SSE_MATHFUN
#define SSE_MATHFUN
/* SIMD (SSE1+MMX or SSE2) implementation of sin, cos, exp and log

   Inspired by Intel Approximate Math library, and based on the
   corresponding algorithms of the cephes math library

   The default is to use the SSE1 version. If you define USE_SSE2,
   the SSE2 intrinsics will be used in place of the MMX intrinsics. Do
   not expect any significant performance improvement with SSE2.
*/

/* Copyright (C) 2007 Julien Pommier

   This software is provided 'as-is', without any express or implied
   warranty. In no event will the authors be held liable for any damages
   arising from the use of this software.

   Permission is granted to anyone to use this software for any purpose,
   including commercial applications, and to alter it and redistribute it
   freely, subject to the following restrictions:

   1. The origin of this software must not be misrepresented; you must not
      claim that you wrote the original software. If you use this software
      in a product, an acknowledgment in the product documentation would be
      appreciated but is not required.
   2. Altered source versions must be plainly marked as such, and must not be
      misrepresented as being the original software.
   3. This notice may not be removed or altered from any source distribution.

   (this is the zlib license)
*/

#include <inttypes.h>
#include <xmmintrin.h>

/* yes I know, the top of this file is quite ugly */
@ -45,36 +105,671 @@ misrepresented as being the original software.
#endif

/* __m128 is ugly to write */
//typedef __m128 _v4sfu; // vector of 4 float (sse1)

#ifndef USE_SSE2 // sorry, this is all SSE2 now
#define USE_SSE2
#endif
typedef __m128 v4sf; // vector of 4 float (sse1)

#ifdef USE_SSE2
# include <emmintrin.h>
typedef __m128i v4si; // vector of 4 int (sse2)
#else
typedef __m64 v2si; // vector of 2 int (mmx)
#endif
// !!! Andrew Hallendorff: Warning: changed the call structure to make it compatible with gcc
/* declare some SSE constants -- why can't I figure a better way to do that? */
#define _PS_CONST(Name, Val) \
   static const ALIGN16_BEG float _ps_##Name[4] ALIGN16_END = { Val, Val, Val, Val }
#define _PI32_CONST(Name, Val) \
   static const ALIGN16_BEG int _pi32_##Name[4] ALIGN16_END = { Val, Val, Val, Val }
#define _PS_CONST_TYPE(Name, Type, Val) \
   static const ALIGN16_BEG Type _ps_##Name[4] ALIGN16_END = { Val, Val, Val, Val }

typedef ALIGN16_BEG union {
   float m128_f32[4];
   int8_t m128_i8[16];
   int16_t m128_i16[8];
   int32_t m128_i32[4];
   int64_t m128_i64[2];
   uint8_t m128_u8[16];
   uint16_t m128_u16[8];
   uint32_t m128_u32[4];
   uint64_t m128_u64[2];
} ALIGN16_END v4sfu;
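The warning above refers to passing __m128 by value, which gcc and older MSVC handled incompatibly; as a sketch of the difference (the old signature is from the upstream sse_mathfun.h, the new one from this header):

// upstream sse_mathfun.h style: vector passed by value
//    v4sf s = sin_ps(x);
// call structure used here: aligned union passed by pointer
//    v4sfu xu;                 // fill xu.m128_f32[0..3]
//    __m128 s = sin_ps(&xu);   // same math, gcc-compatible signature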
_PS_CONST(1 , 1.0f);
|
||||
_PS_CONST(0p5, 0.5f);
|
||||
/* the smallest non denormalized float number */
|
||||
_PS_CONST_TYPE(min_norm_pos, int, 0x00800000);
|
||||
_PS_CONST_TYPE(mant_mask, int, 0x7f800000);
|
||||
_PS_CONST_TYPE(inv_mant_mask, int, ~0x7f800000);
|
||||
|
||||
__m128 log_ps(v4sfu *xPtr);
|
||||
__m128 sin_ps(v4sfu *xPtr);
|
||||
void sincos_ps(v4sfu *xptr, v4sfu *sptr, v4sfu *cptr);
|
||||
_PS_CONST_TYPE(sign_mask, int, (int)0x80000000);
|
||||
_PS_CONST_TYPE(inv_sign_mask, int, ~0x80000000);
|
||||
|
||||
_PI32_CONST(1, 1);
|
||||
_PI32_CONST(inv1, ~1);
|
||||
_PI32_CONST(2, 2);
|
||||
_PI32_CONST(4, 4);
|
||||
_PI32_CONST(0x7f, 0x7f);
|
||||
|
||||
_PS_CONST(cephes_SQRTHF, 0.707106781186547524);
|
||||
_PS_CONST(cephes_log_p0, 7.0376836292E-2);
|
||||
_PS_CONST(cephes_log_p1, - 1.1514610310E-1);
|
||||
_PS_CONST(cephes_log_p2, 1.1676998740E-1);
|
||||
_PS_CONST(cephes_log_p3, - 1.2420140846E-1);
|
||||
_PS_CONST(cephes_log_p4, + 1.4249322787E-1);
|
||||
_PS_CONST(cephes_log_p5, - 1.6668057665E-1);
|
||||
_PS_CONST(cephes_log_p6, + 2.0000714765E-1);
|
||||
_PS_CONST(cephes_log_p7, - 2.4999993993E-1);
|
||||
_PS_CONST(cephes_log_p8, + 3.3333331174E-1);
|
||||
_PS_CONST(cephes_log_q1, -2.12194440e-4);
|
||||
_PS_CONST(cephes_log_q2, 0.693359375);
|
||||
|
||||
#ifndef USE_SSE2
|
||||
typedef union xmm_mm_union {
|
||||
__m128 xmm;
|
||||
__m64 mm[2];
|
||||
} xmm_mm_union;
|
||||
|
||||
#define COPY_XMM_TO_MM(xmm_, mm0_, mm1_) { \
|
||||
xmm_mm_union u; u.xmm = xmm_; \
|
||||
mm0_ = u.mm[0]; \
|
||||
mm1_ = u.mm[1]; \
|
||||
}
|
||||
|
||||
#define COPY_MM_TO_XMM(mm0_, mm1_, xmm_) { \
|
||||
xmm_mm_union u; u.mm[0]=mm0_; u.mm[1]=mm1_; xmm_ = u.xmm; \
|
||||
}
|
||||
|
||||
#endif // USE_SSE2
|
||||
|
||||
/* natural logarithm computed for 4 simultaneous float
|
||||
return NaN for x <= 0
|
||||
*/
|
||||
v4sf log_ps(v4sf x) {
|
||||
#ifdef USE_SSE2
|
||||
v4si emm0;
|
||||
#else
|
||||
v2si mm0, mm1;
|
||||
#endif
|
||||
v4sf one = *(v4sf*)_ps_1;
|
||||
|
||||
v4sf invalid_mask = _mm_cmple_ps(x, _mm_setzero_ps());
|
||||
|
||||
x = _mm_max_ps(x, *(v4sf*)_ps_min_norm_pos); /* cut off denormalized stuff */
|
||||
|
||||
#ifndef USE_SSE2
|
||||
/* part 1: x = frexpf(x, &e); */
|
||||
COPY_XMM_TO_MM(x, mm0, mm1);
|
||||
mm0 = _mm_srli_pi32(mm0, 23);
|
||||
mm1 = _mm_srli_pi32(mm1, 23);
|
||||
#else
|
||||
emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23);
|
||||
#endif
|
||||
/* keep only the fractional part */
|
||||
x = _mm_and_ps(x, *(v4sf*)_ps_inv_mant_mask);
|
||||
x = _mm_or_ps(x, *(v4sf*)_ps_0p5);
|
||||
|
||||
#ifndef USE_SSE2
|
||||
/* now e=mm0:mm1 contain the really base-2 exponent */
|
||||
mm0 = _mm_sub_pi32(mm0, *(v2si*)_pi32_0x7f);
|
||||
mm1 = _mm_sub_pi32(mm1, *(v2si*)_pi32_0x7f);
|
||||
v4sf e = _mm_cvtpi32x2_ps(mm0, mm1);
|
||||
_mm_empty(); /* bye bye mmx */
|
||||
#else
|
||||
emm0 = _mm_sub_epi32(emm0, *(v4si*)_pi32_0x7f);
|
||||
v4sf e = _mm_cvtepi32_ps(emm0);
|
||||
#endif
|
||||
|
||||
e = _mm_add_ps(e, one);
|
||||
|
||||
/* part2:
|
||||
if( x < SQRTHF ) {
|
||||
e -= 1;
|
||||
x = x + x - 1.0;
|
||||
} else { x = x - 1.0; }
|
||||
*/
|
||||
v4sf mask = _mm_cmplt_ps(x, *(v4sf*)_ps_cephes_SQRTHF);
|
||||
v4sf tmp = _mm_and_ps(x, mask);
|
||||
x = _mm_sub_ps(x, one);
|
||||
e = _mm_sub_ps(e, _mm_and_ps(one, mask));
|
||||
x = _mm_add_ps(x, tmp);
|
||||
|
||||
|
||||
v4sf z = _mm_mul_ps(x,x);
|
||||
|
||||
v4sf y = *(v4sf*)_ps_cephes_log_p0;
|
||||
y = _mm_mul_ps(y, x);
|
||||
y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p1);
|
||||
y = _mm_mul_ps(y, x);
|
||||
y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p2);
|
||||
y = _mm_mul_ps(y, x);
|
||||
y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p3);
|
||||
y = _mm_mul_ps(y, x);
|
||||
y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p4);
|
||||
y = _mm_mul_ps(y, x);
|
||||
y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p5);
|
||||
y = _mm_mul_ps(y, x);
|
||||
y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p6);
|
||||
y = _mm_mul_ps(y, x);
|
||||
y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p7);
|
||||
y = _mm_mul_ps(y, x);
|
||||
y = _mm_add_ps(y, *(v4sf*)_ps_cephes_log_p8);
|
||||
y = _mm_mul_ps(y, x);
|
||||
|
||||
y = _mm_mul_ps(y, z);
|
||||
|
||||
|
||||
tmp = _mm_mul_ps(e, *(v4sf*)_ps_cephes_log_q1);
|
||||
y = _mm_add_ps(y, tmp);
|
||||
|
||||
|
||||
tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
|
||||
y = _mm_sub_ps(y, tmp);
|
||||
|
||||
tmp = _mm_mul_ps(e, *(v4sf*)_ps_cephes_log_q2);
|
||||
x = _mm_add_ps(x, y);
|
||||
x = _mm_add_ps(x, tmp);
|
||||
x = _mm_or_ps(x, invalid_mask); // negative arg will be NAN
|
||||
return x;
|
||||
}
|
||||
|
||||
_PS_CONST(exp_hi, 88.3762626647949f);
|
||||
_PS_CONST(exp_lo, -88.3762626647949f);
|
||||
|
||||
_PS_CONST(cephes_LOG2EF, 1.44269504088896341);
|
||||
_PS_CONST(cephes_exp_C1, 0.693359375);
|
||||
_PS_CONST(cephes_exp_C2, -2.12194440e-4);
|
||||
|
||||
_PS_CONST(cephes_exp_p0, 1.9875691500E-4);
|
||||
_PS_CONST(cephes_exp_p1, 1.3981999507E-3);
|
||||
_PS_CONST(cephes_exp_p2, 8.3334519073E-3);
|
||||
_PS_CONST(cephes_exp_p3, 4.1665795894E-2);
|
||||
_PS_CONST(cephes_exp_p4, 1.6666665459E-1);
|
||||
_PS_CONST(cephes_exp_p5, 5.0000001201E-1);
|
||||
|
||||
v4sf exp_ps(v4sf x) {
|
||||
v4sf tmp = _mm_setzero_ps(), fx;
|
||||
#ifdef USE_SSE2
|
||||
v4si emm0;
|
||||
#else
|
||||
v2si mm0, mm1;
|
||||
#endif
|
||||
v4sf one = *(v4sf*)_ps_1;
|
||||
|
||||
x = _mm_min_ps(x, *(v4sf*)_ps_exp_hi);
|
||||
x = _mm_max_ps(x, *(v4sf*)_ps_exp_lo);
|
||||
|
||||
/* express exp(x) as exp(g + n*log(2)) */
|
||||
fx = _mm_mul_ps(x, *(v4sf*)_ps_cephes_LOG2EF);
|
||||
fx = _mm_add_ps(fx, *(v4sf*)_ps_0p5);
|
||||
|
||||
/* how to perform a floorf with SSE: just below */
|
||||
#ifndef USE_SSE2
|
||||
/* step 1 : cast to int */
|
||||
tmp = _mm_movehl_ps(tmp, fx);
|
||||
mm0 = _mm_cvttps_pi32(fx);
|
||||
mm1 = _mm_cvttps_pi32(tmp);
|
||||
/* step 2 : cast back to float */
|
||||
tmp = _mm_cvtpi32x2_ps(mm0, mm1);
|
||||
#else
|
||||
emm0 = _mm_cvttps_epi32(fx);
|
||||
tmp = _mm_cvtepi32_ps(emm0);
|
||||
#endif
|
||||
/* if greater, substract 1 */
|
||||
v4sf mask = _mm_cmpgt_ps(tmp, fx);
|
||||
mask = _mm_and_ps(mask, one);
|
||||
fx = _mm_sub_ps(tmp, mask);
|
||||
|
||||
tmp = _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C1);
|
||||
v4sf z = _mm_mul_ps(fx, *(v4sf*)_ps_cephes_exp_C2);
|
||||
x = _mm_sub_ps(x, tmp);
|
||||
x = _mm_sub_ps(x, z);
|
||||
|
||||
z = _mm_mul_ps(x,x);
|
||||
|
||||
v4sf y = *(v4sf*)_ps_cephes_exp_p0;
|
||||
y = _mm_mul_ps(y, x);
|
||||
y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p1);
|
||||
y = _mm_mul_ps(y, x);
|
||||
y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p2);
|
||||
y = _mm_mul_ps(y, x);
|
||||
y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p3);
|
||||
y = _mm_mul_ps(y, x);
|
||||
y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p4);
|
||||
y = _mm_mul_ps(y, x);
|
||||
y = _mm_add_ps(y, *(v4sf*)_ps_cephes_exp_p5);
|
||||
y = _mm_mul_ps(y, z);
|
||||
y = _mm_add_ps(y, x);
|
||||
y = _mm_add_ps(y, one);
|
||||
|
||||
/* build 2^n */
|
||||
#ifndef USE_SSE2
|
||||
z = _mm_movehl_ps(z, fx);
|
||||
mm0 = _mm_cvttps_pi32(fx);
|
||||
mm1 = _mm_cvttps_pi32(z);
|
||||
mm0 = _mm_add_pi32(mm0, *(v2si*)_pi32_0x7f);
|
||||
mm1 = _mm_add_pi32(mm1, *(v2si*)_pi32_0x7f);
|
||||
mm0 = _mm_slli_pi32(mm0, 23);
|
||||
mm1 = _mm_slli_pi32(mm1, 23);
|
||||
|
||||
v4sf pow2n;
|
||||
COPY_MM_TO_XMM(mm0, mm1, pow2n);
|
||||
_mm_empty();
|
||||
#else
|
||||
emm0 = _mm_cvttps_epi32(fx);
|
||||
emm0 = _mm_add_epi32(emm0, *(v4si*)_pi32_0x7f);
|
||||
emm0 = _mm_slli_epi32(emm0, 23);
|
||||
v4sf pow2n = _mm_castsi128_ps(emm0);
|
||||
#endif
|
||||
y = _mm_mul_ps(y, pow2n);
|
||||
return y;
|
||||
}
|
||||
|
||||
_PS_CONST(minus_cephes_DP1, -0.78515625);
|
||||
_PS_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
|
||||
_PS_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
|
||||
_PS_CONST(sincof_p0, -1.9515295891E-4);
|
||||
_PS_CONST(sincof_p1, 8.3321608736E-3);
|
||||
_PS_CONST(sincof_p2, -1.6666654611E-1);
|
||||
_PS_CONST(coscof_p0, 2.443315711809948E-005);
|
||||
_PS_CONST(coscof_p1, -1.388731625493765E-003);
|
||||
_PS_CONST(coscof_p2, 4.166664568298827E-002);
|
||||
_PS_CONST(cephes_FOPI, 1.27323954473516); // 4 / M_PI
|
||||
|
||||
|
||||
/* evaluation of 4 sines at onces, using only SSE1+MMX intrinsics so
|
||||
it runs also on old athlons XPs and the pentium III of your grand
|
||||
mother.
|
||||
|
||||
The code is the exact rewriting of the cephes sinf function.
|
||||
Precision is excellent as long as x < 8192 (I did not bother to
|
||||
take into account the special handling they have for greater values
|
||||
-- it does not return garbage for arguments over 8192, though, but
|
||||
the extra precision is missing).
|
||||
|
||||
Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
|
||||
surprising but correct result.
|
||||
|
||||
Performance is also surprisingly good, 1.33 times faster than the
|
||||
macos vsinf SSE2 function, and 1.5 times faster than the
|
||||
__vrs4_sinf of amd's ACML (which is only available in 64 bits). Not
|
||||
too bad for an SSE1 function (with no special tuning) !
|
||||
However the latter libraries probably have a much better handling of NaN,
|
||||
Inf, denormalized and other special arguments..
|
||||
|
||||
On my core 1 duo, the execution of this function takes approximately 95 cycles.
|
||||
|
||||
From what I have observed on the experiments with Intel AMath lib, switching to an
|
||||
SSE2 version would improve the perf by only 10%.
|
||||
|
||||
Since it is based on SSE intrinsics, it has to be compiled at -O2 to
|
||||
deliver full speed.
|
||||
*/
|
||||
v4sf sin_ps(v4sf x) { // any x
|
||||
v4sf xmm1, xmm2 = _mm_setzero_ps(), xmm3, sign_bit, y;
|
||||
|
||||
#ifdef USE_SSE2
|
||||
v4si emm0, emm2;
|
||||
#else
|
||||
v2si mm0, mm1, mm2, mm3;
|
||||
#endif
|
||||
sign_bit = x;
|
||||
/* take the absolute value */
|
||||
x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
|
||||
/* extract the sign bit (upper one) */
|
||||
sign_bit = _mm_and_ps(sign_bit, *(v4sf*)_ps_sign_mask);
|
||||
|
||||
/* scale by 4/Pi */
|
||||
y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);
|
||||
|
||||
#ifdef USE_SSE2
|
||||
/* store the integer part of y in mm0 */
|
||||
emm2 = _mm_cvttps_epi32(y);
|
||||
/* j=(j+1) & (~1) (see the cephes sources) */
|
||||
emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
|
||||
emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
|
||||
y = _mm_cvtepi32_ps(emm2);
|
||||
|
||||
/* get the swap sign flag */
|
||||
emm0 = _mm_and_si128(emm2, *(v4si*)_pi32_4);
|
||||
emm0 = _mm_slli_epi32(emm0, 29);
|
||||
/* get the polynom selection mask
|
||||
there is one polynom for 0 <= x <= Pi/4
|
||||
and another one for Pi/4<x<=Pi/2
|
||||
|
||||
Both branches will be computed.
|
||||
*/
|
||||
emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
|
||||
emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
|
||||
|
||||
v4sf swap_sign_bit = _mm_castsi128_ps(emm0);
|
||||
v4sf poly_mask = _mm_castsi128_ps(emm2);
|
||||
sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
|
||||
|
||||
#else
|
||||
/* store the integer part of y in mm0:mm1 */
|
||||
xmm2 = _mm_movehl_ps(xmm2, y);
|
||||
mm2 = _mm_cvttps_pi32(y);
|
||||
mm3 = _mm_cvttps_pi32(xmm2);
|
||||
/* j=(j+1) & (~1) (see the cephes sources) */
|
||||
mm2 = _mm_add_pi32(mm2, *(v2si*)_pi32_1);
|
||||
mm3 = _mm_add_pi32(mm3, *(v2si*)_pi32_1);
|
||||
mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_inv1);
|
||||
mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_inv1);
|
||||
y = _mm_cvtpi32x2_ps(mm2, mm3);
|
||||
/* get the swap sign flag */
|
||||
mm0 = _mm_and_si64(mm2, *(v2si*)_pi32_4);
|
||||
mm1 = _mm_and_si64(mm3, *(v2si*)_pi32_4);
|
||||
mm0 = _mm_slli_pi32(mm0, 29);
|
||||
mm1 = _mm_slli_pi32(mm1, 29);
|
||||
/* get the polynom selection mask */
|
||||
mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_2);
|
||||
mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_2);
|
||||
mm2 = _mm_cmpeq_pi32(mm2, _mm_setzero_si64());
|
||||
mm3 = _mm_cmpeq_pi32(mm3, _mm_setzero_si64());
|
||||
v4sf swap_sign_bit, poly_mask;
|
||||
COPY_MM_TO_XMM(mm0, mm1, swap_sign_bit);
|
||||
COPY_MM_TO_XMM(mm2, mm3, poly_mask);
|
||||
sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
|
||||
_mm_empty(); /* good-bye mmx */
|
||||
#endif
|
||||
|
||||
/* The magic pass: "Extended precision modular arithmetic"
|
||||
x = ((x - y * DP1) - y * DP2) - y * DP3; */
|
||||
xmm1 = *(v4sf*)_ps_minus_cephes_DP1;
|
||||
xmm2 = *(v4sf*)_ps_minus_cephes_DP2;
|
||||
xmm3 = *(v4sf*)_ps_minus_cephes_DP3;
|
||||
xmm1 = _mm_mul_ps(y, xmm1);
|
||||
xmm2 = _mm_mul_ps(y, xmm2);
|
||||
xmm3 = _mm_mul_ps(y, xmm3);
|
||||
x = _mm_add_ps(x, xmm1);
|
||||
x = _mm_add_ps(x, xmm2);
|
||||
x = _mm_add_ps(x, xmm3);
|
||||
|
||||
/* Evaluate the first polynom (0 <= x <= Pi/4) */
|
||||
y = *(v4sf*)_ps_coscof_p0;
|
||||
v4sf z = _mm_mul_ps(x,x);
|
||||
|
||||
y = _mm_mul_ps(y, z);
|
||||
y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p1);
|
||||
y = _mm_mul_ps(y, z);
|
||||
y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p2);
|
||||
y = _mm_mul_ps(y, z);
|
||||
y = _mm_mul_ps(y, z);
|
||||
v4sf tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
|
||||
y = _mm_sub_ps(y, tmp);
|
||||
y = _mm_add_ps(y, *(v4sf*)_ps_1);
|
||||
|
||||
/* Evaluate the second polynom (Pi/4 <= x <= 0) */
|
||||
|
||||
v4sf y2 = *(v4sf*)_ps_sincof_p0;
|
||||
y2 = _mm_mul_ps(y2, z);
|
||||
y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
|
||||
y2 = _mm_mul_ps(y2, z);
|
||||
y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
|
||||
y2 = _mm_mul_ps(y2, z);
|
||||
y2 = _mm_mul_ps(y2, x);
|
||||
y2 = _mm_add_ps(y2, x);
|
||||
|
||||
/* select the correct result from the two polynoms */
|
||||
xmm3 = poly_mask;
|
||||
y2 = _mm_and_ps(xmm3, y2); //, xmm3);
|
||||
y = _mm_andnot_ps(xmm3, y);
|
||||
y = _mm_add_ps(y,y2);
|
||||
/* update the sign */
|
||||
y = _mm_xor_ps(y, sign_bit);
|
||||
return y;
|
||||
}
|
||||
|
||||
/* almost the same as sin_ps */
|
||||
v4sf cos_ps(v4sf x) { // any x
|
||||
v4sf xmm1, xmm2 = _mm_setzero_ps(), xmm3, y;
|
||||
#ifdef USE_SSE2
|
||||
v4si emm0, emm2;
|
||||
#else
|
||||
v2si mm0, mm1, mm2, mm3;
|
||||
#endif
|
||||
/* take the absolute value */
|
||||
x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
|
||||
|
||||
/* scale by 4/Pi */
|
||||
y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);
|
||||
|
||||
#ifdef USE_SSE2
|
||||
/* store the integer part of y in mm0 */
|
||||
emm2 = _mm_cvttps_epi32(y);
|
||||
/* j=(j+1) & (~1) (see the cephes sources) */
|
||||
emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
|
||||
emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
|
||||
y = _mm_cvtepi32_ps(emm2);
|
||||
|
||||
emm2 = _mm_sub_epi32(emm2, *(v4si*)_pi32_2);
|
||||
|
||||
/* get the swap sign flag */
|
||||
emm0 = _mm_andnot_si128(emm2, *(v4si*)_pi32_4);
|
||||
emm0 = _mm_slli_epi32(emm0, 29);
|
||||
/* get the polynom selection mask */
|
||||
emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
|
||||
emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
|
||||
|
||||
v4sf sign_bit = _mm_castsi128_ps(emm0);
|
||||
v4sf poly_mask = _mm_castsi128_ps(emm2);
|
||||
#else
|
||||
/* store the integer part of y in mm0:mm1 */
|
||||
xmm2 = _mm_movehl_ps(xmm2, y);
|
||||
mm2 = _mm_cvttps_pi32(y);
|
||||
mm3 = _mm_cvttps_pi32(xmm2);
|
||||
|
||||
/* j=(j+1) & (~1) (see the cephes sources) */
|
||||
mm2 = _mm_add_pi32(mm2, *(v2si*)_pi32_1);
|
||||
mm3 = _mm_add_pi32(mm3, *(v2si*)_pi32_1);
|
||||
mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_inv1);
|
||||
mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_inv1);
|
||||
|
||||
y = _mm_cvtpi32x2_ps(mm2, mm3);
|
||||
|
||||
|
||||
mm2 = _mm_sub_pi32(mm2, *(v2si*)_pi32_2);
|
||||
mm3 = _mm_sub_pi32(mm3, *(v2si*)_pi32_2);
|
||||
|
||||
/* get the swap sign flag in mm0:mm1 and the
|
||||
polynom selection mask in mm2:mm3 */
|
||||
|
||||
mm0 = _mm_andnot_si64(mm2, *(v2si*)_pi32_4);
|
||||
mm1 = _mm_andnot_si64(mm3, *(v2si*)_pi32_4);
|
||||
mm0 = _mm_slli_pi32(mm0, 29);
|
||||
mm1 = _mm_slli_pi32(mm1, 29);
|
||||
|
||||
mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_2);
|
||||
mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_2);
|
||||
|
||||
mm2 = _mm_cmpeq_pi32(mm2, _mm_setzero_si64());
|
||||
mm3 = _mm_cmpeq_pi32(mm3, _mm_setzero_si64());
|
||||
|
||||
v4sf sign_bit, poly_mask;
|
||||
COPY_MM_TO_XMM(mm0, mm1, sign_bit);
|
||||
COPY_MM_TO_XMM(mm2, mm3, poly_mask);
|
||||
_mm_empty(); /* good-bye mmx */
|
||||
#endif
|
||||
/* The magic pass: "Extended precision modular arithmetic"
|
||||
x = ((x - y * DP1) - y * DP2) - y * DP3; */
|
||||
xmm1 = *(v4sf*)_ps_minus_cephes_DP1;
|
||||
xmm2 = *(v4sf*)_ps_minus_cephes_DP2;
|
||||
xmm3 = *(v4sf*)_ps_minus_cephes_DP3;
|
||||
xmm1 = _mm_mul_ps(y, xmm1);
|
||||
xmm2 = _mm_mul_ps(y, xmm2);
|
||||
xmm3 = _mm_mul_ps(y, xmm3);
|
||||
x = _mm_add_ps(x, xmm1);
|
||||
x = _mm_add_ps(x, xmm2);
|
||||
x = _mm_add_ps(x, xmm3);
|
||||
|
||||
/* Evaluate the first polynom (0 <= x <= Pi/4) */
|
||||
y = *(v4sf*)_ps_coscof_p0;
|
||||
v4sf z = _mm_mul_ps(x,x);
|
||||
|
||||
y = _mm_mul_ps(y, z);
|
||||
y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p1);
|
||||
y = _mm_mul_ps(y, z);
|
||||
y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p2);
|
||||
y = _mm_mul_ps(y, z);
|
||||
y = _mm_mul_ps(y, z);
|
||||
v4sf tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
|
||||
y = _mm_sub_ps(y, tmp);
|
||||
y = _mm_add_ps(y, *(v4sf*)_ps_1);
|
||||
|
||||
/* Evaluate the second polynom (Pi/4 <= x <= 0) */
|
||||
|
||||
v4sf y2 = *(v4sf*)_ps_sincof_p0;
|
||||
y2 = _mm_mul_ps(y2, z);
|
||||
y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
|
||||
y2 = _mm_mul_ps(y2, z);
|
||||
y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
|
||||
y2 = _mm_mul_ps(y2, z);
|
||||
y2 = _mm_mul_ps(y2, x);
|
||||
y2 = _mm_add_ps(y2, x);
|
||||
|
||||
/* select the correct result from the two polynoms */
|
||||
xmm3 = poly_mask;
|
||||
y2 = _mm_and_ps(xmm3, y2); //, xmm3);
|
||||
y = _mm_andnot_ps(xmm3, y);
|
||||
y = _mm_add_ps(y,y2);
|
||||
/* update the sign */
|
||||
y = _mm_xor_ps(y, sign_bit);
|
||||
|
||||
return y;
|
||||
}
|
||||
|
||||
/* since sin_ps and cos_ps are almost identical, sincos_ps could replace both of them..
|
||||
it is almost as fast, and gives you a free cosine with your sine */
|
||||
void sincos_ps(v4sf x, v4sf *s, v4sf *c) {
|
||||
v4sf xmm1, xmm2, xmm3 = _mm_setzero_ps(), sign_bit_sin, y;
|
||||
#ifdef USE_SSE2
|
||||
v4si emm0, emm2, emm4;
|
||||
#else
|
||||
v2si mm0, mm1, mm2, mm3, mm4, mm5;
|
||||
#endif
|
||||
sign_bit_sin = x;
|
||||
/* take the absolute value */
|
||||
x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
|
||||
/* extract the sign bit (upper one) */
|
||||
sign_bit_sin = _mm_and_ps(sign_bit_sin, *(v4sf*)_ps_sign_mask);
|
||||
|
||||
/* scale by 4/Pi */
|
||||
y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);
|
||||
|
||||
#ifdef USE_SSE2
|
||||
/* store the integer part of y in emm2 */
|
||||
emm2 = _mm_cvttps_epi32(y);
|
||||
|
||||
/* j=(j+1) & (~1) (see the cephes sources) */
|
||||
emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
|
||||
emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
|
||||
y = _mm_cvtepi32_ps(emm2);
|
||||
|
||||
emm4 = emm2;
|
||||
|
||||
/* get the swap sign flag for the sine */
|
||||
emm0 = _mm_and_si128(emm2, *(v4si*)_pi32_4);
|
||||
emm0 = _mm_slli_epi32(emm0, 29);
|
||||
v4sf swap_sign_bit_sin = _mm_castsi128_ps(emm0);
|
||||
|
||||
/* get the polynom selection mask for the sine*/
|
||||
emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
|
||||
emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
|
||||
v4sf poly_mask = _mm_castsi128_ps(emm2);
|
||||
#else
|
||||
/* store the integer part of y in mm2:mm3 */
|
||||
xmm3 = _mm_movehl_ps(xmm3, y);
|
||||
mm2 = _mm_cvttps_pi32(y);
|
||||
mm3 = _mm_cvttps_pi32(xmm3);
|
||||
|
||||
/* j=(j+1) & (~1) (see the cephes sources) */
|
||||
mm2 = _mm_add_pi32(mm2, *(v2si*)_pi32_1);
|
||||
mm3 = _mm_add_pi32(mm3, *(v2si*)_pi32_1);
|
||||
mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_inv1);
|
||||
mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_inv1);
|
||||
|
||||
y = _mm_cvtpi32x2_ps(mm2, mm3);
|
||||
|
||||
mm4 = mm2;
|
||||
mm5 = mm3;
|
||||
|
||||
/* get the swap sign flag for the sine */
|
||||
mm0 = _mm_and_si64(mm2, *(v2si*)_pi32_4);
|
||||
mm1 = _mm_and_si64(mm3, *(v2si*)_pi32_4);
|
||||
mm0 = _mm_slli_pi32(mm0, 29);
|
||||
mm1 = _mm_slli_pi32(mm1, 29);
|
||||
v4sf swap_sign_bit_sin;
|
||||
COPY_MM_TO_XMM(mm0, mm1, swap_sign_bit_sin);
|
||||
|
||||
/* get the polynom selection mask for the sine */
|
||||
|
||||
mm2 = _mm_and_si64(mm2, *(v2si*)_pi32_2);
|
||||
mm3 = _mm_and_si64(mm3, *(v2si*)_pi32_2);
|
||||
mm2 = _mm_cmpeq_pi32(mm2, _mm_setzero_si64());
|
||||
mm3 = _mm_cmpeq_pi32(mm3, _mm_setzero_si64());
|
||||
v4sf poly_mask;
|
||||
COPY_MM_TO_XMM(mm2, mm3, poly_mask);
|
||||
#endif
|
||||
|
||||
/* The magic pass: "Extended precision modular arithmetic"
   x = ((x - y * DP1) - y * DP2) - y * DP3; */
xmm1 = *(v4sf*)_ps_minus_cephes_DP1;
xmm2 = *(v4sf*)_ps_minus_cephes_DP2;
xmm3 = *(v4sf*)_ps_minus_cephes_DP3;
xmm1 = _mm_mul_ps(y, xmm1);
xmm2 = _mm_mul_ps(y, xmm2);
xmm3 = _mm_mul_ps(y, xmm3);
x = _mm_add_ps(x, xmm1);
x = _mm_add_ps(x, xmm2);
x = _mm_add_ps(x, xmm3);
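/* Editor's note: this is Cody-Waite ("extended precision") argument
   reduction. -Pi/4 is split into three constants whose sum carries more
   bits than a single float, DP1 + DP2 + DP3 ~= -Pi/4, so subtracting
   y*(Pi/4) term by term keeps the reduced argument accurate even when the
   quadrant count y is large. Scalar equivalent:
       x = ((x + y*DP1) + y*DP2) + y*DP3;   (the DP constants are negative) */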
#ifdef USE_SSE2
emm4 = _mm_sub_epi32(emm4, *(v4si*)_pi32_2);
emm4 = _mm_andnot_si128(emm4, *(v4si*)_pi32_4);
emm4 = _mm_slli_epi32(emm4, 29);
v4sf sign_bit_cos = _mm_castsi128_ps(emm4);
#else
/* get the sign flag for the cosine */
mm4 = _mm_sub_pi32(mm4, *(v2si*)_pi32_2);
mm5 = _mm_sub_pi32(mm5, *(v2si*)_pi32_2);
mm4 = _mm_andnot_si64(mm4, *(v2si*)_pi32_4);
mm5 = _mm_andnot_si64(mm5, *(v2si*)_pi32_4);
mm4 = _mm_slli_pi32(mm4, 29);
mm5 = _mm_slli_pi32(mm5, 29);
v4sf sign_bit_cos;
COPY_MM_TO_XMM(mm4, mm5, sign_bit_cos);
_mm_empty(); /* good-bye mmx */
#endif

sign_bit_sin = _mm_xor_ps(sign_bit_sin, swap_sign_bit_sin);

/* Evaluate the first polynomial (0 <= x <= Pi/4) */
v4sf z = _mm_mul_ps(x, x);
y = *(v4sf*)_ps_coscof_p0;

y = _mm_mul_ps(y, z);
y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p1);
y = _mm_mul_ps(y, z);
y = _mm_add_ps(y, *(v4sf*)_ps_coscof_p2);
y = _mm_mul_ps(y, z);
y = _mm_mul_ps(y, z);
v4sf tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
y = _mm_sub_ps(y, tmp);
y = _mm_add_ps(y, *(v4sf*)_ps_1);
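/* Editor's note: with z = x*x, the above is Horner evaluation of the cosine
   minimax polynomial: y = ((c0*z + c1)*z + c2)*z*z - z/2 + 1 ~= cos(x). */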
/* Evaluate the second polynomial (-Pi/4 <= x <= 0) */

v4sf y2 = *(v4sf*)_ps_sincof_p0;
y2 = _mm_mul_ps(y2, z);
y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
y2 = _mm_mul_ps(y2, z);
y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
y2 = _mm_mul_ps(y2, z);
y2 = _mm_mul_ps(y2, x);
y2 = _mm_add_ps(y2, x);
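/* Editor's note: likewise y2 = ((s0*z + s1)*z + s2)*z*x + x ~= sin(x),
   i.e. x plus the odd-power correction terms. */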

/* select the correct result from the two polynomials */
xmm3 = poly_mask;
v4sf ysin2 = _mm_and_ps(xmm3, y2);
v4sf ysin1 = _mm_andnot_ps(xmm3, y);
y2 = _mm_sub_ps(y2, ysin2);
y = _mm_sub_ps(y, ysin1);

xmm1 = _mm_add_ps(ysin1, ysin2);
xmm2 = _mm_add_ps(y, y2);

/* update the sign */
*s = _mm_xor_ps(xmm1, sign_bit_sin);
*c = _mm_xor_ps(xmm2, sign_bit_cos);
}
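
/* Editor's sketch (not part of this commit): typical use of sincos_ps to
   produce four sines and four cosines per call, e.g. when filling a twiddle
   table. The function and buffer names are illustrative; n is assumed to be
   a multiple of 4 and the pointers 16-byte aligned. */
#if 0
static void fill_sincos_table(float *sines, float *cosines,
                              const float *angles, int n)
{
   for (int i = 0; i < n; i += 4) {
      v4sf a = _mm_load_ps(angles + i);  /* load four angles at once */
      v4sf s, c;
      sincos_ps(a, &s, &c);              /* one call yields both results */
      _mm_store_ps(sines + i, s);
      _mm_store_ps(cosines + i, c);
   }
}
#endif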

#endif
File diff suppressed because it is too large
Load Diff
@ -13,18 +13,21 @@ Intrinsics (SSE/AVX) and Threaded Equalization

#ifndef __AUDACITY_EFFECT_EQUALIZATION48X__
#define __AUDACITY_EFFECT_EQUALIZATION48X__

#ifdef __AVX_ENABLED
#define __MAXBUFFERCOUNT 8
#else
#define __MAXBUFFERCOUNT 4
#endif

// bitwise function selection
// options are
#define MATH_FUNCTION_ORIGINAL 0 // 0 original path
#define MATH_FUNCTION_BITREVERSE_TABLE 1 // 1 SSE BitReverse Table
#define MATH_FUNCTION_SIN_COS_TABLE 2 // 2 SSE SinCos Table
// 3 SSE with SinCos and BitReverse buffer
#define MATH_FUNCTION_SSE 4 // 4 SSE no SinCos and no BitReverse buffer
#define MATH_FUNCTION_THREADED 8 // 8 SSE threaded no SinCos and no BitReverse buffer
// 9 SSE threaded BitReverse Table
// 10 SSE threaded SinCos Table
// 11 SSE threaded with SinCos and BitReverse buffer
//#define MATH_FUNCTION_AVX 16
#define MATH_FUNCTION_THREADED 4 // 4 SSE threaded no SinCos and no BitReverse buffer
#define MATH_FUNCTION_SSE 8 // 8 SSE no SinCos and no BitReverse buffer
#define MATH_FUNCTION_AVX 16
#define MATH_FUNCTION_SEGMENTED_CODE 32
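/* Editor's note: these values are bit flags, so a processing mode is built
   by OR-ing them together, matching the combinations listed in the comments
   above (e.g. 9 = threaded | bit-reverse table under the old numbering).
   The variable name below is illustrative only. */
/* int mathPath = MATH_FUNCTION_SSE | MATH_FUNCTION_THREADED; */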
// added by Andrew Hallendorff: intrinsics processing
enum EQBufferStatus
@ -35,16 +38,16 @@ enum EQBufferStatus
BufferDone
};

class BufferInfo {
public:
BufferInfo() { mBufferLength=0; mBufferStatus=BufferEmpty; };
float* mBufferSouce[4];
float* mBufferDest[4];
BufferInfo() { mBufferLength=0; mBufferStatus=BufferEmpty; mContiguousBufferSize=0; };
float* mBufferSouce[__MAXBUFFERCOUNT];
float* mBufferDest[__MAXBUFFERCOUNT];
int mBufferLength;
sampleCount mFftWindowSize;
sampleCount mFftFilterSize;
float* mScratchBuffer;
int mContiguousBufferSize;
EQBufferStatus mBufferStatus;
};

@ -64,20 +67,22 @@ typedef struct {
int FMA4;
} MathCaps;

class EffectEqualization;

class EffectEqualization48x;

static int EQWorkerCounter=0;

class EQWorker : public wxThread {
public:
EQWorker():wxThread(wxTHREAD_JOINABLE) {
mBufferInfoList=NULL;
mBufferInfoCount=0;
mMutex=NULL;
mEffectEqualization48x=NULL;
mExitLoop=false;
mThreadID=EQWorkerCounter++;
mProcessingType=4;
}
void SetData( BufferInfo* bufferInfoList, int bufferInfoCount, wxMutex *mutex, EffectEqualization48x *effectEqualization48x) {
mBufferInfoList=bufferInfoList;
@ -94,6 +99,7 @@ public:
wxMutex *mMutex;
EffectEqualization48x *mEffectEqualization48x;
bool mExitLoop;
int mProcessingType;
};
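
/* Editor's sketch (not part of this commit): one plausible shape for the
   worker's Entry() loop, shown for illustration only -- the real
   implementation lives in Equalization48x.cpp. "BufferReady" is a
   hypothetical status name; only BufferEmpty and BufferDone are visible in
   this diff. */
/*
void *EQWorker::Entry()
{
   while (!mExitLoop) {
      mMutex->Lock();
      // scan mBufferInfoList[0..mBufferInfoCount) for a ready buffer,
      // claim it and mark it busy before releasing the lock
      mMutex->Unlock();
      // process the claimed buffer according to mProcessingType
   }
   return NULL;
}
*/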
class EffectEqualization48x {
@ -112,22 +118,37 @@ public:
bool Process(EffectEqualization* effectEqualization);
bool Benchmark(EffectEqualization* effectEqualization);
private:
bool RunFunctionSelect(int flags, int count, WaveTrack * t, sampleCount start, sampleCount len);
bool TrackCompare();
bool DeltaTrack(WaveTrack * t, WaveTrack * t2, sampleCount start, sampleCount len);
bool AllocateBuffersWorkers(bool threaded);
bool AllocateBuffersWorkers(int nThreads);
bool FreeBuffersWorkers();

bool ProcessTail(WaveTrack * t, WaveTrack * output, sampleCount start, sampleCount len);

bool ProcessBuffer(fft_type *sourceBuffer, fft_type *destBuffer, sampleCount bufferLength);
bool ProcessBuffer1x(BufferInfo *bufferInfo);
bool ProcessOne1x(int count, WaveTrack * t, sampleCount start, sampleCount len);
void Filter1x(sampleCount len, float *buffer, float *scratchBuffer);

bool ProcessBuffer4x(BufferInfo *bufferInfo);
bool ProcessOne4x(int count, WaveTrack * t, sampleCount start, sampleCount len);
bool ProcessOne4xThreaded(int count, WaveTrack * t, sampleCount start, sampleCount len);
bool ProcessTail(WaveTrack * t, WaveTrack * output, sampleCount start, sampleCount len);
bool ProcessOne1x4xThreaded(int count, WaveTrack * t, sampleCount start, sampleCount len, int processingType=4);
void Filter4x(sampleCount len, float *buffer, float *scratchBuffer);

#ifdef __AVX_ENABLED
bool ProcessBuffer8x(BufferInfo *bufferInfo);
bool ProcessOne8x(int count, WaveTrack * t, sampleCount start, sampleCount len);
bool ProcessOne8xThreaded(int count, WaveTrack * t, sampleCount start, sampleCount len);
void Filter8x(sampleCount len, float *buffer, float *scratchBuffer);
#endif

EffectEqualization* mEffectEqualization;
int mThreadCount;
sampleCount mFilterSize;
sampleCount mBlockSize;
sampleCount mWindowSize;
int mBufferCount;
int mWorkerDataCount;
int mBlocksPerBuffer;
int mScratchBufferSize;
@ -139,6 +160,7 @@ private:
bool mThreaded;
bool mBenching;
friend EQWorker;
friend EffectEqualization;
};

#endif