diff --git a/third_party/pffft/src/pffft.c b/third_party/pffft/src/pffft.c
index 643836626c0f..3033e61b813e 100644
--- a/third_party/pffft/src/pffft.c
+++ b/third_party/pffft/src/pffft.c
@@ -58,6 +58,7 @@
 */

 #include "pffft.h"
+#include <stdint.h>
 #include <stdlib.h>
 #include <stdio.h>
 #include <math.h>
@@ -125,7 +126,7 @@ inline v4sf ld_ps1(const float *p) { v4sf v=vec_lde(0,p); return vec_splat(vec_p
     x3 = vec_mergel(y1, y3);                    \
   }
 # define VSWAPHL(a,b) vec_perm(a,b, (vector unsigned char)(16,17,18,19,20,21,22,23,8,9,10,11,12,13,14,15))
-# define VALIGNED(ptr) ((((long)(ptr)) & 0xF) == 0)
+# define VALIGNED(ptr) ((((uintptr_t)(ptr)) & 0xF) == 0)

 /*
   SSE1 support macros
@@ -145,7 +146,7 @@ typedef __m128 v4sf;
 # define UNINTERLEAVE2(in1, in2, out1, out2) { v4sf tmp__ = _mm_shuffle_ps(in1, in2, _MM_SHUFFLE(2,0,2,0)); out2 = _mm_shuffle_ps(in1, in2, _MM_SHUFFLE(3,1,3,1)); out1 = tmp__; }
 # define VTRANSPOSE4(x0,x1,x2,x3) _MM_TRANSPOSE4_PS(x0,x1,x2,x3)
 # define VSWAPHL(a,b) _mm_shuffle_ps(b, a, _MM_SHUFFLE(3,2,1,0))
-# define VALIGNED(ptr) ((((long)(ptr)) & 0xF) == 0)
+# define VALIGNED(ptr) ((((uintptr_t)(ptr)) & 0xF) == 0)

 /*
   ARM NEON support macros
@@ -172,7 +173,7 @@ typedef float32x4_t v4sf;
 // marginally faster version
 //# define VTRANSPOSE4(x0,x1,x2,x3) { asm("vtrn.32 %q0, %q1;\n vtrn.32 %q2,%q3\n vswp %f0,%e2\n vswp %f1,%e3" : "+w"(x0), "+w"(x1), "+w"(x2), "+w"(x3)::); }
 # define VSWAPHL(a,b) vcombine_f32(vget_low_f32(b), vget_high_f32(a))
-# define VALIGNED(ptr) ((((long)(ptr)) & 0x3) == 0)
+# define VALIGNED(ptr) ((((uintptr_t)(ptr)) & 0x3) == 0)
 #else
 # if !defined(PFFFT_SIMD_DISABLE)
 #   warning "building with simd disabled !\n";
@@ -190,7 +191,7 @@ typedef float v4sf;
 # define VMADD(a,b,c) ((a)*(b)+(c))
 # define VSUB(a,b) ((a)-(b))
 # define LD_PS1(p) (p)
-# define VALIGNED(ptr) ((((long)(ptr)) & 0x3) == 0)
+# define VALIGNED(ptr) ((((uintptr_t)(ptr)) & 0x3) == 0)
 #endif

 // shortcuts for complex multiplcations
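
Rationale, with a minimal standalone sketch below (the IS_ALIGNED16 macro and the test program are illustrative names for this note, not pffft API): casting a pointer to long is not portable, because on LLP64 platforms such as 64-bit Windows long is 32 bits wide while pointers are 64 bits, so the cast truncates the address. uintptr_t, declared in <stdint.h> (hence the added include), is an unsigned integer type guaranteed to round-trip any object pointer, which makes the alignment test well defined everywhere.

/* Minimal sketch, assuming a C99 toolchain. IS_ALIGNED16 mirrors the shape
   of the patched VALIGNED macro but is a hypothetical name for this note. */
#include <stdint.h>
#include <stdio.h>

/* True when ptr is 16-byte aligned: the low four address bits are all zero. */
#define IS_ALIGNED16(ptr) ((((uintptr_t)(ptr)) & 0xF) == 0)

int main(void) {
  float buf[8];
  /* With a (long) cast this test could inspect a truncated address on LLP64;
     with uintptr_t the full pointer value is examined. */
  printf("buf %s 16-byte aligned\n", IS_ALIGNED16(&buf[0]) ? "is" : "is not");
  return 0;
}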