Diffstat (limited to 'thirdparty/embree/common')
49 files changed, 8508 insertions, 4953 deletions
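Several of the hunks below (color.h, math.h, vec2fa.h, vec3fa.h) replace SSE reciprocal and reciprocal-square-root estimates refined by Newton-Raphson steps with aarch64 NEON code built from vrecpeq_f32/vrecpsq_f32 and vrsqrteq_f32/vrsqrtsq_f32. As a rough standalone sketch of that recurring pattern (assumes AArch64 with arm_neon.h; the names rcp4 and rsqrt4 are illustrative and not part of the patch):

    // Sketch only: the two-step refinement pattern used by the aarch64 paths below.
    #include <arm_neon.h>

    static inline float32x4_t rcp4(float32x4_t a)
    {
        // Coarse estimate, then two Newton-Raphson steps:
        // x' = x * (2 - a*x); vrecpsq_f32(a, x) computes (2 - a*x).
        float32x4_t r = vrecpeq_f32(a);
        r = vmulq_f32(vrecpsq_f32(a, r), r);
        r = vmulq_f32(vrecpsq_f32(a, r), r);
        return r;
    }

    static inline float32x4_t rsqrt4(float32x4_t a)
    {
        // Same idea for 1/sqrt(a); vrsqrtsq_f32(a*x, x) computes (3 - a*x*x)/2.
        float32x4_t r = vrsqrteq_f32(a);
        r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a, r), r));
        r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a, r), r));
        return r;
    }

The SSE paths in the same hunks keep equivalent math, rewritten as r + r * (1 - a * r) and expressed with _mm_fnmadd_ps/_mm_fmadd_ps where FMA is available.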
diff --git a/thirdparty/embree/common/algorithms/parallel_for.h b/thirdparty/embree/common/algorithms/parallel_for.h index 645681ac63..6d411e4852 100644 --- a/thirdparty/embree/common/algorithms/parallel_for.h +++ b/thirdparty/embree/common/algorithms/parallel_for.h @@ -26,7 +26,6 @@ namespace embree abort(); // -- GODOT end -- } - #elif defined(TASKING_TBB) #if TBB_INTERFACE_VERSION >= 12002 tbb::task_group_context context; diff --git a/thirdparty/embree/common/algorithms/parallel_for_for.h b/thirdparty/embree/common/algorithms/parallel_for_for.h index 92c37a4a38..7838ef11b3 100644 --- a/thirdparty/embree/common/algorithms/parallel_for_for.h +++ b/thirdparty/embree/common/algorithms/parallel_for_for.h @@ -30,15 +30,20 @@ namespace embree template<typename ArrayArray> __forceinline ParallelForForState (ArrayArray& array2, const size_t minStepSize) { init(array2,minStepSize); + } + + template<typename SizeFunc> + __forceinline ParallelForForState (const size_t numArrays, const SizeFunc& getSize, const size_t minStepSize) { + init(numArrays,getSize,minStepSize); } - template<typename ArrayArray> - __forceinline void init ( ArrayArray& array2, const size_t minStepSize ) + template<typename SizeFunc> + __forceinline void init ( const size_t numArrays, const SizeFunc& getSize, const size_t minStepSize ) { /* first calculate total number of elements */ size_t N = 0; - for (size_t i=0; i<array2.size(); i++) { - N += array2[i] ? array2[i]->size() : 0; + for (size_t i=0; i<numArrays; i++) { + N += getSize(i); } this->N = N; @@ -54,8 +59,8 @@ namespace embree size_t k0 = (++taskIndex)*N/taskCount; for (size_t i=0, k=0; taskIndex < taskCount; i++) { - assert(i<array2.size()); - size_t j=0, M = array2[i] ? array2[i]->size() : 0; + assert(i<numArrays); + size_t j=0, M = getSize(i); while (j<M && k+M-j >= k0 && taskIndex < taskCount) { assert(taskIndex<taskCount); i0[taskIndex] = i; @@ -67,6 +72,12 @@ namespace embree } } + template<typename ArrayArray> + __forceinline void init ( ArrayArray& array2, const size_t minStepSize ) + { + init(array2.size(),[&](size_t i) { return array2[i] ? 
array2[i]->size() : 0; },minStepSize); + } + __forceinline size_t size() const { return N; } diff --git a/thirdparty/embree/common/algorithms/parallel_for_for_prefix_sum.h b/thirdparty/embree/common/algorithms/parallel_for_for_prefix_sum.h index b15b44a991..8c3f4aace7 100644 --- a/thirdparty/embree/common/algorithms/parallel_for_for_prefix_sum.h +++ b/thirdparty/embree/common/algorithms/parallel_for_for_prefix_sum.h @@ -17,15 +17,20 @@ namespace embree __forceinline ParallelForForPrefixSumState (ArrayArray& array2, const size_t minStepSize) : ParallelForForState(array2,minStepSize) {} + template<typename SizeFunc> + __forceinline ParallelForForPrefixSumState (size_t numArrays, const SizeFunc& getSize, const size_t minStepSize) + : ParallelForForState(numArrays,getSize,minStepSize) {} + ParallelPrefixSumState<Value> prefix_state; }; - template<typename ArrayArray, typename Index, typename Value, typename Func, typename Reduction> - __forceinline Value parallel_for_for_prefix_sum0( ParallelForForPrefixSumState<Value>& state, ArrayArray& array2, Index minStepSize, - const Value& identity, const Func& func, const Reduction& reduction) + template<typename SizeFunc, typename Index, typename Value, typename Func, typename Reduction> + __forceinline Value parallel_for_for_prefix_sum0_( ParallelForForPrefixSumState<Value>& state, Index minStepSize, + const SizeFunc& getSize, const Value& identity, const Func& func, const Reduction& reduction) { /* calculate number of tasks to use */ const size_t taskCount = state.taskCount; + /* perform parallel prefix sum */ parallel_for(taskCount, [&](const size_t taskIndex) { @@ -38,9 +43,9 @@ namespace embree size_t k=k0; Value N=identity; for (size_t i=i0; k<k1; i++) { - const size_t size = array2[i] ? array2[i]->size() : 0; + const size_t size = getSize(i); const size_t r0 = j0, r1 = min(size,r0+k1-k); - if (r1 > r0) N = reduction(N, func(array2[i],range<Index>((Index)r0,(Index)r1),(Index)k,(Index)i)); + if (r1 > r0) N = reduction(N, func((Index)i,range<Index>((Index)r0,(Index)r1),(Index)k)); k+=r1-r0; j0 = 0; } state.prefix_state.counts[taskIndex] = N; @@ -58,9 +63,10 @@ namespace embree return sum; } - template<typename ArrayArray, typename Index, typename Value, typename Func, typename Reduction> - __forceinline Value parallel_for_for_prefix_sum1( ParallelForForPrefixSumState<Value>& state, ArrayArray& array2, Index minStepSize, - const Value& identity, const Func& func, const Reduction& reduction) + template<typename SizeFunc, typename Index, typename Value, typename Func, typename Reduction> + __forceinline Value parallel_for_for_prefix_sum1_( ParallelForForPrefixSumState<Value>& state, Index minStepSize, + const SizeFunc& getSize, + const Value& identity, const Func& func, const Reduction& reduction) { /* calculate number of tasks to use */ const size_t taskCount = state.taskCount; @@ -76,9 +82,9 @@ namespace embree size_t k=k0; Value N=identity; for (size_t i=i0; k<k1; i++) { - const size_t size = array2[i] ? 
array2[i]->size() : 0; + const size_t size = getSize(i); const size_t r0 = j0, r1 = min(size,r0+k1-k); - if (r1 > r0) N = reduction(N, func(array2[i],range<Index>((Index)r0,(Index)r1),(Index)k,(Index)i,reduction(state.prefix_state.sums[taskIndex],N))); + if (r1 > r0) N = reduction(N, func((Index)i,range<Index>((Index)r0,(Index)r1),(Index)k,reduction(state.prefix_state.sums[taskIndex],N))); k+=r1-r0; j0 = 0; } state.prefix_state.counts[taskIndex] = N; @@ -96,6 +102,30 @@ namespace embree return sum; } + template<typename ArrayArray, typename Index, typename Value, typename Func, typename Reduction> + __forceinline Value parallel_for_for_prefix_sum0( ParallelForForPrefixSumState<Value>& state, + ArrayArray& array2, Index minStepSize, + const Value& identity, const Func& func, const Reduction& reduction) + { + return parallel_for_for_prefix_sum0_(state,minStepSize, + [&](Index i) { return array2[i] ? array2[i]->size() : 0; }, + identity, + [&](Index i, const range<Index>& r, Index k) { return func(array2[i], r, k, i); }, + reduction); + } + + template<typename ArrayArray, typename Index, typename Value, typename Func, typename Reduction> + __forceinline Value parallel_for_for_prefix_sum1( ParallelForForPrefixSumState<Value>& state, + ArrayArray& array2, Index minStepSize, + const Value& identity, const Func& func, const Reduction& reduction) + { + return parallel_for_for_prefix_sum1_(state,minStepSize, + [&](Index i) { return array2[i] ? array2[i]->size() : 0; }, + identity, + [&](Index i, const range<Index>& r, Index k, const Value& base) { return func(array2[i], r, k, i, base); }, + reduction); + } + template<typename ArrayArray, typename Value, typename Func, typename Reduction> __forceinline Value parallel_for_for_prefix_sum0( ParallelForForPrefixSumState<Value>& state, ArrayArray& array2, const Value& identity, const Func& func, const Reduction& reduction) diff --git a/thirdparty/embree/common/algorithms/parallel_reduce.h b/thirdparty/embree/common/algorithms/parallel_reduce.h index 8271372ea4..cd0078f2e6 100644 --- a/thirdparty/embree/common/algorithms/parallel_reduce.h +++ b/thirdparty/embree/common/algorithms/parallel_reduce.h @@ -26,7 +26,7 @@ namespace embree const Index threadCount = (Index) TaskScheduler::threadCount(); taskCount = min(taskCount,threadCount,maxTasks); - /* parallel invokation of all tasks */ + /* parallel invocation of all tasks */ dynamic_large_stack_array(Value,values,taskCount,8192); // consumes at most 8192 bytes on the stack parallel_for(taskCount, [&](const Index taskIndex) { const Index k0 = first+(taskIndex+0)*(last-first)/taskCount; diff --git a/thirdparty/embree/common/math/bbox.h b/thirdparty/embree/common/math/bbox.h index bc43155358..e4eb3df9a4 100644 --- a/thirdparty/embree/common/math/bbox.h +++ b/thirdparty/embree/common/math/bbox.h @@ -77,7 +77,7 @@ namespace embree return lower > upper; } -#if defined(__SSE__) +#if defined(__SSE__) || defined(__ARM_NEON) template<> __forceinline bool BBox<Vec3fa>::empty() const { return !all(le_mask(lower,upper)); } @@ -196,11 +196,11 @@ namespace embree } template<> __inline bool subset( const BBox<Vec3fa>& a, const BBox<Vec3fa>& b ) { - return all(ge_mask(a.lower,b.lower)) & all(le_mask(a.upper,b.upper)); + return all(ge_mask(a.lower,b.lower)) && all(le_mask(a.upper,b.upper)); } template<> __inline bool subset( const BBox<Vec3fx>& a, const BBox<Vec3fx>& b ) { - return all(ge_mask(a.lower,b.lower)) & all(le_mask(a.upper,b.upper)); + return all(ge_mask(a.lower,b.lower)) && all(le_mask(a.upper,b.upper)); } /*! 
blending */ @@ -228,11 +228,11 @@ namespace embree /// SSE / AVX / MIC specializations //////////////////////////////////////////////////////////////////////////////// -#if defined __SSE__ +#if defined (__SSE__) || defined(__ARM_NEON) #include "../simd/sse.h" #endif -#if defined __AVX__ +#if defined (__AVX__) #include "../simd/avx.h" #endif diff --git a/thirdparty/embree/common/math/color.h b/thirdparty/embree/common/math/color.h index 529584ea16..e62e4ad2a4 100644 --- a/thirdparty/embree/common/math/color.h +++ b/thirdparty/embree/common/math/color.h @@ -152,21 +152,38 @@ namespace embree } __forceinline const Color rcp ( const Color& a ) { +#if defined(__aarch64__) + __m128 reciprocal = _mm_rcp_ps(a.m128); + reciprocal = vmulq_f32(vrecpsq_f32(a.m128, reciprocal), reciprocal); + reciprocal = vmulq_f32(vrecpsq_f32(a.m128, reciprocal), reciprocal); + return (const Color)reciprocal; +#else #if defined(__AVX512VL__) const Color r = _mm_rcp14_ps(a.m128); #else const Color r = _mm_rcp_ps(a.m128); #endif - return _mm_sub_ps(_mm_add_ps(r, r), _mm_mul_ps(_mm_mul_ps(r, r), a)); + return _mm_add_ps(r,_mm_mul_ps(r, _mm_sub_ps(_mm_set1_ps(1.0f), _mm_mul_ps(a, r)))); // computes r + r * (1 - a * r) + +#endif //defined(__aarch64__) } __forceinline const Color rsqrt( const Color& a ) { +#if defined(__aarch64__) + __m128 r = _mm_rsqrt_ps(a.m128); + r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r)); + r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r)); + return r; +#else + #if defined(__AVX512VL__) __m128 r = _mm_rsqrt14_ps(a.m128); #else __m128 r = _mm_rsqrt_ps(a.m128); #endif return _mm_add_ps(_mm_mul_ps(_mm_set1_ps(1.5f),r), _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r))); + +#endif //defined(__aarch64__) } __forceinline const Color sqrt ( const Color& a ) { return _mm_sqrt_ps(a.m128); } diff --git a/thirdparty/embree/common/math/constants.cpp b/thirdparty/embree/common/math/constants.cpp index 03919ae20c..f51c642bfc 100644 --- a/thirdparty/embree/common/math/constants.cpp +++ b/thirdparty/embree/common/math/constants.cpp @@ -5,23 +5,4 @@ namespace embree { - TrueTy True; - FalseTy False; - ZeroTy zero; - OneTy one; - NegInfTy neg_inf; - PosInfTy inf; - PosInfTy pos_inf; - NaNTy nan; - UlpTy ulp; - PiTy pi; - OneOverPiTy one_over_pi; - TwoPiTy two_pi; - OneOverTwoPiTy one_over_two_pi; - FourPiTy four_pi; - OneOverFourPiTy one_over_four_pi; - StepTy step; - ReverseStepTy reverse_step; - EmptyTy empty; - UndefinedTy undefined; } diff --git a/thirdparty/embree/common/math/constants.h b/thirdparty/embree/common/math/constants.h index 578473a8ab..07a1a868ba 100644 --- a/thirdparty/embree/common/math/constants.h +++ b/thirdparty/embree/common/math/constants.h @@ -24,13 +24,13 @@ namespace embree __forceinline operator bool( ) const { return true; } }; - extern MAYBE_UNUSED TrueTy True; + const constexpr TrueTy True = TrueTy(); struct FalseTy { __forceinline operator bool( ) const { return false; } }; - extern MAYBE_UNUSED FalseTy False; + const constexpr FalseTy False = FalseTy(); struct ZeroTy { @@ -48,7 +48,7 @@ namespace embree __forceinline operator unsigned char ( ) const { return 0; } }; - extern MAYBE_UNUSED ZeroTy zero; + const constexpr ZeroTy zero = ZeroTy(); struct OneTy { @@ -66,7 +66,7 @@ namespace embree __forceinline operator unsigned char ( ) const { return 1; } }; - extern MAYBE_UNUSED OneTy one; + const constexpr OneTy one = OneTy(); struct NegInfTy { @@ -85,7 +85,7 @@ namespace embree }; - extern MAYBE_UNUSED NegInfTy neg_inf; + const constexpr 
NegInfTy neg_inf = NegInfTy(); struct PosInfTy { @@ -103,8 +103,8 @@ namespace embree __forceinline operator unsigned char ( ) const { return std::numeric_limits<unsigned char>::max(); } }; - extern MAYBE_UNUSED PosInfTy inf; - extern MAYBE_UNUSED PosInfTy pos_inf; + const constexpr PosInfTy inf = PosInfTy(); + const constexpr PosInfTy pos_inf = PosInfTy(); struct NaNTy { @@ -112,15 +112,15 @@ namespace embree __forceinline operator float ( ) const { return std::numeric_limits<float>::quiet_NaN(); } }; - extern MAYBE_UNUSED NaNTy nan; + const constexpr NaNTy nan = NaNTy(); struct UlpTy { __forceinline operator double( ) const { return std::numeric_limits<double>::epsilon(); } __forceinline operator float ( ) const { return std::numeric_limits<float>::epsilon(); } }; - - extern MAYBE_UNUSED UlpTy ulp; + + const constexpr UlpTy ulp = UlpTy(); struct PiTy { @@ -128,7 +128,7 @@ namespace embree __forceinline operator float ( ) const { return float(M_PI); } }; - extern MAYBE_UNUSED PiTy pi; + const constexpr PiTy pi = PiTy(); struct OneOverPiTy { @@ -136,7 +136,7 @@ namespace embree __forceinline operator float ( ) const { return float(M_1_PI); } }; - extern MAYBE_UNUSED OneOverPiTy one_over_pi; + const constexpr OneOverPiTy one_over_pi = OneOverPiTy(); struct TwoPiTy { @@ -144,7 +144,7 @@ namespace embree __forceinline operator float ( ) const { return float(2.0*M_PI); } }; - extern MAYBE_UNUSED TwoPiTy two_pi; + const constexpr TwoPiTy two_pi = TwoPiTy(); struct OneOverTwoPiTy { @@ -152,7 +152,7 @@ namespace embree __forceinline operator float ( ) const { return float(0.5*M_1_PI); } }; - extern MAYBE_UNUSED OneOverTwoPiTy one_over_two_pi; + const constexpr OneOverTwoPiTy one_over_two_pi = OneOverTwoPiTy(); struct FourPiTy { @@ -160,7 +160,7 @@ namespace embree __forceinline operator float ( ) const { return float(4.0*M_PI); } }; - extern MAYBE_UNUSED FourPiTy four_pi; + const constexpr FourPiTy four_pi = FourPiTy(); struct OneOverFourPiTy { @@ -168,30 +168,42 @@ namespace embree __forceinline operator float ( ) const { return float(0.25*M_1_PI); } }; - extern MAYBE_UNUSED OneOverFourPiTy one_over_four_pi; + const constexpr OneOverFourPiTy one_over_four_pi = OneOverFourPiTy(); struct StepTy { + __forceinline operator double ( ) const { return 0; } + __forceinline operator float ( ) const { return 0; } + __forceinline operator long long( ) const { return 0; } + __forceinline operator unsigned long long( ) const { return 0; } + __forceinline operator long ( ) const { return 0; } + __forceinline operator unsigned long ( ) const { return 0; } + __forceinline operator int ( ) const { return 0; } + __forceinline operator unsigned int ( ) const { return 0; } + __forceinline operator short ( ) const { return 0; } + __forceinline operator unsigned short ( ) const { return 0; } + __forceinline operator char ( ) const { return 0; } + __forceinline operator unsigned char ( ) const { return 0; } }; - extern MAYBE_UNUSED StepTy step; + const constexpr StepTy step = StepTy(); struct ReverseStepTy { }; - extern MAYBE_UNUSED ReverseStepTy reverse_step; + const constexpr ReverseStepTy reverse_step = ReverseStepTy(); struct EmptyTy { }; - extern MAYBE_UNUSED EmptyTy empty; + const constexpr EmptyTy empty = EmptyTy(); struct FullTy { }; - extern MAYBE_UNUSED FullTy full; + const constexpr FullTy full = FullTy(); struct UndefinedTy { }; - extern MAYBE_UNUSED UndefinedTy undefined; + const constexpr UndefinedTy undefined = UndefinedTy(); } diff --git a/thirdparty/embree/common/math/math.h 
b/thirdparty/embree/common/math/math.h index 4bc54c1a6a..7930c17727 100644 --- a/thirdparty/embree/common/math/math.h +++ b/thirdparty/embree/common/math/math.h @@ -53,6 +53,16 @@ namespace embree __forceinline float rcp ( const float x ) { +#if defined(__aarch64__) + // Move scalar to vector register and do rcp. + __m128 a; + a[0] = x; + float32x4_t reciprocal = vrecpeq_f32(a); + reciprocal = vmulq_f32(vrecpsq_f32(a, reciprocal), reciprocal); + reciprocal = vmulq_f32(vrecpsq_f32(a, reciprocal), reciprocal); + return reciprocal[0]; +#else + const __m128 a = _mm_set_ss(x); #if defined(__AVX512VL__) @@ -66,30 +76,71 @@ namespace embree #else return _mm_cvtss_f32(_mm_mul_ss(r,_mm_sub_ss(_mm_set_ss(2.0f), _mm_mul_ss(r, a)))); #endif + +#endif //defined(__aarch64__) } __forceinline float signmsk ( const float x ) { +#if defined(__aarch64__) + // FP and Neon shares same vector register in arm64 + __m128 a; + __m128i b; + a[0] = x; + b[0] = 0x80000000; + a = _mm_and_ps(a, vreinterpretq_f32_s32(b)); + return a[0]; +#else return _mm_cvtss_f32(_mm_and_ps(_mm_set_ss(x),_mm_castsi128_ps(_mm_set1_epi32(0x80000000)))); +#endif } __forceinline float xorf( const float x, const float y ) { +#if defined(__aarch64__) + // FP and Neon shares same vector register in arm64 + __m128 a; + __m128 b; + a[0] = x; + b[0] = y; + a = _mm_xor_ps(a, b); + return a[0]; +#else return _mm_cvtss_f32(_mm_xor_ps(_mm_set_ss(x),_mm_set_ss(y))); +#endif } __forceinline float andf( const float x, const unsigned y ) { +#if defined(__aarch64__) + // FP and Neon shares same vector register in arm64 + __m128 a; + __m128i b; + a[0] = x; + b[0] = y; + a = _mm_and_ps(a, vreinterpretq_f32_s32(b)); + return a[0]; +#else return _mm_cvtss_f32(_mm_and_ps(_mm_set_ss(x),_mm_castsi128_ps(_mm_set1_epi32(y)))); +#endif } __forceinline float rsqrt( const float x ) { +#if defined(__aarch64__) + // FP and Neon shares same vector register in arm64 + __m128 a; + a[0] = x; + __m128 value = _mm_rsqrt_ps(a); + value = vmulq_f32(value, vrsqrtsq_f32(vmulq_f32(a, value), value)); + value = vmulq_f32(value, vrsqrtsq_f32(vmulq_f32(a, value), value)); + return value[0]; +#else + const __m128 a = _mm_set_ss(x); #if defined(__AVX512VL__) __m128 r = _mm_rsqrt14_ss(_mm_set_ss(0.0f),a); #else __m128 r = _mm_rsqrt_ss(a); #endif - r = _mm_add_ss(_mm_mul_ss(_mm_set_ss(1.5f), r), _mm_mul_ss(_mm_mul_ss(_mm_mul_ss(a, _mm_set_ss(-0.5f)), r), _mm_mul_ss(r, r))); -#if defined(__ARM_NEON) - r = _mm_add_ss(_mm_mul_ss(_mm_set_ss(1.5f), r), _mm_mul_ss(_mm_mul_ss(_mm_mul_ss(a, _mm_set_ss(-0.5f)), r), _mm_mul_ss(r, r))); + const __m128 c = _mm_add_ss(_mm_mul_ss(_mm_set_ss(1.5f), r), + _mm_mul_ss(_mm_mul_ss(_mm_mul_ss(a, _mm_set_ss(-0.5f)), r), _mm_mul_ss(r, r))); + return _mm_cvtss_f32(c); #endif - return _mm_cvtss_f32(r); } #if defined(__WIN32__) && defined(_MSC_VER) && (_MSC_VER <= 1700) @@ -146,7 +197,17 @@ namespace embree __forceinline double floor( const double x ) { return ::floor (x); } __forceinline double ceil ( const double x ) { return ::ceil (x); } -#if defined(__SSE4_1__) +#if defined(__aarch64__) + __forceinline float mini(float a, float b) { + // FP and Neon shares same vector register in arm64 + __m128 x; + __m128 y; + x[0] = a; + y[0] = b; + x = _mm_min_ps(x, y); + return x[0]; + } +#elif defined(__SSE4_1__) __forceinline float mini(float a, float b) { const __m128i ai = _mm_castps_si128(_mm_set_ss(a)); const __m128i bi = _mm_castps_si128(_mm_set_ss(b)); @@ -155,7 +216,17 @@ namespace embree } #endif -#if defined(__SSE4_1__) +#if defined(__aarch64__) + 
__forceinline float maxi(float a, float b) { + // FP and Neon shares same vector register in arm64 + __m128 x; + __m128 y; + x[0] = a; + y[0] = b; + x = _mm_max_ps(x, y); + return x[0]; + } +#elif defined(__SSE4_1__) __forceinline float maxi(float a, float b) { const __m128i ai = _mm_castps_si128(_mm_set_ss(a)); const __m128i bi = _mm_castps_si128(_mm_set_ss(b)); @@ -172,9 +243,12 @@ namespace embree __forceinline int64_t min(int64_t a, int64_t b) { return a<b ? a:b; } __forceinline float min(float a, float b) { return a<b ? a:b; } __forceinline double min(double a, double b) { return a<b ? a:b; } -#if defined(__64BIT__) +#if defined(__64BIT__) || defined(__EMSCRIPTEN__) __forceinline size_t min(size_t a, size_t b) { return a<b ? a:b; } #endif +#if defined(__EMSCRIPTEN__) + __forceinline long min(long a, long b) { return a<b ? a:b; } +#endif template<typename T> __forceinline T min(const T& a, const T& b, const T& c) { return min(min(a,b),c); } template<typename T> __forceinline T min(const T& a, const T& b, const T& c, const T& d) { return min(min(a,b),min(c,d)); } @@ -189,9 +263,12 @@ namespace embree __forceinline int64_t max(int64_t a, int64_t b) { return a<b ? b:a; } __forceinline float max(float a, float b) { return a<b ? b:a; } __forceinline double max(double a, double b) { return a<b ? b:a; } -#if defined(__64BIT__) +#if defined(__64BIT__) || defined(__EMSCRIPTEN__) __forceinline size_t max(size_t a, size_t b) { return a<b ? b:a; } #endif +#if defined(__EMSCRIPTEN__) + __forceinline long max(long a, long b) { return a<b ? b:a; } +#endif template<typename T> __forceinline T max(const T& a, const T& b, const T& c) { return max(max(a,b),c); } template<typename T> __forceinline T max(const T& a, const T& b, const T& c, const T& d) { return max(max(a,b),max(c,d)); } @@ -231,6 +308,15 @@ namespace embree __forceinline float msub ( const float a, const float b, const float c) { return _mm_cvtss_f32(_mm_fmsub_ss(_mm_set_ss(a),_mm_set_ss(b),_mm_set_ss(c))); } __forceinline float nmadd ( const float a, const float b, const float c) { return _mm_cvtss_f32(_mm_fnmadd_ss(_mm_set_ss(a),_mm_set_ss(b),_mm_set_ss(c))); } __forceinline float nmsub ( const float a, const float b, const float c) { return _mm_cvtss_f32(_mm_fnmsub_ss(_mm_set_ss(a),_mm_set_ss(b),_mm_set_ss(c))); } + +#elif defined (__aarch64__) && defined(__clang__) +#pragma clang fp contract(fast) +__forceinline float madd ( const float a, const float b, const float c) { return a*b + c; } +__forceinline float msub ( const float a, const float b, const float c) { return a*b - c; } +__forceinline float nmadd ( const float a, const float b, const float c) { return c - a*b; } +__forceinline float nmsub ( const float a, const float b, const float c) { return -(c + a*b); } +#pragma clang fp contract(on) + #else __forceinline float madd ( const float a, const float b, const float c) { return a*b+c; } __forceinline float msub ( const float a, const float b, const float c) { return a*b-c; } @@ -326,7 +412,7 @@ namespace embree return x | (y << 1) | (z << 2); } -#if defined(__AVX2__) +#if defined(__AVX2__) && !defined(__aarch64__) template<> __forceinline unsigned int bitInterleave(const unsigned int &xi, const unsigned int& yi, const unsigned int& zi) diff --git a/thirdparty/embree/common/math/quaternion.h b/thirdparty/embree/common/math/quaternion.h index 080800efcd..78efccda72 100644 --- a/thirdparty/embree/common/math/quaternion.h +++ b/thirdparty/embree/common/math/quaternion.h @@ -242,13 +242,17 @@ namespace embree T cosTheta = dot(q0, 
q1_); QuaternionT<T> q1 = select(cosTheta < 0.f, -q1_, q1_); cosTheta = select(cosTheta < 0.f, -cosTheta, cosTheta); - if (unlikely(all(cosTheta > 0.9995f))) { - return normalize(lerp(q0, q1, t)); - } + + // spherical linear interpolation const T phi = t * fastapprox::acos(cosTheta); T sinPhi, cosPhi; fastapprox::sincos(phi, sinPhi, cosPhi); QuaternionT<T> qperp = sinPhi * normalize(msub(cosTheta, q0, q1)); - return msub(cosPhi, q0, qperp); + QuaternionT<T> qslerp = msub(cosPhi, q0, qperp); + + // regular linear interpolation as fallback + QuaternionT<T> qlerp = normalize(lerp(q0, q1, t)); + + return select(cosTheta > 0.9995f, qlerp, qslerp); } } diff --git a/thirdparty/embree/common/math/transcendental.h b/thirdparty/embree/common/math/transcendental.h index fd16c26e81..daf9dd96d2 100644 --- a/thirdparty/embree/common/math/transcendental.h +++ b/thirdparty/embree/common/math/transcendental.h @@ -27,7 +27,7 @@ __forceinline T sin(const T &v) // Reduced range version of x auto x = v - kReal * piOverTwoVec; auto kMod4 = k & 3; - auto sinUseCos = (kMod4 == 1 | kMod4 == 3); + auto sinUseCos = (kMod4 == 1) | (kMod4 == 3); auto flipSign = (kMod4 > 1); // These coefficients are from sollya with fpminimax(sin(x)/x, [|0, 2, @@ -76,8 +76,8 @@ __forceinline T cos(const T &v) auto x = v - kReal * piOverTwoVec; auto kMod4 = k & 3; - auto cosUseCos = (kMod4 == 0 | kMod4 == 2); - auto flipSign = (kMod4 == 1 | kMod4 == 2); + auto cosUseCos = (kMod4 == 0) | (kMod4 == 2); + auto flipSign = (kMod4 == 1) | (kMod4 == 2); const float sinC2 = -0.16666667163372039794921875; const float sinC4 = +8.333347737789154052734375e-3; diff --git a/thirdparty/embree/common/math/vec2.h b/thirdparty/embree/common/math/vec2.h index d62aef51f3..f6d98ffa0d 100644 --- a/thirdparty/embree/common/math/vec2.h +++ b/thirdparty/embree/common/math/vec2.h @@ -144,7 +144,7 @@ namespace embree } //////////////////////////////////////////////////////////////////////////////// - /// Euclidian Space Operators + /// Euclidean Space Operators //////////////////////////////////////////////////////////////////////////////// template<typename T> __forceinline T dot ( const Vec2<T>& a, const Vec2<T>& b ) { return madd(a.x,b.x,a.y*b.y); } @@ -205,11 +205,11 @@ namespace embree #include "vec2fa.h" -#if defined __SSE__ +#if defined(__SSE__) || defined(__ARM_NEON) #include "../simd/sse.h" #endif -#if defined __AVX__ +#if defined(__AVX__) #include "../simd/avx.h" #endif @@ -221,7 +221,7 @@ namespace embree { template<> __forceinline Vec2<float>::Vec2(const Vec2fa& a) : x(a.x), y(a.y) {} -#if defined(__SSE__) +#if defined(__SSE__) || defined(__ARM_NEON) template<> __forceinline Vec2<vfloat4>::Vec2(const Vec2fa& a) : x(a.x), y(a.y) {} #endif diff --git a/thirdparty/embree/common/math/vec2fa.h b/thirdparty/embree/common/math/vec2fa.h index a51fb68fd0..4f222894c2 100644 --- a/thirdparty/embree/common/math/vec2fa.h +++ b/thirdparty/embree/common/math/vec2fa.h @@ -97,6 +97,12 @@ namespace embree __forceinline Vec2fa rcp ( const Vec2fa& a ) { +#if defined(__aarch64__) + __m128 reciprocal = _mm_rcp_ps(a.m128); + reciprocal = vmulq_f32(vrecpsq_f32(a.m128, reciprocal), reciprocal); + reciprocal = vmulq_f32(vrecpsq_f32(a.m128, reciprocal), reciprocal); + return (const Vec2fa)reciprocal; +#else #if defined(__AVX512VL__) const Vec2fa r = _mm_rcp14_ps(a.m128); #else @@ -104,13 +110,15 @@ namespace embree #endif #if defined(__AVX2__) - const Vec2fa res = _mm_mul_ps(r,_mm_fnmadd_ps(r, a, vfloat4(2.0f))); + const Vec2fa h_n = _mm_fnmadd_ps(a, r, vfloat4(1.0)); // 
First, compute 1 - a * r (which will be very close to 0) + const Vec2fa res = _mm_fmadd_ps(r, h_n, r); // Then compute r + r * h_n #else - const Vec2fa res = _mm_mul_ps(r,_mm_sub_ps(vfloat4(2.0f), _mm_mul_ps(r, a))); - //return _mm_sub_ps(_mm_add_ps(r, r), _mm_mul_ps(_mm_mul_ps(r, r), a)); + const Vec2fa h_n = _mm_sub_ps(vfloat4(1.0f), _mm_mul_ps(a, r)); // First, compute 1 - a * r (which will be very close to 0) + const Vec2fa res = _mm_add_ps(r,_mm_mul_ps(r, h_n)); // Then compute r + r * h_n #endif return res; +#endif //defined(__aarch64__) } __forceinline Vec2fa sqrt ( const Vec2fa& a ) { return _mm_sqrt_ps(a.m128); } @@ -118,12 +126,21 @@ namespace embree __forceinline Vec2fa rsqrt( const Vec2fa& a ) { +#if defined(__aarch64__) + __m128 r = _mm_rsqrt_ps(a.m128); + r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r)); + r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r)); + return r; +#else + #if defined(__AVX512VL__) __m128 r = _mm_rsqrt14_ps(a.m128); #else __m128 r = _mm_rsqrt_ps(a.m128); #endif return _mm_add_ps(_mm_mul_ps(_mm_set1_ps(1.5f),r), _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r))); + +#endif } __forceinline Vec2fa zero_fix(const Vec2fa& a) { @@ -156,7 +173,7 @@ namespace embree __forceinline Vec2fa min( const Vec2fa& a, const Vec2fa& b ) { return _mm_min_ps(a.m128,b.m128); } __forceinline Vec2fa max( const Vec2fa& a, const Vec2fa& b ) { return _mm_max_ps(a.m128,b.m128); } -#if defined(__SSE4_1__) +#if defined(__aarch64__) || defined(__SSE4_1__) __forceinline Vec2fa mini(const Vec2fa& a, const Vec2fa& b) { const vint4 ai = _mm_castps_si128(a); const vint4 bi = _mm_castps_si128(b); @@ -165,7 +182,7 @@ namespace embree } #endif -#if defined(__SSE4_1__) +#if defined(__aarch64__) || defined(__SSE4_1__) __forceinline Vec2fa maxi(const Vec2fa& a, const Vec2fa& b) { const vint4 ai = _mm_castps_si128(a); const vint4 bi = _mm_castps_si128(b); @@ -227,7 +244,7 @@ namespace embree __forceinline bool operator !=( const Vec2fa& a, const Vec2fa& b ) { return (_mm_movemask_ps(_mm_cmpneq_ps(a.m128, b.m128)) & 3) != 0; } //////////////////////////////////////////////////////////////////////////////// - /// Euclidian Space Operators + /// Euclidean Space Operators //////////////////////////////////////////////////////////////////////////////// #if defined(__SSE4_1__) diff --git a/thirdparty/embree/common/math/vec3.h b/thirdparty/embree/common/math/vec3.h index ce94eff327..254f6c4011 100644 --- a/thirdparty/embree/common/math/vec3.h +++ b/thirdparty/embree/common/math/vec3.h @@ -197,7 +197,7 @@ namespace embree template<typename T> __forceinline Vec3<bool> ge_mask( const Vec3<T>& a, const Vec3<T>& b ) { return Vec3<bool>(a.x>=b.x,a.y>=b.y,a.z>=b.z); } //////////////////////////////////////////////////////////////////////////////// - /// Euclidian Space Operators + /// Euclidean Space Operators //////////////////////////////////////////////////////////////////////////////// template<typename T> __forceinline T sqr ( const Vec3<T>& a ) { return dot(a,a); } @@ -207,7 +207,6 @@ namespace embree template<typename T> __forceinline Vec3<T> normalize( const Vec3<T>& a ) { return a*rsqrt(sqr(a)); } template<typename T> __forceinline T distance ( const Vec3<T>& a, const Vec3<T>& b ) { return length(a-b); } template<typename T> __forceinline Vec3<T> cross ( const Vec3<T>& a, const Vec3<T>& b ) { return Vec3<T>(msub(a.y,b.z,a.z*b.y), msub(a.z,b.x,a.x*b.z), msub(a.x,b.y,a.y*b.x)); } - template<typename T> __forceinline Vec3<T> stable_triangle_normal( const Vec3<T>& 
a, const Vec3<T>& b, const Vec3<T>& c ) { const T ab_x = a.z*b.y, ab_y = a.x*b.z, ab_z = a.y*b.x; @@ -266,11 +265,11 @@ namespace embree /// SSE / AVX / MIC specializations //////////////////////////////////////////////////////////////////////////////// -#if defined __SSE__ +#if defined(__SSE__) || defined(__ARM_NEON) #include "../simd/sse.h" #endif -#if defined __AVX__ +#if defined(__AVX__) #include "../simd/avx.h" #endif @@ -291,14 +290,14 @@ namespace embree template<> __forceinline Vec3<vfloat4>::Vec3(const Vec3fa& a) { x = a.x; y = a.y; z = a.z; } -#elif defined(__SSE__) +#elif defined(__SSE__) || defined(__ARM_NEON) template<> __forceinline Vec3<vfloat4>::Vec3(const Vec3fa& a) { const vfloat4 v = vfloat4(a.m128); x = shuffle<0,0,0,0>(v); y = shuffle<1,1,1,1>(v); z = shuffle<2,2,2,2>(v); } #endif -#if defined(__SSE__) +#if defined(__SSE__) || defined(__ARM_NEON) template<> __forceinline Vec3<vfloat4> broadcast<vfloat4,vfloat4>(const Vec3<vfloat4>& a, const size_t k) { return Vec3<vfloat4>(vfloat4::broadcast(&a.x[k]), vfloat4::broadcast(&a.y[k]), vfloat4::broadcast(&a.z[k])); diff --git a/thirdparty/embree/common/math/vec3fa.h b/thirdparty/embree/common/math/vec3fa.h index 586039741d..8564cf6d10 100644 --- a/thirdparty/embree/common/math/vec3fa.h +++ b/thirdparty/embree/common/math/vec3fa.h @@ -55,7 +55,13 @@ namespace embree //////////////////////////////////////////////////////////////////////////////// static __forceinline Vec3fa load( const void* const a ) { +#if defined(__aarch64__) + __m128 t = _mm_load_ps((float*)a); + t[3] = 0.0f; + return Vec3fa(t); +#else return Vec3fa(_mm_and_ps(_mm_load_ps((float*)a),_mm_castsi128_ps(_mm_set_epi32(0, -1, -1, -1)))); +#endif } static __forceinline Vec3fa loadu( const void* const a ) { @@ -89,12 +95,20 @@ namespace embree __forceinline Vec3fa operator +( const Vec3fa& a ) { return a; } __forceinline Vec3fa operator -( const Vec3fa& a ) { +#if defined(__aarch64__) + return vnegq_f32(a.m128); +#else const __m128 mask = _mm_castsi128_ps(_mm_set1_epi32(0x80000000)); return _mm_xor_ps(a.m128, mask); +#endif } __forceinline Vec3fa abs ( const Vec3fa& a ) { +#if defined(__aarch64__) + return _mm_abs_ps(a.m128); +#else const __m128 mask = _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff)); return _mm_and_ps(a.m128, mask); +#endif } __forceinline Vec3fa sign ( const Vec3fa& a ) { return blendv_ps(Vec3fa(one).m128, (-Vec3fa(one)).m128, _mm_cmplt_ps (a.m128,Vec3fa(zero).m128)); @@ -102,6 +116,10 @@ namespace embree __forceinline Vec3fa rcp ( const Vec3fa& a ) { +#if defined(__aarch64__) + return vdivq_f32(vdupq_n_f32(1.0f),a.m128); +#else + #if defined(__AVX512VL__) const Vec3fa r = _mm_rcp14_ps(a.m128); #else @@ -109,13 +127,15 @@ namespace embree #endif #if defined(__AVX2__) - const Vec3fa res = _mm_mul_ps(r.m128,_mm_fnmadd_ps(r.m128, a.m128, vfloat4(2.0f))); + const Vec3fa h_n = _mm_fnmadd_ps(a.m128, r.m128, vfloat4(1.0)); // First, compute 1 - a * r (which will be very close to 0) + const Vec3fa res = _mm_fmadd_ps(r.m128, h_n.m128, r.m128); // Then compute r + r * h_n #else - const Vec3fa res = _mm_mul_ps(r.m128,_mm_sub_ps(vfloat4(2.0f), _mm_mul_ps(r.m128, a.m128))); - //return _mm_sub_ps(_mm_add_ps(r, r), _mm_mul_ps(_mm_mul_ps(r, r), a)); + const Vec3fa h_n = _mm_sub_ps(vfloat4(1.0f), _mm_mul_ps(a.m128, r.m128)); // First, compute 1 - a * r (which will be very close to 0) + const Vec3fa res = _mm_add_ps(r.m128,_mm_mul_ps(r.m128, h_n.m128)); // Then compute r + r * h_n #endif return res; +#endif //defined(__aarch64__) } __forceinline Vec3fa sqrt ( const 
Vec3fa& a ) { return _mm_sqrt_ps(a.m128); } @@ -123,12 +143,20 @@ namespace embree __forceinline Vec3fa rsqrt( const Vec3fa& a ) { +#if defined(__aarch64__) + __m128 r = _mm_rsqrt_ps(a.m128); + r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r)); + r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a.m128, r), r)); + return r; +#else + #if defined(__AVX512VL__) __m128 r = _mm_rsqrt14_ps(a.m128); #else __m128 r = _mm_rsqrt_ps(a.m128); #endif return _mm_add_ps(_mm_mul_ps(_mm_set1_ps(1.5f),r), _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a.m128, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r))); +#endif } __forceinline Vec3fa zero_fix(const Vec3fa& a) { @@ -161,7 +189,7 @@ namespace embree __forceinline Vec3fa min( const Vec3fa& a, const Vec3fa& b ) { return _mm_min_ps(a.m128,b.m128); } __forceinline Vec3fa max( const Vec3fa& a, const Vec3fa& b ) { return _mm_max_ps(a.m128,b.m128); } -#if defined(__SSE4_1__) +#if defined(__aarch64__) || defined(__SSE4_1__) __forceinline Vec3fa mini(const Vec3fa& a, const Vec3fa& b) { const vint4 ai = _mm_castps_si128(a.m128); const vint4 bi = _mm_castps_si128(b.m128); @@ -170,7 +198,7 @@ namespace embree } #endif -#if defined(__SSE4_1__) +#if defined(__aarch64__) || defined(__SSE4_1__) __forceinline Vec3fa maxi(const Vec3fa& a, const Vec3fa& b) { const vint4 ai = _mm_castps_si128(a.m128); const vint4 bi = _mm_castps_si128(b.m128); @@ -187,16 +215,16 @@ namespace embree /// Ternary Operators //////////////////////////////////////////////////////////////////////////////// -#if defined(__AVX2__) +#if defined(__AVX2__) || defined(__ARM_NEON) __forceinline Vec3fa madd ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return _mm_fmadd_ps(a.m128,b.m128,c.m128); } __forceinline Vec3fa msub ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return _mm_fmsub_ps(a.m128,b.m128,c.m128); } __forceinline Vec3fa nmadd ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return _mm_fnmadd_ps(a.m128,b.m128,c.m128); } __forceinline Vec3fa nmsub ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return _mm_fnmsub_ps(a.m128,b.m128,c.m128); } #else __forceinline Vec3fa madd ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return a*b+c; } - __forceinline Vec3fa msub ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return a*b-c; } __forceinline Vec3fa nmadd ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return -a*b+c;} __forceinline Vec3fa nmsub ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return -a*b-c; } + __forceinline Vec3fa msub ( const Vec3fa& a, const Vec3fa& b, const Vec3fa& c) { return a*b-c; } #endif __forceinline Vec3fa madd ( const float a, const Vec3fa& b, const Vec3fa& c) { return madd(Vec3fa(a),b,c); } @@ -218,8 +246,26 @@ namespace embree //////////////////////////////////////////////////////////////////////////////// /// Reductions //////////////////////////////////////////////////////////////////////////////// +#if defined(__aarch64__) + __forceinline float reduce_add(const Vec3fa& v) { + float32x4_t t = v.m128; + t[3] = 0.0f; + return vaddvq_f32(t); + } - __forceinline float reduce_add(const Vec3fa& v) { + __forceinline float reduce_mul(const Vec3fa& v) { return v.x*v.y*v.z; } + __forceinline float reduce_min(const Vec3fa& v) { + float32x4_t t = v.m128; + t[3] = t[2]; + return vminvq_f32(t); + } + __forceinline float reduce_max(const Vec3fa& v) { + float32x4_t t = v.m128; + t[3] = t[2]; + return vmaxvq_f32(t); + } +#else + __forceinline float reduce_add(const Vec3fa& v) { const vfloat4 a(v.m128); const vfloat4 b = shuffle<1>(a); const vfloat4 c 
= shuffle<2>(a); @@ -229,6 +275,7 @@ namespace embree __forceinline float reduce_mul(const Vec3fa& v) { return v.x*v.y*v.z; } __forceinline float reduce_min(const Vec3fa& v) { return min(v.x,v.y,v.z); } __forceinline float reduce_max(const Vec3fa& v) { return max(v.x,v.y,v.z); } +#endif //////////////////////////////////////////////////////////////////////////////// /// Comparison Operators @@ -241,8 +288,13 @@ namespace embree __forceinline Vec3ba neq_mask(const Vec3fa& a, const Vec3fa& b ) { return _mm_cmpneq_ps(a.m128, b.m128); } __forceinline Vec3ba lt_mask( const Vec3fa& a, const Vec3fa& b ) { return _mm_cmplt_ps (a.m128, b.m128); } __forceinline Vec3ba le_mask( const Vec3fa& a, const Vec3fa& b ) { return _mm_cmple_ps (a.m128, b.m128); } - __forceinline Vec3ba gt_mask( const Vec3fa& a, const Vec3fa& b ) { return _mm_cmpnle_ps(a.m128, b.m128); } - __forceinline Vec3ba ge_mask( const Vec3fa& a, const Vec3fa& b ) { return _mm_cmpnlt_ps(a.m128, b.m128); } + #if defined(__aarch64__) + __forceinline Vec3ba gt_mask( const Vec3fa& a, const Vec3fa& b ) { return _mm_cmpgt_ps (a.m128, b.m128); } + __forceinline Vec3ba ge_mask( const Vec3fa& a, const Vec3fa& b ) { return _mm_cmpge_ps (a.m128, b.m128); } +#else + __forceinline Vec3ba gt_mask(const Vec3fa& a, const Vec3fa& b) { return _mm_cmpnle_ps(a.m128, b.m128); } + __forceinline Vec3ba ge_mask(const Vec3fa& a, const Vec3fa& b) { return _mm_cmpnlt_ps(a.m128, b.m128); } +#endif __forceinline bool isvalid ( const Vec3fa& v ) { return all(gt_mask(v,Vec3fa(-FLT_LARGE)) & lt_mask(v,Vec3fa(+FLT_LARGE))); @@ -261,7 +313,7 @@ namespace embree } //////////////////////////////////////////////////////////////////////////////// - /// Euclidian Space Operators + /// Euclidean Space Operators //////////////////////////////////////////////////////////////////////////////// #if defined(__SSE4_1__) @@ -335,7 +387,11 @@ namespace embree /// Rounding Functions //////////////////////////////////////////////////////////////////////////////// -#if defined (__SSE4_1__) +#if defined(__aarch64__) + __forceinline Vec3fa floor(const Vec3fa& a) { return vrndmq_f32(a.m128); } + __forceinline Vec3fa ceil (const Vec3fa& a) { return vrndpq_f32(a.m128); } + __forceinline Vec3fa trunc(const Vec3fa& a) { return vrndq_f32(a.m128); } +#elif defined (__SSE4_1__) __forceinline Vec3fa trunc( const Vec3fa& a ) { return _mm_round_ps(a.m128, _MM_FROUND_TO_NEAREST_INT); } __forceinline Vec3fa floor( const Vec3fa& a ) { return _mm_round_ps(a.m128, _MM_FROUND_TO_NEG_INF ); } __forceinline Vec3fa ceil ( const Vec3fa& a ) { return _mm_round_ps(a.m128, _MM_FROUND_TO_POS_INF ); } @@ -393,8 +449,10 @@ namespace embree __forceinline Vec3fx( const Vec3fa& other, const int a1) { m128 = other.m128; a = a1; } __forceinline Vec3fx( const Vec3fa& other, const unsigned a1) { m128 = other.m128; u = a1; } - __forceinline Vec3fx( const Vec3fa& other, const float w1) { -#if defined (__SSE4_1__) + __forceinline Vec3fx( const Vec3fa& other, const float w1) { +#if defined (__aarch64__) + m128 = other.m128; m128[3] = w1; +#elif defined (__SSE4_1__) m128 = _mm_insert_ps(other.m128, _mm_set_ss(w1),3 << 4); #else const vint4 mask(-1,-1,-1,0); @@ -526,7 +584,7 @@ namespace embree __forceinline Vec3fx min( const Vec3fx& a, const Vec3fx& b ) { return _mm_min_ps(a.m128,b.m128); } __forceinline Vec3fx max( const Vec3fx& a, const Vec3fx& b ) { return _mm_max_ps(a.m128,b.m128); } -#if defined(__SSE4_1__) +#if defined(__SSE4_1__) || defined(__aarch64__) __forceinline Vec3fx mini(const Vec3fx& a, const Vec3fx& b) { const 
vint4 ai = _mm_castps_si128(a.m128); const vint4 bi = _mm_castps_si128(b.m128); @@ -535,7 +593,7 @@ namespace embree } #endif -#if defined(__SSE4_1__) +#if defined(__SSE4_1__) || defined(__aarch64__) __forceinline Vec3fx maxi(const Vec3fx& a, const Vec3fx& b) { const vint4 ai = _mm_castps_si128(a.m128); const vint4 bi = _mm_castps_si128(b.m128); @@ -626,7 +684,7 @@ namespace embree } //////////////////////////////////////////////////////////////////////////////// - /// Euclidian Space Operators + /// Euclidean Space Operators //////////////////////////////////////////////////////////////////////////////// #if defined(__SSE4_1__) diff --git a/thirdparty/embree/common/math/vec3ia.h b/thirdparty/embree/common/math/vec3ia.h index 694804c40d..d4cc3125cd 100644 --- a/thirdparty/embree/common/math/vec3ia.h +++ b/thirdparty/embree/common/math/vec3ia.h @@ -65,7 +65,9 @@ namespace embree __forceinline Vec3ia operator +( const Vec3ia& a ) { return a; } __forceinline Vec3ia operator -( const Vec3ia& a ) { return _mm_sub_epi32(_mm_setzero_si128(), a.m128); } -#if defined(__SSSE3__) +#if (defined(__aarch64__)) + __forceinline Vec3ia abs ( const Vec3ia& a ) { return vabsq_s32(a.m128); } +#elif defined(__SSSE3__) __forceinline Vec3ia abs ( const Vec3ia& a ) { return _mm_abs_epi32(a.m128); } #endif @@ -81,7 +83,7 @@ namespace embree __forceinline Vec3ia operator -( const Vec3ia& a, const int b ) { return a-Vec3ia(b); } __forceinline Vec3ia operator -( const int a, const Vec3ia& b ) { return Vec3ia(a)-b; } -#if defined(__SSE4_1__) +#if defined(__aarch64__) || defined(__SSE4_1__) __forceinline Vec3ia operator *( const Vec3ia& a, const Vec3ia& b ) { return _mm_mullo_epi32(a.m128, b.m128); } __forceinline Vec3ia operator *( const Vec3ia& a, const int b ) { return a * Vec3ia(b); } __forceinline Vec3ia operator *( const int a, const Vec3ia& b ) { return Vec3ia(a) * b; } @@ -116,7 +118,7 @@ namespace embree __forceinline Vec3ia& operator -=( Vec3ia& a, const Vec3ia& b ) { return a = a - b; } __forceinline Vec3ia& operator -=( Vec3ia& a, const int& b ) { return a = a - b; } -#if defined(__SSE4_1__) +#if defined(__aarch64__) || defined(__SSE4_1__) __forceinline Vec3ia& operator *=( Vec3ia& a, const Vec3ia& b ) { return a = a * b; } __forceinline Vec3ia& operator *=( Vec3ia& a, const int& b ) { return a = a * b; } #endif @@ -127,18 +129,38 @@ namespace embree __forceinline Vec3ia& operator |=( Vec3ia& a, const Vec3ia& b ) { return a = a | b; } __forceinline Vec3ia& operator |=( Vec3ia& a, const int& b ) { return a = a | b; } +#if !defined(__ARM_NEON) __forceinline Vec3ia& operator <<=( Vec3ia& a, const int& b ) { return a = a << b; } __forceinline Vec3ia& operator >>=( Vec3ia& a, const int& b ) { return a = a >> b; } +#endif //////////////////////////////////////////////////////////////////////////////// - /// Reductions + /// Select //////////////////////////////////////////////////////////////////////////////// + __forceinline Vec3ia select( const Vec3ba& m, const Vec3ia& t, const Vec3ia& f ) { +#if defined(__aarch64__) || defined(__SSE4_1__) + return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), m)); +#else + return _mm_or_si128(_mm_and_si128(_mm_castps_si128(m), t), _mm_andnot_si128(_mm_castps_si128(m), f)); +#endif + } + + //////////////////////////////////////////////////////////////////////////////// + /// Reductions + //////////////////////////////////////////////////////////////////////////////// +#if defined(__aarch64__) + __forceinline int reduce_add(const Vec3ia& v) { return 
vaddvq_s32(select(Vec3ba(1,1,1),v,Vec3ia(0))); } + __forceinline int reduce_mul(const Vec3ia& v) { return v.x*v.y*v.z; } + __forceinline int reduce_min(const Vec3ia& v) { return vminvq_s32(select(Vec3ba(1,1,1),v,Vec3ia(0x7FFFFFFF))); } + __forceinline int reduce_max(const Vec3ia& v) { return vmaxvq_s32(select(Vec3ba(1,1,1),v,Vec3ia(0x80000000))); } +#else __forceinline int reduce_add(const Vec3ia& v) { return v.x+v.y+v.z; } __forceinline int reduce_mul(const Vec3ia& v) { return v.x*v.y*v.z; } __forceinline int reduce_min(const Vec3ia& v) { return min(v.x,v.y,v.z); } __forceinline int reduce_max(const Vec3ia& v) { return max(v.x,v.y,v.z); } - +#endif + //////////////////////////////////////////////////////////////////////////////// /// Comparison Operators //////////////////////////////////////////////////////////////////////////////// @@ -156,19 +178,7 @@ namespace embree __forceinline Vec3ba lt_mask( const Vec3ia& a, const Vec3ia& b ) { return _mm_castsi128_ps(_mm_cmplt_epi32 (a.m128, b.m128)); } __forceinline Vec3ba gt_mask( const Vec3ia& a, const Vec3ia& b ) { return _mm_castsi128_ps(_mm_cmpgt_epi32 (a.m128, b.m128)); } - //////////////////////////////////////////////////////////////////////////////// - /// Select - //////////////////////////////////////////////////////////////////////////////// - - __forceinline Vec3ia select( const Vec3ba& m, const Vec3ia& t, const Vec3ia& f ) { -#if defined(__SSE4_1__) - return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), m)); -#else - return _mm_or_si128(_mm_and_si128(_mm_castps_si128(m), t), _mm_andnot_si128(_mm_castps_si128(m), f)); -#endif - } - -#if defined(__SSE4_1__) +#if defined(__aarch64__) || defined(__SSE4_1__) __forceinline Vec3ia min( const Vec3ia& a, const Vec3ia& b ) { return _mm_min_epi32(a.m128,b.m128); } __forceinline Vec3ia max( const Vec3ia& a, const Vec3ia& b ) { return _mm_max_epi32(a.m128,b.m128); } #else diff --git a/thirdparty/embree/common/math/vec4.h b/thirdparty/embree/common/math/vec4.h index 0ed107928a..10c53f47b4 100644 --- a/thirdparty/embree/common/math/vec4.h +++ b/thirdparty/embree/common/math/vec4.h @@ -149,7 +149,7 @@ namespace embree } //////////////////////////////////////////////////////////////////////////////// - /// Euclidian Space Operators + /// Euclidean Space Operators //////////////////////////////////////////////////////////////////////////////// template<typename T> __forceinline T dot ( const Vec4<T>& a, const Vec4<T>& b ) { return madd(a.x,b.x,madd(a.y,b.y,madd(a.z,b.z,a.w*b.w))); } @@ -205,7 +205,7 @@ namespace embree /// SSE / AVX / MIC specializations //////////////////////////////////////////////////////////////////////////////// -#if defined __SSE__ +#if defined(__SSE__) || defined(__ARM_NEON) #include "../simd/sse.h" #endif @@ -225,7 +225,7 @@ namespace embree template<> __forceinline Vec4<vfloat4>::Vec4( const Vec3fx& a ) { x = a.x; y = a.y; z = a.z; w = a.w; } -#elif defined(__SSE__) +#elif defined(__SSE__) || defined(__ARM_NEON) template<> __forceinline Vec4<vfloat4>::Vec4( const Vec3fx& a ) { const vfloat4 v = vfloat4(a.m128); x = shuffle<0,0,0,0>(v); y = shuffle<1,1,1,1>(v); z = shuffle<2,2,2,2>(v); w = shuffle<3,3,3,3>(v); } diff --git a/thirdparty/embree/common/simd/arm/avx2neon.h b/thirdparty/embree/common/simd/arm/avx2neon.h new file mode 100644 index 0000000000..dd321d3d64 --- /dev/null +++ b/thirdparty/embree/common/simd/arm/avx2neon.h @@ -0,0 +1,1196 @@ +#pragma once + +#if !defined(__aarch64__) +#error "avx2neon is only supported for AARCH64" +#endif + 
+#include "sse2neon.h" + +#define AVX2NEON_ABI static inline __attribute__((always_inline)) + + +struct __m256 { + __m128 lo,hi; + __m256() {} +}; + + + + +struct __m256i { + __m128i lo,hi; + explicit __m256i(const __m256 a) : lo(__m128i(a.lo)),hi(__m128i(a.hi)) {} + operator __m256() const {__m256 res; res.lo = __m128(lo);res.hi = __m128(hi); return res;} + __m256i() {} +}; + + + + +struct __m256d { + float64x2_t lo,hi; + __m256d() {} + __m256d(const __m256& a) : lo(float64x2_t(a.lo)),hi(float64x2_t(a.hi)) {} + __m256d(const __m256i& a) : lo(float64x2_t(a.lo)),hi(float64x2_t(a.hi)) {} +}; + +#define UNARY_AVX_OP(type,func,basic_func) AVX2NEON_ABI type func(const type& a) {type res;res.lo=basic_func(a.lo);res.hi=basic_func(a.hi);return res;} + + +#define BINARY_AVX_OP(type,func,basic_func) AVX2NEON_ABI type func(const type& a,const type& b) {type res;res.lo=basic_func(a.lo,b.lo);res.hi=basic_func(a.hi,b.hi);return res;} +#define BINARY_AVX_OP_CAST(type,func,basic_func,bdst,bsrc) AVX2NEON_ABI type func(const type& a,const type& b) {type res;res.lo=bdst(basic_func(bsrc(a.lo),bsrc(b.lo)));res.hi=bdst(basic_func(bsrc(a.hi),bsrc(b.hi)));return res;} + +#define TERNARY_AVX_OP(type,func,basic_func) AVX2NEON_ABI type func(const type& a,const type& b,const type& c) {type res;res.lo=basic_func(a.lo,b.lo,c.lo);res.hi=basic_func(a.hi,b.hi,c.hi);return res;} + + +#define CAST_SIMD_TYPE(to,name,from,basic_dst) AVX2NEON_ABI to name(const from& a) { to res; res.lo = basic_dst(a.lo); res.hi=basic_dst(a.hi); return res;} + + + +#define _mm_stream_load_si128 _mm_load_si128 +#define _mm256_stream_load_si256 _mm256_load_si256 + + +AVX2NEON_ABI +__m128i _mm_blend_epi32 (__m128i a, __m128i b, const int imm8) +{ + __m128 af = _mm_castsi128_ps(a); + __m128 bf = _mm_castsi128_ps(b); + __m128 blendf = _mm_blend_ps(af, bf, imm8); + return _mm_castps_si128(blendf); +} + +AVX2NEON_ABI +int _mm_movemask_popcnt(__m128 a) +{ + return __builtin_popcount(_mm_movemask_ps(a)); +} + +AVX2NEON_ABI +__m128 _mm_maskload_ps (float const * mem_addr, __m128i mask) +{ + float32x4_t res; + uint32x4_t mask_u32 = vreinterpretq_u32_m128i(mask); + for (int i=0;i<4;i++) { + if (mask_u32[i] & 0x80000000) res[i] = mem_addr[i]; else res[i] = 0; + } + return vreinterpretq_m128_f32(res); +} + +AVX2NEON_ABI +void _mm_maskstore_ps (float * mem_addr, __m128i mask, __m128 a) +{ + float32x4_t a_f32 = vreinterpretq_f32_m128(a); + uint32x4_t mask_u32 = vreinterpretq_u32_m128i(mask); + for (int i=0;i<4;i++) { + if (mask_u32[i] & 0x80000000) mem_addr[i] = a_f32[i]; + } +} + +AVX2NEON_ABI +void _mm_maskstore_epi32 (int * mem_addr, __m128i mask, __m128i a) +{ + uint32x4_t mask_u32 = vreinterpretq_u32_m128i(mask); + int32x4_t a_s32 = vreinterpretq_s32_m128i(a); + for (int i=0;i<4;i++) { + if (mask_u32[i] & 0x80000000) mem_addr[i] = a_s32[i]; + } +} + + +#define _mm_fmadd_ss _mm_fmadd_ps +#define _mm_fmsub_ss _mm_fmsub_ps +#define _mm_fnmsub_ss _mm_fnmsub_ps +#define _mm_fnmadd_ss _mm_fnmadd_ps + +template<int code> +AVX2NEON_ABI float32x4_t dpps_neon(const float32x4_t& a,const float32x4_t& b) +{ + float v; + v = 0; + v += (code & 0x10) ? a[0]*b[0] : 0; + v += (code & 0x20) ? a[1]*b[1] : 0; + v += (code & 0x40) ? a[2]*b[2] : 0; + v += (code & 0x80) ? a[3]*b[3] : 0; + float32x4_t res; + res[0] = (code & 0x1) ? v : 0; + res[1] = (code & 0x2) ? v : 0; + res[2] = (code & 0x4) ? v : 0; + res[3] = (code & 0x8) ? 
v : 0; + return res; +} + +template<> +inline float32x4_t dpps_neon<0x7f>(const float32x4_t& a,const float32x4_t& b) +{ + float v; + float32x4_t m = _mm_mul_ps(a,b); + m[3] = 0; + v = vaddvq_f32(m); + return _mm_set1_ps(v); +} + +template<> +inline float32x4_t dpps_neon<0xff>(const float32x4_t& a,const float32x4_t& b) +{ + float v; + float32x4_t m = _mm_mul_ps(a,b); + v = vaddvq_f32(m); + return _mm_set1_ps(v); +} + +#define _mm_dp_ps(a,b,c) dpps_neon<c>((a),(b)) + + +AVX2NEON_ABI +__m128 _mm_permutevar_ps (__m128 a, __m128i b) +{ + uint32x4_t b_u32 = vreinterpretq_u32_m128i(b); + float32x4_t x; + for (int i=0;i<4;i++) + { + x[i] = a[b_u32[i]]; + } + return vreinterpretq_m128_f32(x); +} + +AVX2NEON_ABI +__m256i _mm256_setzero_si256() +{ + __m256i res; + res.lo = res.hi = vdupq_n_s32(0); + return res; +} + +AVX2NEON_ABI +__m256 _mm256_setzero_ps() +{ + __m256 res; + res.lo = res.hi = vdupq_n_f32(0.0f); + return res; +} + +AVX2NEON_ABI +__m256i _mm256_undefined_si256() +{ + return _mm256_setzero_si256(); +} + +AVX2NEON_ABI +__m256 _mm256_undefined_ps() +{ + return _mm256_setzero_ps(); +} + +CAST_SIMD_TYPE(__m256d, _mm256_castps_pd, __m256, float64x2_t) +CAST_SIMD_TYPE(__m256i, _mm256_castps_si256, __m256, __m128i) +CAST_SIMD_TYPE(__m256, _mm256_castsi256_ps, __m256i, __m128) +CAST_SIMD_TYPE(__m256, _mm256_castpd_ps , __m256d, __m128) +CAST_SIMD_TYPE(__m256d, _mm256_castsi256_pd, __m256i, float64x2_t) +CAST_SIMD_TYPE(__m256i, _mm256_castpd_si256, __m256d, __m128i) + + + + +AVX2NEON_ABI +__m128 _mm256_castps256_ps128 (__m256 a) +{ + return a.lo; +} + +AVX2NEON_ABI +__m256i _mm256_castsi128_si256 (__m128i a) +{ + __m256i res; + res.lo = a ; + res.hi = vdupq_n_s32(0); + return res; +} + +AVX2NEON_ABI +__m128i _mm256_castsi256_si128 (__m256i a) +{ + return a.lo; +} + +AVX2NEON_ABI +__m256 _mm256_castps128_ps256 (__m128 a) +{ + __m256 res; + res.lo = a; + res.hi = vdupq_n_f32(0); + return res; +} + + +AVX2NEON_ABI +__m256 _mm256_broadcast_ss (float const * mem_addr) +{ + __m256 res; + res.lo = res.hi = vdupq_n_f32(*mem_addr); + return res; +} + + +AVX2NEON_ABI +__m256i _mm256_set_epi32 (int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0) +{ + __m256i res; + res.lo = _mm_set_epi32(e3,e2,e1,e0); + res.hi = _mm_set_epi32(e7,e6,e5,e4); + return res; + +} + +AVX2NEON_ABI +__m256i _mm256_set1_epi32 (int a) +{ + __m256i res; + res.lo = res.hi = vdupq_n_s32(a); + return res; +} +AVX2NEON_ABI +__m256i _mm256_set1_epi8 (int a) +{ + __m256i res; + res.lo = res.hi = vdupq_n_s8(a); + return res; +} +AVX2NEON_ABI +__m256i _mm256_set1_epi16 (int a) +{ + __m256i res; + res.lo = res.hi = vdupq_n_s16(a); + return res; +} + + + + +AVX2NEON_ABI +int _mm256_movemask_ps(const __m256& v) +{ + return (_mm_movemask_ps(v.hi) << 4) | _mm_movemask_ps(v.lo); +} + +template<int imm8> +AVX2NEON_ABI +__m256 __mm256_permute_ps (const __m256& a) +{ + __m256 res; + res.lo = _mm_shuffle_ps(a.lo,a.lo,imm8); + res.hi = _mm_shuffle_ps(a.hi,a.hi,imm8); + return res; + +} + +#define _mm256_permute_ps(a,c) __mm256_permute_ps<c>(a) + + +template<int imm8> +AVX2NEON_ABI +__m256 __mm256_shuffle_ps (const __m256 a,const __m256& b) +{ + __m256 res; + res.lo = _mm_shuffle_ps(a.lo,b.lo,imm8); + res.hi = _mm_shuffle_ps(a.hi,b.hi,imm8); + return res; + +} + +template<int imm8> +AVX2NEON_ABI +__m256i __mm256_shuffle_epi32 (const __m256i a) +{ + __m256i res; + res.lo = _mm_shuffle_epi32(a.lo,imm8); + res.hi = _mm_shuffle_epi32(a.hi,imm8); + return res; + +} + +template<int imm8> +AVX2NEON_ABI +__m256i __mm256_srli_si256 (__m256i a) +{ + 
__m256i res; + res.lo = _mm_srli_si128(a.lo,imm8); + res.hi = _mm_srli_si128(a.hi,imm8); + return res; +} + +template<int imm8> +AVX2NEON_ABI +__m256i __mm256_slli_si256 (__m256i a) +{ + __m256i res; + res.lo = _mm_slli_si128(a.lo,imm8); + res.hi = _mm_slli_si128(a.hi,imm8); + return res; +} + + +#define _mm256_srli_si256(a,b) __mm256_srli_si256<b>(a) +#define _mm256_slli_si256(a,b) __mm256_slli_si256<b>(a) + + + +#define _mm256_shuffle_ps(a,b,c) __mm256_shuffle_ps<c>(a,b) +#define _mm256_shuffle_epi32(a,c) __mm256_shuffle_epi32<c>(a) + + +AVX2NEON_ABI +__m256i _mm256_set1_epi64x (long long a) +{ + __m256i res; + int64x2_t t = vdupq_n_s64(a); + res.lo = res.hi = __m128i(t); + return res; +} + + +AVX2NEON_ABI +__m256 _mm256_permute2f128_ps (__m256 a, __m256 b, int imm8) +{ + __m256 res; + __m128 tmp; + switch (imm8 & 0x7) + { + case 0: tmp = a.lo; break; + case 1: tmp = a.hi; break; + case 2: tmp = b.lo; break; + case 3: tmp = b.hi; break; + } + if (imm8 & 0x8) + tmp = _mm_setzero_ps(); + + + + res.lo = tmp; + imm8 >>= 4; + + switch (imm8 & 0x7) + { + case 0: tmp = a.lo; break; + case 1: tmp = a.hi; break; + case 2: tmp = b.lo; break; + case 3: tmp = b.hi; break; + } + if (imm8 & 0x8) + tmp = _mm_setzero_ps(); + + res.hi = tmp; + + return res; +} + +AVX2NEON_ABI +__m256 _mm256_moveldup_ps (__m256 a) +{ + __m256 res; + res.lo = _mm_moveldup_ps(a.lo); + res.hi = _mm_moveldup_ps(a.hi); + return res; +} + +AVX2NEON_ABI +__m256 _mm256_movehdup_ps (__m256 a) +{ + __m256 res; + res.lo = _mm_movehdup_ps(a.lo); + res.hi = _mm_movehdup_ps(a.hi); + return res; +} + +AVX2NEON_ABI +__m256 _mm256_insertf128_ps (__m256 a, __m128 b, int imm8) +{ + __m256 res = a; + if (imm8 & 1) res.hi = b; + else res.lo = b; + return res; +} + + +AVX2NEON_ABI +__m128 _mm256_extractf128_ps (__m256 a, const int imm8) +{ + if (imm8 & 1) return a.hi; + return a.lo; +} + + +AVX2NEON_ABI +__m256d _mm256_movedup_pd (__m256d a) +{ + __m256d res; + res.lo = _mm_movedup_pd(a.lo); + res.hi = _mm_movedup_pd(a.hi); + return res; +} + +AVX2NEON_ABI +__m256i _mm256_abs_epi32(__m256i a) +{ + __m256i res; + res.lo = vabsq_s32(a.lo); + res.hi = vabsq_s32(a.hi); + return res; +} + +UNARY_AVX_OP(__m256,_mm256_sqrt_ps,_mm_sqrt_ps) +UNARY_AVX_OP(__m256,_mm256_rsqrt_ps,_mm_rsqrt_ps) +UNARY_AVX_OP(__m256,_mm256_rcp_ps,_mm_rcp_ps) +UNARY_AVX_OP(__m256,_mm256_floor_ps,vrndmq_f32) +UNARY_AVX_OP(__m256,_mm256_ceil_ps,vrndpq_f32) +UNARY_AVX_OP(__m256i,_mm256_abs_epi16,_mm_abs_epi16) + + +BINARY_AVX_OP(__m256i,_mm256_add_epi8,_mm_add_epi8) +BINARY_AVX_OP(__m256i,_mm256_adds_epi8,_mm_adds_epi8) + +BINARY_AVX_OP(__m256i,_mm256_hadd_epi32,_mm_hadd_epi32) +BINARY_AVX_OP(__m256i,_mm256_add_epi32,_mm_add_epi32) +BINARY_AVX_OP(__m256i,_mm256_sub_epi32,_mm_sub_epi32) +BINARY_AVX_OP(__m256i,_mm256_mullo_epi32,_mm_mullo_epi32) + +BINARY_AVX_OP(__m256i,_mm256_min_epi32,_mm_min_epi32) +BINARY_AVX_OP(__m256i,_mm256_max_epi32,_mm_max_epi32) +BINARY_AVX_OP(__m256i,_mm256_min_epi16,_mm_min_epi16) +BINARY_AVX_OP(__m256i,_mm256_max_epi16,_mm_max_epi16) +BINARY_AVX_OP(__m256i,_mm256_min_epi8,_mm_min_epi8) +BINARY_AVX_OP(__m256i,_mm256_max_epi8,_mm_max_epi8) +BINARY_AVX_OP(__m256i,_mm256_min_epu16,_mm_min_epu16) +BINARY_AVX_OP(__m256i,_mm256_max_epu16,_mm_max_epu16) +BINARY_AVX_OP(__m256i,_mm256_min_epu8,_mm_min_epu8) +BINARY_AVX_OP(__m256i,_mm256_max_epu8,_mm_max_epu8) +BINARY_AVX_OP(__m256i,_mm256_sign_epi16,_mm_sign_epi16) + + +BINARY_AVX_OP_CAST(__m256i,_mm256_min_epu32,vminq_u32,__m128i,uint32x4_t) 
+BINARY_AVX_OP_CAST(__m256i,_mm256_max_epu32,vmaxq_u32,__m128i,uint32x4_t) + +BINARY_AVX_OP(__m256,_mm256_min_ps,_mm_min_ps) +BINARY_AVX_OP(__m256,_mm256_max_ps,_mm_max_ps) + +BINARY_AVX_OP(__m256,_mm256_add_ps,_mm_add_ps) +BINARY_AVX_OP(__m256,_mm256_mul_ps,_mm_mul_ps) +BINARY_AVX_OP(__m256,_mm256_sub_ps,_mm_sub_ps) +BINARY_AVX_OP(__m256,_mm256_div_ps,_mm_div_ps) + +BINARY_AVX_OP(__m256,_mm256_and_ps,_mm_and_ps) +BINARY_AVX_OP(__m256,_mm256_andnot_ps,_mm_andnot_ps) +BINARY_AVX_OP(__m256,_mm256_or_ps,_mm_or_ps) +BINARY_AVX_OP(__m256,_mm256_xor_ps,_mm_xor_ps) + +BINARY_AVX_OP_CAST(__m256d,_mm256_and_pd,vandq_s64,float64x2_t,int64x2_t) +BINARY_AVX_OP_CAST(__m256d,_mm256_or_pd,vorrq_s64,float64x2_t,int64x2_t) +BINARY_AVX_OP_CAST(__m256d,_mm256_xor_pd,veorq_s64,float64x2_t,int64x2_t) + + + +BINARY_AVX_OP(__m256i,_mm256_and_si256,_mm_and_si128) +BINARY_AVX_OP(__m256i,_mm256_andnot_si256,_mm_andnot_si128) +BINARY_AVX_OP(__m256i,_mm256_or_si256,_mm_or_si128) +BINARY_AVX_OP(__m256i,_mm256_xor_si256,_mm_xor_si128) + + +BINARY_AVX_OP(__m256,_mm256_unpackhi_ps,_mm_unpackhi_ps) +BINARY_AVX_OP(__m256,_mm256_unpacklo_ps,_mm_unpacklo_ps) +TERNARY_AVX_OP(__m256,_mm256_blendv_ps,_mm_blendv_ps) +TERNARY_AVX_OP(__m256i,_mm256_blendv_epi8,_mm_blendv_epi8) + + +TERNARY_AVX_OP(__m256,_mm256_fmadd_ps,_mm_fmadd_ps) +TERNARY_AVX_OP(__m256,_mm256_fnmadd_ps,_mm_fnmadd_ps) +TERNARY_AVX_OP(__m256,_mm256_fmsub_ps,_mm_fmsub_ps) +TERNARY_AVX_OP(__m256,_mm256_fnmsub_ps,_mm_fnmsub_ps) + + + +BINARY_AVX_OP(__m256i,_mm256_packs_epi32,_mm_packs_epi32) +BINARY_AVX_OP(__m256i,_mm256_packs_epi16,_mm_packs_epi16) +BINARY_AVX_OP(__m256i,_mm256_packus_epi32,_mm_packus_epi32) +BINARY_AVX_OP(__m256i,_mm256_packus_epi16,_mm_packus_epi16) + + +BINARY_AVX_OP(__m256i,_mm256_unpackhi_epi64,_mm_unpackhi_epi64) +BINARY_AVX_OP(__m256i,_mm256_unpackhi_epi32,_mm_unpackhi_epi32) +BINARY_AVX_OP(__m256i,_mm256_unpackhi_epi16,_mm_unpackhi_epi16) +BINARY_AVX_OP(__m256i,_mm256_unpackhi_epi8,_mm_unpackhi_epi8) + +BINARY_AVX_OP(__m256i,_mm256_unpacklo_epi64,_mm_unpacklo_epi64) +BINARY_AVX_OP(__m256i,_mm256_unpacklo_epi32,_mm_unpacklo_epi32) +BINARY_AVX_OP(__m256i,_mm256_unpacklo_epi16,_mm_unpacklo_epi16) +BINARY_AVX_OP(__m256i,_mm256_unpacklo_epi8,_mm_unpacklo_epi8) + +BINARY_AVX_OP(__m256i,_mm256_mulhrs_epi16,_mm_mulhrs_epi16) +BINARY_AVX_OP(__m256i,_mm256_mulhi_epu16,_mm_mulhi_epu16) +BINARY_AVX_OP(__m256i,_mm256_mulhi_epi16,_mm_mulhi_epi16) +//BINARY_AVX_OP(__m256i,_mm256_mullo_epu16,_mm_mullo_epu16) +BINARY_AVX_OP(__m256i,_mm256_mullo_epi16,_mm_mullo_epi16) + +BINARY_AVX_OP(__m256i,_mm256_subs_epu16,_mm_subs_epu16) +BINARY_AVX_OP(__m256i,_mm256_adds_epu16,_mm_adds_epu16) +BINARY_AVX_OP(__m256i,_mm256_subs_epi16,_mm_subs_epi16) +BINARY_AVX_OP(__m256i,_mm256_adds_epi16,_mm_adds_epi16) +BINARY_AVX_OP(__m256i,_mm256_sub_epi16,_mm_sub_epi16) +BINARY_AVX_OP(__m256i,_mm256_add_epi16,_mm_add_epi16) +BINARY_AVX_OP(__m256i,_mm256_sub_epi8,_mm_sub_epi8) + + +BINARY_AVX_OP(__m256i,_mm256_hadd_epi16,_mm_hadd_epi16) +BINARY_AVX_OP(__m256i,_mm256_hadds_epi16,_mm_hadds_epi16) + + + + +BINARY_AVX_OP(__m256i,_mm256_cmpeq_epi32,_mm_cmpeq_epi32) +BINARY_AVX_OP(__m256i,_mm256_cmpgt_epi32,_mm_cmpgt_epi32) + +BINARY_AVX_OP(__m256i,_mm256_cmpeq_epi8,_mm_cmpeq_epi8) +BINARY_AVX_OP(__m256i,_mm256_cmpgt_epi8,_mm_cmpgt_epi8) + +BINARY_AVX_OP(__m256i,_mm256_cmpeq_epi16,_mm_cmpeq_epi16) +BINARY_AVX_OP(__m256i,_mm256_cmpgt_epi16,_mm_cmpgt_epi16) + + +BINARY_AVX_OP(__m256i,_mm256_shuffle_epi8,_mm_shuffle_epi8) + + +BINARY_AVX_OP(__m256,_mm256_cmpeq_ps,_mm_cmpeq_ps) 
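The UNARY/BINARY/TERNARY_AVX_OP(_CAST) macros used throughout this hunk are defined earlier in avx2neon.h and are not shown here; judging from the hand-written wrappers above, they presumably just forward each 128-bit half of the two-half __m256 layout to the given SSE/NEON operation. A minimal sketch of that pattern, using hypothetical names (emu_m256, emu_mm256_add_ps) rather than the real macro output:

#include <arm_neon.h>

/* Hypothetical stand-in for the lo/hi pair avx2neon.h uses to emulate __m256. */
typedef struct { float32x4_t lo, hi; } emu_m256;

/* The shape a BINARY_AVX_OP expansion presumably takes: apply the 128-bit
   operation to the low and high halves independently. */
static inline emu_m256 emu_mm256_add_ps(emu_m256 a, emu_m256 b)
{
    emu_m256 res;
    res.lo = vaddq_f32(a.lo, b.lo);
    res.hi = vaddq_f32(a.hi, b.hi);
    return res;
}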
+BINARY_AVX_OP(__m256,_mm256_cmpneq_ps,_mm_cmpneq_ps) +BINARY_AVX_OP(__m256,_mm256_cmpnlt_ps,_mm_cmpnlt_ps) +BINARY_AVX_OP(__m256,_mm256_cmpngt_ps,_mm_cmpngt_ps) +BINARY_AVX_OP(__m256,_mm256_cmpge_ps,_mm_cmpge_ps) +BINARY_AVX_OP(__m256,_mm256_cmpnge_ps,_mm_cmpnge_ps) +BINARY_AVX_OP(__m256,_mm256_cmplt_ps,_mm_cmplt_ps) +BINARY_AVX_OP(__m256,_mm256_cmple_ps,_mm_cmple_ps) +BINARY_AVX_OP(__m256,_mm256_cmpgt_ps,_mm_cmpgt_ps) +BINARY_AVX_OP(__m256,_mm256_cmpnle_ps,_mm_cmpnle_ps) + + +AVX2NEON_ABI +__m256i _mm256_cvtps_epi32 (__m256 a) +{ + __m256i res; + res.lo = _mm_cvtps_epi32(a.lo); + res.hi = _mm_cvtps_epi32(a.hi); + return res; + +} + +AVX2NEON_ABI +__m256i _mm256_cvttps_epi32 (__m256 a) +{ + __m256i res; + res.lo = _mm_cvttps_epi32(a.lo); + res.hi = _mm_cvttps_epi32(a.hi); + return res; + +} + +AVX2NEON_ABI +__m256 _mm256_loadu_ps (float const * mem_addr) +{ + __m256 res; + res.lo = *(__m128 *)(mem_addr + 0); + res.hi = *(__m128 *)(mem_addr + 4); + return res; +} +#define _mm256_load_ps _mm256_loadu_ps + + +AVX2NEON_ABI +int _mm256_testz_ps (const __m256& a, const __m256& b) +{ + __m256 t = a; + if (&a != &b) + t = _mm256_and_ps(a,b); + + int32x4_t l = vshrq_n_s32(vreinterpretq_s32_m128(t.lo),31); + int32x4_t h = vshrq_n_s32(vreinterpretq_s32_m128(t.hi),31); + return vaddvq_s32(vaddq_s32(l,h)) == 0; +} + + +AVX2NEON_ABI +__m256i _mm256_set_epi64x (int64_t e3, int64_t e2, int64_t e1, int64_t e0) +{ + __m256i res; + int64x2_t t0 = {e0,e1}; + int64x2_t t1 = {e2,e3}; + res.lo = __m128i(t0); + res.hi = __m128i(t1); + return res; +} +AVX2NEON_ABI +__m256i _mm256_setr_epi64x (int64_t e0, int64_t e1, int64_t e2, int64_t e3) +{ + __m256i res; + int64x2_t t0 = {e0,e1}; + int64x2_t t1 = {e2,e3}; + res.lo = __m128i(t0); + res.hi = __m128i(t1); + return res; +} + + + +AVX2NEON_ABI +__m256i _mm256_set_epi8 (char e31, char e30, char e29, char e28, char e27, char e26, char e25, char e24, char e23, char e22, char e21, char e20, char e19, char e18, char e17, char e16, char e15, char e14, char e13, char e12, char e11, char e10, char e9, char e8, char e7, char e6, char e5, char e4, char e3, char e2, char e1, char e0) +{ + int8x16_t lo = {e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10,e11,e12,e13,e14,e15}; + int8x16_t hi = {e16,e17,e18,e19,e20,e21,e22,e23,e24,e25,e26,e27,e28,e29,e30,e31}; + __m256i res; + res.lo = lo; res.hi = hi; + return res; +} + +AVX2NEON_ABI +__m256i _mm256_setr_epi8 (char e0, char e1, char e2, char e3, char e4, char e5, char e6, char e7, char e8, char e9, char e10, char e11, char e12, char e13, char e14, char e15, char e16, char e17, char e18, char e19, char e20, char e21, char e22, char e23, char e24, char e25, char e26, char e27, char e28, char e29, char e30, char e31) +{ + int8x16_t lo = {e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10,e11,e12,e13,e14,e15}; + int8x16_t hi = {e16,e17,e18,e19,e20,e21,e22,e23,e24,e25,e26,e27,e28,e29,e30,e31}; + __m256i res; + res.lo = lo; res.hi = hi; + return res; +} + + +AVX2NEON_ABI +__m256i _mm256_set_epi16 (short e15, short e14, short e13, short e12, short e11, short e10, short e9, short e8, short e7, short e6, short e5, short e4, short e3, short e2, short e1, short e0) +{ + int16x8_t lo = {e0,e1,e2,e3,e4,e5,e6,e7}; + int16x8_t hi = {e8,e9,e10,e11,e12,e13,e14,e15}; + __m256i res; + res.lo = lo; res.hi = hi; + return res; +} + +AVX2NEON_ABI +__m256i _mm256_setr_epi16 (short e0, short e1, short e2, short e3, short e4, short e5, short e6, short e7, short e8, short e9, short e10, short e11, short e12, short e13, short e14, short e15) +{ + int16x8_t lo = {e0,e1,e2,e3,e4,e5,e6,e7}; 
+ int16x8_t hi = {e8,e9,e10,e11,e12,e13,e14,e15}; + __m256i res; + res.lo = lo; res.hi = hi; + return res; +} + + + + +AVX2NEON_ABI +int _mm256_movemask_epi8(const __m256i& a) +{ + return (_mm_movemask_epi8(a.hi) << 16) | _mm_movemask_epi8(a.lo); +} + + +AVX2NEON_ABI +int _mm256_testz_si256(const __m256i& a,const __m256i& b) +{ + uint32x4_t lo = vandq_u32(a.lo,b.lo); + uint32x4_t hi = vandq_u32(a.hi,b.hi); + + return (vaddvq_u32(lo) + vaddvq_u32(hi)) == 0; +} + +AVX2NEON_ABI +__m256d _mm256_setzero_pd () +{ + __m256d res; + res.lo = res.hi = vdupq_n_f64(0); + return res; +} + +AVX2NEON_ABI +int _mm256_movemask_pd (__m256d a) +{ + return (_mm_movemask_pd(a.hi) << 2) | _mm_movemask_pd(a.lo); +} + +AVX2NEON_ABI +__m256i _mm256_cmpeq_epi64 (__m256i a, __m256i b) +{ + __m256i res; + res.lo = _mm_cmpeq_epi64(a.lo, b.lo); + res.hi = _mm_cmpeq_epi64(a.hi, b.hi); + return res; +} + +AVX2NEON_ABI +__m256d _mm256_cmpeq_pd (__m256d a, __m256d b) +{ + __m256d res; + res.lo = _mm_cmpeq_pd(a.lo, b.lo); + res.hi = _mm_cmpeq_pd(a.hi, b.hi); + return res; +} + + +AVX2NEON_ABI +int _mm256_testz_pd (const __m256d& a, const __m256d& b) +{ + __m256d t = a; + + if (&a != &b) + t = _mm256_and_pd(a,b); + + return _mm256_movemask_pd(t) == 0; +} + +AVX2NEON_ABI +__m256d _mm256_blendv_pd (__m256d a, __m256d b, __m256d mask) +{ + __m256d res; + res.lo = _mm_blendv_pd(a.lo, b.lo, mask.lo); + res.hi = _mm_blendv_pd(a.hi, b.hi, mask.hi); + return res; +} + +template<int imm8> +AVX2NEON_ABI +__m256 __mm256_dp_ps (__m256 a, __m256 b) +{ + __m256 res; + res.lo = _mm_dp_ps(a.lo, b.lo, imm8); + res.hi = _mm_dp_ps(a.hi, b.hi, imm8); + return res; +} + +#define _mm256_dp_ps(a,b,c) __mm256_dp_ps<c>(a,b) + +AVX2NEON_ABI +double _mm256_permute4x64_pd_select(__m256d a, const int imm8) +{ + switch (imm8 & 3) { + case 0: + return ((float64x2_t)a.lo)[0]; + case 1: + return ((float64x2_t)a.lo)[1]; + case 2: + return ((float64x2_t)a.hi)[0]; + case 3: + return ((float64x2_t)a.hi)[1]; + } + __builtin_unreachable(); + return 0; +} + +AVX2NEON_ABI +__m256d _mm256_permute4x64_pd (__m256d a, const int imm8) +{ + float64x2_t lo,hi; + lo[0] = _mm256_permute4x64_pd_select(a,imm8 >> 0); + lo[1] = _mm256_permute4x64_pd_select(a,imm8 >> 2); + hi[0] = _mm256_permute4x64_pd_select(a,imm8 >> 4); + hi[1] = _mm256_permute4x64_pd_select(a,imm8 >> 6); + + __m256d res; + res.lo = lo; res.hi = hi; + return res; +} + +AVX2NEON_ABI +__m256i _mm256_insertf128_si256 (__m256i a, __m128i b, int imm8) +{ + return __m256i(_mm256_insertf128_ps((__m256)a,(__m128)b,imm8)); +} + + +AVX2NEON_ABI +__m256i _mm256_loadu_si256 (__m256i const * mem_addr) +{ + __m256i res; + res.lo = *(__m128i *)((int32_t *)mem_addr + 0); + res.hi = *(__m128i *)((int32_t *)mem_addr + 4); + return res; +} + +#define _mm256_load_si256 _mm256_loadu_si256 + +AVX2NEON_ABI +void _mm256_storeu_ps (float * mem_addr, __m256 a) +{ + *(__m128 *)(mem_addr + 0) = a.lo; + *(__m128 *)(mem_addr + 4) = a.hi; +} + +#define _mm256_store_ps _mm256_storeu_ps +#define _mm256_stream_ps _mm256_storeu_ps + + +AVX2NEON_ABI +void _mm256_storeu_si256 (__m256i * mem_addr, __m256i a) +{ + *(__m128i *)((int32_t *)mem_addr + 0) = a.lo; + *(__m128i *)((int32_t *)mem_addr + 4) = a.hi; +} + +#define _mm256_store_si256 _mm256_storeu_si256 + + + +AVX2NEON_ABI +__m256i _mm256_permute4x64_epi64 (const __m256i a, const int imm8) +{ + uint8x16x2_t tbl = {a.lo, a.hi}; + + uint8_t sz = sizeof(uint64_t); + uint8_t u64[4] = { + (uint8_t)(((imm8 >> 0) & 0x3) * sz), + (uint8_t)(((imm8 >> 2) & 0x3) * sz), + (uint8_t)(((imm8 >> 4) & 0x3) * 
sz), + (uint8_t)(((imm8 >> 6) & 0x3) * sz), + }; + + uint8x16_t idx_lo = { + // lo[0] bytes + (uint8_t)(u64[0]+0), (uint8_t)(u64[0]+1), (uint8_t)(u64[0]+2), (uint8_t)(u64[0]+3), + (uint8_t)(u64[0]+4), (uint8_t)(u64[0]+5), (uint8_t)(u64[0]+6), (uint8_t)(u64[0]+7), + + // lo[1] bytes + (uint8_t)(u64[1]+0), (uint8_t)(u64[1]+1), (uint8_t)(u64[1]+2), (uint8_t)(u64[1]+3), + (uint8_t)(u64[1]+4), (uint8_t)(u64[1]+5), (uint8_t)(u64[1]+6), (uint8_t)(u64[1]+7), + }; + uint8x16_t idx_hi = { + // hi[0] bytes + (uint8_t)(u64[2]+0), (uint8_t)(u64[2]+1), (uint8_t)(u64[2]+2), (uint8_t)(u64[2]+3), + (uint8_t)(u64[2]+4), (uint8_t)(u64[2]+5), (uint8_t)(u64[2]+6), (uint8_t)(u64[2]+7), + + // hi[1] bytes + (uint8_t)(u64[3]+0), (uint8_t)(u64[3]+1), (uint8_t)(u64[3]+2), (uint8_t)(u64[3]+3), + (uint8_t)(u64[3]+4), (uint8_t)(u64[3]+5), (uint8_t)(u64[3]+6), (uint8_t)(u64[3]+7), + }; + + uint8x16_t lo = vqtbl2q_u8(tbl, idx_lo); + uint8x16_t hi = vqtbl2q_u8(tbl, idx_hi); + + __m256i res; + res.lo = lo; res.hi = hi; + return res; +} + + +AVX2NEON_ABI +__m256i _mm256_permute2x128_si256(const __m256i a,const __m256i b, const int imm8) +{ + return __m256i(_mm256_permute2f128_ps(__m256(a),__m256(b),imm8)); +} + + + +AVX2NEON_ABI +__m256 _mm256_maskload_ps (float const * mem_addr, __m256i mask) +{ + __m256 res; + res.lo = _mm_maskload_ps(mem_addr,mask.lo); + res.hi = _mm_maskload_ps(mem_addr + 4,mask.hi); + return res; +} + + +AVX2NEON_ABI +__m256i _mm256_cvtepu8_epi32 (__m128i a) +{ + uint8x16_t a_u8 = vreinterpretq_u8_m128i(a); // xxxx xxxx xxxx xxxx HHGG FFEE DDCC BBAA + uint16x8_t u16x8 = vmovl_u8(vget_low_u8(a_u8)); // 00HH 00GG 00FF 00EE 00DD 00CC 00BB 00AA + uint32x4_t lo = vmovl_u16(vget_low_u16(u16x8)); // 0000 00DD 0000 00CC 0000 00BB 0000 00AA + uint32x4_t hi = vmovl_high_u16(u16x8); // 0000 00HH 0000 00GG 0000 00FF 0000 00EE + + __m256i res; + res.lo = lo; res.hi = hi; + return res; +} + + +AVX2NEON_ABI +__m256i _mm256_cvtepi8_epi32 (__m128i a) +{ + int8x16_t a_s8 = vreinterpretq_s8_m128i(a); // xxxx xxxx xxxx xxxx HHGG FFEE DDCC BBAA + int16x8_t s16x8 = vmovl_s8(vget_low_s8(a_s8)); // ssHH ssGG ssFF ssEE ssDD ssCC ssBB ssAA + int32x4_t lo = vmovl_s16(vget_low_s16(s16x8)); // ssss ssDD ssss ssCC ssss ssBB ssss ssAA + int32x4_t hi = vmovl_high_s16(s16x8); // ssss ssHH ssss ssGG ssss ssFF ssss ssEE + + __m256i res; + res.lo = lo; res.hi = hi; + return res; +} + + +AVX2NEON_ABI +__m256i _mm256_cvtepi16_epi32 (__m128i a) +{ + int16x8_t a_s16 = vreinterpretq_s16_m128i(a); // HHHH GGGG FFFF EEEE DDDD CCCC BBBB AAAA + int32x4_t lo = vmovl_s16(vget_low_s16(a_s16)); // ssss DDDD ssss CCCC ssss BBBB ssss AAAA + int32x4_t hi = vmovl_high_s16(a_s16); // ssss HHHH ssss GGGG ssss FFFF ssss EEEE + + __m256i res; + res.lo = lo; res.hi = hi; + return res; +} + + + +AVX2NEON_ABI +void _mm256_maskstore_epi32 (int* mem_addr, __m256i mask, __m256i a) +{ + _mm_maskstore_epi32(mem_addr,mask.lo,a.lo); + _mm_maskstore_epi32(mem_addr + 4,mask.hi,a.hi); +} + +AVX2NEON_ABI +__m256i _mm256_slli_epi64 (__m256i a, int imm8) +{ + __m256i res; + res.lo = _mm_slli_epi64(a.lo,imm8); + res.hi = _mm_slli_epi64(a.hi,imm8); + return res; +} + +AVX2NEON_ABI +__m256i _mm256_slli_epi32 (__m256i a, int imm8) +{ + __m256i res; + res.lo = _mm_slli_epi32(a.lo,imm8); + res.hi = _mm_slli_epi32(a.hi,imm8); + return res; +} + + +AVX2NEON_ABI +__m256i __mm256_slli_epi16 (__m256i a, int imm8) +{ + __m256i res; + res.lo = _mm_slli_epi16(a.lo,imm8); + res.hi = _mm_slli_epi16(a.hi,imm8); + return res; +} + + +AVX2NEON_ABI +__m256i _mm256_srli_epi32 (__m256i a, int 
imm8) +{ + __m256i res; + res.lo = _mm_srli_epi32(a.lo,imm8); + res.hi = _mm_srli_epi32(a.hi,imm8); + return res; +} + +AVX2NEON_ABI +__m256i __mm256_srli_epi16 (__m256i a, int imm8) +{ + __m256i res; + res.lo = _mm_srli_epi16(a.lo,imm8); + res.hi = _mm_srli_epi16(a.hi,imm8); + return res; +} + +AVX2NEON_ABI +__m256i _mm256_cvtepu16_epi32(__m128i a) +{ + __m256i res; + res.lo = vmovl_u16(vget_low_u16(a)); + res.hi = vmovl_high_u16(a); + return res; +} + +AVX2NEON_ABI +__m256i _mm256_cvtepu8_epi16(__m128i a) +{ + __m256i res; + res.lo = vmovl_u8(vget_low_u8(a)); + res.hi = vmovl_high_u8(a); + return res; +} + + +AVX2NEON_ABI +__m256i _mm256_srai_epi32 (__m256i a, int imm8) +{ + __m256i res; + res.lo = _mm_srai_epi32(a.lo,imm8); + res.hi = _mm_srai_epi32(a.hi,imm8); + return res; +} + +AVX2NEON_ABI +__m256i _mm256_srai_epi16 (__m256i a, int imm8) +{ + __m256i res; + res.lo = _mm_srai_epi16(a.lo,imm8); + res.hi = _mm_srai_epi16(a.hi,imm8); + return res; +} + + +AVX2NEON_ABI +__m256i _mm256_sllv_epi32 (__m256i a, __m256i count) +{ + __m256i res; + res.lo = vshlq_s32(a.lo,count.lo); + res.hi = vshlq_s32(a.hi,count.hi); + return res; + +} + + +AVX2NEON_ABI +__m256i _mm256_srav_epi32 (__m256i a, __m256i count) +{ + __m256i res; + res.lo = vshlq_s32(a.lo,vnegq_s32(count.lo)); + res.hi = vshlq_s32(a.hi,vnegq_s32(count.hi)); + return res; + +} + +AVX2NEON_ABI +__m256i _mm256_srlv_epi32 (__m256i a, __m256i count) +{ + __m256i res; + res.lo = __m128i(vshlq_u32(uint32x4_t(a.lo),vnegq_s32(count.lo))); + res.hi = __m128i(vshlq_u32(uint32x4_t(a.hi),vnegq_s32(count.hi))); + return res; + +} + + +AVX2NEON_ABI +__m256i _mm256_permute2f128_si256 (__m256i a, __m256i b, int imm8) +{ + return __m256i(_mm256_permute2f128_ps(__m256(a),__m256(b),imm8)); +} + + +AVX2NEON_ABI +__m128i _mm256_extractf128_si256 (__m256i a, const int imm8) +{ + if (imm8 & 1) return a.hi; + return a.lo; +} + +AVX2NEON_ABI +__m256 _mm256_set1_ps(float x) +{ + __m256 res; + res.lo = res.hi = vdupq_n_f32(x); + return res; +} + +AVX2NEON_ABI +__m256 _mm256_set_ps (float e7, float e6, float e5, float e4, float e3, float e2, float e1, float e0) +{ + __m256 res; + res.lo = _mm_set_ps(e3,e2,e1,e0); + res.hi = _mm_set_ps(e7,e6,e5,e4); + return res; +} + +AVX2NEON_ABI +__m256 _mm256_broadcast_ps (__m128 const * mem_addr) +{ + __m256 res; + res.lo = res.hi = *mem_addr; + return res; +} + +AVX2NEON_ABI +__m256 _mm256_cvtepi32_ps (__m256i a) +{ + __m256 res; + res.lo = _mm_cvtepi32_ps(a.lo); + res.hi = _mm_cvtepi32_ps(a.hi); + return res; +} +AVX2NEON_ABI +void _mm256_maskstore_ps (float * mem_addr, __m256i mask, __m256 a) +{ + uint32x4_t mask_lo = mask.lo; + uint32x4_t mask_hi = mask.hi; + float32x4_t a_lo = a.lo; + float32x4_t a_hi = a.hi; + + for (int i=0;i<4;i++) { + if (mask_lo[i] & 0x80000000) mem_addr[i] = a_lo[i]; + if (mask_hi[i] & 0x80000000) mem_addr[i+4] = a_hi[i]; + } +} + +AVX2NEON_ABI +__m256d _mm256_andnot_pd (__m256d a, __m256d b) +{ + __m256d res; + res.lo = float64x2_t(_mm_andnot_ps(__m128(a.lo),__m128(b.lo))); + res.hi = float64x2_t(_mm_andnot_ps(__m128(a.hi),__m128(b.hi))); + return res; +} + +AVX2NEON_ABI +__m256 _mm256_blend_ps (__m256 a, __m256 b, const int imm8) +{ + __m256 res; + res.lo = _mm_blend_ps(a.lo,b.lo,imm8 & 0xf); + res.hi = _mm_blend_ps(a.hi,b.hi,imm8 >> 4); + return res; + +} + + +AVX2NEON_ABI +__m256i _mm256_blend_epi32 (__m256i a, __m256i b, const int imm8) +{ + return __m256i(_mm256_blend_ps(__m256(a),__m256(b),imm8)); + +} + +AVX2NEON_ABI +__m256i _mm256_blend_epi16 (__m256i a, __m256i b, const int imm8) +{ + 
__m256i res; + res.lo = _mm_blend_epi16(a.lo,b.lo,imm8); + res.hi = _mm_blend_epi16(a.hi,b.hi,imm8); + return res; +} + + + +AVX2NEON_ABI +__m256i _mm256_i32gather_epi32 (int const* base_addr, __m256i vindex, const int scale) +{ + int32x4_t vindex_lo = vindex.lo; + int32x4_t vindex_hi = vindex.hi; + int32x4_t lo,hi; + for (int i=0;i<4;i++) + { + lo[i] = *(int32_t *)((char *) base_addr + (vindex_lo[i]*scale)); + hi[i] = *(int32_t *)((char *) base_addr + (vindex_hi[i]*scale)); + } + + __m256i res; + res.lo = lo; res.hi = hi; + return res; +} + + +AVX2NEON_ABI +__m256i _mm256_mask_i32gather_epi32 (__m256i src, int const* base_addr, __m256i vindex, __m256i mask, const int scale) +{ + uint32x4_t mask_lo = mask.lo; + uint32x4_t mask_hi = mask.hi; + int32x4_t vindex_lo = vindex.lo; + int32x4_t vindex_hi = vindex.hi; + int32x4_t lo,hi; + lo = hi = _mm_setzero_si128(); + for (int i=0;i<4;i++) + { + if (mask_lo[i] >> 31) lo[i] = *(int32_t *)((char *) base_addr + (vindex_lo[i]*scale)); + if (mask_hi[i] >> 31) hi[i] = *(int32_t *)((char *) base_addr + (vindex_hi[i]*scale)); + } + + __m256i res; + res.lo = lo; res.hi = hi; + return res; +} diff --git a/thirdparty/embree/common/simd/arm/emulation.h b/thirdparty/embree/common/simd/arm/emulation.h index 1c3875fb27..4327298019 100644 --- a/thirdparty/embree/common/simd/arm/emulation.h +++ b/thirdparty/embree/common/simd/arm/emulation.h @@ -11,33 +11,28 @@ #include "sse2neon.h" -__forceinline __m128 _mm_fmsub_ps(__m128 a, __m128 b, __m128 c) { - __m128 neg_c = vreinterpretq_m128_f32(vnegq_f32(vreinterpretq_f32_m128(c))); - return _mm_fmadd_ps(a, b, neg_c); -} - -__forceinline __m128 _mm_fnmadd_ps(__m128 a, __m128 b, __m128 c) { -#if defined(__aarch64__) - return vreinterpretq_m128_f32(vfmsq_f32(vreinterpretq_f32_m128(c), - vreinterpretq_f32_m128(b), - vreinterpretq_f32_m128(a))); -#else - return _mm_sub_ps(c, _mm_mul_ps(a, b)); -#endif -} +__forceinline __m128 _mm_abs_ps(__m128 a) { return vabsq_f32(a); } + +__forceinline __m128 _mm_fmadd_ps (__m128 a, __m128 b, __m128 c) { return vfmaq_f32(c, a, b); } +__forceinline __m128 _mm_fnmadd_ps(__m128 a, __m128 b, __m128 c) { return vfmsq_f32(c, a, b); } +__forceinline __m128 _mm_fnmsub_ps(__m128 a, __m128 b, __m128 c) { return vnegq_f32(vfmaq_f32(c, a, b)); } +__forceinline __m128 _mm_fmsub_ps (__m128 a, __m128 b, __m128 c) { return vnegq_f32(vfmsq_f32(c, a, b)); } -__forceinline __m128 _mm_fnmsub_ps(__m128 a, __m128 b, __m128 c) { - return vreinterpretq_m128_f32(vnegq_f32(vreinterpretq_f32_m128(_mm_fmadd_ps(a,b,c)))); +__forceinline __m128 _mm_broadcast_ss (float const * mem_addr) +{ + return vdupq_n_f32(*mem_addr); } +// AVX2 emulation leverages Intel FMA defs above. Include after them. 
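The four FMA wrappers just above encode the x86 sign conventions in terms of NEON's fused operations: vfmaq_f32(c,a,b) computes c + a*b and vfmsq_f32(c,a,b) computes c - a*b, so fmadd/fnmadd map directly and fmsub/fnmsub are their negations. A small standalone check of those mappings (a sketch in plain NEON, assuming an AArch64 toolchain; not part of the patch):

#include <arm_neon.h>
#include <assert.h>

int main(void)
{
    float32x4_t a = vdupq_n_f32(2.0f), b = vdupq_n_f32(3.0f), c = vdupq_n_f32(5.0f);
    /* _mm_fmadd_ps(a,b,c)  = a*b + c    -> vfmaq_f32(c,a,b)            */
    assert(vgetq_lane_f32(vfmaq_f32(c, a, b), 0) == 2.0f*3.0f + 5.0f);
    /* _mm_fnmadd_ps(a,b,c) = c - a*b    -> vfmsq_f32(c,a,b)            */
    assert(vgetq_lane_f32(vfmsq_f32(c, a, b), 0) == 5.0f - 2.0f*3.0f);
    /* _mm_fmsub_ps(a,b,c)  = a*b - c    -> vnegq_f32(vfmsq_f32(c,a,b)) */
    assert(vgetq_lane_f32(vnegq_f32(vfmsq_f32(c, a, b)), 0) == 2.0f*3.0f - 5.0f);
    /* _mm_fnmsub_ps(a,b,c) = -(a*b) - c -> vnegq_f32(vfmaq_f32(c,a,b)) */
    assert(vgetq_lane_f32(vnegq_f32(vfmaq_f32(c, a, b)), 0) == -(2.0f*3.0f) - 5.0f);
    return 0;
}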
+#include "avx2neon.h" /* Dummy defines for floating point control */ #define _MM_MASK_MASK 0x1f80 #define _MM_MASK_DIV_ZERO 0x200 -#define _MM_FLUSH_ZERO_ON 0x8000 +// #define _MM_FLUSH_ZERO_ON 0x8000 #define _MM_MASK_DENORM 0x100 #define _MM_SET_EXCEPTION_MASK(x) -#define _MM_SET_FLUSH_ZERO_MODE(x) +// #define _MM_SET_FLUSH_ZERO_MODE(x) __forceinline int _mm_getcsr() { @@ -48,3 +43,43 @@ __forceinline void _mm_mfence() { __sync_synchronize(); } + +__forceinline __m128i _mm_load4epu8_epi32(__m128i *ptr) +{ + uint8x8_t t0 = vld1_u8((uint8_t*)ptr); + uint16x8_t t1 = vmovl_u8(t0); + uint32x4_t t2 = vmovl_u16(vget_low_u16(t1)); + return vreinterpretq_s32_u32(t2); +} + +__forceinline __m128i _mm_load4epu16_epi32(__m128i *ptr) +{ + uint16x8_t t0 = vld1q_u16((uint16_t*)ptr); + uint32x4_t t1 = vmovl_u16(vget_low_u16(t0)); + return vreinterpretq_s32_u32(t1); +} + +__forceinline __m128i _mm_load4epi8_f32(__m128i *ptr) +{ + int8x8_t t0 = vld1_s8((int8_t*)ptr); + int16x8_t t1 = vmovl_s8(t0); + int32x4_t t2 = vmovl_s16(vget_low_s16(t1)); + float32x4_t t3 = vcvtq_f32_s32(t2); + return vreinterpretq_s32_f32(t3); +} + +__forceinline __m128i _mm_load4epu8_f32(__m128i *ptr) +{ + uint8x8_t t0 = vld1_u8((uint8_t*)ptr); + uint16x8_t t1 = vmovl_u8(t0); + uint32x4_t t2 = vmovl_u16(vget_low_u16(t1)); + return vreinterpretq_s32_u32(t2); +} + +__forceinline __m128i _mm_load4epi16_f32(__m128i *ptr) +{ + int16x8_t t0 = vld1q_s16((int16_t*)ptr); + int32x4_t t1 = vmovl_s16(vget_low_s16(t0)); + float32x4_t t2 = vcvtq_f32_s32(t1); + return vreinterpretq_s32_f32(t2); +} diff --git a/thirdparty/embree/common/simd/arm/sse2neon.h b/thirdparty/embree/common/simd/arm/sse2neon.h index 7eb25cf2c5..43416662d7 100644 --- a/thirdparty/embree/common/simd/arm/sse2neon.h +++ b/thirdparty/embree/common/simd/arm/sse2neon.h @@ -52,7 +52,7 @@ /* Enable precise implementation of math operations * This would slow down the computation a bit, but gives consistent result with - * x86 SSE2. (e.g. would solve a hole or NaN pixel in the rendering result) + * x86 SSE. (e.g. would solve a hole or NaN pixel in the rendering result) */ /* _mm_min_ps and _mm_max_ps */ #ifndef SSE2NEON_PRECISE_MINMAX @@ -66,36 +66,29 @@ #ifndef SSE2NEON_PRECISE_SQRT #define SSE2NEON_PRECISE_SQRT (0) #endif -#ifndef SSE2NEON_PRECISE_RSQRT -#define SSE2NEON_PRECISE_RSQRT (0) +/* _mm_dp_pd */ +#ifndef SSE2NEON_PRECISE_DP +#define SSE2NEON_PRECISE_DP (0) #endif +/* compiler specific definitions */ #if defined(__GNUC__) || defined(__clang__) #pragma push_macro("FORCE_INLINE") #pragma push_macro("ALIGN_STRUCT") #define FORCE_INLINE static inline __attribute__((always_inline)) #define ALIGN_STRUCT(x) __attribute__((aligned(x))) -#ifndef likely -#define likely(x) __builtin_expect(!!(x), 1) -#endif -#ifndef unlikely -#define unlikely(x) __builtin_expect(!!(x), 0) -#endif -#else -#error "Macro name collisions may happen with unsupported compiler." -#ifdef FORCE_INLINE -#undef FORCE_INLINE -#endif +#define _sse2neon_likely(x) __builtin_expect(!!(x), 1) +#define _sse2neon_unlikely(x) __builtin_expect(!!(x), 0) +#else /* non-GNU / non-clang compilers */ +#warning "Macro name collisions may happen with unsupported compiler." 
+#ifndef FORCE_INLINE #define FORCE_INLINE static inline +#endif #ifndef ALIGN_STRUCT #define ALIGN_STRUCT(x) __declspec(align(x)) #endif -#endif -#ifndef likely -#define likely(x) (x) -#endif -#ifndef unlikely -#define unlikely(x) (x) +#define _sse2neon_likely(x) (x) +#define _sse2neon_unlikely(x) (x) #endif #include <stdint.h> @@ -155,6 +148,14 @@ * argument "a" of mm_shuffle_ps that will be places in fp1 of result. * fp0 is the same for fp0 of result. */ +#if defined(__aarch64__) +#define _MN_SHUFFLE(fp3,fp2,fp1,fp0) ( (uint8x16_t){ (((fp3)*4)+0), (((fp3)*4)+1), (((fp3)*4)+2), (((fp3)*4)+3), (((fp2)*4)+0), (((fp2)*4)+1), (((fp2)*4)+\ +2), (((fp2)*4)+3), (((fp1)*4)+0), (((fp1)*4)+1), (((fp1)*4)+2), (((fp1)*4)+3), (((fp0)*4)+0), (((fp0)*4)+1), (((fp0)*4)+2), (((fp0)*4)+3) } ) +#define _MF_SHUFFLE(fp3,fp2,fp1,fp0) ( (uint8x16_t){ (((fp3)*4)+0), (((fp3)*4)+1), (((fp3)*4)+2), (((fp3)*4)+3), (((fp2)*4)+0), (((fp2)*4)+1), (((fp2)*4)+\ +2), (((fp2)*4)+3), (((fp1)*4)+16+0), (((fp1)*4)+16+1), (((fp1)*4)+16+2), (((fp1)*4)+16+3), (((fp0)*4)+16+0), (((fp0)*4)+16+1), (((fp0)*4)+16+2), (((fp0)*\ +4)+16+3) } ) +#endif + #define _MM_SHUFFLE(fp3, fp2, fp1, fp0) \ (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0))) @@ -169,6 +170,14 @@ #define _MM_ROUND_DOWN 0x2000 #define _MM_ROUND_UP 0x4000 #define _MM_ROUND_TOWARD_ZERO 0x6000 +/* Flush zero mode macros. */ +#define _MM_FLUSH_ZERO_MASK 0x8000 +#define _MM_FLUSH_ZERO_ON 0x8000 +#define _MM_FLUSH_ZERO_OFF 0x0000 +/* Denormals are zeros mode macros. */ +#define _MM_DENORMALS_ZERO_MASK 0x0040 +#define _MM_DENORMALS_ZERO_ON 0x0040 +#define _MM_DENORMALS_ZERO_OFF 0x0000 /* indicate immediate constant argument in a given range */ #define __constrange(a, b) const @@ -189,7 +198,10 @@ typedef float64x2_t __m128d; /* 128-bit vector containing 2 doubles */ #else typedef float32x4_t __m128d; #endif -typedef int64x2_t __m128i; /* 128-bit vector containing integers */ +// Note: upstream sse2neon declares __m128i as int64x2_t. However, there's +// many places within embree that assume __m128i can be indexed as a +// 4 element u32. 
+typedef int32x4_t __m128i; /* 128-bit vector containing integers */ /* type-safe casting between types */ @@ -221,28 +233,28 @@ typedef int64x2_t __m128i; /* 128-bit vector containing integers */ #define vreinterpretq_s32_m128(x) vreinterpretq_s32_f32(x) #define vreinterpretq_s64_m128(x) vreinterpretq_s64_f32(x) -#define vreinterpretq_m128i_s8(x) vreinterpretq_s64_s8(x) -#define vreinterpretq_m128i_s16(x) vreinterpretq_s64_s16(x) -#define vreinterpretq_m128i_s32(x) vreinterpretq_s64_s32(x) -#define vreinterpretq_m128i_s64(x) (x) +#define vreinterpretq_m128i_s8(x) vreinterpretq_s32_s8(x) +#define vreinterpretq_m128i_s16(x) vreinterpretq_s32_s16(x) +#define vreinterpretq_m128i_s32(x) (x) +#define vreinterpretq_m128i_s64(x) vreinterpretq_s32_s64(x) -#define vreinterpretq_m128i_u8(x) vreinterpretq_s64_u8(x) -#define vreinterpretq_m128i_u16(x) vreinterpretq_s64_u16(x) -#define vreinterpretq_m128i_u32(x) vreinterpretq_s64_u32(x) -#define vreinterpretq_m128i_u64(x) vreinterpretq_s64_u64(x) +#define vreinterpretq_m128i_u8(x) vreinterpretq_s32_u8(x) +#define vreinterpretq_m128i_u16(x) vreinterpretq_s32_u16(x) +#define vreinterpretq_m128i_u32(x) vreinterpretq_s32_u32(x) +#define vreinterpretq_m128i_u64(x) vreinterpretq_s32_u64(x) -#define vreinterpretq_f32_m128i(x) vreinterpretq_f32_s64(x) -#define vreinterpretq_f64_m128i(x) vreinterpretq_f64_s64(x) +#define vreinterpretq_f32_m128i(x) vreinterpretq_f32_s32(x) +#define vreinterpretq_f64_m128i(x) vreinterpretq_f64_s32(x) -#define vreinterpretq_s8_m128i(x) vreinterpretq_s8_s64(x) -#define vreinterpretq_s16_m128i(x) vreinterpretq_s16_s64(x) -#define vreinterpretq_s32_m128i(x) vreinterpretq_s32_s64(x) -#define vreinterpretq_s64_m128i(x) (x) +#define vreinterpretq_s8_m128i(x) vreinterpretq_s8_s32(x) +#define vreinterpretq_s16_m128i(x) vreinterpretq_s16_s32(x) +#define vreinterpretq_s32_m128i(x) (x) +#define vreinterpretq_s64_m128i(x) vreinterpretq_s64_s32(x) -#define vreinterpretq_u8_m128i(x) vreinterpretq_u8_s64(x) -#define vreinterpretq_u16_m128i(x) vreinterpretq_u16_s64(x) -#define vreinterpretq_u32_m128i(x) vreinterpretq_u32_s64(x) -#define vreinterpretq_u64_m128i(x) vreinterpretq_u64_s64(x) +#define vreinterpretq_u8_m128i(x) vreinterpretq_u8_s32(x) +#define vreinterpretq_u16_m128i(x) vreinterpretq_u16_s32(x) +#define vreinterpretq_u32_m128i(x) vreinterpretq_u32_s32(x) +#define vreinterpretq_u64_m128i(x) vreinterpretq_u64_s32(x) #define vreinterpret_m64_s8(x) vreinterpret_s64_s8(x) #define vreinterpret_m64_s16(x) vreinterpret_s64_s16(x) @@ -281,6 +293,7 @@ typedef int64x2_t __m128i; /* 128-bit vector containing integers */ #define vreinterpretq_s64_m128d(x) vreinterpretq_s64_f64(x) +#define vreinterpretq_u32_m128d(x) vreinterpretq_u32_f64(x) #define vreinterpretq_u64_m128d(x) vreinterpretq_u64_f64(x) #define vreinterpretq_f64_m128d(x) (x) @@ -303,10 +316,10 @@ typedef int64x2_t __m128i; /* 128-bit vector containing integers */ #endif // A struct is defined in this header file called 'SIMDVec' which can be used -// by applications which attempt to access the contents of an _m128 struct +// by applications which attempt to access the contents of an __m128 struct // directly. It is important to note that accessing the __m128 struct directly // is bad coding practice by Microsoft: @see: -// https://msdn.microsoft.com/en-us/library/ayeb3ayc.aspx +// https://docs.microsoft.com/en-us/cpp/cpp/m128 // // However, some legacy source code may try to access the contents of an __m128 // struct directly so the developer can use the SIMDVec as an alias for it. 
Any @@ -342,13 +355,48 @@ typedef union ALIGN_STRUCT(16) SIMDVec { #define vreinterpretq_nth_u32_m128i(x, n) (((SIMDVec *) &x)->m128_u32[n]) #define vreinterpretq_nth_u8_m128i(x, n) (((SIMDVec *) &x)->m128_u8[n]) +/* SSE macros */ +#define _MM_GET_FLUSH_ZERO_MODE _sse2neon_mm_get_flush_zero_mode +#define _MM_SET_FLUSH_ZERO_MODE _sse2neon_mm_set_flush_zero_mode +#define _MM_GET_DENORMALS_ZERO_MODE _sse2neon_mm_get_denormals_zero_mode +#define _MM_SET_DENORMALS_ZERO_MODE _sse2neon_mm_set_denormals_zero_mode + +// Function declaration +// SSE +FORCE_INLINE unsigned int _MM_GET_ROUNDING_MODE(); +FORCE_INLINE __m128 _mm_move_ss(__m128, __m128); +FORCE_INLINE __m128 _mm_or_ps(__m128, __m128); +FORCE_INLINE __m128 _mm_set_ps1(float); +FORCE_INLINE __m128 _mm_setzero_ps(void); +// SSE2 +FORCE_INLINE __m128i _mm_and_si128(__m128i, __m128i); +FORCE_INLINE __m128i _mm_castps_si128(__m128); +FORCE_INLINE __m128i _mm_cmpeq_epi32(__m128i, __m128i); +FORCE_INLINE __m128i _mm_cvtps_epi32(__m128); +FORCE_INLINE __m128d _mm_move_sd(__m128d, __m128d); +FORCE_INLINE __m128i _mm_or_si128(__m128i, __m128i); +FORCE_INLINE __m128i _mm_set_epi32(int, int, int, int); +FORCE_INLINE __m128i _mm_set_epi64x(int64_t, int64_t); +FORCE_INLINE __m128d _mm_set_pd(double, double); +FORCE_INLINE __m128i _mm_set1_epi32(int); +FORCE_INLINE __m128i _mm_setzero_si128(); +// SSE4.1 +FORCE_INLINE __m128d _mm_ceil_pd(__m128d); +FORCE_INLINE __m128 _mm_ceil_ps(__m128); +FORCE_INLINE __m128d _mm_floor_pd(__m128d); +FORCE_INLINE __m128 _mm_floor_ps(__m128); +FORCE_INLINE __m128d _mm_round_pd(__m128d, int); +FORCE_INLINE __m128 _mm_round_ps(__m128, int); +// SSE4.2 +FORCE_INLINE uint32_t _mm_crc32_u8(uint32_t, uint8_t); + /* Backwards compatibility for compilers with lack of specific type support */ // Older gcc does not define vld1q_u8_x4 type -#if defined(__GNUC__) && !defined(__clang__) && \ - ((__GNUC__ == 10 && (__GNUC_MINOR__ <= 1)) || \ - (__GNUC__ == 9 && (__GNUC_MINOR__ <= 3)) || \ - (__GNUC__ == 8 && (__GNUC_MINOR__ <= 4)) || __GNUC__ <= 7) +#if defined(__GNUC__) && !defined(__clang__) && \ + ((__GNUC__ <= 10 && defined(__arm__)) || \ + (__GNUC__ == 10 && __GNUC_MINOR__ < 3 && defined(__aarch64__)) || \ + (__GNUC__ <= 9 && defined(__aarch64__))) FORCE_INLINE uint8x16x4_t _sse2neon_vld1q_u8_x4(const uint8_t *p) { uint8x16x4_t ret; @@ -443,8 +491,6 @@ FORCE_INLINE uint8x16x4_t _sse2neon_vld1q_u8_x4(const uint8_t *p) +------+------+------+------+------+------+-------------+ */ -/* Set/get methods */ - /* Constants for use with _mm_prefetch. */ enum _mm_hint { _MM_HINT_NTA = 0, /* load data to L1 and L2 cache, mark it as NTA */ @@ -457,1098 +503,1568 @@ enum _mm_hint { _MM_HINT_ET2 = 7 /* exclusive version of _MM_HINT_T2 */ }; -// Loads one cache line of data from address p to a location closer to the -// processor. https://msdn.microsoft.com/en-us/library/84szxsww(v=vs.100).aspx -FORCE_INLINE void _mm_prefetch(const void *p, int i) +// The bit field mapping to the FPCR(floating-point control register) +typedef struct { + uint16_t res0; + uint8_t res1 : 6; + uint8_t bit22 : 1; + uint8_t bit23 : 1; + uint8_t bit24 : 1; + uint8_t res2 : 7; +#if defined(__aarch64__) + uint32_t res3; +#endif +} fpcr_bitfield; + +// Takes the upper 64 bits of a and places it in the low end of the result +// Takes the lower 64 bits of b and places it into the high end of the result. 
+FORCE_INLINE __m128 _mm_shuffle_ps_1032(__m128 a, __m128 b) { - (void) i; - __builtin_prefetch(p); + float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a)); + float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b)); + return vreinterpretq_m128_f32(vcombine_f32(a32, b10)); } -// Pause the processor. This is typically used in spin-wait loops and depending -// on the x86 processor typical values are in the 40-100 cycle range. The -// 'yield' instruction isn't a good fit beacuse it's effectively a nop on most -// Arm cores. Experience with several databases has shown has shown an 'isb' is -// a reasonable approximation. -FORCE_INLINE void _mm_pause() +// takes the lower two 32-bit values from a and swaps them and places in high +// end of result takes the higher two 32 bit values from b and swaps them and +// places in low end of result. +FORCE_INLINE __m128 _mm_shuffle_ps_2301(__m128 a, __m128 b) { - __asm__ __volatile__("isb\n"); + float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a))); + float32x2_t b23 = vrev64_f32(vget_high_f32(vreinterpretq_f32_m128(b))); + return vreinterpretq_m128_f32(vcombine_f32(a01, b23)); } -// Copy the lower single-precision (32-bit) floating-point element of a to dst. -// -// dst[31:0] := a[31:0] -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_f32 -FORCE_INLINE float _mm_cvtss_f32(__m128 a) +FORCE_INLINE __m128 _mm_shuffle_ps_0321(__m128 a, __m128 b) { - return vgetq_lane_f32(vreinterpretq_f32_m128(a), 0); + float32x2_t a21 = vget_high_f32( + vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3)); + float32x2_t b03 = vget_low_f32( + vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3)); + return vreinterpretq_m128_f32(vcombine_f32(a21, b03)); } -// Convert the lower single-precision (32-bit) floating-point element in b to a -// double-precision (64-bit) floating-point element, store the result in the -// lower element of dst, and copy the upper element from a to the upper element -// of dst. 
+FORCE_INLINE __m128 _mm_shuffle_ps_2103(__m128 a, __m128 b) +{ + float32x2_t a03 = vget_low_f32( + vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3)); + float32x2_t b21 = vget_high_f32( + vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3)); + return vreinterpretq_m128_f32(vcombine_f32(a03, b21)); +} + +FORCE_INLINE __m128 _mm_shuffle_ps_1010(__m128 a, __m128 b) +{ + float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a)); + float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b)); + return vreinterpretq_m128_f32(vcombine_f32(a10, b10)); +} + +FORCE_INLINE __m128 _mm_shuffle_ps_1001(__m128 a, __m128 b) +{ + float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a))); + float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b)); + return vreinterpretq_m128_f32(vcombine_f32(a01, b10)); +} + +FORCE_INLINE __m128 _mm_shuffle_ps_0101(__m128 a, __m128 b) +{ + float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a))); + float32x2_t b01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(b))); + return vreinterpretq_m128_f32(vcombine_f32(a01, b01)); +} + +// keeps the low 64 bits of b in the low and puts the high 64 bits of a in the +// high +FORCE_INLINE __m128 _mm_shuffle_ps_3210(__m128 a, __m128 b) +{ + float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a)); + float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b)); + return vreinterpretq_m128_f32(vcombine_f32(a10, b32)); +} + +FORCE_INLINE __m128 _mm_shuffle_ps_0011(__m128 a, __m128 b) +{ + float32x2_t a11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 1); + float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0); + return vreinterpretq_m128_f32(vcombine_f32(a11, b00)); +} + +FORCE_INLINE __m128 _mm_shuffle_ps_0022(__m128 a, __m128 b) +{ + float32x2_t a22 = + vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0); + float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0); + return vreinterpretq_m128_f32(vcombine_f32(a22, b00)); +} + +FORCE_INLINE __m128 _mm_shuffle_ps_2200(__m128 a, __m128 b) +{ + float32x2_t a00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 0); + float32x2_t b22 = + vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(b)), 0); + return vreinterpretq_m128_f32(vcombine_f32(a00, b22)); +} + +FORCE_INLINE __m128 _mm_shuffle_ps_3202(__m128 a, __m128 b) +{ + float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0); + float32x2_t a22 = + vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0); + float32x2_t a02 = vset_lane_f32(a0, a22, 1); /* TODO: use vzip ?*/ + float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b)); + return vreinterpretq_m128_f32(vcombine_f32(a02, b32)); +} + +FORCE_INLINE __m128 _mm_shuffle_ps_1133(__m128 a, __m128 b) +{ + float32x2_t a33 = + vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 1); + float32x2_t b11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 1); + return vreinterpretq_m128_f32(vcombine_f32(a33, b11)); +} + +FORCE_INLINE __m128 _mm_shuffle_ps_2010(__m128 a, __m128 b) +{ + float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a)); + float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2); + float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0); + float32x2_t b20 = vset_lane_f32(b2, b00, 1); + return vreinterpretq_m128_f32(vcombine_f32(a10, b20)); +} + +FORCE_INLINE __m128 _mm_shuffle_ps_2001(__m128 a, __m128 b) +{ + float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a))); + float32_t b2 = vgetq_lane_f32(b, 2); 
+ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0); + float32x2_t b20 = vset_lane_f32(b2, b00, 1); + return vreinterpretq_m128_f32(vcombine_f32(a01, b20)); +} + +FORCE_INLINE __m128 _mm_shuffle_ps_2032(__m128 a, __m128 b) +{ + float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a)); + float32_t b2 = vgetq_lane_f32(b, 2); + float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0); + float32x2_t b20 = vset_lane_f32(b2, b00, 1); + return vreinterpretq_m128_f32(vcombine_f32(a32, b20)); +} + +// Kahan summation for accurate summation of floating-point numbers. +// http://blog.zachbjornson.com/2019/08/11/fast-float-summation.html +FORCE_INLINE void _sse2neon_kadd_f32(float *sum, float *c, float y) +{ + y -= *c; + float t = *sum + y; + *c = (t - *sum) - y; + *sum = t; +} + +#if defined(__ARM_FEATURE_CRYPTO) +// Wraps vmull_p64 +FORCE_INLINE uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b) +{ + poly64_t a = vget_lane_p64(vreinterpret_p64_u64(_a), 0); + poly64_t b = vget_lane_p64(vreinterpret_p64_u64(_b), 0); + return vreinterpretq_u64_p128(vmull_p64(a, b)); +} +#else // ARMv7 polyfill +// ARMv7/some A64 lacks vmull_p64, but it has vmull_p8. // -// dst[63:0] := Convert_FP32_To_FP64(b[31:0]) -// dst[127:64] := a[127:64] +// vmull_p8 calculates 8 8-bit->16-bit polynomial multiplies, but we need a +// 64-bit->128-bit polynomial multiply. // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_sd -FORCE_INLINE __m128d _mm_cvtss_sd(__m128d a, __m128 b) +// It needs some work and is somewhat slow, but it is still faster than all +// known scalar methods. +// +// Algorithm adapted to C from +// https://www.workofard.com/2017/07/ghash-for-low-end-cores/, which is adapted +// from "Fast Software Polynomial Multiplication on ARM Processors Using the +// NEON Engine" by Danilo Camara, Conrado Gouvea, Julio Lopez and Ricardo Dahab +// (https://hal.inria.fr/hal-01506572) +static uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b) { - double d = (double) vgetq_lane_f32(vreinterpretq_f32_m128(b), 0); + poly8x8_t a = vreinterpret_p8_u64(_a); + poly8x8_t b = vreinterpret_p8_u64(_b); + + // Masks + uint8x16_t k48_32 = vcombine_u8(vcreate_u8(0x0000ffffffffffff), + vcreate_u8(0x00000000ffffffff)); + uint8x16_t k16_00 = vcombine_u8(vcreate_u8(0x000000000000ffff), + vcreate_u8(0x0000000000000000)); + + // Do the multiplies, rotating with vext to get all combinations + uint8x16_t d = vreinterpretq_u8_p16(vmull_p8(a, b)); // D = A0 * B0 + uint8x16_t e = + vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 1))); // E = A0 * B1 + uint8x16_t f = + vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 1), b)); // F = A1 * B0 + uint8x16_t g = + vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 2))); // G = A0 * B2 + uint8x16_t h = + vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 2), b)); // H = A2 * B0 + uint8x16_t i = + vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 3))); // I = A0 * B3 + uint8x16_t j = + vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 3), b)); // J = A3 * B0 + uint8x16_t k = + vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 4))); // L = A0 * B4 + + // Add cross products + uint8x16_t l = veorq_u8(e, f); // L = E + F + uint8x16_t m = veorq_u8(g, h); // M = G + H + uint8x16_t n = veorq_u8(i, j); // N = I + J + + // Interleave. Using vzip1 and vzip2 prevents Clang from emitting TBL + // instructions. 
#if defined(__aarch64__) - return vreinterpretq_m128d_f64( - vsetq_lane_f64(d, vreinterpretq_f64_m128d(a), 0)); + uint8x16_t lm_p0 = vreinterpretq_u8_u64( + vzip1q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m))); + uint8x16_t lm_p1 = vreinterpretq_u8_u64( + vzip2q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m))); + uint8x16_t nk_p0 = vreinterpretq_u8_u64( + vzip1q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k))); + uint8x16_t nk_p1 = vreinterpretq_u8_u64( + vzip2q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k))); #else - return vreinterpretq_m128d_s64( - vsetq_lane_s64(*(int64_t *) &d, vreinterpretq_s64_m128d(a), 0)); + uint8x16_t lm_p0 = vcombine_u8(vget_low_u8(l), vget_low_u8(m)); + uint8x16_t lm_p1 = vcombine_u8(vget_high_u8(l), vget_high_u8(m)); + uint8x16_t nk_p0 = vcombine_u8(vget_low_u8(n), vget_low_u8(k)); + uint8x16_t nk_p1 = vcombine_u8(vget_high_u8(n), vget_high_u8(k)); #endif -} + // t0 = (L) (P0 + P1) << 8 + // t1 = (M) (P2 + P3) << 16 + uint8x16_t t0t1_tmp = veorq_u8(lm_p0, lm_p1); + uint8x16_t t0t1_h = vandq_u8(lm_p1, k48_32); + uint8x16_t t0t1_l = veorq_u8(t0t1_tmp, t0t1_h); -// Convert the lower single-precision (32-bit) floating-point element in a to a -// 32-bit integer, and store the result in dst. -// -// dst[31:0] := Convert_FP32_To_Int32(a[31:0]) -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_si32 -#define _mm_cvtss_si32(a) _mm_cvt_ss2si(a) + // t2 = (N) (P4 + P5) << 24 + // t3 = (K) (P6 + P7) << 32 + uint8x16_t t2t3_tmp = veorq_u8(nk_p0, nk_p1); + uint8x16_t t2t3_h = vandq_u8(nk_p1, k16_00); + uint8x16_t t2t3_l = veorq_u8(t2t3_tmp, t2t3_h); -// Convert the lower single-precision (32-bit) floating-point element in a to a -// 64-bit integer, and store the result in dst. -// -// dst[63:0] := Convert_FP32_To_Int64(a[31:0]) -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_si64 -FORCE_INLINE int _mm_cvtss_si64(__m128 a) -{ + // De-interleave #if defined(__aarch64__) - return vgetq_lane_s64( - vreinterpretq_s64_s32(vcvtnq_s32_f32(vreinterpretq_f32_m128(a))), 0); + uint8x16_t t0 = vreinterpretq_u8_u64( + vuzp1q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h))); + uint8x16_t t1 = vreinterpretq_u8_u64( + vuzp2q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h))); + uint8x16_t t2 = vreinterpretq_u8_u64( + vuzp1q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h))); + uint8x16_t t3 = vreinterpretq_u8_u64( + vuzp2q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h))); #else - float32_t data = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0); - float32_t diff = data - floor(data); - if (diff > 0.5) - return (int64_t) ceil(data); - if (unlikely(diff == 0.5)) { - int64_t f = (int64_t) floor(data); - int64_t c = (int64_t) ceil(data); - return c & 1 ? 
f : c; - } - return (int64_t) floor(data); + uint8x16_t t1 = vcombine_u8(vget_high_u8(t0t1_l), vget_high_u8(t0t1_h)); + uint8x16_t t0 = vcombine_u8(vget_low_u8(t0t1_l), vget_low_u8(t0t1_h)); + uint8x16_t t3 = vcombine_u8(vget_high_u8(t2t3_l), vget_high_u8(t2t3_h)); + uint8x16_t t2 = vcombine_u8(vget_low_u8(t2t3_l), vget_low_u8(t2t3_h)); #endif + // Shift the cross products + uint8x16_t t0_shift = vextq_u8(t0, t0, 15); // t0 << 8 + uint8x16_t t1_shift = vextq_u8(t1, t1, 14); // t1 << 16 + uint8x16_t t2_shift = vextq_u8(t2, t2, 13); // t2 << 24 + uint8x16_t t3_shift = vextq_u8(t3, t3, 12); // t3 << 32 + + // Accumulate the products + uint8x16_t cross1 = veorq_u8(t0_shift, t1_shift); + uint8x16_t cross2 = veorq_u8(t2_shift, t3_shift); + uint8x16_t mix = veorq_u8(d, cross1); + uint8x16_t r = veorq_u8(mix, cross2); + return vreinterpretq_u64_u8(r); } +#endif // ARMv7 polyfill -// Convert packed single-precision (32-bit) floating-point elements in a to -// packed 32-bit integers with truncation, and store the results in dst. -// -// FOR j := 0 to 1 -// i := 32*j -// dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) -// ENDFOR -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_ps2pi -FORCE_INLINE __m64 _mm_cvtt_ps2pi(__m128 a) +// C equivalent: +// __m128i _mm_shuffle_epi32_default(__m128i a, +// __constrange(0, 255) int imm) { +// __m128i ret; +// ret[0] = a[imm & 0x3]; ret[1] = a[(imm >> 2) & 0x3]; +// ret[2] = a[(imm >> 4) & 0x03]; ret[3] = a[(imm >> 6) & 0x03]; +// return ret; +// } +#define _mm_shuffle_epi32_default(a, imm) \ + __extension__({ \ + int32x4_t ret; \ + ret = vmovq_n_s32( \ + vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm) & (0x3))); \ + ret = vsetq_lane_s32( \ + vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 2) & 0x3), \ + ret, 1); \ + ret = vsetq_lane_s32( \ + vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 4) & 0x3), \ + ret, 2); \ + ret = vsetq_lane_s32( \ + vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 6) & 0x3), \ + ret, 3); \ + vreinterpretq_m128i_s32(ret); \ + }) + +// Takes the upper 64 bits of a and places it in the low end of the result +// Takes the lower 64 bits of a and places it into the high end of the result. +FORCE_INLINE __m128i _mm_shuffle_epi_1032(__m128i a) { - return vreinterpret_m64_s32( - vget_low_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)))); + int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a)); + int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a)); + return vreinterpretq_m128i_s32(vcombine_s32(a32, a10)); } -// Convert the lower single-precision (32-bit) floating-point element in a to a -// 32-bit integer with truncation, and store the result in dst. -// -// dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0]) -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_ss2si -FORCE_INLINE int _mm_cvtt_ss2si(__m128 a) +// takes the lower two 32-bit values from a and swaps them and places in low end +// of result takes the higher two 32 bit values from a and swaps them and places +// in high end of result. 
+FORCE_INLINE __m128i _mm_shuffle_epi_2301(__m128i a) { - return vgetq_lane_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)), 0); + int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a))); + int32x2_t a23 = vrev64_s32(vget_high_s32(vreinterpretq_s32_m128i(a))); + return vreinterpretq_m128i_s32(vcombine_s32(a01, a23)); } -// Convert packed single-precision (32-bit) floating-point elements in a to -// packed 32-bit integers with truncation, and store the results in dst. -// -// FOR j := 0 to 1 -// i := 32*j -// dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) -// ENDFOR -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttps_pi32 -#define _mm_cvttps_pi32(a) _mm_cvtt_ps2pi(a) +// rotates the least significant 32 bits into the most significant 32 bits, and +// shifts the rest down +FORCE_INLINE __m128i _mm_shuffle_epi_0321(__m128i a) +{ + return vreinterpretq_m128i_s32( + vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 1)); +} -// Convert the lower single-precision (32-bit) floating-point element in a to a -// 32-bit integer with truncation, and store the result in dst. -// -// dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0]) -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttss_si32 -#define _mm_cvttss_si32(a) _mm_cvtt_ss2si(a) +// rotates the most significant 32 bits into the least significant 32 bits, and +// shifts the rest up +FORCE_INLINE __m128i _mm_shuffle_epi_2103(__m128i a) +{ + return vreinterpretq_m128i_s32( + vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 3)); +} -// Convert the lower single-precision (32-bit) floating-point element in a to a -// 64-bit integer with truncation, and store the result in dst. -// -// dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0]) -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttss_si64 -FORCE_INLINE int64_t _mm_cvttss_si64(__m128 a) +// gets the lower 64 bits of a, and places it in the upper 64 bits +// gets the lower 64 bits of a and places it in the lower 64 bits +FORCE_INLINE __m128i _mm_shuffle_epi_1010(__m128i a) { - return vgetq_lane_s64( - vmovl_s32(vget_low_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)))), 0); + int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a)); + return vreinterpretq_m128i_s32(vcombine_s32(a10, a10)); } -// Sets the 128-bit value to zero -// https://msdn.microsoft.com/en-us/library/vstudio/ys7dw0kh(v=vs.100).aspx -FORCE_INLINE __m128i _mm_setzero_si128(void) +// gets the lower 64 bits of a, swaps the 0 and 1 elements, and places it in the +// lower 64 bits gets the lower 64 bits of a, and places it in the upper 64 bits +FORCE_INLINE __m128i _mm_shuffle_epi_1001(__m128i a) { - return vreinterpretq_m128i_s32(vdupq_n_s32(0)); + int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a))); + int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a)); + return vreinterpretq_m128i_s32(vcombine_s32(a01, a10)); } -// Clears the four single-precision, floating-point values. 
-// https://msdn.microsoft.com/en-us/library/vstudio/tk1t2tbz(v=vs.100).aspx -FORCE_INLINE __m128 _mm_setzero_ps(void) +// gets the lower 64 bits of a, swaps the 0 and 1 elements and places it in the +// upper 64 bits gets the lower 64 bits of a, swaps the 0 and 1 elements, and +// places it in the lower 64 bits +FORCE_INLINE __m128i _mm_shuffle_epi_0101(__m128i a) { - return vreinterpretq_m128_f32(vdupq_n_f32(0)); + int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a))); + return vreinterpretq_m128i_s32(vcombine_s32(a01, a01)); } -// Return vector of type __m128d with all elements set to zero. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setzero_pd -FORCE_INLINE __m128d _mm_setzero_pd(void) +FORCE_INLINE __m128i _mm_shuffle_epi_2211(__m128i a) { + int32x2_t a11 = vdup_lane_s32(vget_low_s32(vreinterpretq_s32_m128i(a)), 1); + int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0); + return vreinterpretq_m128i_s32(vcombine_s32(a11, a22)); +} + +FORCE_INLINE __m128i _mm_shuffle_epi_0122(__m128i a) +{ + int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0); + int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a))); + return vreinterpretq_m128i_s32(vcombine_s32(a22, a01)); +} + +FORCE_INLINE __m128i _mm_shuffle_epi_3332(__m128i a) +{ + int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a)); + int32x2_t a33 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 1); + return vreinterpretq_m128i_s32(vcombine_s32(a32, a33)); +} + +// FORCE_INLINE __m128i _mm_shuffle_epi32_splat(__m128i a, __constrange(0,255) +// int imm) #if defined(__aarch64__) - return vreinterpretq_m128d_f64(vdupq_n_f64(0)); +#define _mm_shuffle_epi32_splat(a, imm) \ + __extension__({ \ + vreinterpretq_m128i_s32( \ + vdupq_laneq_s32(vreinterpretq_s32_m128i(a), (imm))); \ + }) #else - return vreinterpretq_m128d_f32(vdupq_n_f32(0)); +#define _mm_shuffle_epi32_splat(a, imm) \ + __extension__({ \ + vreinterpretq_m128i_s32( \ + vdupq_n_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm)))); \ + }) #endif -} -// Sets the four single-precision, floating-point values to w. +// NEON does not support a general purpose permute intrinsic +// Selects four specific single-precision, floating-point values from a and b, +// based on the mask i. // -// r0 := r1 := r2 := r3 := w +// C equivalent: +// __m128 _mm_shuffle_ps_default(__m128 a, __m128 b, +// __constrange(0, 255) int imm) { +// __m128 ret; +// ret[0] = a[imm & 0x3]; ret[1] = a[(imm >> 2) & 0x3]; +// ret[2] = b[(imm >> 4) & 0x03]; ret[3] = b[(imm >> 6) & 0x03]; +// return ret; +// } // -// https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx -FORCE_INLINE __m128 _mm_set1_ps(float _w) +// https://msdn.microsoft.com/en-us/library/vstudio/5f0858x0(v=vs.100).aspx +#define _mm_shuffle_ps_default(a, b, imm) \ + __extension__({ \ + float32x4_t ret; \ + ret = vmovq_n_f32( \ + vgetq_lane_f32(vreinterpretq_f32_m128(a), (imm) & (0x3))); \ + ret = vsetq_lane_f32( \ + vgetq_lane_f32(vreinterpretq_f32_m128(a), ((imm) >> 2) & 0x3), \ + ret, 1); \ + ret = vsetq_lane_f32( \ + vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 4) & 0x3), \ + ret, 2); \ + ret = vsetq_lane_f32( \ + vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 6) & 0x3), \ + ret, 3); \ + vreinterpretq_m128_f32(ret); \ + }) + +// Shuffles the lower 4 signed or unsigned 16-bit integers in a as specified +// by imm. 
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/y41dkk37(v=vs.100) +// FORCE_INLINE __m128i _mm_shufflelo_epi16_function(__m128i a, +// __constrange(0,255) int +// imm) +#define _mm_shufflelo_epi16_function(a, imm) \ + __extension__({ \ + int16x8_t ret = vreinterpretq_s16_m128i(a); \ + int16x4_t lowBits = vget_low_s16(ret); \ + ret = vsetq_lane_s16(vget_lane_s16(lowBits, (imm) & (0x3)), ret, 0); \ + ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 2) & 0x3), ret, \ + 1); \ + ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 4) & 0x3), ret, \ + 2); \ + ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 6) & 0x3), ret, \ + 3); \ + vreinterpretq_m128i_s16(ret); \ + }) + +// Shuffles the upper 4 signed or unsigned 16-bit integers in a as specified +// by imm. +// https://msdn.microsoft.com/en-us/library/13ywktbs(v=vs.100).aspx +// FORCE_INLINE __m128i _mm_shufflehi_epi16_function(__m128i a, +// __constrange(0,255) int +// imm) +#define _mm_shufflehi_epi16_function(a, imm) \ + __extension__({ \ + int16x8_t ret = vreinterpretq_s16_m128i(a); \ + int16x4_t highBits = vget_high_s16(ret); \ + ret = vsetq_lane_s16(vget_lane_s16(highBits, (imm) & (0x3)), ret, 4); \ + ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 2) & 0x3), ret, \ + 5); \ + ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 4) & 0x3), ret, \ + 6); \ + ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 6) & 0x3), ret, \ + 7); \ + vreinterpretq_m128i_s16(ret); \ + }) + +/* SSE */ + +// Adds the four single-precision, floating-point values of a and b. +// +// r0 := a0 + b0 +// r1 := a1 + b1 +// r2 := a2 + b2 +// r3 := a3 + b3 +// +// https://msdn.microsoft.com/en-us/library/vstudio/c9848chc(v=vs.100).aspx +FORCE_INLINE __m128 _mm_add_ps(__m128 a, __m128 b) { - return vreinterpretq_m128_f32(vdupq_n_f32(_w)); + return vreinterpretq_m128_f32( + vaddq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); } -// Sets the four single-precision, floating-point values to w. -// https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx -FORCE_INLINE __m128 _mm_set_ps1(float _w) +// adds the scalar single-precision floating point values of a and b. +// https://msdn.microsoft.com/en-us/library/be94x2y6(v=vs.100).aspx +FORCE_INLINE __m128 _mm_add_ss(__m128 a, __m128 b) { - return vreinterpretq_m128_f32(vdupq_n_f32(_w)); + float32_t b0 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 0); + float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0); + // the upper values in the result must be the remnants of <a>. + return vreinterpretq_m128_f32(vaddq_f32(a, value)); } -// Sets the four single-precision, floating-point values to the four inputs. -// https://msdn.microsoft.com/en-us/library/vstudio/afh0zf75(v=vs.100).aspx -FORCE_INLINE __m128 _mm_set_ps(float w, float z, float y, float x) +// Computes the bitwise AND of the four single-precision, floating-point values +// of a and b. +// +// r0 := a0 & b0 +// r1 := a1 & b1 +// r2 := a2 & b2 +// r3 := a3 & b3 +// +// https://msdn.microsoft.com/en-us/library/vstudio/73ck1xc5(v=vs.100).aspx +FORCE_INLINE __m128 _mm_and_ps(__m128 a, __m128 b) { - float ALIGN_STRUCT(16) data[4] = {x, y, z, w}; - return vreinterpretq_m128_f32(vld1q_f32(data)); + return vreinterpretq_m128_s32( + vandq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b))); } -// Copy single-precision (32-bit) floating-point element a to the lower element -// of dst, and zero the upper 3 elements. 
-// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_ss -FORCE_INLINE __m128 _mm_set_ss(float a) +// Computes the bitwise AND-NOT of the four single-precision, floating-point +// values of a and b. +// +// r0 := ~a0 & b0 +// r1 := ~a1 & b1 +// r2 := ~a2 & b2 +// r3 := ~a3 & b3 +// +// https://msdn.microsoft.com/en-us/library/vstudio/68h7wd02(v=vs.100).aspx +FORCE_INLINE __m128 _mm_andnot_ps(__m128 a, __m128 b) { - float ALIGN_STRUCT(16) data[4] = {a, 0, 0, 0}; - return vreinterpretq_m128_f32(vld1q_f32(data)); + return vreinterpretq_m128_s32( + vbicq_s32(vreinterpretq_s32_m128(b), + vreinterpretq_s32_m128(a))); // *NOTE* argument swap } -// Sets the four single-precision, floating-point values to the four inputs in -// reverse order. -// https://msdn.microsoft.com/en-us/library/vstudio/d2172ct3(v=vs.100).aspx -FORCE_INLINE __m128 _mm_setr_ps(float w, float z, float y, float x) +// Average packed unsigned 16-bit integers in a and b, and store the results in +// dst. +// +// FOR j := 0 to 3 +// i := j*16 +// dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_avg_pu16 +FORCE_INLINE __m64 _mm_avg_pu16(__m64 a, __m64 b) { - float ALIGN_STRUCT(16) data[4] = {w, z, y, x}; - return vreinterpretq_m128_f32(vld1q_f32(data)); + return vreinterpret_m64_u16( + vrhadd_u16(vreinterpret_u16_m64(a), vreinterpret_u16_m64(b))); } -// Sets the 8 signed 16-bit integer values in reverse order. +// Average packed unsigned 8-bit integers in a and b, and store the results in +// dst. // -// Return Value -// r0 := w0 -// r1 := w1 -// ... -// r7 := w7 -FORCE_INLINE __m128i _mm_setr_epi16(short w0, - short w1, - short w2, - short w3, - short w4, - short w5, - short w6, - short w7) +// FOR j := 0 to 7 +// i := j*8 +// dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_avg_pu8 +FORCE_INLINE __m64 _mm_avg_pu8(__m64 a, __m64 b) { - int16_t ALIGN_STRUCT(16) data[8] = {w0, w1, w2, w3, w4, w5, w6, w7}; - return vreinterpretq_m128i_s16(vld1q_s16((int16_t *) data)); + return vreinterpret_m64_u8( + vrhadd_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b))); } -// Sets the 4 signed 32-bit integer values in reverse order -// https://technet.microsoft.com/en-us/library/security/27yb3ee5(v=vs.90).aspx -FORCE_INLINE __m128i _mm_setr_epi32(int i3, int i2, int i1, int i0) +// Compares for equality. +// https://msdn.microsoft.com/en-us/library/vstudio/36aectz5(v=vs.100).aspx +FORCE_INLINE __m128 _mm_cmpeq_ps(__m128 a, __m128 b) { - int32_t ALIGN_STRUCT(16) data[4] = {i3, i2, i1, i0}; - return vreinterpretq_m128i_s32(vld1q_s32(data)); + return vreinterpretq_m128_u32( + vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); } -// Set packed 64-bit integers in dst with the supplied values in reverse order. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setr_epi64 -FORCE_INLINE __m128i _mm_setr_epi64(__m64 e1, __m64 e0) +// Compares for equality. +// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/k423z28e(v=vs.100) +FORCE_INLINE __m128 _mm_cmpeq_ss(__m128 a, __m128 b) { - return vreinterpretq_m128i_s64(vcombine_s64(e1, e0)); + return _mm_move_ss(a, _mm_cmpeq_ps(a, b)); } -// Sets the 16 signed 8-bit integer values to b. +// Compares for greater than or equal. 
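_mm_avg_pu8 and _mm_avg_pu16 above compute (a + b + 1) >> 1 per element, so ties round up; vrhadd_u8/vrhadd_u16 have exactly that rounding. A sketch under the same assumed "sse2neon.h" include; the memcpy calls are just a portable way to fill and read a __m64:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "sse2neon.h" /* assumed header name */

int main(void)
{
    uint8_t av[8] = {3, 3, 3, 3, 3, 3, 3, 3};
    uint8_t bv[8] = {4, 4, 4, 4, 4, 4, 4, 4};
    __m64 a, b;
    memcpy(&a, av, sizeof a);
    memcpy(&b, bv, sizeof b);
    __m64 r = _mm_avg_pu8(a, b); /* each byte: (3 + 4 + 1) >> 1 = 4 */
    uint8_t out[8];
    memcpy(out, &r, sizeof out);
    printf("%u\n", out[0]); /* 4 */
    return 0;
}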
+// https://msdn.microsoft.com/en-us/library/vstudio/fs813y2t(v=vs.100).aspx +FORCE_INLINE __m128 _mm_cmpge_ps(__m128 a, __m128 b) +{ + return vreinterpretq_m128_u32( + vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); +} + +// Compares for greater than or equal. +// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/kesh3ddc(v=vs.100) +FORCE_INLINE __m128 _mm_cmpge_ss(__m128 a, __m128 b) +{ + return _mm_move_ss(a, _mm_cmpge_ps(a, b)); +} + +// Compares for greater than. // -// r0 := b -// r1 := b -// ... -// r15 := b +// r0 := (a0 > b0) ? 0xffffffff : 0x0 +// r1 := (a1 > b1) ? 0xffffffff : 0x0 +// r2 := (a2 > b2) ? 0xffffffff : 0x0 +// r3 := (a3 > b3) ? 0xffffffff : 0x0 // -// https://msdn.microsoft.com/en-us/library/6e14xhyf(v=vs.100).aspx -FORCE_INLINE __m128i _mm_set1_epi8(signed char w) +// https://msdn.microsoft.com/en-us/library/vstudio/11dy102s(v=vs.100).aspx +FORCE_INLINE __m128 _mm_cmpgt_ps(__m128 a, __m128 b) { - return vreinterpretq_m128i_s8(vdupq_n_s8(w)); + return vreinterpretq_m128_u32( + vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); } -// Broadcast double-precision (64-bit) floating-point value a to all elements of -// dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_pd -FORCE_INLINE __m128d _mm_set1_pd(double d) +// Compares for greater than. +// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/1xyyyy9e(v=vs.100) +FORCE_INLINE __m128 _mm_cmpgt_ss(__m128 a, __m128 b) { -#if defined(__aarch64__) - return vreinterpretq_m128d_f64(vdupq_n_f64(d)); -#else - return vreinterpretq_m128d_s64(vdupq_n_s64(*(int64_t *) &d)); -#endif + return _mm_move_ss(a, _mm_cmpgt_ps(a, b)); } -// Sets the 8 signed 16-bit integer values to w. +// Compares for less than or equal. // -// r0 := w -// r1 := w -// ... -// r7 := w +// r0 := (a0 <= b0) ? 0xffffffff : 0x0 +// r1 := (a1 <= b1) ? 0xffffffff : 0x0 +// r2 := (a2 <= b2) ? 0xffffffff : 0x0 +// r3 := (a3 <= b3) ? 0xffffffff : 0x0 // -// https://msdn.microsoft.com/en-us/library/k0ya3x0e(v=vs.90).aspx -FORCE_INLINE __m128i _mm_set1_epi16(short w) +// https://msdn.microsoft.com/en-us/library/vstudio/1s75w83z(v=vs.100).aspx +FORCE_INLINE __m128 _mm_cmple_ps(__m128 a, __m128 b) { - return vreinterpretq_m128i_s16(vdupq_n_s16(w)); + return vreinterpretq_m128_u32( + vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); } -// Sets the 16 signed 8-bit integer values. -// https://msdn.microsoft.com/en-us/library/x0cx8zd3(v=vs.90).aspx -FORCE_INLINE __m128i _mm_set_epi8(signed char b15, - signed char b14, - signed char b13, - signed char b12, - signed char b11, - signed char b10, - signed char b9, - signed char b8, - signed char b7, - signed char b6, - signed char b5, - signed char b4, - signed char b3, - signed char b2, - signed char b1, - signed char b0) +// Compares for less than or equal. +// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/a7x0hbhw(v=vs.100) +FORCE_INLINE __m128 _mm_cmple_ss(__m128 a, __m128 b) { - int8_t ALIGN_STRUCT(16) - data[16] = {(int8_t) b0, (int8_t) b1, (int8_t) b2, (int8_t) b3, - (int8_t) b4, (int8_t) b5, (int8_t) b6, (int8_t) b7, - (int8_t) b8, (int8_t) b9, (int8_t) b10, (int8_t) b11, - (int8_t) b12, (int8_t) b13, (int8_t) b14, (int8_t) b15}; - return (__m128i) vld1q_s8(data); + return _mm_move_ss(a, _mm_cmple_ps(a, b)); } -// Sets the 8 signed 16-bit integer values. 
-// https://msdn.microsoft.com/en-au/library/3e0fek84(v=vs.90).aspx -FORCE_INLINE __m128i _mm_set_epi16(short i7, - short i6, - short i5, - short i4, - short i3, - short i2, - short i1, - short i0) +// Compares for less than +// https://msdn.microsoft.com/en-us/library/vstudio/f330yhc8(v=vs.100).aspx +FORCE_INLINE __m128 _mm_cmplt_ps(__m128 a, __m128 b) { - int16_t ALIGN_STRUCT(16) data[8] = {i0, i1, i2, i3, i4, i5, i6, i7}; - return vreinterpretq_m128i_s16(vld1q_s16(data)); + return vreinterpretq_m128_u32( + vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); } -// Sets the 16 signed 8-bit integer values in reverse order. -// https://msdn.microsoft.com/en-us/library/2khb9c7k(v=vs.90).aspx -FORCE_INLINE __m128i _mm_setr_epi8(signed char b0, - signed char b1, - signed char b2, - signed char b3, - signed char b4, - signed char b5, - signed char b6, - signed char b7, - signed char b8, - signed char b9, - signed char b10, - signed char b11, - signed char b12, - signed char b13, - signed char b14, - signed char b15) +// Compares for less than +// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/fy94wye7(v=vs.100) +FORCE_INLINE __m128 _mm_cmplt_ss(__m128 a, __m128 b) { - int8_t ALIGN_STRUCT(16) - data[16] = {(int8_t) b0, (int8_t) b1, (int8_t) b2, (int8_t) b3, - (int8_t) b4, (int8_t) b5, (int8_t) b6, (int8_t) b7, - (int8_t) b8, (int8_t) b9, (int8_t) b10, (int8_t) b11, - (int8_t) b12, (int8_t) b13, (int8_t) b14, (int8_t) b15}; - return (__m128i) vld1q_s8(data); + return _mm_move_ss(a, _mm_cmplt_ps(a, b)); } -// Sets the 4 signed 32-bit integer values to i. -// -// r0 := i -// r1 := i -// r2 := i -// r3 := I -// -// https://msdn.microsoft.com/en-us/library/vstudio/h4xscxat(v=vs.100).aspx -FORCE_INLINE __m128i _mm_set1_epi32(int _i) +// Compares for inequality. +// https://msdn.microsoft.com/en-us/library/sf44thbx(v=vs.100).aspx +FORCE_INLINE __m128 _mm_cmpneq_ps(__m128 a, __m128 b) { - return vreinterpretq_m128i_s32(vdupq_n_s32(_i)); + return vreinterpretq_m128_u32(vmvnq_u32( + vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)))); } -// Sets the 2 signed 64-bit integer values to i. -// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/whtfzhzk(v=vs.100) -FORCE_INLINE __m128i _mm_set1_epi64(__m64 _i) +// Compares for inequality. +// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/ekya8fh4(v=vs.100) +FORCE_INLINE __m128 _mm_cmpneq_ss(__m128 a, __m128 b) { - return vreinterpretq_m128i_s64(vdupq_n_s64((int64_t) _i)); + return _mm_move_ss(a, _mm_cmpneq_ps(a, b)); } -// Sets the 2 signed 64-bit integer values to i. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_epi64x -FORCE_INLINE __m128i _mm_set1_epi64x(int64_t _i) +// Compares for not greater than or equal. +// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/wsexys62(v=vs.100) +FORCE_INLINE __m128 _mm_cmpnge_ps(__m128 a, __m128 b) { - return vreinterpretq_m128i_s64(vdupq_n_s64(_i)); + return vreinterpretq_m128_u32(vmvnq_u32( + vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)))); } -// Sets the 4 signed 32-bit integer values. -// https://msdn.microsoft.com/en-us/library/vstudio/019beekt(v=vs.100).aspx -FORCE_INLINE __m128i _mm_set_epi32(int i3, int i2, int i1, int i0) +// Compares for not greater than or equal. 
+// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/fk2y80s8(v=vs.100) +FORCE_INLINE __m128 _mm_cmpnge_ss(__m128 a, __m128 b) { - int32_t ALIGN_STRUCT(16) data[4] = {i0, i1, i2, i3}; - return vreinterpretq_m128i_s32(vld1q_s32(data)); + return _mm_move_ss(a, _mm_cmpnge_ps(a, b)); } -// Returns the __m128i structure with its two 64-bit integer values -// initialized to the values of the two 64-bit integers passed in. -// https://msdn.microsoft.com/en-us/library/dk2sdw0h(v=vs.120).aspx -FORCE_INLINE __m128i _mm_set_epi64x(int64_t i1, int64_t i2) +// Compares for not greater than. +// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/d0xh7w0s(v=vs.100) +FORCE_INLINE __m128 _mm_cmpngt_ps(__m128 a, __m128 b) { - return vreinterpretq_m128i_s64( - vcombine_s64(vcreate_s64(i2), vcreate_s64(i1))); + return vreinterpretq_m128_u32(vmvnq_u32( + vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)))); } -// Returns the __m128i structure with its two 64-bit integer values -// initialized to the values of the two 64-bit integers passed in. -// https://msdn.microsoft.com/en-us/library/dk2sdw0h(v=vs.120).aspx -FORCE_INLINE __m128i _mm_set_epi64(__m64 i1, __m64 i2) +// Compares for not greater than. +// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/z7x9ydwh(v=vs.100) +FORCE_INLINE __m128 _mm_cmpngt_ss(__m128 a, __m128 b) { - return _mm_set_epi64x((int64_t) i1, (int64_t) i2); + return _mm_move_ss(a, _mm_cmpngt_ps(a, b)); } -// Set packed double-precision (64-bit) floating-point elements in dst with the -// supplied values. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_pd -FORCE_INLINE __m128d _mm_set_pd(double e1, double e0) +// Compares for not less than or equal. +// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/6a330kxw(v=vs.100) +FORCE_INLINE __m128 _mm_cmpnle_ps(__m128 a, __m128 b) { - double ALIGN_STRUCT(16) data[2] = {e0, e1}; -#if defined(__aarch64__) - return vreinterpretq_m128d_f64(vld1q_f64((float64_t *) data)); -#else - return vreinterpretq_m128d_f32(vld1q_f32((float32_t *) data)); -#endif + return vreinterpretq_m128_u32(vmvnq_u32( + vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)))); } -// Set packed double-precision (64-bit) floating-point elements in dst with the -// supplied values in reverse order. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setr_pd -FORCE_INLINE __m128d _mm_setr_pd(double e1, double e0) +// Compares for not less than or equal. +// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/z7x9ydwh(v=vs.100) +FORCE_INLINE __m128 _mm_cmpnle_ss(__m128 a, __m128 b) { - return _mm_set_pd(e0, e1); + return _mm_move_ss(a, _mm_cmpnle_ps(a, b)); } -// Copy double-precision (64-bit) floating-point element a to the lower element -// of dst, and zero the upper element. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_sd -FORCE_INLINE __m128d _mm_set_sd(double a) +// Compares for not less than. +// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/4686bbdw(v=vs.100) +FORCE_INLINE __m128 _mm_cmpnlt_ps(__m128 a, __m128 b) { - return _mm_set_pd(0, a); + return vreinterpretq_m128_u32(vmvnq_u32( + vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)))); } -// Broadcast double-precision (64-bit) floating-point value a to all elements of -// dst. 
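The packed comparisons in this hunk (_mm_cmpge_ps, _mm_cmplt_ps, the negated cmpn* forms, and so on) return an all-ones or all-zero bit pattern per lane, so their results can be combined with _mm_and_ps/_mm_andnot_ps/_mm_or_ps as branchless select masks. A sketch; select_ge is an illustrative helper and "sse2neon.h" an assumed include name:

#include <stdio.h>
#include "sse2neon.h" /* assumed header name */

/* Per-lane select: keep a where a >= b, otherwise take b. */
static __m128 select_ge(__m128 a, __m128 b)
{
    __m128 mask = _mm_cmpge_ps(a, b);         /* all-ones where a >= b */
    return _mm_or_ps(_mm_and_ps(mask, a),     /* a in those lanes      */
                     _mm_andnot_ps(mask, b)); /* b everywhere else     */
}

int main(void)
{
    float out[4];
    _mm_storeu_ps(out, select_ge(_mm_set_ps(8.f, 1.f, 6.f, 2.f),
                                 _mm_set_ps(5.f, 4.f, 3.f, 7.f)));
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 7 6 4 8 */
    return 0;
}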
-// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_pd1 -#define _mm_set_pd1 _mm_set1_pd +// Compares for not less than. +// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/56b9z2wf(v=vs.100) +FORCE_INLINE __m128 _mm_cmpnlt_ss(__m128 a, __m128 b) +{ + return _mm_move_ss(a, _mm_cmpnlt_ps(a, b)); +} -// Stores four single-precision, floating-point values. -// https://msdn.microsoft.com/en-us/library/vstudio/s3h4ay6y(v=vs.100).aspx -FORCE_INLINE void _mm_store_ps(float *p, __m128 a) +// Compares the four 32-bit floats in a and b to check if any values are NaN. +// Ordered compare between each value returns true for "orderable" and false for +// "not orderable" (NaN). +// https://msdn.microsoft.com/en-us/library/vstudio/0h9w00fx(v=vs.100).aspx see +// also: +// http://stackoverflow.com/questions/8627331/what-does-ordered-unordered-comparison-mean +// http://stackoverflow.com/questions/29349621/neon-isnanval-intrinsics +FORCE_INLINE __m128 _mm_cmpord_ps(__m128 a, __m128 b) { - vst1q_f32(p, vreinterpretq_f32_m128(a)); + // Note: NEON does not have ordered compare builtin + // Need to compare a eq a and b eq b to check for NaN + // Do AND of results to get final + uint32x4_t ceqaa = + vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a)); + uint32x4_t ceqbb = + vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b)); + return vreinterpretq_m128_u32(vandq_u32(ceqaa, ceqbb)); } -// Store the lower single-precision (32-bit) floating-point element from a into -// 4 contiguous elements in memory. mem_addr must be aligned on a 16-byte -// boundary or a general-protection exception may be generated. -// -// MEM[mem_addr+31:mem_addr] := a[31:0] -// MEM[mem_addr+63:mem_addr+32] := a[31:0] -// MEM[mem_addr+95:mem_addr+64] := a[31:0] -// MEM[mem_addr+127:mem_addr+96] := a[31:0] -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_ps1 -FORCE_INLINE void _mm_store_ps1(float *p, __m128 a) +// Compares for ordered. +// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/343t62da(v=vs.100) +FORCE_INLINE __m128 _mm_cmpord_ss(__m128 a, __m128 b) { - float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0); - vst1q_f32(p, vdupq_n_f32(a0)); + return _mm_move_ss(a, _mm_cmpord_ps(a, b)); } -// Store the lower single-precision (32-bit) floating-point element from a into -// 4 contiguous elements in memory. mem_addr must be aligned on a 16-byte -// boundary or a general-protection exception may be generated. -// -// MEM[mem_addr+31:mem_addr] := a[31:0] -// MEM[mem_addr+63:mem_addr+32] := a[31:0] -// MEM[mem_addr+95:mem_addr+64] := a[31:0] -// MEM[mem_addr+127:mem_addr+96] := a[31:0] -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store1_ps -#define _mm_store1_ps _mm_store_ps1 +// Compares for unordered. +// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/khy6fk1t(v=vs.100) +FORCE_INLINE __m128 _mm_cmpunord_ps(__m128 a, __m128 b) +{ + uint32x4_t f32a = + vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a)); + uint32x4_t f32b = + vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b)); + return vreinterpretq_m128_u32(vmvnq_u32(vandq_u32(f32a, f32b))); +} -// Store 4 single-precision (32-bit) floating-point elements from a into memory -// in reverse order. mem_addr must be aligned on a 16-byte boundary or a -// general-protection exception may be generated. 
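_mm_cmpord_ps/_mm_cmpunord_ps above are the usual way to find NaN lanes: a lane is "unordered" exactly when at least one input is NaN. A sketch assuming the "sse2neon.h" include and the standard _mm_movemask_ps, which this header implements elsewhere:

#include <math.h>
#include <stdio.h>
#include "sse2neon.h" /* assumed header name */

int main(void)
{
    __m128 a = _mm_set_ps(1.0f, NAN, 3.0f, 4.0f); /* lane 2 is NaN */
    __m128 b = _mm_set_ps(1.0f, 2.0f, NAN, 4.0f); /* lane 1 is NaN */
    int unordered = _mm_movemask_ps(_mm_cmpunord_ps(a, b));
    printf("NaN lanes: 0x%x\n", unordered); /* 0x6 -> lanes 1 and 2 */
    return 0;
}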
-// -// MEM[mem_addr+31:mem_addr] := a[127:96] -// MEM[mem_addr+63:mem_addr+32] := a[95:64] -// MEM[mem_addr+95:mem_addr+64] := a[63:32] -// MEM[mem_addr+127:mem_addr+96] := a[31:0] -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storer_ps -FORCE_INLINE void _mm_storer_ps(float *p, __m128 a) +// Compares for unordered. +// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/2as2387b(v=vs.100) +FORCE_INLINE __m128 _mm_cmpunord_ss(__m128 a, __m128 b) { - float32x4_t tmp = vrev64q_f32(vreinterpretq_f32_m128(a)); - float32x4_t rev = vextq_f32(tmp, tmp, 2); - vst1q_f32(p, rev); + return _mm_move_ss(a, _mm_cmpunord_ps(a, b)); } -// Stores four single-precision, floating-point values. -// https://msdn.microsoft.com/en-us/library/44e30x22(v=vs.100).aspx -FORCE_INLINE void _mm_storeu_ps(float *p, __m128 a) +// Compares the lower single-precision floating point scalar values of a and b +// using an equality operation. : +// https://msdn.microsoft.com/en-us/library/93yx2h2b(v=vs.100).aspx +FORCE_INLINE int _mm_comieq_ss(__m128 a, __m128 b) { - vst1q_f32(p, vreinterpretq_f32_m128(a)); + uint32x4_t a_eq_b = + vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)); + return vgetq_lane_u32(a_eq_b, 0) & 0x1; } -// Stores four 32-bit integer values as (as a __m128i value) at the address p. -// https://msdn.microsoft.com/en-us/library/vstudio/edk11s13(v=vs.100).aspx -FORCE_INLINE void _mm_store_si128(__m128i *p, __m128i a) +// Compares the lower single-precision floating point scalar values of a and b +// using a greater than or equal operation. : +// https://msdn.microsoft.com/en-us/library/8t80des6(v=vs.100).aspx +FORCE_INLINE int _mm_comige_ss(__m128 a, __m128 b) { - vst1q_s32((int32_t *) p, vreinterpretq_s32_m128i(a)); + uint32x4_t a_ge_b = + vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)); + return vgetq_lane_u32(a_ge_b, 0) & 0x1; } -// Stores four 32-bit integer values as (as a __m128i value) at the address p. -// https://msdn.microsoft.com/en-us/library/vstudio/edk11s13(v=vs.100).aspx -FORCE_INLINE void _mm_storeu_si128(__m128i *p, __m128i a) +// Compares the lower single-precision floating point scalar values of a and b +// using a greater than operation. : +// https://msdn.microsoft.com/en-us/library/b0738e0t(v=vs.100).aspx +FORCE_INLINE int _mm_comigt_ss(__m128 a, __m128 b) { - vst1q_s32((int32_t *) p, vreinterpretq_s32_m128i(a)); + uint32x4_t a_gt_b = + vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)); + return vgetq_lane_u32(a_gt_b, 0) & 0x1; } -// Stores the lower single - precision, floating - point value. -// https://msdn.microsoft.com/en-us/library/tzz10fbx(v=vs.100).aspx -FORCE_INLINE void _mm_store_ss(float *p, __m128 a) +// Compares the lower single-precision floating point scalar values of a and b +// using a less than or equal operation. : +// https://msdn.microsoft.com/en-us/library/1w4t7c57(v=vs.90).aspx +FORCE_INLINE int _mm_comile_ss(__m128 a, __m128 b) { - vst1q_lane_f32(p, vreinterpretq_f32_m128(a), 0); + uint32x4_t a_le_b = + vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)); + return vgetq_lane_u32(a_le_b, 0) & 0x1; } -// Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point -// elements) from a into memory. mem_addr must be aligned on a 16-byte boundary -// or a general-protection exception may be generated. 
-// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_pd -FORCE_INLINE void _mm_store_pd(double *mem_addr, __m128d a) +// Compares the lower single-precision floating point scalar values of a and b +// using a less than operation. : +// https://msdn.microsoft.com/en-us/library/2kwe606b(v=vs.90).aspx Important +// note!! The documentation on MSDN is incorrect! If either of the values is a +// NAN the docs say you will get a one, but in fact, it will return a zero!! +FORCE_INLINE int _mm_comilt_ss(__m128 a, __m128 b) { -#if defined(__aarch64__) - vst1q_f64((float64_t *) mem_addr, vreinterpretq_f64_m128d(a)); -#else - vst1q_f32((float32_t *) mem_addr, vreinterpretq_f32_m128d(a)); -#endif + uint32x4_t a_lt_b = + vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)); + return vgetq_lane_u32(a_lt_b, 0) & 0x1; } -// Store the upper double-precision (64-bit) floating-point element from a into -// memory. +// Compares the lower single-precision floating point scalar values of a and b +// using an inequality operation. : +// https://msdn.microsoft.com/en-us/library/bafh5e0a(v=vs.90).aspx +FORCE_INLINE int _mm_comineq_ss(__m128 a, __m128 b) +{ + return !_mm_comieq_ss(a, b); +} + +// Convert packed signed 32-bit integers in b to packed single-precision +// (32-bit) floating-point elements, store the results in the lower 2 elements +// of dst, and copy the upper 2 packed elements from a to the upper elements of +// dst. // -// MEM[mem_addr+63:mem_addr] := a[127:64] +// dst[31:0] := Convert_Int32_To_FP32(b[31:0]) +// dst[63:32] := Convert_Int32_To_FP32(b[63:32]) +// dst[95:64] := a[95:64] +// dst[127:96] := a[127:96] // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeh_pd -FORCE_INLINE void _mm_storeh_pd(double *mem_addr, __m128d a) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_pi2ps +FORCE_INLINE __m128 _mm_cvt_pi2ps(__m128 a, __m64 b) { -#if defined(__aarch64__) - vst1_f64((float64_t *) mem_addr, vget_high_f64(vreinterpretq_f64_m128d(a))); -#else - vst1_f32((float32_t *) mem_addr, vget_high_f32(vreinterpretq_f32_m128d(a))); -#endif + return vreinterpretq_m128_f32( + vcombine_f32(vcvt_f32_s32(vreinterpret_s32_m64(b)), + vget_high_f32(vreinterpretq_f32_m128(a)))); } -// Store the lower double-precision (64-bit) floating-point element from a into -// memory. +// Convert packed single-precision (32-bit) floating-point elements in a to +// packed 32-bit integers, and store the results in dst. // -// MEM[mem_addr+63:mem_addr] := a[63:0] +// FOR j := 0 to 1 +// i := 32*j +// dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) +// ENDFOR // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storel_pd -FORCE_INLINE void _mm_storel_pd(double *mem_addr, __m128d a) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_ps2pi +FORCE_INLINE __m64 _mm_cvt_ps2pi(__m128 a) { #if defined(__aarch64__) - vst1_f64((float64_t *) mem_addr, vget_low_f64(vreinterpretq_f64_m128d(a))); + return vreinterpret_m64_s32( + vget_low_s32(vcvtnq_s32_f32(vrndiq_f32(vreinterpretq_f32_m128(a))))); #else - vst1_f32((float32_t *) mem_addr, vget_low_f32(vreinterpretq_f32_m128d(a))); + return vreinterpret_m64_s32(vcvt_s32_f32(vget_low_f32( + vreinterpretq_f32_m128(_mm_round_ps(a, _MM_FROUND_CUR_DIRECTION))))); #endif } -// Store 2 double-precision (64-bit) floating-point elements from a into memory -// in reverse order. 
mem_addr must be aligned on a 16-byte boundary or a -// general-protection exception may be generated. +// Convert the signed 32-bit integer b to a single-precision (32-bit) +// floating-point element, store the result in the lower element of dst, and +// copy the upper 3 packed elements from a to the upper elements of dst. // -// MEM[mem_addr+63:mem_addr] := a[127:64] -// MEM[mem_addr+127:mem_addr+64] := a[63:0] +// dst[31:0] := Convert_Int32_To_FP32(b[31:0]) +// dst[127:32] := a[127:32] // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storer_pd -FORCE_INLINE void _mm_storer_pd(double *mem_addr, __m128d a) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_si2ss +FORCE_INLINE __m128 _mm_cvt_si2ss(__m128 a, int b) { - float32x4_t f = vreinterpretq_f32_m128d(a); - _mm_store_pd(mem_addr, vreinterpretq_m128d_f32(vextq_f32(f, f, 2))); + return vreinterpretq_m128_f32( + vsetq_lane_f32((float) b, vreinterpretq_f32_m128(a), 0)); } -// Store the lower double-precision (64-bit) floating-point element from a into -// 2 contiguous elements in memory. mem_addr must be aligned on a 16-byte -// boundary or a general-protection exception may be generated. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_pd1 -FORCE_INLINE void _mm_store_pd1(double *mem_addr, __m128d a) +// Convert the lower single-precision (32-bit) floating-point element in a to a +// 32-bit integer, and store the result in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_ss2si +FORCE_INLINE int _mm_cvt_ss2si(__m128 a) { #if defined(__aarch64__) - float64x1_t a_low = vget_low_f64(vreinterpretq_f64_m128d(a)); - vst1q_f64((float64_t *) mem_addr, - vreinterpretq_f64_m128d(vcombine_f64(a_low, a_low))); + return vgetq_lane_s32(vcvtnq_s32_f32(vrndiq_f32(vreinterpretq_f32_m128(a))), + 0); #else - float32x2_t a_low = vget_low_f32(vreinterpretq_f32_m128d(a)); - vst1q_f32((float32_t *) mem_addr, - vreinterpretq_f32_m128d(vcombine_f32(a_low, a_low))); + float32_t data = vgetq_lane_f32( + vreinterpretq_f32_m128(_mm_round_ps(a, _MM_FROUND_CUR_DIRECTION)), 0); + return (int32_t) data; #endif } -// Store the lower double-precision (64-bit) floating-point element from a into -// 2 contiguous elements in memory. mem_addr must be aligned on a 16-byte -// boundary or a general-protection exception may be generated. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=9,526,5601&text=_mm_store1_pd -#define _mm_store1_pd _mm_store_pd1 - -// Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point -// elements) from a into memory. mem_addr does not need to be aligned on any -// particular boundary. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeu_pd -FORCE_INLINE void _mm_storeu_pd(double *mem_addr, __m128d a) +// Convert packed 16-bit integers in a to packed single-precision (32-bit) +// floating-point elements, and store the results in dst. +// +// FOR j := 0 to 3 +// i := j*16 +// m := j*32 +// dst[m+31:m] := Convert_Int16_To_FP32(a[i+15:i]) +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpi16_ps +FORCE_INLINE __m128 _mm_cvtpi16_ps(__m64 a) { - _mm_store_pd(mem_addr, a); + return vreinterpretq_m128_f32( + vcvtq_f32_s32(vmovl_s16(vreinterpret_s16_m64(a)))); } -// Reads the lower 64 bits of b and stores them into the lower 64 bits of a. 
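The note on _mm_comilt_ss earlier in this hunk is worth a concrete check: a NaN operand makes the comparison unordered, and the NEON implementation returns 0 rather than the 1 claimed by the MSDN page. A sketch with the assumed "sse2neon.h" include:

#include <math.h>
#include <stdio.h>
#include "sse2neon.h" /* assumed header name */

int main(void)
{
    printf("%d\n", _mm_comilt_ss(_mm_set_ss(1.0f), _mm_set_ss(2.0f))); /* 1 */
    /* Unordered comparison: a NaN operand yields 0, matching the note. */
    printf("%d\n", _mm_comilt_ss(_mm_set_ss(1.0f), _mm_set_ss(NAN)));  /* 0 */
    return 0;
}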
-// https://msdn.microsoft.com/en-us/library/hhwf428f%28v=vs.90%29.aspx -FORCE_INLINE void _mm_storel_epi64(__m128i *a, __m128i b) +// Convert packed 32-bit integers in b to packed single-precision (32-bit) +// floating-point elements, store the results in the lower 2 elements of dst, +// and copy the upper 2 packed elements from a to the upper elements of dst. +// +// dst[31:0] := Convert_Int32_To_FP32(b[31:0]) +// dst[63:32] := Convert_Int32_To_FP32(b[63:32]) +// dst[95:64] := a[95:64] +// dst[127:96] := a[127:96] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpi32_ps +FORCE_INLINE __m128 _mm_cvtpi32_ps(__m128 a, __m64 b) { - uint64x1_t hi = vget_high_u64(vreinterpretq_u64_m128i(*a)); - uint64x1_t lo = vget_low_u64(vreinterpretq_u64_m128i(b)); - *a = vreinterpretq_m128i_u64(vcombine_u64(lo, hi)); + return vreinterpretq_m128_f32( + vcombine_f32(vcvt_f32_s32(vreinterpret_s32_m64(b)), + vget_high_f32(vreinterpretq_f32_m128(a)))); } -// Stores the lower two single-precision floating point values of a to the -// address p. +// Convert packed signed 32-bit integers in a to packed single-precision +// (32-bit) floating-point elements, store the results in the lower 2 elements +// of dst, then covert the packed signed 32-bit integers in b to +// single-precision (32-bit) floating-point element, and store the results in +// the upper 2 elements of dst. // -// *p0 := a0 -// *p1 := a1 +// dst[31:0] := Convert_Int32_To_FP32(a[31:0]) +// dst[63:32] := Convert_Int32_To_FP32(a[63:32]) +// dst[95:64] := Convert_Int32_To_FP32(b[31:0]) +// dst[127:96] := Convert_Int32_To_FP32(b[63:32]) // -// https://msdn.microsoft.com/en-us/library/h54t98ks(v=vs.90).aspx -FORCE_INLINE void _mm_storel_pi(__m64 *p, __m128 a) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpi32x2_ps +FORCE_INLINE __m128 _mm_cvtpi32x2_ps(__m64 a, __m64 b) { - *p = vreinterpret_m64_f32(vget_low_f32(a)); + return vreinterpretq_m128_f32(vcvtq_f32_s32( + vcombine_s32(vreinterpret_s32_m64(a), vreinterpret_s32_m64(b)))); } -// Stores the upper two single-precision, floating-point values of a to the -// address p. +// Convert the lower packed 8-bit integers in a to packed single-precision +// (32-bit) floating-point elements, and store the results in dst. // -// *p0 := a2 -// *p1 := a3 +// FOR j := 0 to 3 +// i := j*8 +// m := j*32 +// dst[m+31:m] := Convert_Int8_To_FP32(a[i+7:i]) +// ENDFOR // -// https://msdn.microsoft.com/en-us/library/a7525fs8(v%3dvs.90).aspx -FORCE_INLINE void _mm_storeh_pi(__m64 *p, __m128 a) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpi8_ps +FORCE_INLINE __m128 _mm_cvtpi8_ps(__m64 a) { - *p = vreinterpret_m64_f32(vget_high_f32(a)); + return vreinterpretq_m128_f32(vcvtq_f32_s32( + vmovl_s16(vget_low_s16(vmovl_s8(vreinterpret_s8_m64(a)))))); } -// Loads a single single-precision, floating-point value, copying it into all -// four words -// https://msdn.microsoft.com/en-us/library/vstudio/5cdkf716(v=vs.100).aspx -FORCE_INLINE __m128 _mm_load1_ps(const float *p) +// Convert packed single-precision (32-bit) floating-point elements in a to +// packed 16-bit integers, and store the results in dst. Note: this intrinsic +// will generate 0x7FFF, rather than 0x8000, for input values between 0x7FFF and +// 0x7FFFFFFF. 
+// +// FOR j := 0 to 3 +// i := 16*j +// k := 32*j +// IF a[k+31:k] >= FP32(0x7FFF) && a[k+31:k] <= FP32(0x7FFFFFFF) +// dst[i+15:i] := 0x7FFF +// ELSE +// dst[i+15:i] := Convert_FP32_To_Int16(a[k+31:k]) +// FI +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtps_pi16 +FORCE_INLINE __m64 _mm_cvtps_pi16(__m128 a) { - return vreinterpretq_m128_f32(vld1q_dup_f32(p)); + const __m128 i16Min = _mm_set_ps1((float) INT16_MIN); + const __m128 i16Max = _mm_set_ps1((float) INT16_MAX); + const __m128 i32Max = _mm_set_ps1((float) INT32_MAX); + const __m128i maxMask = _mm_castps_si128( + _mm_and_ps(_mm_cmpge_ps(a, i16Max), _mm_cmple_ps(a, i32Max))); + const __m128i betweenMask = _mm_castps_si128( + _mm_and_ps(_mm_cmpgt_ps(a, i16Min), _mm_cmplt_ps(a, i16Max))); + const __m128i minMask = _mm_cmpeq_epi32(_mm_or_si128(maxMask, betweenMask), + _mm_setzero_si128()); + __m128i max = _mm_and_si128(maxMask, _mm_set1_epi32(INT16_MAX)); + __m128i min = _mm_and_si128(minMask, _mm_set1_epi32(INT16_MIN)); + __m128i cvt = _mm_and_si128(betweenMask, _mm_cvtps_epi32(a)); + __m128i res32 = _mm_or_si128(_mm_or_si128(max, min), cvt); + return vreinterpret_m64_s16(vmovn_s32(vreinterpretq_s32_m128i(res32))); } -// Load a single-precision (32-bit) floating-point element from memory into all -// elements of dst. +// Convert packed single-precision (32-bit) floating-point elements in a to +// packed 32-bit integers, and store the results in dst. // -// dst[31:0] := MEM[mem_addr+31:mem_addr] -// dst[63:32] := MEM[mem_addr+31:mem_addr] -// dst[95:64] := MEM[mem_addr+31:mem_addr] -// dst[127:96] := MEM[mem_addr+31:mem_addr] +// FOR j := 0 to 1 +// i := 32*j +// dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) +// ENDFOR // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_ps1 -#define _mm_load_ps1 _mm_load1_ps +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtps_pi32 +#define _mm_cvtps_pi32(a) _mm_cvt_ps2pi(a) -// Sets the lower two single-precision, floating-point values with 64 -// bits of data loaded from the address p; the upper two values are passed -// through from a. +// Convert packed single-precision (32-bit) floating-point elements in a to +// packed 8-bit integers, and store the results in lower 4 elements of dst. +// Note: this intrinsic will generate 0x7F, rather than 0x80, for input values +// between 0x7F and 0x7FFFFFFF. 
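The mask arithmetic in _mm_cvtps_pi16 (and in _mm_cvtps_pi8 below) reproduces the x86 clamping described in the comments: inputs at or above the signed maximum become 0x7FFF / 0x7F instead of wrapping. A sketch of the 16-bit case, assuming the "sse2neon.h" include; the memcpy simply reads the four 16-bit lanes back out:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "sse2neon.h" /* assumed header name */

int main(void)
{
    __m128 a = _mm_set_ps(70000.0f, 32767.0f, -1.5f, 1.5f);
    __m64 r = _mm_cvtps_pi16(a);
    int16_t out[4];
    memcpy(out, &r, sizeof out);
    /* 1.5 / -1.5 round to nearest even; 32767 and 70000 clamp to 0x7FFF. */
    printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 2 -2 32767 32767 */
    return 0;
}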
// -// Return Value -// r0 := *p0 -// r1 := *p1 -// r2 := a2 -// r3 := a3 +// FOR j := 0 to 3 +// i := 8*j +// k := 32*j +// IF a[k+31:k] >= FP32(0x7F) && a[k+31:k] <= FP32(0x7FFFFFFF) +// dst[i+7:i] := 0x7F +// ELSE +// dst[i+7:i] := Convert_FP32_To_Int8(a[k+31:k]) +// FI +// ENDFOR // -// https://msdn.microsoft.com/en-us/library/s57cyak2(v=vs.100).aspx -FORCE_INLINE __m128 _mm_loadl_pi(__m128 a, __m64 const *p) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtps_pi8 +FORCE_INLINE __m64 _mm_cvtps_pi8(__m128 a) +{ + const __m128 i8Min = _mm_set_ps1((float) INT8_MIN); + const __m128 i8Max = _mm_set_ps1((float) INT8_MAX); + const __m128 i32Max = _mm_set_ps1((float) INT32_MAX); + const __m128i maxMask = _mm_castps_si128( + _mm_and_ps(_mm_cmpge_ps(a, i8Max), _mm_cmple_ps(a, i32Max))); + const __m128i betweenMask = _mm_castps_si128( + _mm_and_ps(_mm_cmpgt_ps(a, i8Min), _mm_cmplt_ps(a, i8Max))); + const __m128i minMask = _mm_cmpeq_epi32(_mm_or_si128(maxMask, betweenMask), + _mm_setzero_si128()); + __m128i max = _mm_and_si128(maxMask, _mm_set1_epi32(INT8_MAX)); + __m128i min = _mm_and_si128(minMask, _mm_set1_epi32(INT8_MIN)); + __m128i cvt = _mm_and_si128(betweenMask, _mm_cvtps_epi32(a)); + __m128i res32 = _mm_or_si128(_mm_or_si128(max, min), cvt); + int16x4_t res16 = vmovn_s32(vreinterpretq_s32_m128i(res32)); + int8x8_t res8 = vmovn_s16(vcombine_s16(res16, res16)); + uint32_t bitMask[2] = {0xFFFFFFFF, 0}; + int8x8_t mask = vreinterpret_s8_u32(vld1_u32(bitMask)); + + return vreinterpret_m64_s8(vorr_s8(vand_s8(mask, res8), vdup_n_s8(0))); +} + +// Convert packed unsigned 16-bit integers in a to packed single-precision +// (32-bit) floating-point elements, and store the results in dst. +// +// FOR j := 0 to 3 +// i := j*16 +// m := j*32 +// dst[m+31:m] := Convert_UInt16_To_FP32(a[i+15:i]) +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpu16_ps +FORCE_INLINE __m128 _mm_cvtpu16_ps(__m64 a) { return vreinterpretq_m128_f32( - vcombine_f32(vld1_f32((const float32_t *) p), vget_high_f32(a))); + vcvtq_f32_u32(vmovl_u16(vreinterpret_u16_m64(a)))); } -// Load 4 single-precision (32-bit) floating-point elements from memory into dst -// in reverse order. mem_addr must be aligned on a 16-byte boundary or a -// general-protection exception may be generated. +// Convert the lower packed unsigned 8-bit integers in a to packed +// single-precision (32-bit) floating-point elements, and store the results in +// dst. // -// dst[31:0] := MEM[mem_addr+127:mem_addr+96] -// dst[63:32] := MEM[mem_addr+95:mem_addr+64] -// dst[95:64] := MEM[mem_addr+63:mem_addr+32] -// dst[127:96] := MEM[mem_addr+31:mem_addr] +// FOR j := 0 to 3 +// i := j*8 +// m := j*32 +// dst[m+31:m] := Convert_UInt8_To_FP32(a[i+7:i]) +// ENDFOR // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadr_ps -FORCE_INLINE __m128 _mm_loadr_ps(const float *p) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpu8_ps +FORCE_INLINE __m128 _mm_cvtpu8_ps(__m64 a) { - float32x4_t v = vrev64q_f32(vld1q_f32(p)); - return vreinterpretq_m128_f32(vextq_f32(v, v, 2)); + return vreinterpretq_m128_f32(vcvtq_f32_u32( + vmovl_u16(vget_low_u16(vmovl_u8(vreinterpret_u8_m64(a)))))); } -// Sets the upper two single-precision, floating-point values with 64 -// bits of data loaded from the address p; the lower two values are passed -// through from a. 
+// Convert the signed 32-bit integer b to a single-precision (32-bit) +// floating-point element, store the result in the lower element of dst, and +// copy the upper 3 packed elements from a to the upper elements of dst. // -// r0 := a0 -// r1 := a1 -// r2 := *p0 -// r3 := *p1 +// dst[31:0] := Convert_Int32_To_FP32(b[31:0]) +// dst[127:32] := a[127:32] // -// https://msdn.microsoft.com/en-us/library/w92wta0x(v%3dvs.100).aspx -FORCE_INLINE __m128 _mm_loadh_pi(__m128 a, __m64 const *p) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi32_ss +#define _mm_cvtsi32_ss(a, b) _mm_cvt_si2ss(a, b) + +// Convert the signed 64-bit integer b to a single-precision (32-bit) +// floating-point element, store the result in the lower element of dst, and +// copy the upper 3 packed elements from a to the upper elements of dst. +// +// dst[31:0] := Convert_Int64_To_FP32(b[63:0]) +// dst[127:32] := a[127:32] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi64_ss +FORCE_INLINE __m128 _mm_cvtsi64_ss(__m128 a, int64_t b) { return vreinterpretq_m128_f32( - vcombine_f32(vget_low_f32(a), vld1_f32((const float32_t *) p))); + vsetq_lane_f32((float) b, vreinterpretq_f32_m128(a), 0)); } -// Loads four single-precision, floating-point values. -// https://msdn.microsoft.com/en-us/library/vstudio/zzd50xxt(v=vs.100).aspx -FORCE_INLINE __m128 _mm_load_ps(const float *p) +// Copy the lower single-precision (32-bit) floating-point element of a to dst. +// +// dst[31:0] := a[31:0] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_f32 +FORCE_INLINE float _mm_cvtss_f32(__m128 a) { - return vreinterpretq_m128_f32(vld1q_f32(p)); + return vgetq_lane_f32(vreinterpretq_f32_m128(a), 0); } -// Loads four single-precision, floating-point values. -// https://msdn.microsoft.com/en-us/library/x1b16s7z%28v=vs.90%29.aspx -FORCE_INLINE __m128 _mm_loadu_ps(const float *p) +// Convert the lower single-precision (32-bit) floating-point element in a to a +// 32-bit integer, and store the result in dst. +// +// dst[31:0] := Convert_FP32_To_Int32(a[31:0]) +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_si32 +#define _mm_cvtss_si32(a) _mm_cvt_ss2si(a) + +// Convert the lower single-precision (32-bit) floating-point element in a to a +// 64-bit integer, and store the result in dst. +// +// dst[63:0] := Convert_FP32_To_Int64(a[31:0]) +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_si64 +FORCE_INLINE int64_t _mm_cvtss_si64(__m128 a) { - // for neon, alignment doesn't matter, so _mm_load_ps and _mm_loadu_ps are - // equivalent for neon - return vreinterpretq_m128_f32(vld1q_f32(p)); +#if defined(__aarch64__) + return (int64_t) vgetq_lane_f32(vrndiq_f32(vreinterpretq_f32_m128(a)), 0); +#else + float32_t data = vgetq_lane_f32( + vreinterpretq_f32_m128(_mm_round_ps(a, _MM_FROUND_CUR_DIRECTION)), 0); + return (int64_t) data; +#endif } -// Load unaligned 16-bit integer from memory into the first element of dst. +// Convert packed single-precision (32-bit) floating-point elements in a to +// packed 32-bit integers with truncation, and store the results in dst. 
// -// dst[15:0] := MEM[mem_addr+15:mem_addr] -// dst[MAX:16] := 0 +// FOR j := 0 to 1 +// i := 32*j +// dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) +// ENDFOR // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_si16 -FORCE_INLINE __m128i _mm_loadu_si16(const void *p) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_ps2pi +FORCE_INLINE __m64 _mm_cvtt_ps2pi(__m128 a) { - return vreinterpretq_m128i_s16( - vsetq_lane_s16(*(const int16_t *) p, vdupq_n_s16(0), 0)); + return vreinterpret_m64_s32( + vget_low_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)))); } -// Load unaligned 64-bit integer from memory into the first element of dst. +// Convert the lower single-precision (32-bit) floating-point element in a to a +// 32-bit integer with truncation, and store the result in dst. // -// dst[63:0] := MEM[mem_addr+63:mem_addr] -// dst[MAX:64] := 0 +// dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0]) // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_si64 -FORCE_INLINE __m128i _mm_loadu_si64(const void *p) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtt_ss2si +FORCE_INLINE int _mm_cvtt_ss2si(__m128 a) { - return vreinterpretq_m128i_s64( - vcombine_s64(vld1_s64((const int64_t *) p), vdup_n_s64(0))); + return vgetq_lane_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)), 0); } -// Load a double-precision (64-bit) floating-point element from memory into the -// lower of dst, and zero the upper element. mem_addr does not need to be -// aligned on any particular boundary. +// Convert packed single-precision (32-bit) floating-point elements in a to +// packed 32-bit integers with truncation, and store the results in dst. // -// dst[63:0] := MEM[mem_addr+63:mem_addr] -// dst[127:64] := 0 +// FOR j := 0 to 1 +// i := 32*j +// dst[i+31:i] := Convert_FP32_To_Int32_Truncate(a[i+31:i]) +// ENDFOR // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_sd -FORCE_INLINE __m128d _mm_load_sd(const double *p) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttps_pi32 +#define _mm_cvttps_pi32(a) _mm_cvtt_ps2pi(a) + +// Convert the lower single-precision (32-bit) floating-point element in a to a +// 32-bit integer with truncation, and store the result in dst. +// +// dst[31:0] := Convert_FP32_To_Int32_Truncate(a[31:0]) +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttss_si32 +#define _mm_cvttss_si32(a) _mm_cvtt_ss2si(a) + +// Convert the lower single-precision (32-bit) floating-point element in a to a +// 64-bit integer with truncation, and store the result in dst. +// +// dst[63:0] := Convert_FP32_To_Int64_Truncate(a[31:0]) +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttss_si64 +FORCE_INLINE int64_t _mm_cvttss_si64(__m128 a) { -#if defined(__aarch64__) - return vreinterpretq_m128d_f64(vsetq_lane_f64(*p, vdupq_n_f64(0), 0)); -#else - const float *fp = (const float *) p; - float ALIGN_STRUCT(16) data[4] = {fp[0], fp[1], 0, 0}; - return vreinterpretq_m128d_f32(vld1q_f32(data)); -#endif + return (int64_t) vgetq_lane_f32(vreinterpretq_f32_m128(a), 0); } -// Loads two double-precision from 16-byte aligned memory, floating-point -// values. +// Divides the four single-precision, floating-point values of a and b. 
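The cvt and cvtt conversions above differ only in rounding: _mm_cvt_ss2si honours the current rounding mode (nearest-even by default), while _mm_cvtt_ss2si always truncates toward zero. For example (assumed "sse2neon.h" include):

#include <stdio.h>
#include "sse2neon.h" /* assumed header name */

int main(void)
{
    printf("%d\n", _mm_cvt_ss2si(_mm_set_ss(2.5f)));   /* 2, ties to even */
    printf("%d\n", _mm_cvt_ss2si(_mm_set_ss(2.7f)));   /* 3, rounded      */
    printf("%d\n", _mm_cvtt_ss2si(_mm_set_ss(2.7f)));  /* 2, truncated    */
    printf("%d\n", _mm_cvtt_ss2si(_mm_set_ss(-2.7f))); /* -2, toward zero */
    return 0;
}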
// -// dst[127:0] := MEM[mem_addr+127:mem_addr] +// r0 := a0 / b0 +// r1 := a1 / b1 +// r2 := a2 / b2 +// r3 := a3 / b3 // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_pd -FORCE_INLINE __m128d _mm_load_pd(const double *p) +// https://msdn.microsoft.com/en-us/library/edaw8147(v=vs.100).aspx +FORCE_INLINE __m128 _mm_div_ps(__m128 a, __m128 b) { -#if defined(__aarch64__) - return vreinterpretq_m128d_f64(vld1q_f64(p)); +#if defined(__aarch64__) && !SSE2NEON_PRECISE_DIV + return vreinterpretq_m128_f32( + vdivq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); #else - const float *fp = (const float *) p; - float ALIGN_STRUCT(16) data[4] = {fp[0], fp[1], fp[2], fp[3]}; - return vreinterpretq_m128d_f32(vld1q_f32(data)); + float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(b)); + recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(b))); +#if SSE2NEON_PRECISE_DIV + // Additional Netwon-Raphson iteration for accuracy + recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(b))); +#endif + return vreinterpretq_m128_f32(vmulq_f32(vreinterpretq_f32_m128(a), recip)); #endif } -// Loads two double-precision from unaligned memory, floating-point values. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_pd -FORCE_INLINE __m128d _mm_loadu_pd(const double *p) +// Divides the scalar single-precision floating point value of a by b. +// https://msdn.microsoft.com/en-us/library/4y73xa49(v=vs.100).aspx +FORCE_INLINE __m128 _mm_div_ss(__m128 a, __m128 b) { - return _mm_load_pd(p); + float32_t value = + vgetq_lane_f32(vreinterpretq_f32_m128(_mm_div_ps(a, b)), 0); + return vreinterpretq_m128_f32( + vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0)); } -// Loads an single - precision, floating - point value into the low word and -// clears the upper three words. -// https://msdn.microsoft.com/en-us/library/548bb9h4%28v=vs.90%29.aspx -FORCE_INLINE __m128 _mm_load_ss(const float *p) -{ - return vreinterpretq_m128_f32(vsetq_lane_f32(*p, vdupq_n_f32(0), 0)); -} +// Extract a 16-bit integer from a, selected with imm8, and store the result in +// the lower element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_extract_pi16 +#define _mm_extract_pi16(a, imm) \ + (int32_t) vget_lane_u16(vreinterpret_u16_m64(a), (imm)) -// Load 64-bit integer from memory into the first element of dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadl_epi64 -FORCE_INLINE __m128i _mm_loadl_epi64(__m128i const *p) +// Free aligned memory that was allocated with _mm_malloc. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_free +FORCE_INLINE void _mm_free(void *addr) { - /* Load the lower 64 bits of the value pointed to by p into the - * lower 64 bits of the result, zeroing the upper 64 bits of the result. - */ - return vreinterpretq_m128i_s32( - vcombine_s32(vld1_s32((int32_t const *) p), vcreate_s32(0))); + free(addr); } -// Load a double-precision (64-bit) floating-point element from memory into the -// lower element of dst, and copy the upper element from a to dst. mem_addr does -// not need to be aligned on any particular boundary. -// -// dst[63:0] := MEM[mem_addr+63:mem_addr] -// dst[127:64] := a[127:64] -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadl_pd -FORCE_INLINE __m128d _mm_loadl_pd(__m128d a, const double *p) +// Macro: Get the flush zero bits from the MXCSR control and status register. 
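On targets without a hardware vector divide (the non-AArch64 branch of _mm_div_ps above), the quotient comes from a reciprocal estimate refined by Newton-Raphson steps, and the SSE2NEON_PRECISE_DIV guard adds one more refinement. A build-level sketch; it assumes the header defaults that macro to 0 and that pre-defining it before the (assumed) "sse2neon.h" include is the intended opt-in:

#define SSE2NEON_PRECISE_DIV 1 /* request the extra Newton-Raphson step */
#include "sse2neon.h"          /* assumed header name */
#include <stdio.h>

int main(void)
{
    float out[4];
    _mm_storeu_ps(out, _mm_div_ps(_mm_set_ps1(1.0f), _mm_set_ps1(3.0f)));
    printf("%.9f\n", out[0]); /* ~0.33333334, the float nearest to 1/3 */
    return 0;
}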
+// The flush zero may contain any of the following flags: _MM_FLUSH_ZERO_ON or +// _MM_FLUSH_ZERO_OFF +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_MM_GET_FLUSH_ZERO_MODE +FORCE_INLINE unsigned int _sse2neon_mm_get_flush_zero_mode() { + union { + fpcr_bitfield field; #if defined(__aarch64__) - return vreinterpretq_m128d_f64( - vcombine_f64(vld1_f64(p), vget_high_f64(vreinterpretq_f64_m128d(a)))); + uint64_t value; #else - return vreinterpretq_m128d_f32( - vcombine_f32(vld1_f32((const float *) p), - vget_high_f32(vreinterpretq_f32_m128d(a)))); + uint32_t value; #endif -} + } r; -// Load 2 double-precision (64-bit) floating-point elements from memory into dst -// in reverse order. mem_addr must be aligned on a 16-byte boundary or a -// general-protection exception may be generated. -// -// dst[63:0] := MEM[mem_addr+127:mem_addr+64] -// dst[127:64] := MEM[mem_addr+63:mem_addr] -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadr_pd -FORCE_INLINE __m128d _mm_loadr_pd(const double *p) -{ #if defined(__aarch64__) - float64x2_t v = vld1q_f64(p); - return vreinterpretq_m128d_f64(vextq_f64(v, v, 1)); + asm volatile("mrs %0, FPCR" : "=r"(r.value)); /* read */ #else - int64x2_t v = vld1q_s64((const int64_t *) p); - return vreinterpretq_m128d_s64(vextq_s64(v, v, 1)); + asm volatile("vmrs %0, FPSCR" : "=r"(r.value)); /* read */ #endif + + return r.field.bit24 ? _MM_FLUSH_ZERO_ON : _MM_FLUSH_ZERO_OFF; } -// Sets the low word to the single-precision, floating-point value of b -// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/35hdzazd(v=vs.100) -FORCE_INLINE __m128 _mm_move_ss(__m128 a, __m128 b) +// Macro: Get the rounding mode bits from the MXCSR control and status register. +// The rounding mode may contain any of the following flags: _MM_ROUND_NEAREST, +// _MM_ROUND_DOWN, _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_MM_GET_ROUNDING_MODE +FORCE_INLINE unsigned int _MM_GET_ROUNDING_MODE() { - return vreinterpretq_m128_f32( - vsetq_lane_f32(vgetq_lane_f32(vreinterpretq_f32_m128(b), 0), - vreinterpretq_f32_m128(a), 0)); + union { + fpcr_bitfield field; +#if defined(__aarch64__) + uint64_t value; +#else + uint32_t value; +#endif + } r; + +#if defined(__aarch64__) + asm volatile("mrs %0, FPCR" : "=r"(r.value)); /* read */ +#else + asm volatile("vmrs %0, FPSCR" : "=r"(r.value)); /* read */ +#endif + + if (r.field.bit22) { + return r.field.bit23 ? _MM_ROUND_TOWARD_ZERO : _MM_ROUND_UP; + } else { + return r.field.bit23 ? _MM_ROUND_DOWN : _MM_ROUND_NEAREST; + } } -// Move the lower double-precision (64-bit) floating-point element from b to the -// lower element of dst, and copy the upper element from a to the upper element -// of dst. -// -// dst[63:0] := b[63:0] -// dst[127:64] := a[127:64] -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_move_sd -FORCE_INLINE __m128d _mm_move_sd(__m128d a, __m128d b) +// Copy a to dst, and insert the 16-bit integer i into dst at the location +// specified by imm8. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_insert_pi16 +#define _mm_insert_pi16(a, b, imm) \ + __extension__({ \ + vreinterpret_m64_s16( \ + vset_lane_s16((b), vreinterpret_s16_m64(a), (imm))); \ + }) + +// Loads four single-precision, floating-point values. 
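Both accessors above read FPCR (FPSCR on 32-bit ARM) via inline assembly and translate the relevant bits back into the SSE-style constants. A read-only usage sketch with the assumed "sse2neon.h" include; nothing is modified here:

#include <stdio.h>
#include "sse2neon.h" /* assumed header name */

int main(void)
{
    /* With default FPCR settings these report round-to-nearest and FTZ off. */
    printf("round-to-nearest: %s\n",
           _MM_GET_ROUNDING_MODE() == _MM_ROUND_NEAREST ? "yes" : "no");
    printf("flush-to-zero on: %s\n",
           _sse2neon_mm_get_flush_zero_mode() == _MM_FLUSH_ZERO_ON ? "yes"
                                                                   : "no");
    return 0;
}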
+// https://msdn.microsoft.com/en-us/library/vstudio/zzd50xxt(v=vs.100).aspx +FORCE_INLINE __m128 _mm_load_ps(const float *p) { - return vreinterpretq_m128d_f32( - vcombine_f32(vget_low_f32(vreinterpretq_f32_m128d(b)), - vget_high_f32(vreinterpretq_f32_m128d(a)))); + return vreinterpretq_m128_f32(vld1q_f32(p)); } -// Copy the lower 64-bit integer in a to the lower element of dst, and zero the -// upper element. +// Load a single-precision (32-bit) floating-point element from memory into all +// elements of dst. // -// dst[63:0] := a[63:0] -// dst[127:64] := 0 +// dst[31:0] := MEM[mem_addr+31:mem_addr] +// dst[63:32] := MEM[mem_addr+31:mem_addr] +// dst[95:64] := MEM[mem_addr+31:mem_addr] +// dst[127:96] := MEM[mem_addr+31:mem_addr] // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_move_epi64 -FORCE_INLINE __m128i _mm_move_epi64(__m128i a) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_ps1 +#define _mm_load_ps1 _mm_load1_ps + +// Loads an single - precision, floating - point value into the low word and +// clears the upper three words. +// https://msdn.microsoft.com/en-us/library/548bb9h4%28v=vs.90%29.aspx +FORCE_INLINE __m128 _mm_load_ss(const float *p) { - return vreinterpretq_m128i_s64( - vsetq_lane_s64(0, vreinterpretq_s64_m128i(a), 1)); + return vreinterpretq_m128_f32(vsetq_lane_f32(*p, vdupq_n_f32(0), 0)); } -// Return vector of type __m128 with undefined elements. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_undefined_ps -FORCE_INLINE __m128 _mm_undefined_ps(void) +// Loads a single single-precision, floating-point value, copying it into all +// four words +// https://msdn.microsoft.com/en-us/library/vstudio/5cdkf716(v=vs.100).aspx +FORCE_INLINE __m128 _mm_load1_ps(const float *p) { -#if defined(__GNUC__) || defined(__clang__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wuninitialized" -#endif - __m128 a; - return a; -#if defined(__GNUC__) || defined(__clang__) -#pragma GCC diagnostic pop -#endif + return vreinterpretq_m128_f32(vld1q_dup_f32(p)); } -/* Logic/Binary operations */ - -// Computes the bitwise AND-NOT of the four single-precision, floating-point -// values of a and b. +// Sets the upper two single-precision, floating-point values with 64 +// bits of data loaded from the address p; the lower two values are passed +// through from a. // -// r0 := ~a0 & b0 -// r1 := ~a1 & b1 -// r2 := ~a2 & b2 -// r3 := ~a3 & b3 +// r0 := a0 +// r1 := a1 +// r2 := *p0 +// r3 := *p1 // -// https://msdn.microsoft.com/en-us/library/vstudio/68h7wd02(v=vs.100).aspx -FORCE_INLINE __m128 _mm_andnot_ps(__m128 a, __m128 b) +// https://msdn.microsoft.com/en-us/library/w92wta0x(v%3dvs.100).aspx +FORCE_INLINE __m128 _mm_loadh_pi(__m128 a, __m64 const *p) { - return vreinterpretq_m128_s32( - vbicq_s32(vreinterpretq_s32_m128(b), - vreinterpretq_s32_m128(a))); // *NOTE* argument swap + return vreinterpretq_m128_f32( + vcombine_f32(vget_low_f32(a), vld1_f32((const float32_t *) p))); } -// Compute the bitwise NOT of packed double-precision (64-bit) floating-point -// elements in a and then AND with b, and store the results in dst. +// Sets the lower two single-precision, floating-point values with 64 +// bits of data loaded from the address p; the upper two values are passed +// through from a. 
// -// FOR j := 0 to 1 -// i := j*64 -// dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) -// ENDFOR +// Return Value +// r0 := *p0 +// r1 := *p1 +// r2 := a2 +// r3 := a3 // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_andnot_pd -FORCE_INLINE __m128d _mm_andnot_pd(__m128d a, __m128d b) +// https://msdn.microsoft.com/en-us/library/s57cyak2(v=vs.100).aspx +FORCE_INLINE __m128 _mm_loadl_pi(__m128 a, __m64 const *p) { - // *NOTE* argument swap - return vreinterpretq_m128d_s64( - vbicq_s64(vreinterpretq_s64_m128d(b), vreinterpretq_s64_m128d(a))); + return vreinterpretq_m128_f32( + vcombine_f32(vld1_f32((const float32_t *) p), vget_high_f32(a))); } -// Computes the bitwise AND of the 128-bit value in b and the bitwise NOT of the -// 128-bit value in a. +// Load 4 single-precision (32-bit) floating-point elements from memory into dst +// in reverse order. mem_addr must be aligned on a 16-byte boundary or a +// general-protection exception may be generated. // -// r := (~a) & b +// dst[31:0] := MEM[mem_addr+127:mem_addr+96] +// dst[63:32] := MEM[mem_addr+95:mem_addr+64] +// dst[95:64] := MEM[mem_addr+63:mem_addr+32] +// dst[127:96] := MEM[mem_addr+31:mem_addr] // -// https://msdn.microsoft.com/en-us/library/vstudio/1beaceh8(v=vs.100).aspx -FORCE_INLINE __m128i _mm_andnot_si128(__m128i a, __m128i b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadr_ps +FORCE_INLINE __m128 _mm_loadr_ps(const float *p) { - return vreinterpretq_m128i_s32( - vbicq_s32(vreinterpretq_s32_m128i(b), - vreinterpretq_s32_m128i(a))); // *NOTE* argument swap + float32x4_t v = vrev64q_f32(vld1q_f32(p)); + return vreinterpretq_m128_f32(vextq_f32(v, v, 2)); } -// Computes the bitwise AND of the 128-bit value in a and the 128-bit value in -// b. -// -// r := a & b -// -// https://msdn.microsoft.com/en-us/library/vstudio/6d1txsa8(v=vs.100).aspx -FORCE_INLINE __m128i _mm_and_si128(__m128i a, __m128i b) +// Loads four single-precision, floating-point values. +// https://msdn.microsoft.com/en-us/library/x1b16s7z%28v=vs.90%29.aspx +FORCE_INLINE __m128 _mm_loadu_ps(const float *p) { - return vreinterpretq_m128i_s32( - vandq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); + // for neon, alignment doesn't matter, so _mm_load_ps and _mm_loadu_ps are + // equivalent for neon + return vreinterpretq_m128_f32(vld1q_f32(p)); } -// Computes the bitwise AND of the four single-precision, floating-point values -// of a and b. +// Load unaligned 16-bit integer from memory into the first element of dst. // -// r0 := a0 & b0 -// r1 := a1 & b1 -// r2 := a2 & b2 -// r3 := a3 & b3 +// dst[15:0] := MEM[mem_addr+15:mem_addr] +// dst[MAX:16] := 0 // -// https://msdn.microsoft.com/en-us/library/vstudio/73ck1xc5(v=vs.100).aspx -FORCE_INLINE __m128 _mm_and_ps(__m128 a, __m128 b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_si16 +FORCE_INLINE __m128i _mm_loadu_si16(const void *p) { - return vreinterpretq_m128_s32( - vandq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b))); + return vreinterpretq_m128i_s16( + vsetq_lane_s16(*(const int16_t *) p, vdupq_n_s16(0), 0)); } -// Compute the bitwise AND of packed double-precision (64-bit) floating-point -// elements in a and b, and store the results in dst. +// Load unaligned 64-bit integer from memory into the first element of dst. 
// -// FOR j := 0 to 1 -// i := j*64 -// dst[i+63:i] := a[i+63:i] AND b[i+63:i] -// ENDFOR +// dst[63:0] := MEM[mem_addr+63:mem_addr] +// dst[MAX:64] := 0 // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_and_pd -FORCE_INLINE __m128d _mm_and_pd(__m128d a, __m128d b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_si64 +FORCE_INLINE __m128i _mm_loadu_si64(const void *p) { - return vreinterpretq_m128d_s64( - vandq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b))); + return vreinterpretq_m128i_s64( + vcombine_s64(vld1_s64((const int64_t *) p), vdup_n_s64(0))); } -// Computes the bitwise OR of the four single-precision, floating-point values -// of a and b. -// https://msdn.microsoft.com/en-us/library/vstudio/7ctdsyy0(v=vs.100).aspx -FORCE_INLINE __m128 _mm_or_ps(__m128 a, __m128 b) +// Allocate aligned blocks of memory. +// https://software.intel.com/en-us/ +// cpp-compiler-developer-guide-and-reference-allocating-and-freeing-aligned-memory-blocks +FORCE_INLINE void *_mm_malloc(size_t size, size_t align) { - return vreinterpretq_m128_s32( - vorrq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b))); + void *ptr; + if (align == 1) + return malloc(size); + if (align == 2 || (sizeof(void *) == 8 && align == 4)) + align = sizeof(void *); + if (!posix_memalign(&ptr, align, size)) + return ptr; + return NULL; } -// Computes bitwise EXOR (exclusive-or) of the four single-precision, -// floating-point values of a and b. -// https://msdn.microsoft.com/en-us/library/ss6k3wk8(v=vs.100).aspx -FORCE_INLINE __m128 _mm_xor_ps(__m128 a, __m128 b) +// Conditionally store 8-bit integer elements from a into memory using mask +// (elements are not stored when the highest bit is not set in the corresponding +// element) and a non-temporal memory hint. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskmove_si64 +FORCE_INLINE void _mm_maskmove_si64(__m64 a, __m64 mask, char *mem_addr) { - return vreinterpretq_m128_s32( - veorq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b))); + int8x8_t shr_mask = vshr_n_s8(vreinterpret_s8_m64(mask), 7); + __m128 b = _mm_load_ps((const float *) mem_addr); + int8x8_t masked = + vbsl_s8(vreinterpret_u8_s8(shr_mask), vreinterpret_s8_m64(a), + vreinterpret_s8_u64(vget_low_u64(vreinterpretq_u64_m128(b)))); + vst1_s8((int8_t *) mem_addr, masked); } -// Compute the bitwise XOR of packed double-precision (64-bit) floating-point -// elements in a and b, and store the results in dst. +// Conditionally store 8-bit integer elements from a into memory using mask +// (elements are not stored when the highest bit is not set in the corresponding +// element) and a non-temporal memory hint. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_maskmovq +#define _m_maskmovq(a, mask, mem_addr) _mm_maskmove_si64(a, mask, mem_addr) + +// Compare packed signed 16-bit integers in a and b, and store packed maximum +// values in dst. 
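// Example (illustrative sketch, not part of the patch): a 16-byte-aligned
// scratch buffer from the _mm_malloc above. Because this implementation is
// backed by posix_memalign/malloc, the buffer can be released with free();
// stock SSE code would pair it with _mm_free.
static inline float *alloc_vec4_buffer(size_t n_vectors)
{
    return (float *) _mm_malloc(n_vectors * 4 * sizeof(float), 16);
}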
// -// FOR j := 0 to 1 -// i := j*64 -// dst[i+63:i] := a[i+63:i] XOR b[i+63:i] +// FOR j := 0 to 3 +// i := j*16 +// dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) // ENDFOR // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_xor_pd -FORCE_INLINE __m128d _mm_xor_pd(__m128d a, __m128d b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_pi16 +FORCE_INLINE __m64 _mm_max_pi16(__m64 a, __m64 b) { - return vreinterpretq_m128d_s64( - veorq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b))); + return vreinterpret_m64_s16( + vmax_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b))); } -// Compute the bitwise OR of packed double-precision (64-bit) floating-point -// elements in a and b, and store the results in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_or_pd -FORCE_INLINE __m128d _mm_or_pd(__m128d a, __m128d b) +// Computes the maximums of the four single-precision, floating-point values of +// a and b. +// https://msdn.microsoft.com/en-us/library/vstudio/ff5d607a(v=vs.100).aspx +FORCE_INLINE __m128 _mm_max_ps(__m128 a, __m128 b) { - return vreinterpretq_m128d_s64( - vorrq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b))); +#if SSE2NEON_PRECISE_MINMAX + float32x4_t _a = vreinterpretq_f32_m128(a); + float32x4_t _b = vreinterpretq_f32_m128(b); + return vbslq_f32(vcltq_f32(_b, _a), _a, _b); +#else + return vreinterpretq_m128_f32( + vmaxq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); +#endif } -// Computes the bitwise OR of the 128-bit value in a and the 128-bit value in b. +// Compare packed unsigned 8-bit integers in a and b, and store packed maximum +// values in dst. // -// r := a | b +// FOR j := 0 to 7 +// i := j*8 +// dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) +// ENDFOR // -// https://msdn.microsoft.com/en-us/library/vstudio/ew8ty0db(v=vs.100).aspx -FORCE_INLINE __m128i _mm_or_si128(__m128i a, __m128i b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_pu8 +FORCE_INLINE __m64 _mm_max_pu8(__m64 a, __m64 b) { - return vreinterpretq_m128i_s32( - vorrq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); + return vreinterpret_m64_u8( + vmax_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b))); } -// Computes the bitwise XOR of the 128-bit value in a and the 128-bit value in -// b. https://msdn.microsoft.com/en-us/library/fzt08www(v=vs.100).aspx -FORCE_INLINE __m128i _mm_xor_si128(__m128i a, __m128i b) +// Computes the maximum of the two lower scalar single-precision floating point +// values of a and b. +// https://msdn.microsoft.com/en-us/library/s6db5esz(v=vs.100).aspx +FORCE_INLINE __m128 _mm_max_ss(__m128 a, __m128 b) { - return vreinterpretq_m128i_s32( - veorq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); + float32_t value = vgetq_lane_f32(_mm_max_ps(a, b), 0); + return vreinterpretq_m128_f32( + vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0)); } -// Duplicate the low double-precision (64-bit) floating-point element from a, -// and store the results in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movedup_pd -FORCE_INLINE __m128d _mm_movedup_pd(__m128d a) +// Compare packed signed 16-bit integers in a and b, and store packed minimum +// values in dst. 
+// +// FOR j := 0 to 3 +// i := j*16 +// dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_pi16 +FORCE_INLINE __m64 _mm_min_pi16(__m64 a, __m64 b) { -#if (__aarch64__) - return vreinterpretq_m128d_f64( - vdupq_laneq_f64(vreinterpretq_f64_m128d(a), 0)); -#else - return vreinterpretq_m128d_u64( - vdupq_n_u64(vgetq_lane_u64(vreinterpretq_u64_m128d(a), 0))); -#endif + return vreinterpret_m64_s16( + vmin_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b))); } -// Duplicate odd-indexed single-precision (32-bit) floating-point elements -// from a, and store the results in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movehdup_ps -FORCE_INLINE __m128 _mm_movehdup_ps(__m128 a) +// Computes the minima of the four single-precision, floating-point values of a +// and b. +// https://msdn.microsoft.com/en-us/library/vstudio/wh13kadz(v=vs.100).aspx +FORCE_INLINE __m128 _mm_min_ps(__m128 a, __m128 b) { -#if __has_builtin(__builtin_shufflevector) - return vreinterpretq_m128_f32(__builtin_shufflevector( - vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 1, 1, 3, 3)); +#if SSE2NEON_PRECISE_MINMAX + float32x4_t _a = vreinterpretq_f32_m128(a); + float32x4_t _b = vreinterpretq_f32_m128(b); + return vbslq_f32(vcltq_f32(_a, _b), _a, _b); #else - float32_t a1 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 1); - float32_t a3 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 3); - float ALIGN_STRUCT(16) data[4] = {a1, a1, a3, a3}; - return vreinterpretq_m128_f32(vld1q_f32(data)); + return vreinterpretq_m128_f32( + vminq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); #endif } -// Duplicate even-indexed single-precision (32-bit) floating-point elements -// from a, and store the results in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_moveldup_ps -FORCE_INLINE __m128 _mm_moveldup_ps(__m128 a) +// Compare packed unsigned 8-bit integers in a and b, and store packed minimum +// values in dst. +// +// FOR j := 0 to 7 +// i := j*8 +// dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_pu8 +FORCE_INLINE __m64 _mm_min_pu8(__m64 a, __m64 b) { -#if __has_builtin(__builtin_shufflevector) - return vreinterpretq_m128_f32(__builtin_shufflevector( - vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 0, 0, 2, 2)); -#else - float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0); - float32_t a2 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 2); - float ALIGN_STRUCT(16) data[4] = {a0, a0, a2, a2}; - return vreinterpretq_m128_f32(vld1q_f32(data)); -#endif + return vreinterpret_m64_u8( + vmin_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b))); +} + +// Computes the minimum of the two lower scalar single-precision floating point +// values of a and b. 
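// Example (illustrative sketch, not part of the patch): clamping each lane of
// v to [lo, hi] with the packed min/max above; `clamp_ps` is a hypothetical
// helper name.
static inline __m128 clamp_ps(__m128 v, __m128 lo, __m128 hi)
{
    return _mm_min_ps(_mm_max_ps(v, lo), hi);
}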
+// https://msdn.microsoft.com/en-us/library/0a9y7xaa(v=vs.100).aspx +FORCE_INLINE __m128 _mm_min_ss(__m128 a, __m128 b) +{ + float32_t value = vgetq_lane_f32(_mm_min_ps(a, b), 0); + return vreinterpretq_m128_f32( + vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0)); +} + +// Sets the low word to the single-precision, floating-point value of b +// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/35hdzazd(v=vs.100) +FORCE_INLINE __m128 _mm_move_ss(__m128 a, __m128 b) +{ + return vreinterpretq_m128_f32( + vsetq_lane_f32(vgetq_lane_f32(vreinterpretq_f32_m128(b), 0), + vreinterpretq_f32_m128(a), 0)); } // Moves the upper two values of B into the lower two values of A. @@ -1577,315 +2093,419 @@ FORCE_INLINE __m128 _mm_movelh_ps(__m128 __A, __m128 __B) return vreinterpretq_m128_f32(vcombine_f32(a10, b10)); } -// Compute the absolute value of packed signed 32-bit integers in a, and store -// the unsigned results in dst. -// -// FOR j := 0 to 3 -// i := j*32 -// dst[i+31:i] := ABS(a[i+31:i]) -// ENDFOR -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_epi32 -FORCE_INLINE __m128i _mm_abs_epi32(__m128i a) +// Create mask from the most significant bit of each 8-bit element in a, and +// store the result in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movemask_pi8 +FORCE_INLINE int _mm_movemask_pi8(__m64 a) { - return vreinterpretq_m128i_s32(vabsq_s32(vreinterpretq_s32_m128i(a))); + uint8x8_t input = vreinterpret_u8_m64(a); +#if defined(__aarch64__) + static const int8x8_t shift = {0, 1, 2, 3, 4, 5, 6, 7}; + uint8x8_t tmp = vshr_n_u8(input, 7); + return vaddv_u8(vshl_u8(tmp, shift)); +#else + // Refer the implementation of `_mm_movemask_epi8` + uint16x4_t high_bits = vreinterpret_u16_u8(vshr_n_u8(input, 7)); + uint32x2_t paired16 = + vreinterpret_u32_u16(vsra_n_u16(high_bits, high_bits, 7)); + uint8x8_t paired32 = + vreinterpret_u8_u32(vsra_n_u32(paired16, paired16, 14)); + return vget_lane_u8(paired32, 0) | ((int) vget_lane_u8(paired32, 4) << 4); +#endif } -// Compute the absolute value of packed signed 16-bit integers in a, and store -// the unsigned results in dst. -// -// FOR j := 0 to 7 -// i := j*16 -// dst[i+15:i] := ABS(a[i+15:i]) -// ENDFOR -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_epi16 -FORCE_INLINE __m128i _mm_abs_epi16(__m128i a) +// NEON does not provide this method +// Creates a 4-bit mask from the most significant bits of the four +// single-precision, floating-point values. +// https://msdn.microsoft.com/en-us/library/vstudio/4490ys29(v=vs.100).aspx +FORCE_INLINE int _mm_movemask_ps(__m128 a) { - return vreinterpretq_m128i_s16(vabsq_s16(vreinterpretq_s16_m128i(a))); + uint32x4_t input = vreinterpretq_u32_m128(a); +#if defined(__aarch64__) + static const int32x4_t shift = {0, 1, 2, 3}; + uint32x4_t tmp = vshrq_n_u32(input, 31); + return vaddvq_u32(vshlq_u32(tmp, shift)); +#else + // Uses the exact same method as _mm_movemask_epi8, see that for details. + // Shift out everything but the sign bits with a 32-bit unsigned shift + // right. + uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(input, 31)); + // Merge the two pairs together with a 64-bit unsigned shift right + add. + uint8x16_t paired = + vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31)); + // Extract the result. 
+ return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2); +#endif } -// Compute the absolute value of packed signed 8-bit integers in a, and store -// the unsigned results in dst. +// Multiplies the four single-precision, floating-point values of a and b. // -// FOR j := 0 to 15 -// i := j*8 -// dst[i+7:i] := ABS(a[i+7:i]) -// ENDFOR +// r0 := a0 * b0 +// r1 := a1 * b1 +// r2 := a2 * b2 +// r3 := a3 * b3 // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_epi8 -FORCE_INLINE __m128i _mm_abs_epi8(__m128i a) +// https://msdn.microsoft.com/en-us/library/vstudio/22kbk6t9(v=vs.100).aspx +FORCE_INLINE __m128 _mm_mul_ps(__m128 a, __m128 b) { - return vreinterpretq_m128i_s8(vabsq_s8(vreinterpretq_s8_m128i(a))); + return vreinterpretq_m128_f32( + vmulq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); } -// Compute the absolute value of packed signed 32-bit integers in a, and store -// the unsigned results in dst. +// Multiply the lower single-precision (32-bit) floating-point element in a and +// b, store the result in the lower element of dst, and copy the upper 3 packed +// elements from a to the upper elements of dst. // -// FOR j := 0 to 1 -// i := j*32 -// dst[i+31:i] := ABS(a[i+31:i]) -// ENDFOR +// dst[31:0] := a[31:0] * b[31:0] +// dst[127:32] := a[127:32] // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_pi32 -FORCE_INLINE __m64 _mm_abs_pi32(__m64 a) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_ss +FORCE_INLINE __m128 _mm_mul_ss(__m128 a, __m128 b) { - return vreinterpret_m64_s32(vabs_s32(vreinterpret_s32_m64(a))); + return _mm_move_ss(a, _mm_mul_ps(a, b)); } -// Compute the absolute value of packed signed 16-bit integers in a, and store -// the unsigned results in dst. -// -// FOR j := 0 to 3 -// i := j*16 -// dst[i+15:i] := ABS(a[i+15:i]) -// ENDFOR -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_pi16 -FORCE_INLINE __m64 _mm_abs_pi16(__m64 a) +// Multiply the packed unsigned 16-bit integers in a and b, producing +// intermediate 32-bit integers, and store the high 16 bits of the intermediate +// integers in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mulhi_pu16 +FORCE_INLINE __m64 _mm_mulhi_pu16(__m64 a, __m64 b) { - return vreinterpret_m64_s16(vabs_s16(vreinterpret_s16_m64(a))); + return vreinterpret_m64_u16(vshrn_n_u32( + vmull_u16(vreinterpret_u16_m64(a), vreinterpret_u16_m64(b)), 16)); } -// Compute the absolute value of packed signed 8-bit integers in a, and store -// the unsigned results in dst. +// Computes the bitwise OR of the four single-precision, floating-point values +// of a and b. +// https://msdn.microsoft.com/en-us/library/vstudio/7ctdsyy0(v=vs.100).aspx +FORCE_INLINE __m128 _mm_or_ps(__m128 a, __m128 b) +{ + return vreinterpretq_m128_s32( + vorrq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b))); +} + +// Average packed unsigned 8-bit integers in a and b, and store the results in +// dst. 
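// Example (illustrative sketch, not part of the patch): _mm_movemask_ps above
// packs the four sign bits into the low bits of an int, so a non-zero mask
// means at least one lane of v has its sign bit set.
static inline int any_lane_negative(__m128 v)
{
    return _mm_movemask_ps(v) != 0;
}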
// // FOR j := 0 to 7 // i := j*8 -// dst[i+7:i] := ABS(a[i+7:i]) +// dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 // ENDFOR // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_pi8 -FORCE_INLINE __m64 _mm_abs_pi8(__m64 a) -{ - return vreinterpret_m64_s8(vabs_s8(vreinterpret_s8_m64(a))); -} +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pavgb +#define _m_pavgb(a, b) _mm_avg_pu8(a, b) -// Concatenate 16-byte blocks in a and b into a 32-byte temporary result, shift -// the result right by imm8 bytes, and store the low 16 bytes in dst. +// Average packed unsigned 16-bit integers in a and b, and store the results in +// dst. // -// tmp[255:0] := ((a[127:0] << 128)[255:0] OR b[127:0]) >> (imm8*8) -// dst[127:0] := tmp[127:0] +// FOR j := 0 to 3 +// i := j*16 +// dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 +// ENDFOR // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_alignr_epi8 -#define _mm_alignr_epi8(a, b, imm) \ - __extension__({ \ - __m128i ret; \ - if (unlikely((imm) >= 32)) { \ - ret = _mm_setzero_si128(); \ - } else { \ - uint8x16_t tmp_low, tmp_high; \ - if (imm >= 16) { \ - const int idx = imm - 16; \ - tmp_low = vreinterpretq_u8_m128i(a); \ - tmp_high = vdupq_n_u8(0); \ - ret = \ - vreinterpretq_m128i_u8(vextq_u8(tmp_low, tmp_high, idx)); \ - } else { \ - const int idx = imm; \ - tmp_low = vreinterpretq_u8_m128i(b); \ - tmp_high = vreinterpretq_u8_m128i(a); \ - ret = \ - vreinterpretq_m128i_u8(vextq_u8(tmp_low, tmp_high, idx)); \ - } \ - } \ - ret; \ - }) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pavgw +#define _m_pavgw(a, b) _mm_avg_pu16(a, b) -// Concatenate 8-byte blocks in a and b into a 16-byte temporary result, shift -// the result right by imm8 bytes, and store the low 8 bytes in dst. -// -// tmp[127:0] := ((a[63:0] << 64)[127:0] OR b[63:0]) >> (imm8*8) -// dst[63:0] := tmp[63:0] -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_alignr_pi8 -#define _mm_alignr_pi8(a, b, imm) \ - __extension__({ \ - __m64 ret; \ - if (unlikely((imm) >= 16)) { \ - ret = vreinterpret_m64_s8(vdup_n_s8(0)); \ - } else { \ - uint8x8_t tmp_low, tmp_high; \ - if (imm >= 8) { \ - const int idx = imm - 8; \ - tmp_low = vreinterpret_u8_m64(a); \ - tmp_high = vdup_n_u8(0); \ - ret = vreinterpret_m64_u8(vext_u8(tmp_low, tmp_high, idx)); \ - } else { \ - const int idx = imm; \ - tmp_low = vreinterpret_u8_m64(b); \ - tmp_high = vreinterpret_u8_m64(a); \ - ret = vreinterpret_m64_u8(vext_u8(tmp_low, tmp_high, idx)); \ - } \ - } \ - ret; \ - }) +// Extract a 16-bit integer from a, selected with imm8, and store the result in +// the lower element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pextrw +#define _m_pextrw(a, imm) _mm_extract_pi16(a, imm) -// Takes the upper 64 bits of a and places it in the low end of the result -// Takes the lower 64 bits of b and places it into the high end of the result. -FORCE_INLINE __m128 _mm_shuffle_ps_1032(__m128 a, __m128 b) -{ - float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a)); - float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b)); - return vreinterpretq_m128_f32(vcombine_f32(a32, b10)); -} +// Copy a to dst, and insert the 16-bit integer i into dst at the location +// specified by imm8. 
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=m_pinsrw +#define _m_pinsrw(a, i, imm) _mm_insert_pi16(a, i, imm) -// takes the lower two 32-bit values from a and swaps them and places in high -// end of result takes the higher two 32 bit values from b and swaps them and -// places in low end of result. -FORCE_INLINE __m128 _mm_shuffle_ps_2301(__m128 a, __m128 b) +// Compare packed signed 16-bit integers in a and b, and store packed maximum +// values in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pmaxsw +#define _m_pmaxsw(a, b) _mm_max_pi16(a, b) + +// Compare packed unsigned 8-bit integers in a and b, and store packed maximum +// values in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pmaxub +#define _m_pmaxub(a, b) _mm_max_pu8(a, b) + +// Compare packed signed 16-bit integers in a and b, and store packed minimum +// values in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pminsw +#define _m_pminsw(a, b) _mm_min_pi16(a, b) + +// Compare packed unsigned 8-bit integers in a and b, and store packed minimum +// values in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pminub +#define _m_pminub(a, b) _mm_min_pu8(a, b) + +// Create mask from the most significant bit of each 8-bit element in a, and +// store the result in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pmovmskb +#define _m_pmovmskb(a) _mm_movemask_pi8(a) + +// Multiply the packed unsigned 16-bit integers in a and b, producing +// intermediate 32-bit integers, and store the high 16 bits of the intermediate +// integers in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pmulhuw +#define _m_pmulhuw(a, b) _mm_mulhi_pu16(a, b) + +// Loads one cache line of data from address p to a location closer to the +// processor. https://msdn.microsoft.com/en-us/library/84szxsww(v=vs.100).aspx +FORCE_INLINE void _mm_prefetch(const void *p, int i) { - float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a))); - float32x2_t b23 = vrev64_f32(vget_high_f32(vreinterpretq_f32_m128(b))); - return vreinterpretq_m128_f32(vcombine_f32(a01, b23)); + (void) i; + __builtin_prefetch(p); } -FORCE_INLINE __m128 _mm_shuffle_ps_0321(__m128 a, __m128 b) +// Compute the absolute differences of packed unsigned 8-bit integers in a and +// b, then horizontally sum each consecutive 8 differences to produce four +// unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low +// 16 bits of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=m_psadbw +#define _m_psadbw(a, b) _mm_sad_pu8(a, b) + +// Shuffle 16-bit integers in a using the control in imm8, and store the results +// in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pshufw +#define _m_pshufw(a, imm) _mm_shuffle_pi16(a, imm) + +// Compute the approximate reciprocal of packed single-precision (32-bit) +// floating-point elements in a, and store the results in dst. The maximum +// relative error for this approximation is less than 1.5*2^-12. 
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp_ps
+FORCE_INLINE __m128 _mm_rcp_ps(__m128 in)
 {
-    float32x2_t a21 = vget_high_f32(
-        vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
-    float32x2_t b03 = vget_low_f32(
-        vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
-    return vreinterpretq_m128_f32(vcombine_f32(a21, b03));
+    float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(in));
+    recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(in)));
+#if SSE2NEON_PRECISE_DIV
+    // Additional Newton-Raphson iteration for accuracy
+    recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(in)));
+#endif
+    return vreinterpretq_m128_f32(recip);
 }
 
-FORCE_INLINE __m128 _mm_shuffle_ps_2103(__m128 a, __m128 b)
+// Compute the approximate reciprocal of the lower single-precision (32-bit)
+// floating-point element in a, store the result in the lower element of dst,
+// and copy the upper 3 packed elements from a to the upper elements of dst. The
+// maximum relative error for this approximation is less than 1.5*2^-12.
+//
+// dst[31:0] := (1.0 / a[31:0])
+// dst[127:32] := a[127:32]
+//
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp_ss
+FORCE_INLINE __m128 _mm_rcp_ss(__m128 a)
 {
-    float32x2_t a03 = vget_low_f32(
-        vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
-    float32x2_t b21 = vget_high_f32(
-        vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
-    return vreinterpretq_m128_f32(vcombine_f32(a03, b21));
+    return _mm_move_ss(a, _mm_rcp_ps(a));
 }
 
-FORCE_INLINE __m128 _mm_shuffle_ps_1010(__m128 a, __m128 b)
+// Computes the approximations of the reciprocal square roots of the four
+// single-precision floating point values of in.
+// The current precision is 1% error.
+// https://msdn.microsoft.com/en-us/library/22hfsh53(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_rsqrt_ps(__m128 in)
 {
-    float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
-    float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
-    return vreinterpretq_m128_f32(vcombine_f32(a10, b10));
+    float32x4_t out = vrsqrteq_f32(vreinterpretq_f32_m128(in));
+#if SSE2NEON_PRECISE_SQRT
+    // Additional Newton-Raphson iteration for accuracy
+    out = vmulq_f32(
+        out, vrsqrtsq_f32(vmulq_f32(vreinterpretq_f32_m128(in), out), out));
+    out = vmulq_f32(
+        out, vrsqrtsq_f32(vmulq_f32(vreinterpretq_f32_m128(in), out), out));
+#endif
+    return vreinterpretq_m128_f32(out);
 }
 
-FORCE_INLINE __m128 _mm_shuffle_ps_1001(__m128 a, __m128 b)
+// Compute the approximate reciprocal square root of the lower single-precision
+// (32-bit) floating-point element in a, store the result in the lower element
+// of dst, and copy the upper 3 packed elements from a to the upper elements of
+// dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rsqrt_ss
+FORCE_INLINE __m128 _mm_rsqrt_ss(__m128 in)
 {
-    float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
-    float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
-    return vreinterpretq_m128_f32(vcombine_f32(a01, b10));
+    return vsetq_lane_f32(vgetq_lane_f32(_mm_rsqrt_ps(in), 0), in, 0);
 }
 
-FORCE_INLINE __m128 _mm_shuffle_ps_0101(__m128 a, __m128 b)
+// Compute the absolute differences of packed unsigned 8-bit integers in a and
+// b, then horizontally sum each consecutive 8 differences to produce four
+// unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
+// 16 bits of dst.
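// Example (illustrative sketch, not part of the patch): using the reciprocal
// estimate above for a fast approximate divide, with one extra Newton-Raphson
// step in user code (r' = r * (2 - b * r)) to tighten the result.
static inline __m128 fast_div_ps(__m128 a, __m128 b)
{
    __m128 r = _mm_rcp_ps(b);
    r = _mm_mul_ps(r, _mm_sub_ps(_mm_set1_ps(2.0f), _mm_mul_ps(b, r)));
    return _mm_mul_ps(a, r);
}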
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sad_pu8 +FORCE_INLINE __m64 _mm_sad_pu8(__m64 a, __m64 b) { - float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a))); - float32x2_t b01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(b))); - return vreinterpretq_m128_f32(vcombine_f32(a01, b01)); + uint64x1_t t = vpaddl_u32(vpaddl_u16( + vpaddl_u8(vabd_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b))))); + return vreinterpret_m64_u16( + vset_lane_u16(vget_lane_u64(t, 0), vdup_n_u16(0), 0)); } -// keeps the low 64 bits of b in the low and puts the high 64 bits of a in the -// high -FORCE_INLINE __m128 _mm_shuffle_ps_3210(__m128 a, __m128 b) +// Macro: Set the flush zero bits of the MXCSR control and status register to +// the value in unsigned 32-bit integer a. The flush zero may contain any of the +// following flags: _MM_FLUSH_ZERO_ON or _MM_FLUSH_ZERO_OFF +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_MM_SET_FLUSH_ZERO_MODE +FORCE_INLINE void _sse2neon_mm_set_flush_zero_mode(unsigned int flag) { - float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a)); - float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b)); - return vreinterpretq_m128_f32(vcombine_f32(a10, b32)); + // AArch32 Advanced SIMD arithmetic always uses the Flush-to-zero setting, + // regardless of the value of the FZ bit. + union { + fpcr_bitfield field; +#if defined(__aarch64__) + uint64_t value; +#else + uint32_t value; +#endif + } r; + +#if defined(__aarch64__) + asm volatile("mrs %0, FPCR" : "=r"(r.value)); /* read */ +#else + asm volatile("vmrs %0, FPSCR" : "=r"(r.value)); /* read */ +#endif + + r.field.bit24 = (flag & _MM_FLUSH_ZERO_MASK) == _MM_FLUSH_ZERO_ON; + +#if defined(__aarch64__) + asm volatile("msr FPCR, %0" ::"r"(r)); /* write */ +#else + asm volatile("vmsr FPSCR, %0" ::"r"(r)); /* write */ +#endif } -FORCE_INLINE __m128 _mm_shuffle_ps_0011(__m128 a, __m128 b) +// Sets the four single-precision, floating-point values to the four inputs. +// https://msdn.microsoft.com/en-us/library/vstudio/afh0zf75(v=vs.100).aspx +FORCE_INLINE __m128 _mm_set_ps(float w, float z, float y, float x) { - float32x2_t a11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 1); - float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0); - return vreinterpretq_m128_f32(vcombine_f32(a11, b00)); + float ALIGN_STRUCT(16) data[4] = {x, y, z, w}; + return vreinterpretq_m128_f32(vld1q_f32(data)); } -FORCE_INLINE __m128 _mm_shuffle_ps_0022(__m128 a, __m128 b) +// Sets the four single-precision, floating-point values to w. +// https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx +FORCE_INLINE __m128 _mm_set_ps1(float _w) { - float32x2_t a22 = - vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0); - float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0); - return vreinterpretq_m128_f32(vcombine_f32(a22, b00)); + return vreinterpretq_m128_f32(vdupq_n_f32(_w)); } -FORCE_INLINE __m128 _mm_shuffle_ps_2200(__m128 a, __m128 b) +// Macro: Set the rounding mode bits of the MXCSR control and status register to +// the value in unsigned 32-bit integer a. 
The rounding mode may contain any of +// the following flags: _MM_ROUND_NEAREST, _MM_ROUND_DOWN, _MM_ROUND_UP, +// _MM_ROUND_TOWARD_ZERO +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_MM_SET_ROUNDING_MODE +FORCE_INLINE void _MM_SET_ROUNDING_MODE(int rounding) { - float32x2_t a00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 0); - float32x2_t b22 = - vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(b)), 0); - return vreinterpretq_m128_f32(vcombine_f32(a00, b22)); + union { + fpcr_bitfield field; +#if defined(__aarch64__) + uint64_t value; +#else + uint32_t value; +#endif + } r; + +#if defined(__aarch64__) + asm volatile("mrs %0, FPCR" : "=r"(r.value)); /* read */ +#else + asm volatile("vmrs %0, FPSCR" : "=r"(r.value)); /* read */ +#endif + + switch (rounding) { + case _MM_ROUND_TOWARD_ZERO: + r.field.bit22 = 1; + r.field.bit23 = 1; + break; + case _MM_ROUND_DOWN: + r.field.bit22 = 0; + r.field.bit23 = 1; + break; + case _MM_ROUND_UP: + r.field.bit22 = 1; + r.field.bit23 = 0; + break; + default: //_MM_ROUND_NEAREST + r.field.bit22 = 0; + r.field.bit23 = 0; + } + +#if defined(__aarch64__) + asm volatile("msr FPCR, %0" ::"r"(r)); /* write */ +#else + asm volatile("vmsr FPSCR, %0" ::"r"(r)); /* write */ +#endif } -FORCE_INLINE __m128 _mm_shuffle_ps_3202(__m128 a, __m128 b) +// Copy single-precision (32-bit) floating-point element a to the lower element +// of dst, and zero the upper 3 elements. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_ss +FORCE_INLINE __m128 _mm_set_ss(float a) { - float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0); - float32x2_t a22 = - vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0); - float32x2_t a02 = vset_lane_f32(a0, a22, 1); /* TODO: use vzip ?*/ - float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b)); - return vreinterpretq_m128_f32(vcombine_f32(a02, b32)); + float ALIGN_STRUCT(16) data[4] = {a, 0, 0, 0}; + return vreinterpretq_m128_f32(vld1q_f32(data)); } -FORCE_INLINE __m128 _mm_shuffle_ps_1133(__m128 a, __m128 b) +// Sets the four single-precision, floating-point values to w. +// +// r0 := r1 := r2 := r3 := w +// +// https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx +FORCE_INLINE __m128 _mm_set1_ps(float _w) { - float32x2_t a33 = - vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 1); - float32x2_t b11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 1); - return vreinterpretq_m128_f32(vcombine_f32(a33, b11)); + return vreinterpretq_m128_f32(vdupq_n_f32(_w)); } -FORCE_INLINE __m128 _mm_shuffle_ps_2010(__m128 a, __m128 b) +FORCE_INLINE void _mm_setcsr(unsigned int a) { - float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a)); - float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2); - float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0); - float32x2_t b20 = vset_lane_f32(b2, b00, 1); - return vreinterpretq_m128_f32(vcombine_f32(a10, b20)); + _MM_SET_ROUNDING_MODE(a); } -FORCE_INLINE __m128 _mm_shuffle_ps_2001(__m128 a, __m128 b) +// Sets the four single-precision, floating-point values to the four inputs in +// reverse order. 
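// Example (illustrative sketch, not part of the patch): _mm_set_ps takes its
// arguments highest lane first, while _mm_setr_ps (just below) takes them in
// memory order, so both calls here produce the vector {1, 2, 3, 4}.
static inline void set_order_demo(void)
{
    __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
    __m128 b = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
    (void) a; (void) b;
}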
+// https://msdn.microsoft.com/en-us/library/vstudio/d2172ct3(v=vs.100).aspx +FORCE_INLINE __m128 _mm_setr_ps(float w, float z, float y, float x) { - float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a))); - float32_t b2 = vgetq_lane_f32(b, 2); - float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0); - float32x2_t b20 = vset_lane_f32(b2, b00, 1); - return vreinterpretq_m128_f32(vcombine_f32(a01, b20)); + float ALIGN_STRUCT(16) data[4] = {w, z, y, x}; + return vreinterpretq_m128_f32(vld1q_f32(data)); } -FORCE_INLINE __m128 _mm_shuffle_ps_2032(__m128 a, __m128 b) +// Clears the four single-precision, floating-point values. +// https://msdn.microsoft.com/en-us/library/vstudio/tk1t2tbz(v=vs.100).aspx +FORCE_INLINE __m128 _mm_setzero_ps(void) { - float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a)); - float32_t b2 = vgetq_lane_f32(b, 2); - float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0); - float32x2_t b20 = vset_lane_f32(b2, b00, 1); - return vreinterpretq_m128_f32(vcombine_f32(a32, b20)); + return vreinterpretq_m128_f32(vdupq_n_f32(0)); } -// NEON does not support a general purpose permute intrinsic -// Selects four specific single-precision, floating-point values from a and b, -// based on the mask i. -// -// C equivalent: -// __m128 _mm_shuffle_ps_default(__m128 a, __m128 b, -// __constrange(0, 255) int imm) { -// __m128 ret; -// ret[0] = a[imm & 0x3]; ret[1] = a[(imm >> 2) & 0x3]; -// ret[2] = b[(imm >> 4) & 0x03]; ret[3] = b[(imm >> 6) & 0x03]; -// return ret; -// } -// -// https://msdn.microsoft.com/en-us/library/vstudio/5f0858x0(v=vs.100).aspx -#define _mm_shuffle_ps_default(a, b, imm) \ +// Shuffle 16-bit integers in a using the control in imm8, and store the results +// in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shuffle_pi16 +#if __has_builtin(__builtin_shufflevector) +#define _mm_shuffle_pi16(a, imm) \ __extension__({ \ - float32x4_t ret; \ - ret = vmovq_n_f32( \ - vgetq_lane_f32(vreinterpretq_f32_m128(a), (imm) & (0x3))); \ - ret = vsetq_lane_f32( \ - vgetq_lane_f32(vreinterpretq_f32_m128(a), ((imm) >> 2) & 0x3), \ - ret, 1); \ - ret = vsetq_lane_f32( \ - vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 4) & 0x3), \ - ret, 2); \ - ret = vsetq_lane_f32( \ - vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 6) & 0x3), \ - ret, 3); \ - vreinterpretq_m128_f32(ret); \ + vreinterpret_m64_s16(__builtin_shufflevector( \ + vreinterpret_s16_m64(a), vreinterpret_s16_m64(a), (imm & 0x3), \ + ((imm >> 2) & 0x3), ((imm >> 4) & 0x3), ((imm >> 6) & 0x3))); \ }) +#else +#define _mm_shuffle_pi16(a, imm) \ + __extension__({ \ + int16x4_t ret; \ + ret = \ + vmov_n_s16(vget_lane_s16(vreinterpret_s16_m64(a), (imm) & (0x3))); \ + ret = vset_lane_s16( \ + vget_lane_s16(vreinterpret_s16_m64(a), ((imm) >> 2) & 0x3), ret, \ + 1); \ + ret = vset_lane_s16( \ + vget_lane_s16(vreinterpret_s16_m64(a), ((imm) >> 4) & 0x3), ret, \ + 2); \ + ret = vset_lane_s16( \ + vget_lane_s16(vreinterpret_s16_m64(a), ((imm) >> 6) & 0x3), ret, \ + 3); \ + vreinterpret_m64_s16(ret); \ + }) +#endif + +// Guarantees that every preceding store is globally visible before any +// subsequent store. 
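// Example (illustrative sketch, not part of the patch): reversing the four
// 16-bit lanes of an __m64 with the _mm_shuffle_pi16 macro above; _MM_SHUFFLE
// lists the source lane for destination lanes 3..0, so (0, 1, 2, 3) reverses
// the order.
static inline __m64 reverse_pi16(__m64 v)
{
    return _mm_shuffle_pi16(v, _MM_SHUFFLE(0, 1, 2, 3));
}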
+// https://msdn.microsoft.com/en-us/library/5h2w73d1%28v=vs.90%29.aspx
+FORCE_INLINE void _mm_sfence(void)
+{
+    __sync_synchronize();
+}
 
 // FORCE_INLINE __m128 _mm_shuffle_ps(__m128 a, __m128 b, __constrange(0,255)
 // int imm)
@@ -1963,1876 +2583,1721 @@ FORCE_INLINE __m128 _mm_shuffle_ps_2032(__m128 a, __m128 b)
     })
 #endif
 
-// Takes the upper 64 bits of a and places it in the low end of the result
-// Takes the lower 64 bits of a and places it into the high end of the result.
-FORCE_INLINE __m128i _mm_shuffle_epi_1032(__m128i a)
+// Computes the approximations of square roots of the four single-precision,
+// floating-point values of a. First computes reciprocal square roots and then
+// reciprocals of the four values.
+//
+// r0 := sqrt(a0)
+// r1 := sqrt(a1)
+// r2 := sqrt(a2)
+// r3 := sqrt(a3)
+//
+// https://msdn.microsoft.com/en-us/library/vstudio/8z67bwwk(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_sqrt_ps(__m128 in)
 {
-    int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a));
-    int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
-    return vreinterpretq_m128i_s32(vcombine_s32(a32, a10));
+#if SSE2NEON_PRECISE_SQRT
+    float32x4_t recip = vrsqrteq_f32(vreinterpretq_f32_m128(in));
+
+    // Test for vrsqrteq_f32(0) -> positive infinity case.
+    // Change to zero, so that s * 1/sqrt(s) result is zero too.
+    const uint32x4_t pos_inf = vdupq_n_u32(0x7F800000);
+    const uint32x4_t div_by_zero =
+        vceqq_u32(pos_inf, vreinterpretq_u32_f32(recip));
+    recip = vreinterpretq_f32_u32(
+        vandq_u32(vmvnq_u32(div_by_zero), vreinterpretq_u32_f32(recip)));
+
+    // Additional Newton-Raphson iteration for accuracy
+    recip = vmulq_f32(
+        vrsqrtsq_f32(vmulq_f32(recip, recip), vreinterpretq_f32_m128(in)),
+        recip);
+    recip = vmulq_f32(
+        vrsqrtsq_f32(vmulq_f32(recip, recip), vreinterpretq_f32_m128(in)),
+        recip);
+
+    // sqrt(s) = s * 1/sqrt(s)
+    return vreinterpretq_m128_f32(vmulq_f32(vreinterpretq_f32_m128(in), recip));
+#elif defined(__aarch64__)
+    return vreinterpretq_m128_f32(vsqrtq_f32(vreinterpretq_f32_m128(in)));
+#else
+    float32x4_t recipsq = vrsqrteq_f32(vreinterpretq_f32_m128(in));
+    float32x4_t sq = vrecpeq_f32(recipsq);
+    return vreinterpretq_m128_f32(sq);
+#endif
 }
 
-// takes the lower two 32-bit values from a and swaps them and places in low end
-// of result takes the higher two 32 bit values from a and swaps them and places
-// in high end of result.
-FORCE_INLINE __m128i _mm_shuffle_epi_2301(__m128i a)
+// Computes the approximation of the square root of the scalar single-precision
+// floating point value of in.
+// https://msdn.microsoft.com/en-us/library/ahfsc22d(v=vs.100).aspx
+FORCE_INLINE __m128 _mm_sqrt_ss(__m128 in)
 {
-    int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
-    int32x2_t a23 = vrev64_s32(vget_high_s32(vreinterpretq_s32_m128i(a)));
-    return vreinterpretq_m128i_s32(vcombine_s32(a01, a23));
+    float32_t value =
+        vgetq_lane_f32(vreinterpretq_f32_m128(_mm_sqrt_ps(in)), 0);
+    return vreinterpretq_m128_f32(
+        vsetq_lane_f32(value, vreinterpretq_f32_m128(in), 0));
 }
 
-// rotates the least significant 32 bits into the most signficant 32 bits, and
-// shifts the rest down
-FORCE_INLINE __m128i _mm_shuffle_epi_0321(__m128i a)
+// Stores four single-precision, floating-point values.
+// https://msdn.microsoft.com/en-us/library/vstudio/s3h4ay6y(v=vs.100).aspx +FORCE_INLINE void _mm_store_ps(float *p, __m128 a) { - return vreinterpretq_m128i_s32( - vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 1)); + vst1q_f32(p, vreinterpretq_f32_m128(a)); } -// rotates the most significant 32 bits into the least signficant 32 bits, and -// shifts the rest up -FORCE_INLINE __m128i _mm_shuffle_epi_2103(__m128i a) +// Store the lower single-precision (32-bit) floating-point element from a into +// 4 contiguous elements in memory. mem_addr must be aligned on a 16-byte +// boundary or a general-protection exception may be generated. +// +// MEM[mem_addr+31:mem_addr] := a[31:0] +// MEM[mem_addr+63:mem_addr+32] := a[31:0] +// MEM[mem_addr+95:mem_addr+64] := a[31:0] +// MEM[mem_addr+127:mem_addr+96] := a[31:0] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_ps1 +FORCE_INLINE void _mm_store_ps1(float *p, __m128 a) { - return vreinterpretq_m128i_s32( - vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 3)); + float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0); + vst1q_f32(p, vdupq_n_f32(a0)); } -// gets the lower 64 bits of a, and places it in the upper 64 bits -// gets the lower 64 bits of a and places it in the lower 64 bits -FORCE_INLINE __m128i _mm_shuffle_epi_1010(__m128i a) +// Stores the lower single - precision, floating - point value. +// https://msdn.microsoft.com/en-us/library/tzz10fbx(v=vs.100).aspx +FORCE_INLINE void _mm_store_ss(float *p, __m128 a) { - int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a)); - return vreinterpretq_m128i_s32(vcombine_s32(a10, a10)); + vst1q_lane_f32(p, vreinterpretq_f32_m128(a), 0); } -// gets the lower 64 bits of a, swaps the 0 and 1 elements, and places it in the -// lower 64 bits gets the lower 64 bits of a, and places it in the upper 64 bits -FORCE_INLINE __m128i _mm_shuffle_epi_1001(__m128i a) +// Store the lower single-precision (32-bit) floating-point element from a into +// 4 contiguous elements in memory. mem_addr must be aligned on a 16-byte +// boundary or a general-protection exception may be generated. +// +// MEM[mem_addr+31:mem_addr] := a[31:0] +// MEM[mem_addr+63:mem_addr+32] := a[31:0] +// MEM[mem_addr+95:mem_addr+64] := a[31:0] +// MEM[mem_addr+127:mem_addr+96] := a[31:0] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store1_ps +#define _mm_store1_ps _mm_store_ps1 + +// Stores the upper two single-precision, floating-point values of a to the +// address p. +// +// *p0 := a2 +// *p1 := a3 +// +// https://msdn.microsoft.com/en-us/library/a7525fs8(v%3dvs.90).aspx +FORCE_INLINE void _mm_storeh_pi(__m64 *p, __m128 a) { - int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a))); - int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a)); - return vreinterpretq_m128i_s32(vcombine_s32(a01, a10)); + *p = vreinterpret_m64_f32(vget_high_f32(a)); } -// gets the lower 64 bits of a, swaps the 0 and 1 elements and places it in the -// upper 64 bits gets the lower 64 bits of a, swaps the 0 and 1 elements, and -// places it in the lower 64 bits -FORCE_INLINE __m128i _mm_shuffle_epi_0101(__m128i a) +// Stores the lower two single-precision floating point values of a to the +// address p. 
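// Example (illustrative sketch, not part of the patch): writing results back
// with the stores above. `dst` is a hypothetical 16-byte-aligned destination;
// the unaligned _mm_storeu_ps (below) drops that requirement.
static inline void store_demo(float *dst, __m128 v)
{
    _mm_store_ps(dst, v);  // all four lanes to dst[0..3]
    _mm_store_ss(dst, v);  // lane 0 only, overwriting dst[0]
}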
+// +// *p0 := a0 +// *p1 := a1 +// +// https://msdn.microsoft.com/en-us/library/h54t98ks(v=vs.90).aspx +FORCE_INLINE void _mm_storel_pi(__m64 *p, __m128 a) { - int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a))); - return vreinterpretq_m128i_s32(vcombine_s32(a01, a01)); + *p = vreinterpret_m64_f32(vget_low_f32(a)); } -FORCE_INLINE __m128i _mm_shuffle_epi_2211(__m128i a) +// Store 4 single-precision (32-bit) floating-point elements from a into memory +// in reverse order. mem_addr must be aligned on a 16-byte boundary or a +// general-protection exception may be generated. +// +// MEM[mem_addr+31:mem_addr] := a[127:96] +// MEM[mem_addr+63:mem_addr+32] := a[95:64] +// MEM[mem_addr+95:mem_addr+64] := a[63:32] +// MEM[mem_addr+127:mem_addr+96] := a[31:0] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storer_ps +FORCE_INLINE void _mm_storer_ps(float *p, __m128 a) { - int32x2_t a11 = vdup_lane_s32(vget_low_s32(vreinterpretq_s32_m128i(a)), 1); - int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0); - return vreinterpretq_m128i_s32(vcombine_s32(a11, a22)); + float32x4_t tmp = vrev64q_f32(vreinterpretq_f32_m128(a)); + float32x4_t rev = vextq_f32(tmp, tmp, 2); + vst1q_f32(p, rev); } -FORCE_INLINE __m128i _mm_shuffle_epi_0122(__m128i a) +// Stores four single-precision, floating-point values. +// https://msdn.microsoft.com/en-us/library/44e30x22(v=vs.100).aspx +FORCE_INLINE void _mm_storeu_ps(float *p, __m128 a) { - int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0); - int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a))); - return vreinterpretq_m128i_s32(vcombine_s32(a22, a01)); + vst1q_f32(p, vreinterpretq_f32_m128(a)); } -FORCE_INLINE __m128i _mm_shuffle_epi_3332(__m128i a) +// Stores 16-bits of integer data a at the address p. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeu_si16 +FORCE_INLINE void _mm_storeu_si16(void *p, __m128i a) { - int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a)); - int32x2_t a33 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 1); - return vreinterpretq_m128i_s32(vcombine_s32(a32, a33)); + vst1q_lane_s16((int16_t *) p, vreinterpretq_s16_m128i(a), 0); } -// Shuffle packed 8-bit integers in a according to shuffle control mask in the -// corresponding 8-bit element of b, and store the results in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shuffle_epi8 -FORCE_INLINE __m128i _mm_shuffle_epi8(__m128i a, __m128i b) +// Stores 64-bits of integer data a at the address p. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeu_si64 +FORCE_INLINE void _mm_storeu_si64(void *p, __m128i a) { - int8x16_t tbl = vreinterpretq_s8_m128i(a); // input a - uint8x16_t idx = vreinterpretq_u8_m128i(b); // input b - uint8x16_t idx_masked = - vandq_u8(idx, vdupq_n_u8(0x8F)); // avoid using meaningless bits -#if defined(__aarch64__) - return vreinterpretq_m128i_s8(vqtbl1q_s8(tbl, idx_masked)); -#elif defined(__GNUC__) - int8x16_t ret; - // %e and %f represent the even and odd D registers - // respectively. 
- __asm__ __volatile__( - "vtbl.8 %e[ret], {%e[tbl], %f[tbl]}, %e[idx]\n" - "vtbl.8 %f[ret], {%e[tbl], %f[tbl]}, %f[idx]\n" - : [ret] "=&w"(ret) - : [tbl] "w"(tbl), [idx] "w"(idx_masked)); - return vreinterpretq_m128i_s8(ret); -#else - // use this line if testing on aarch64 - int8x8x2_t a_split = {vget_low_s8(tbl), vget_high_s8(tbl)}; - return vreinterpretq_m128i_s8( - vcombine_s8(vtbl2_s8(a_split, vget_low_u8(idx_masked)), - vtbl2_s8(a_split, vget_high_u8(idx_masked)))); -#endif + vst1q_lane_s64((int64_t *) p, vreinterpretq_s64_m128i(a), 0); } -// C equivalent: -// __m128i _mm_shuffle_epi32_default(__m128i a, -// __constrange(0, 255) int imm) { -// __m128i ret; -// ret[0] = a[imm & 0x3]; ret[1] = a[(imm >> 2) & 0x3]; -// ret[2] = a[(imm >> 4) & 0x03]; ret[3] = a[(imm >> 6) & 0x03]; -// return ret; -// } -#define _mm_shuffle_epi32_default(a, imm) \ - __extension__({ \ - int32x4_t ret; \ - ret = vmovq_n_s32( \ - vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm) & (0x3))); \ - ret = vsetq_lane_s32( \ - vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 2) & 0x3), \ - ret, 1); \ - ret = vsetq_lane_s32( \ - vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 4) & 0x3), \ - ret, 2); \ - ret = vsetq_lane_s32( \ - vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 6) & 0x3), \ - ret, 3); \ - vreinterpretq_m128i_s32(ret); \ - }) +// Store 64-bits of integer data from a into memory using a non-temporal memory +// hint. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_stream_pi +FORCE_INLINE void _mm_stream_pi(__m64 *p, __m64 a) +{ + vst1_s64((int64_t *) p, vreinterpret_s64_m64(a)); +} -// FORCE_INLINE __m128i _mm_shuffle_epi32_splat(__m128i a, __constrange(0,255) -// int imm) -#if defined(__aarch64__) -#define _mm_shuffle_epi32_splat(a, imm) \ - __extension__({ \ - vreinterpretq_m128i_s32( \ - vdupq_laneq_s32(vreinterpretq_s32_m128i(a), (imm))); \ - }) +// Store 128-bits (composed of 4 packed single-precision (32-bit) floating- +// point elements) from a into memory using a non-temporal memory hint. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_stream_ps +FORCE_INLINE void _mm_stream_ps(float *p, __m128 a) +{ +#if __has_builtin(__builtin_nontemporal_store) + __builtin_nontemporal_store(a, (float32x4_t *) p); #else -#define _mm_shuffle_epi32_splat(a, imm) \ - __extension__({ \ - vreinterpretq_m128i_s32( \ - vdupq_n_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm)))); \ - }) + vst1q_f32(p, vreinterpretq_f32_m128(a)); #endif +} -// Shuffles the 4 signed or unsigned 32-bit integers in a as specified by imm. 
-// https://msdn.microsoft.com/en-us/library/56f67xbk%28v=vs.90%29.aspx -// FORCE_INLINE __m128i _mm_shuffle_epi32(__m128i a, -// __constrange(0,255) int imm) -#if __has_builtin(__builtin_shufflevector) -#define _mm_shuffle_epi32(a, imm) \ - __extension__({ \ - int32x4_t _input = vreinterpretq_s32_m128i(a); \ - int32x4_t _shuf = __builtin_shufflevector( \ - _input, _input, (imm) & (0x3), ((imm) >> 2) & 0x3, \ - ((imm) >> 4) & 0x3, ((imm) >> 6) & 0x3); \ - vreinterpretq_m128i_s32(_shuf); \ - }) -#else // generic -#define _mm_shuffle_epi32(a, imm) \ - __extension__({ \ - __m128i ret; \ - switch (imm) { \ - case _MM_SHUFFLE(1, 0, 3, 2): \ - ret = _mm_shuffle_epi_1032((a)); \ - break; \ - case _MM_SHUFFLE(2, 3, 0, 1): \ - ret = _mm_shuffle_epi_2301((a)); \ - break; \ - case _MM_SHUFFLE(0, 3, 2, 1): \ - ret = _mm_shuffle_epi_0321((a)); \ - break; \ - case _MM_SHUFFLE(2, 1, 0, 3): \ - ret = _mm_shuffle_epi_2103((a)); \ - break; \ - case _MM_SHUFFLE(1, 0, 1, 0): \ - ret = _mm_shuffle_epi_1010((a)); \ - break; \ - case _MM_SHUFFLE(1, 0, 0, 1): \ - ret = _mm_shuffle_epi_1001((a)); \ - break; \ - case _MM_SHUFFLE(0, 1, 0, 1): \ - ret = _mm_shuffle_epi_0101((a)); \ - break; \ - case _MM_SHUFFLE(2, 2, 1, 1): \ - ret = _mm_shuffle_epi_2211((a)); \ - break; \ - case _MM_SHUFFLE(0, 1, 2, 2): \ - ret = _mm_shuffle_epi_0122((a)); \ - break; \ - case _MM_SHUFFLE(3, 3, 3, 2): \ - ret = _mm_shuffle_epi_3332((a)); \ - break; \ - case _MM_SHUFFLE(0, 0, 0, 0): \ - ret = _mm_shuffle_epi32_splat((a), 0); \ - break; \ - case _MM_SHUFFLE(1, 1, 1, 1): \ - ret = _mm_shuffle_epi32_splat((a), 1); \ - break; \ - case _MM_SHUFFLE(2, 2, 2, 2): \ - ret = _mm_shuffle_epi32_splat((a), 2); \ - break; \ - case _MM_SHUFFLE(3, 3, 3, 3): \ - ret = _mm_shuffle_epi32_splat((a), 3); \ - break; \ - default: \ - ret = _mm_shuffle_epi32_default((a), (imm)); \ - break; \ - } \ - ret; \ - }) -#endif +// Subtracts the four single-precision, floating-point values of a and b. +// +// r0 := a0 - b0 +// r1 := a1 - b1 +// r2 := a2 - b2 +// r3 := a3 - b3 +// +// https://msdn.microsoft.com/en-us/library/vstudio/1zad2k61(v=vs.100).aspx +FORCE_INLINE __m128 _mm_sub_ps(__m128 a, __m128 b) +{ + return vreinterpretq_m128_f32( + vsubq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); +} -// Shuffles the lower 4 signed or unsigned 16-bit integers in a as specified -// by imm. -// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/y41dkk37(v=vs.100) -// FORCE_INLINE __m128i _mm_shufflelo_epi16_function(__m128i a, -// __constrange(0,255) int -// imm) -#define _mm_shufflelo_epi16_function(a, imm) \ - __extension__({ \ - int16x8_t ret = vreinterpretq_s16_m128i(a); \ - int16x4_t lowBits = vget_low_s16(ret); \ - ret = vsetq_lane_s16(vget_lane_s16(lowBits, (imm) & (0x3)), ret, 0); \ - ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 2) & 0x3), ret, \ - 1); \ - ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 4) & 0x3), ret, \ - 2); \ - ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 6) & 0x3), ret, \ - 3); \ - vreinterpretq_m128i_s16(ret); \ - }) +// Subtract the lower single-precision (32-bit) floating-point element in b from +// the lower single-precision (32-bit) floating-point element in a, store the +// result in the lower element of dst, and copy the upper 3 packed elements from +// a to the upper elements of dst. 
+// +// dst[31:0] := a[31:0] - b[31:0] +// dst[127:32] := a[127:32] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_ss +FORCE_INLINE __m128 _mm_sub_ss(__m128 a, __m128 b) +{ + return _mm_move_ss(a, _mm_sub_ps(a, b)); +} -// FORCE_INLINE __m128i _mm_shufflelo_epi16(__m128i a, -// __constrange(0,255) int imm) -#if __has_builtin(__builtin_shufflevector) -#define _mm_shufflelo_epi16(a, imm) \ - __extension__({ \ - int16x8_t _input = vreinterpretq_s16_m128i(a); \ - int16x8_t _shuf = __builtin_shufflevector( \ - _input, _input, ((imm) & (0x3)), (((imm) >> 2) & 0x3), \ - (((imm) >> 4) & 0x3), (((imm) >> 6) & 0x3), 4, 5, 6, 7); \ - vreinterpretq_m128i_s16(_shuf); \ - }) -#else // generic -#define _mm_shufflelo_epi16(a, imm) _mm_shufflelo_epi16_function((a), (imm)) -#endif +// Macro: Transpose the 4x4 matrix formed by the 4 rows of single-precision +// (32-bit) floating-point elements in row0, row1, row2, and row3, and store the +// transposed matrix in these vectors (row0 now contains column 0, etc.). +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=MM_TRANSPOSE4_PS +#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \ + do { \ + float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \ + float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \ + row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \ + vget_low_f32(ROW23.val[0])); \ + row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \ + vget_low_f32(ROW23.val[1])); \ + row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \ + vget_high_f32(ROW23.val[0])); \ + row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \ + vget_high_f32(ROW23.val[1])); \ + } while (0) -// Shuffles the upper 4 signed or unsigned 16-bit integers in a as specified -// by imm. -// https://msdn.microsoft.com/en-us/library/13ywktbs(v=vs.100).aspx -// FORCE_INLINE __m128i _mm_shufflehi_epi16_function(__m128i a, -// __constrange(0,255) int -// imm) -#define _mm_shufflehi_epi16_function(a, imm) \ - __extension__({ \ - int16x8_t ret = vreinterpretq_s16_m128i(a); \ - int16x4_t highBits = vget_high_s16(ret); \ - ret = vsetq_lane_s16(vget_lane_s16(highBits, (imm) & (0x3)), ret, 4); \ - ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 2) & 0x3), ret, \ - 5); \ - ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 4) & 0x3), ret, \ - 6); \ - ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 6) & 0x3), ret, \ - 7); \ - vreinterpretq_m128i_s16(ret); \ - }) +// according to the documentation, these intrinsics behave the same as the +// non-'u' versions. We'll just alias them here. +#define _mm_ucomieq_ss _mm_comieq_ss +#define _mm_ucomige_ss _mm_comige_ss +#define _mm_ucomigt_ss _mm_comigt_ss +#define _mm_ucomile_ss _mm_comile_ss +#define _mm_ucomilt_ss _mm_comilt_ss +#define _mm_ucomineq_ss _mm_comineq_ss -// FORCE_INLINE __m128i _mm_shufflehi_epi16(__m128i a, -// __constrange(0,255) int imm) -#if __has_builtin(__builtin_shufflevector) -#define _mm_shufflehi_epi16(a, imm) \ - __extension__({ \ - int16x8_t _input = vreinterpretq_s16_m128i(a); \ - int16x8_t _shuf = __builtin_shufflevector( \ - _input, _input, 0, 1, 2, 3, ((imm) & (0x3)) + 4, \ - (((imm) >> 2) & 0x3) + 4, (((imm) >> 4) & 0x3) + 4, \ - (((imm) >> 6) & 0x3) + 4); \ - vreinterpretq_m128i_s16(_shuf); \ - }) -#else // generic -#define _mm_shufflehi_epi16(a, imm) _mm_shufflehi_epi16_function((a), (imm)) +// Return vector of type __m128i with undefined elements. 
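// Example (illustrative sketch, not part of the patch): transposing a 4x4
// matrix stored row-major in `m` with the _MM_TRANSPOSE4_PS macro above.
static inline void transpose4x4(float m[16])
{
    __m128 r0 = _mm_loadu_ps(m + 0);
    __m128 r1 = _mm_loadu_ps(m + 4);
    __m128 r2 = _mm_loadu_ps(m + 8);
    __m128 r3 = _mm_loadu_ps(m + 12);
    _MM_TRANSPOSE4_PS(r0, r1, r2, r3);
    _mm_storeu_ps(m + 0, r0);
    _mm_storeu_ps(m + 4, r1);
    _mm_storeu_ps(m + 8, r2);
    _mm_storeu_ps(m + 12, r3);
}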
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_undefined_si128 +FORCE_INLINE __m128i _mm_undefined_si128(void) +{ +#if defined(__GNUC__) || defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wuninitialized" #endif + __m128i a; + return a; +#if defined(__GNUC__) || defined(__clang__) +#pragma GCC diagnostic pop +#endif +} -// Shuffle double-precision (64-bit) floating-point elements using the control -// in imm8, and store the results in dst. +// Return vector of type __m128 with undefined elements. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_undefined_ps +FORCE_INLINE __m128 _mm_undefined_ps(void) +{ +#if defined(__GNUC__) || defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wuninitialized" +#endif + __m128 a; + return a; +#if defined(__GNUC__) || defined(__clang__) +#pragma GCC diagnostic pop +#endif +} + +// Selects and interleaves the upper two single-precision, floating-point values +// from a and b. // -// dst[63:0] := (imm8[0] == 0) ? a[63:0] : a[127:64] -// dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64] +// r0 := a2 +// r1 := b2 +// r2 := a3 +// r3 := b3 // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shuffle_pd -#if __has_builtin(__builtin_shufflevector) -#define _mm_shuffle_pd(a, b, imm8) \ - vreinterpretq_m128d_s64(__builtin_shufflevector( \ - vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b), imm8 & 0x1, \ - ((imm8 & 0x2) >> 1) + 2)) +// https://msdn.microsoft.com/en-us/library/skccxx7d%28v=vs.90%29.aspx +FORCE_INLINE __m128 _mm_unpackhi_ps(__m128 a, __m128 b) +{ +#if defined(__aarch64__) + return vreinterpretq_m128_f32( + vzip2q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); #else -#define _mm_shuffle_pd(a, b, imm8) \ - _mm_castsi128_pd(_mm_set_epi64x( \ - vgetq_lane_s64(vreinterpretq_s64_m128d(b), (imm8 & 0x2) >> 1), \ - vgetq_lane_s64(vreinterpretq_s64_m128d(a), imm8 & 0x1))) + float32x2_t a1 = vget_high_f32(vreinterpretq_f32_m128(a)); + float32x2_t b1 = vget_high_f32(vreinterpretq_f32_m128(b)); + float32x2x2_t result = vzip_f32(a1, b1); + return vreinterpretq_m128_f32(vcombine_f32(result.val[0], result.val[1])); #endif +} -// Blend packed 16-bit integers from a and b using control mask imm8, and store -// the results in dst. +// Selects and interleaves the lower two single-precision, floating-point values +// from a and b. // -// FOR j := 0 to 7 -// i := j*16 -// IF imm8[j] -// dst[i+15:i] := b[i+15:i] -// ELSE -// dst[i+15:i] := a[i+15:i] -// FI -// ENDFOR -// FORCE_INLINE __m128i _mm_blend_epi16(__m128i a, __m128i b, -// __constrange(0,255) int imm) -#define _mm_blend_epi16(a, b, imm) \ - __extension__({ \ - const uint16_t _mask[8] = {((imm) & (1 << 0)) ? 0xFFFF : 0x0000, \ - ((imm) & (1 << 1)) ? 0xFFFF : 0x0000, \ - ((imm) & (1 << 2)) ? 0xFFFF : 0x0000, \ - ((imm) & (1 << 3)) ? 0xFFFF : 0x0000, \ - ((imm) & (1 << 4)) ? 0xFFFF : 0x0000, \ - ((imm) & (1 << 5)) ? 0xFFFF : 0x0000, \ - ((imm) & (1 << 6)) ? 0xFFFF : 0x0000, \ - ((imm) & (1 << 7)) ? 0xFFFF : 0x0000}; \ - uint16x8_t _mask_vec = vld1q_u16(_mask); \ - uint16x8_t _a = vreinterpretq_u16_m128i(a); \ - uint16x8_t _b = vreinterpretq_u16_m128i(b); \ - vreinterpretq_m128i_u16(vbslq_u16(_mask_vec, _b, _a)); \ - }) - -// Blend packed 8-bit integers from a and b using mask, and store the results in -// dst. 
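// Example (illustrative sketch, not part of the patch): interleaving two
// vectors with the unpacks above; for x = {x0,x1,x2,x3} and y = {y0,y1,y2,y3}
// this yields lo = {x0,y0,x1,y1} and hi = {x2,y2,x3,y3}.
static inline void interleave_ps(__m128 x, __m128 y, __m128 *lo, __m128 *hi)
{
    *lo = _mm_unpacklo_ps(x, y);
    *hi = _mm_unpackhi_ps(x, y);
}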
+// r0 := a0 +// r1 := b0 +// r2 := a1 +// r3 := b1 // -// FOR j := 0 to 15 -// i := j*8 -// IF mask[i+7] -// dst[i+7:i] := b[i+7:i] -// ELSE -// dst[i+7:i] := a[i+7:i] -// FI -// ENDFOR -FORCE_INLINE __m128i _mm_blendv_epi8(__m128i _a, __m128i _b, __m128i _mask) +// https://msdn.microsoft.com/en-us/library/25st103b%28v=vs.90%29.aspx +FORCE_INLINE __m128 _mm_unpacklo_ps(__m128 a, __m128 b) { - // Use a signed shift right to create a mask with the sign bit - uint8x16_t mask = - vreinterpretq_u8_s8(vshrq_n_s8(vreinterpretq_s8_m128i(_mask), 7)); - uint8x16_t a = vreinterpretq_u8_m128i(_a); - uint8x16_t b = vreinterpretq_u8_m128i(_b); - return vreinterpretq_m128i_u8(vbslq_u8(mask, b, a)); +#if defined(__aarch64__) + return vreinterpretq_m128_f32( + vzip1q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); +#else + float32x2_t a1 = vget_low_f32(vreinterpretq_f32_m128(a)); + float32x2_t b1 = vget_low_f32(vreinterpretq_f32_m128(b)); + float32x2x2_t result = vzip_f32(a1, b1); + return vreinterpretq_m128_f32(vcombine_f32(result.val[0], result.val[1])); +#endif } -/* Shifts */ +// Computes bitwise EXOR (exclusive-or) of the four single-precision, +// floating-point values of a and b. +// https://msdn.microsoft.com/en-us/library/ss6k3wk8(v=vs.100).aspx +FORCE_INLINE __m128 _mm_xor_ps(__m128 a, __m128 b) +{ + return vreinterpretq_m128_s32( + veorq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b))); +} +/* SSE2 */ -// Shift packed 16-bit integers in a right by imm while shifting in sign -// bits, and store the results in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srai_epi16 -FORCE_INLINE __m128i _mm_srai_epi16(__m128i a, int imm) +// Adds the 8 signed or unsigned 16-bit integers in a to the 8 signed or +// unsigned 16-bit integers in b. +// https://msdn.microsoft.com/en-us/library/fceha5k4(v=vs.100).aspx +FORCE_INLINE __m128i _mm_add_epi16(__m128i a, __m128i b) { - const int count = (imm & ~15) ? 15 : imm; - return (__m128i) vshlq_s16((int16x8_t) a, vdupq_n_s16(-count)); + return vreinterpretq_m128i_s16( + vaddq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); } -// Shifts the 8 signed or unsigned 16-bit integers in a left by count bits while -// shifting in zeros. +// Adds the 4 signed or unsigned 32-bit integers in a to the 4 signed or +// unsigned 32-bit integers in b. // -// r0 := a0 << count -// r1 := a1 << count -// ... -// r7 := a7 << count +// r0 := a0 + b0 +// r1 := a1 + b1 +// r2 := a2 + b2 +// r3 := a3 + b3 // -// https://msdn.microsoft.com/en-us/library/es73bcsy(v=vs.90).aspx -#define _mm_slli_epi16(a, imm) \ - __extension__({ \ - __m128i ret; \ - if (unlikely((imm)) <= 0) { \ - ret = a; \ - } \ - if (unlikely((imm) > 15)) { \ - ret = _mm_setzero_si128(); \ - } else { \ - ret = vreinterpretq_m128i_s16( \ - vshlq_n_s16(vreinterpretq_s16_m128i(a), (imm))); \ - } \ - ret; \ - }) - -// Shifts the 4 signed or unsigned 32-bit integers in a left by count bits while -// shifting in zeros. 
: -// https://msdn.microsoft.com/en-us/library/z2k3bbtb%28v=vs.90%29.aspx -// FORCE_INLINE __m128i _mm_slli_epi32(__m128i a, __constrange(0,255) int imm) -FORCE_INLINE __m128i _mm_slli_epi32(__m128i a, int imm) +// https://msdn.microsoft.com/en-us/library/vstudio/09xs4fkk(v=vs.100).aspx +FORCE_INLINE __m128i _mm_add_epi32(__m128i a, __m128i b) { - if (unlikely(imm <= 0)) /* TODO: add constant range macro: [0, 255] */ - return a; - if (unlikely(imm > 31)) - return _mm_setzero_si128(); return vreinterpretq_m128i_s32( - vshlq_s32(vreinterpretq_s32_m128i(a), vdupq_n_s32(imm))); + vaddq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); } -// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and -// store the results in dst. -FORCE_INLINE __m128i _mm_slli_epi64(__m128i a, int imm) +// Adds the 4 signed or unsigned 64-bit integers in a to the 4 signed or +// unsigned 32-bit integers in b. +// https://msdn.microsoft.com/en-us/library/vstudio/09xs4fkk(v=vs.100).aspx +FORCE_INLINE __m128i _mm_add_epi64(__m128i a, __m128i b) { - if (unlikely(imm <= 0)) /* TODO: add constant range macro: [0, 255] */ - return a; - if (unlikely(imm > 63)) - return _mm_setzero_si128(); return vreinterpretq_m128i_s64( - vshlq_s64(vreinterpretq_s64_m128i(a), vdupq_n_s64(imm))); + vaddq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b))); } -// Shift packed 16-bit integers in a right by imm8 while shifting in zeros, and +// Adds the 16 signed or unsigned 8-bit integers in a to the 16 signed or +// unsigned 8-bit integers in b. +// https://technet.microsoft.com/en-us/subscriptions/yc7tcyzs(v=vs.90) +FORCE_INLINE __m128i _mm_add_epi8(__m128i a, __m128i b) +{ + return vreinterpretq_m128i_s8( + vaddq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); +} + +// Add packed double-precision (64-bit) floating-point elements in a and b, and // store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_pd +FORCE_INLINE __m128d _mm_add_pd(__m128d a, __m128d b) +{ +#if defined(__aarch64__) + return vreinterpretq_m128d_f64( + vaddq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); +#else + double *da = (double *) &a; + double *db = (double *) &b; + double c[2]; + c[0] = da[0] + db[0]; + c[1] = da[1] + db[1]; + return vld1q_f32((float32_t *) c); +#endif +} + +// Add the lower double-precision (64-bit) floating-point element in a and b, +// store the result in the lower element of dst, and copy the upper element from +// a to the upper element of dst. 
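A small illustration of the _mm_add_pd contract added above, i.e. an independent add in each 64-bit lane (illustrative only, not part of the patch; the demo name is made up and it assumes sse2neon.h is included so _mm_set_pd/_mm_storeu_pd are available):

#include <stdio.h>

static void add_pd_demo(void)  // hypothetical name, illustration only
{
    __m128d a = _mm_set_pd(2.0, 1.0);     // lane1 = 2.0, lane0 = 1.0
    __m128d b = _mm_set_pd(20.0, 10.0);
    double out[2];
    _mm_storeu_pd(out, _mm_add_pd(a, b));
    printf("%g %g\n", out[0], out[1]);    // 11 22
}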
// -// FOR j := 0 to 7 -// i := j*16 -// IF imm8[7:0] > 15 -// dst[i+15:i] := 0 -// ELSE -// dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0]) -// FI -// ENDFOR +// dst[63:0] := a[63:0] + b[63:0] +// dst[127:64] := a[127:64] // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srli_epi16 -#define _mm_srli_epi16(a, imm) \ - __extension__({ \ - __m128i ret; \ - if (unlikely(imm) == 0) { \ - ret = a; \ - } \ - if (likely(0 < (imm) && (imm) < 16)) { \ - ret = vreinterpretq_m128i_u16( \ - vshlq_u16(vreinterpretq_u16_m128i(a), vdupq_n_s16(-imm))); \ - } else { \ - ret = _mm_setzero_si128(); \ - } \ - ret; \ - }) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_sd +FORCE_INLINE __m128d _mm_add_sd(__m128d a, __m128d b) +{ +#if defined(__aarch64__) + return _mm_move_sd(a, _mm_add_pd(a, b)); +#else + double *da = (double *) &a; + double *db = (double *) &b; + double c[2]; + c[0] = da[0] + db[0]; + c[1] = da[1]; + return vld1q_f32((float32_t *) c); +#endif +} -// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and -// store the results in dst. +// Add 64-bit integers a and b, and store the result in dst. // -// FOR j := 0 to 3 -// i := j*32 -// IF imm8[7:0] > 31 -// dst[i+31:i] := 0 -// ELSE -// dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0]) -// FI -// ENDFOR +// dst[63:0] := a[63:0] + b[63:0] // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srli_epi32 -// FORCE_INLINE __m128i _mm_srli_epi32(__m128i a, __constrange(0,255) int imm) -#define _mm_srli_epi32(a, imm) \ - __extension__({ \ - __m128i ret; \ - if (unlikely((imm) == 0)) { \ - ret = a; \ - } \ - if (likely(0 < (imm) && (imm) < 32)) { \ - ret = vreinterpretq_m128i_u32( \ - vshlq_u32(vreinterpretq_u32_m128i(a), vdupq_n_s32(-imm))); \ - } else { \ - ret = _mm_setzero_si128(); \ - } \ - ret; \ - }) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_si64 +FORCE_INLINE __m64 _mm_add_si64(__m64 a, __m64 b) +{ + return vreinterpret_m64_s64( + vadd_s64(vreinterpret_s64_m64(a), vreinterpret_s64_m64(b))); +} -// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and -// store the results in dst. +// Adds the 8 signed 16-bit integers in a to the 8 signed 16-bit integers in b +// and saturates. // -// FOR j := 0 to 1 -// i := j*64 -// IF imm8[7:0] > 63 -// dst[i+63:i] := 0 -// ELSE -// dst[i+63:i] := ZeroExtend64(a[i+63:i] >> imm8[7:0]) -// FI -// ENDFOR +// r0 := SignedSaturate(a0 + b0) +// r1 := SignedSaturate(a1 + b1) +// ... +// r7 := SignedSaturate(a7 + b7) // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srli_epi64 -#define _mm_srli_epi64(a, imm) \ - __extension__({ \ - __m128i ret; \ - if (unlikely((imm) == 0)) { \ - ret = a; \ - } \ - if (likely(0 < (imm) && (imm) < 64)) { \ - ret = vreinterpretq_m128i_u64( \ - vshlq_u64(vreinterpretq_u64_m128i(a), vdupq_n_s64(-imm))); \ - } else { \ - ret = _mm_setzero_si128(); \ - } \ - ret; \ - }) +// https://msdn.microsoft.com/en-us/library/1a306ef8(v=vs.100).aspx +FORCE_INLINE __m128i _mm_adds_epi16(__m128i a, __m128i b) +{ + return vreinterpretq_m128i_s16( + vqaddq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); +} -// Shift packed 32-bit integers in a right by imm8 while shifting in sign bits, -// and store the results in dst. +// Add packed signed 8-bit integers in a and b using saturation, and store the +// results in dst. 
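To make the saturation wording above concrete, a short sketch contrasting the wrapping and saturating 16-bit adds (illustrative only, not part of the patch; assumes sse2neon.h is included and that _mm_set1_epi16/_mm_extract_epi16 are provided elsewhere in this header):

#include <stdint.h>
#include <stdio.h>

static void adds_demo(void)  // hypothetical name, illustration only
{
    __m128i a = _mm_set1_epi16(INT16_MAX);
    __m128i b = _mm_set1_epi16(1);
    int wrap = (int16_t) _mm_extract_epi16(_mm_add_epi16(a, b), 0);   // wraps around
    int sat  = (int16_t) _mm_extract_epi16(_mm_adds_epi16(a, b), 0);  // clamps at INT16_MAX
    printf("%d %d\n", wrap, sat);  // -32768 32767
}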
// -// FOR j := 0 to 3 -// i := j*32 -// IF imm8[7:0] > 31 -// dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) -// ELSE -// dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0]) -// FI +// FOR j := 0 to 15 +// i := j*8 +// dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] ) // ENDFOR // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srai_epi32 -// FORCE_INLINE __m128i _mm_srai_epi32(__m128i a, __constrange(0,255) int imm) -#define _mm_srai_epi32(a, imm) \ - __extension__({ \ - __m128i ret; \ - if (unlikely((imm) == 0)) { \ - ret = a; \ - } \ - if (likely(0 < (imm) && (imm) < 32)) { \ - ret = vreinterpretq_m128i_s32( \ - vshlq_s32(vreinterpretq_s32_m128i(a), vdupq_n_s32(-imm))); \ - } else { \ - ret = vreinterpretq_m128i_s32( \ - vshrq_n_s32(vreinterpretq_s32_m128i(a), 31)); \ - } \ - ret; \ - }) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_adds_epi8 +FORCE_INLINE __m128i _mm_adds_epi8(__m128i a, __m128i b) +{ + return vreinterpretq_m128i_s8( + vqaddq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); +} -// Shifts the 128 - bit value in a right by imm bytes while shifting in -// zeros.imm must be an immediate. -// -// r := srl(a, imm*8) -// -// https://msdn.microsoft.com/en-us/library/305w28yz(v=vs.100).aspx -// FORCE_INLINE _mm_srli_si128(__m128i a, __constrange(0,255) int imm) -#define _mm_srli_si128(a, imm) \ - __extension__({ \ - __m128i ret; \ - if (unlikely((imm) <= 0)) { \ - ret = a; \ - } \ - if (unlikely((imm) > 15)) { \ - ret = _mm_setzero_si128(); \ - } else { \ - ret = vreinterpretq_m128i_s8( \ - vextq_s8(vreinterpretq_s8_m128i(a), vdupq_n_s8(0), (imm))); \ - } \ - ret; \ - }) +// Add packed unsigned 16-bit integers in a and b using saturation, and store +// the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_adds_epu16 +FORCE_INLINE __m128i _mm_adds_epu16(__m128i a, __m128i b) +{ + return vreinterpretq_m128i_u16( + vqaddq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b))); +} -// Shifts the 128-bit value in a left by imm bytes while shifting in zeros. imm -// must be an immediate. -// -// r := a << (imm * 8) -// -// https://msdn.microsoft.com/en-us/library/34d3k2kt(v=vs.100).aspx -// FORCE_INLINE __m128i _mm_slli_si128(__m128i a, __constrange(0,255) int imm) -#define _mm_slli_si128(a, imm) \ - __extension__({ \ - __m128i ret; \ - if (unlikely((imm) <= 0)) { \ - ret = a; \ - } \ - if (unlikely((imm) > 15)) { \ - ret = _mm_setzero_si128(); \ - } else { \ - ret = vreinterpretq_m128i_s8(vextq_s8( \ - vdupq_n_s8(0), vreinterpretq_s8_m128i(a), 16 - (imm))); \ - } \ - ret; \ - }) +// Adds the 16 unsigned 8-bit integers in a to the 16 unsigned 8-bit integers in +// b and saturates.. +// https://msdn.microsoft.com/en-us/library/9hahyddy(v=vs.100).aspx +FORCE_INLINE __m128i _mm_adds_epu8(__m128i a, __m128i b) +{ + return vreinterpretq_m128i_u8( + vqaddq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b))); +} -// Shifts the 8 signed or unsigned 16-bit integers in a left by count bits while -// shifting in zeros. +// Compute the bitwise AND of packed double-precision (64-bit) floating-point +// elements in a and b, and store the results in dst. // -// r0 := a0 << count -// r1 := a1 << count -// ... 
-// r7 := a7 << count +// FOR j := 0 to 1 +// i := j*64 +// dst[i+63:i] := a[i+63:i] AND b[i+63:i] +// ENDFOR // -// https://msdn.microsoft.com/en-us/library/c79w388h(v%3dvs.90).aspx -FORCE_INLINE __m128i _mm_sll_epi16(__m128i a, __m128i count) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_and_pd +FORCE_INLINE __m128d _mm_and_pd(__m128d a, __m128d b) { - uint64_t c = vreinterpretq_nth_u64_m128i(count, 0); - if (unlikely(c > 15)) - return _mm_setzero_si128(); - - int16x8_t vc = vdupq_n_s16((int16_t) c); - return vreinterpretq_m128i_s16(vshlq_s16(vreinterpretq_s16_m128i(a), vc)); + return vreinterpretq_m128d_s64( + vandq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b))); } -// Shifts the 4 signed or unsigned 32-bit integers in a left by count bits while -// shifting in zeros. +// Computes the bitwise AND of the 128-bit value in a and the 128-bit value in +// b. // -// r0 := a0 << count -// r1 := a1 << count -// r2 := a2 << count -// r3 := a3 << count +// r := a & b // -// https://msdn.microsoft.com/en-us/library/6fe5a6s9(v%3dvs.90).aspx -FORCE_INLINE __m128i _mm_sll_epi32(__m128i a, __m128i count) +// https://msdn.microsoft.com/en-us/library/vstudio/6d1txsa8(v=vs.100).aspx +FORCE_INLINE __m128i _mm_and_si128(__m128i a, __m128i b) { - uint64_t c = vreinterpretq_nth_u64_m128i(count, 0); - if (unlikely(c > 31)) - return _mm_setzero_si128(); - - int32x4_t vc = vdupq_n_s32((int32_t) c); - return vreinterpretq_m128i_s32(vshlq_s32(vreinterpretq_s32_m128i(a), vc)); + return vreinterpretq_m128i_s32( + vandq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); } -// Shifts the 2 signed or unsigned 64-bit integers in a left by count bits while -// shifting in zeros. +// Compute the bitwise NOT of packed double-precision (64-bit) floating-point +// elements in a and then AND with b, and store the results in dst. // -// r0 := a0 << count -// r1 := a1 << count +// FOR j := 0 to 1 +// i := j*64 +// dst[i+63:i] := ((NOT a[i+63:i]) AND b[i+63:i]) +// ENDFOR // -// https://msdn.microsoft.com/en-us/library/6ta9dffd(v%3dvs.90).aspx -FORCE_INLINE __m128i _mm_sll_epi64(__m128i a, __m128i count) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_andnot_pd +FORCE_INLINE __m128d _mm_andnot_pd(__m128d a, __m128d b) { - uint64_t c = vreinterpretq_nth_u64_m128i(count, 0); - if (unlikely(c > 63)) - return _mm_setzero_si128(); - - int64x2_t vc = vdupq_n_s64((int64_t) c); - return vreinterpretq_m128i_s64(vshlq_s64(vreinterpretq_s64_m128i(a), vc)); + // *NOTE* argument swap + return vreinterpretq_m128d_s64( + vbicq_s64(vreinterpretq_s64_m128d(b), vreinterpretq_s64_m128d(a))); } -// Shifts the 8 signed or unsigned 16-bit integers in a right by count bits -// while shifting in zeros. +// Computes the bitwise AND of the 128-bit value in b and the bitwise NOT of the +// 128-bit value in a. // -// r0 := srl(a0, count) -// r1 := srl(a1, count) -// ... 
-// r7 := srl(a7, count) +// r := (~a) & b // -// https://msdn.microsoft.com/en-us/library/wd5ax830(v%3dvs.90).aspx -FORCE_INLINE __m128i _mm_srl_epi16(__m128i a, __m128i count) +// https://msdn.microsoft.com/en-us/library/vstudio/1beaceh8(v=vs.100).aspx +FORCE_INLINE __m128i _mm_andnot_si128(__m128i a, __m128i b) { - uint64_t c = vreinterpretq_nth_u64_m128i(count, 0); - if (unlikely(c > 15)) - return _mm_setzero_si128(); - - int16x8_t vc = vdupq_n_s16(-(int16_t) c); - return vreinterpretq_m128i_u16(vshlq_u16(vreinterpretq_u16_m128i(a), vc)); + return vreinterpretq_m128i_s32( + vbicq_s32(vreinterpretq_s32_m128i(b), + vreinterpretq_s32_m128i(a))); // *NOTE* argument swap } -// Shifts the 4 signed or unsigned 32-bit integers in a right by count bits -// while shifting in zeros. +// Computes the average of the 8 unsigned 16-bit integers in a and the 8 +// unsigned 16-bit integers in b and rounds. // -// r0 := srl(a0, count) -// r1 := srl(a1, count) -// r2 := srl(a2, count) -// r3 := srl(a3, count) +// r0 := (a0 + b0) / 2 +// r1 := (a1 + b1) / 2 +// ... +// r7 := (a7 + b7) / 2 // -// https://msdn.microsoft.com/en-us/library/a9cbttf4(v%3dvs.90).aspx -FORCE_INLINE __m128i _mm_srl_epi32(__m128i a, __m128i count) +// https://msdn.microsoft.com/en-us/library/vstudio/y13ca3c8(v=vs.90).aspx +FORCE_INLINE __m128i _mm_avg_epu16(__m128i a, __m128i b) { - uint64_t c = vreinterpretq_nth_u64_m128i(count, 0); - if (unlikely(c > 31)) - return _mm_setzero_si128(); - - int32x4_t vc = vdupq_n_s32(-(int32_t) c); - return vreinterpretq_m128i_u32(vshlq_u32(vreinterpretq_u32_m128i(a), vc)); + return (__m128i) vrhaddq_u16(vreinterpretq_u16_m128i(a), + vreinterpretq_u16_m128i(b)); } -// Shifts the 2 signed or unsigned 64-bit integers in a right by count bits -// while shifting in zeros. +// Computes the average of the 16 unsigned 8-bit integers in a and the 16 +// unsigned 8-bit integers in b and rounds. // -// r0 := srl(a0, count) -// r1 := srl(a1, count) +// r0 := (a0 + b0) / 2 +// r1 := (a1 + b1) / 2 +// ... +// r15 := (a15 + b15) / 2 // -// https://msdn.microsoft.com/en-us/library/yf6cf9k8(v%3dvs.90).aspx -FORCE_INLINE __m128i _mm_srl_epi64(__m128i a, __m128i count) +// https://msdn.microsoft.com/en-us/library/vstudio/8zwh554a(v%3dvs.90).aspx +FORCE_INLINE __m128i _mm_avg_epu8(__m128i a, __m128i b) { - uint64_t c = vreinterpretq_nth_u64_m128i(count, 0); - if (unlikely(c > 63)) - return _mm_setzero_si128(); - - int64x2_t vc = vdupq_n_s64(-(int64_t) c); - return vreinterpretq_m128i_u64(vshlq_u64(vreinterpretq_u64_m128i(a), vc)); + return vreinterpretq_m128i_u8( + vrhaddq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b))); } -// NEON does not provide a version of this function. -// Creates a 16-bit mask from the most significant bits of the 16 signed or -// unsigned 8-bit integers in a and zero extends the upper bits. -// https://msdn.microsoft.com/en-us/library/vstudio/s090c8fk(v=vs.100).aspx -FORCE_INLINE int _mm_movemask_epi8(__m128i a) -{ - // Use increasingly wide shifts+adds to collect the sign bits - // together. - // Since the widening shifts would be rather confusing to follow in little - // endian, everything will be illustrated in big endian order instead. This - // has a different result - the bits would actually be reversed on a big - // endian machine. - - // Starting input (only half the elements are shown): - // 89 ff 1d c0 00 10 99 33 - uint8x16_t input = vreinterpretq_u8_m128i(a); - - // Shift out everything but the sign bits with an unsigned shift right. 
- // - // Bytes of the vector:: - // 89 ff 1d c0 00 10 99 33 - // \ \ \ \ \ \ \ \ high_bits = (uint16x4_t)(input >> 7) - // | | | | | | | | - // 01 01 00 01 00 00 01 00 - // - // Bits of first important lane(s): - // 10001001 (89) - // \______ - // | - // 00000001 (01) - uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(input, 7)); - - // Merge the even lanes together with a 16-bit unsigned shift right + add. - // 'xx' represents garbage data which will be ignored in the final result. - // In the important bytes, the add functions like a binary OR. - // - // 01 01 00 01 00 00 01 00 - // \_ | \_ | \_ | \_ | paired16 = (uint32x4_t)(input + (input >> 7)) - // \| \| \| \| - // xx 03 xx 01 xx 00 xx 02 - // - // 00000001 00000001 (01 01) - // \_______ | - // \| - // xxxxxxxx xxxxxx11 (xx 03) - uint32x4_t paired16 = - vreinterpretq_u32_u16(vsraq_n_u16(high_bits, high_bits, 7)); +// Shift a left by imm8 bytes while shifting in zeros, and store the results in +// dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_bslli_si128 +#define _mm_bslli_si128(a, imm) _mm_slli_si128(a, imm) - // Repeat with a wider 32-bit shift + add. - // xx 03 xx 01 xx 00 xx 02 - // \____ | \____ | paired32 = (uint64x1_t)(paired16 + (paired16 >> - // 14)) - // \| \| - // xx xx xx 0d xx xx xx 02 - // - // 00000011 00000001 (03 01) - // \\_____ || - // '----.\|| - // xxxxxxxx xxxx1101 (xx 0d) - uint64x2_t paired32 = - vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14)); +// Shift a right by imm8 bytes while shifting in zeros, and store the results in +// dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_bsrli_si128 +#define _mm_bsrli_si128(a, imm) _mm_srli_si128(a, imm) - // Last, an even wider 64-bit shift + add to get our result in the low 8 bit - // lanes. xx xx xx 0d xx xx xx 02 - // \_________ | paired64 = (uint8x8_t)(paired32 + (paired32 >> - // 28)) - // \| - // xx xx xx xx xx xx xx d2 - // - // 00001101 00000010 (0d 02) - // \ \___ | | - // '---. \| | - // xxxxxxxx 11010010 (xx d2) - uint8x16_t paired64 = - vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28)); +// Cast vector of type __m128d to type __m128. This intrinsic is only used for +// compilation and does not generate any instructions, thus it has zero latency. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castpd_ps +FORCE_INLINE __m128 _mm_castpd_ps(__m128d a) +{ + return vreinterpretq_m128_s64(vreinterpretq_s64_m128d(a)); +} - // Extract the low 8 bits from each 64-bit lane with 2 8-bit extracts. - // xx xx xx xx xx xx xx d2 - // || return paired64[0] - // d2 - // Note: Little endian would return the correct value 4b (01001011) instead. - return vgetq_lane_u8(paired64, 0) | ((int) vgetq_lane_u8(paired64, 8) << 8); +// Cast vector of type __m128d to type __m128i. This intrinsic is only used for +// compilation and does not generate any instructions, thus it has zero latency. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castpd_si128 +FORCE_INLINE __m128i _mm_castpd_si128(__m128d a) +{ + return vreinterpretq_m128i_s64(vreinterpretq_s64_m128d(a)); } -// Copy the lower 64-bit integer in a to dst. -// -// dst[63:0] := a[63:0] -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movepi64_pi64 -FORCE_INLINE __m64 _mm_movepi64_pi64(__m128i a) +// Cast vector of type __m128 to type __m128d. This intrinsic is only used for +// compilation and does not generate any instructions, thus it has zero latency. 
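The removed _mm_movemask_epi8 body above documents the NEON shift-and-accumulate trick at length; as a quick reminder of the intrinsic's observable contract, a usage sketch (illustrative only, not part of the patch; assumes sse2neon.h is included):

#include <stdio.h>

static void movemask_demo(void)  // hypothetical name, illustration only
{
    // Only bytes whose sign bit is set contribute a 1 at their lane index.
    __m128i v = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, -128,
                             0, 0, 0, 0, 0, 0, 0, -1);
    printf("0x%04x\n", _mm_movemask_epi8(v));  // 0x0101 (lanes 8 and 0)
}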
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castps_pd +FORCE_INLINE __m128d _mm_castps_pd(__m128 a) { - return vreinterpret_m64_s64(vget_low_s64(vreinterpretq_s64_m128i(a))); + return vreinterpretq_m128d_s32(vreinterpretq_s32_m128(a)); } -// Copy the 64-bit integer a to the lower element of dst, and zero the upper -// element. -// -// dst[63:0] := a[63:0] -// dst[127:64] := 0 -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movpi64_epi64 -FORCE_INLINE __m128i _mm_movpi64_epi64(__m64 a) +// Applies a type cast to reinterpret four 32-bit floating point values passed +// in as a 128-bit parameter as packed 32-bit integers. +// https://msdn.microsoft.com/en-us/library/bb514099.aspx +FORCE_INLINE __m128i _mm_castps_si128(__m128 a) { - return vreinterpretq_m128i_s64( - vcombine_s64(vreinterpret_s64_m64(a), vdup_n_s64(0))); + return vreinterpretq_m128i_s32(vreinterpretq_s32_m128(a)); } -// NEON does not provide this method -// Creates a 4-bit mask from the most significant bits of the four -// single-precision, floating-point values. -// https://msdn.microsoft.com/en-us/library/vstudio/4490ys29(v=vs.100).aspx -FORCE_INLINE int _mm_movemask_ps(__m128 a) +// Cast vector of type __m128i to type __m128d. This intrinsic is only used for +// compilation and does not generate any instructions, thus it has zero latency. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castsi128_pd +FORCE_INLINE __m128d _mm_castsi128_pd(__m128i a) { - uint32x4_t input = vreinterpretq_u32_m128(a); #if defined(__aarch64__) - static const int32x4_t shift = {0, 1, 2, 3}; - uint32x4_t tmp = vshrq_n_u32(input, 31); - return vaddvq_u32(vshlq_u32(tmp, shift)); + return vreinterpretq_m128d_f64(vreinterpretq_f64_m128i(a)); #else - // Uses the exact same method as _mm_movemask_epi8, see that for details. - // Shift out everything but the sign bits with a 32-bit unsigned shift - // right. - uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(input, 31)); - // Merge the two pairs together with a 64-bit unsigned shift right + add. - uint8x16_t paired = - vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31)); - // Extract the result. - return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2); + return vreinterpretq_m128d_f32(vreinterpretq_f32_m128i(a)); #endif } -// Compute the bitwise NOT of a and then AND with a 128-bit vector containing -// all 1's, and return 1 if the result is zero, otherwise return 0. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_test_all_ones -FORCE_INLINE int _mm_test_all_ones(__m128i a) +// Applies a type cast to reinterpret four 32-bit integers passed in as a +// 128-bit parameter as packed 32-bit floating point values. +// https://msdn.microsoft.com/en-us/library/bb514029.aspx +FORCE_INLINE __m128 _mm_castsi128_ps(__m128i a) { - return (uint64_t)(vgetq_lane_s64(a, 0) & vgetq_lane_s64(a, 1)) == - ~(uint64_t) 0; + return vreinterpretq_m128_s32(vreinterpretq_s32_m128i(a)); } -// Compute the bitwise AND of 128 bits (representing integer data) in a and -// mask, and return 1 if the result is zero, otherwise return 0. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_test_all_zeros -FORCE_INLINE int _mm_test_all_zeros(__m128i a, __m128i mask) +// Cache line containing p is flushed and invalidated from all caches in the +// coherency domain. 
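Since several _mm_cast* helpers are introduced above, a short sketch showing that they are pure bit-pattern reinterpretations rather than value conversions (illustrative only, not part of the patch; assumes sse2neon.h is included and _mm_set1_ps/_mm_cvtsi128_si32 are available elsewhere in this header):

#include <stdio.h>

static void cast_demo(void)  // hypothetical name, illustration only
{
    __m128 f = _mm_set1_ps(1.0f);            // each lane holds the bits 0x3F800000
    __m128i bits = _mm_castps_si128(f);      // reinterpret, no conversion
    printf("0x%08x\n", _mm_cvtsi128_si32(bits));  // 0x3f800000
    f = _mm_castsi128_ps(bits);              // the round trip is bit-exact
    (void) f;
}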
: +// https://msdn.microsoft.com/en-us/library/ba08y07y(v=vs.100).aspx +FORCE_INLINE void _mm_clflush(void const *p) { - int64x2_t a_and_mask = - vandq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(mask)); - return (vgetq_lane_s64(a_and_mask, 0) | vgetq_lane_s64(a_and_mask, 1)) ? 0 - : 1; + (void) p; + // no corollary for Neon? } -/* Math operations */ +// Compares the 8 signed or unsigned 16-bit integers in a and the 8 signed or +// unsigned 16-bit integers in b for equality. +// https://msdn.microsoft.com/en-us/library/2ay060te(v=vs.100).aspx +FORCE_INLINE __m128i _mm_cmpeq_epi16(__m128i a, __m128i b) +{ + return vreinterpretq_m128i_u16( + vceqq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); +} -// Subtracts the four single-precision, floating-point values of a and b. -// -// r0 := a0 - b0 -// r1 := a1 - b1 -// r2 := a2 - b2 -// r3 := a3 - b3 -// -// https://msdn.microsoft.com/en-us/library/vstudio/1zad2k61(v=vs.100).aspx -FORCE_INLINE __m128 _mm_sub_ps(__m128 a, __m128 b) +// Compare packed 32-bit integers in a and b for equality, and store the results +// in dst +FORCE_INLINE __m128i _mm_cmpeq_epi32(__m128i a, __m128i b) { - return vreinterpretq_m128_f32( - vsubq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); + return vreinterpretq_m128i_u32( + vceqq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); } -// Subtract the lower single-precision (32-bit) floating-point element in b from -// the lower single-precision (32-bit) floating-point element in a, store the -// result in the lower element of dst, and copy the upper 3 packed elements from -// a to the upper elements of dst. -// -// dst[31:0] := a[31:0] - b[31:0] -// dst[127:32] := a[127:32] -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_ss -FORCE_INLINE __m128 _mm_sub_ss(__m128 a, __m128 b) +// Compares the 16 signed or unsigned 8-bit integers in a and the 16 signed or +// unsigned 8-bit integers in b for equality. +// https://msdn.microsoft.com/en-us/library/windows/desktop/bz5xk21a(v=vs.90).aspx +FORCE_INLINE __m128i _mm_cmpeq_epi8(__m128i a, __m128i b) { - return _mm_move_ss(a, _mm_sub_ps(a, b)); + return vreinterpretq_m128i_u8( + vceqq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); } -// Subtract 2 packed 64-bit integers in b from 2 packed 64-bit integers in a, -// and store the results in dst. -// r0 := a0 - b0 -// r1 := a1 - b1 -FORCE_INLINE __m128i _mm_sub_epi64(__m128i a, __m128i b) +// Compare packed double-precision (64-bit) floating-point elements in a and b +// for equality, and store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_pd +FORCE_INLINE __m128d _mm_cmpeq_pd(__m128d a, __m128d b) { - return vreinterpretq_m128i_s64( - vsubq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b))); +#if defined(__aarch64__) + return vreinterpretq_m128d_u64( + vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); +#else + // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi) + uint32x4_t cmp = + vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(b)); + uint32x4_t swapped = vrev64q_u32(cmp); + return vreinterpretq_m128d_u32(vandq_u32(cmp, swapped)); +#endif } -// Subtracts the 4 signed or unsigned 32-bit integers of b from the 4 signed or -// unsigned 32-bit integers of a. 
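On the non-AArch64 path above, _mm_cmpeq_pd builds each 64-bit result from two 32-bit lane equalities ANDed with their 64-bit-swapped copy, so a lane is all ones only when both halves match. Either way, the observable contract is an all-ones/all-zeros mask per lane, e.g. (illustrative only, not part of the patch; assumes sse2neon.h is included):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void cmpeq_pd_demo(void)  // hypothetical name, illustration only
{
    __m128d a = _mm_set_pd(2.0, 1.0);
    __m128d b = _mm_set_pd(9.0, 1.0);
    double out[2];
    _mm_storeu_pd(out, _mm_cmpeq_pd(a, b));
    uint64_t bits[2];
    memcpy(bits, out, sizeof(bits));  // inspect the raw mask bits
    printf("%016llx %016llx\n", (unsigned long long) bits[0],
           (unsigned long long) bits[1]);  // ffffffffffffffff 0000000000000000
}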
-// -// r0 := a0 - b0 -// r1 := a1 - b1 -// r2 := a2 - b2 -// r3 := a3 - b3 -// -// https://msdn.microsoft.com/en-us/library/vstudio/fhh866h0(v=vs.100).aspx -FORCE_INLINE __m128i _mm_sub_epi32(__m128i a, __m128i b) +// Compare the lower double-precision (64-bit) floating-point elements in a and +// b for equality, store the result in the lower element of dst, and copy the +// upper element from a to the upper element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_sd +FORCE_INLINE __m128d _mm_cmpeq_sd(__m128d a, __m128d b) { - return vreinterpretq_m128i_s32( - vsubq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); + return _mm_move_sd(a, _mm_cmpeq_pd(a, b)); } -// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and -// store the results in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_epi16 -FORCE_INLINE __m128i _mm_sub_epi16(__m128i a, __m128i b) +// Compare packed double-precision (64-bit) floating-point elements in a and b +// for greater-than-or-equal, and store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpge_pd +FORCE_INLINE __m128d _mm_cmpge_pd(__m128d a, __m128d b) { - return vreinterpretq_m128i_s16( - vsubq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); +#if defined(__aarch64__) + return vreinterpretq_m128d_u64( + vcgeq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); +#else + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b)); + uint64_t d[2]; + d[0] = (*(double *) &a0) >= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0); + d[1] = (*(double *) &a1) >= (*(double *) &b1) ? ~UINT64_C(0) : UINT64_C(0); + + return vreinterpretq_m128d_u64(vld1q_u64(d)); +#endif } -// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and -// store the results in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_epi8 -FORCE_INLINE __m128i _mm_sub_epi8(__m128i a, __m128i b) +// Compare the lower double-precision (64-bit) floating-point elements in a and +// b for greater-than-or-equal, store the result in the lower element of dst, +// and copy the upper element from a to the upper element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpge_sd +FORCE_INLINE __m128d _mm_cmpge_sd(__m128d a, __m128d b) { - return vreinterpretq_m128i_s8( - vsubq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); +#if defined(__aarch64__) + return _mm_move_sd(a, _mm_cmpge_pd(a, b)); +#else + // expand "_mm_cmpge_pd()" to reduce unnecessary operations + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + uint64_t d[2]; + d[0] = (*(double *) &a0) >= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0); + d[1] = a1; + + return vreinterpretq_m128d_u64(vld1q_u64(d)); +#endif } -// Subtract 64-bit integer b from 64-bit integer a, and store the result in dst. +// Compares the 8 signed 16-bit integers in a and the 8 signed 16-bit integers +// in b for greater than. // -// dst[63:0] := a[63:0] - b[63:0] +// r0 := (a0 > b0) ? 0xffff : 0x0 +// r1 := (a1 > b1) ? 0xffff : 0x0 +// ... 
+// r7 := (a7 > b7) ? 0xffff : 0x0 // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_si64 -FORCE_INLINE __m64 _mm_sub_si64(__m64 a, __m64 b) +// https://technet.microsoft.com/en-us/library/xd43yfsa(v=vs.100).aspx +FORCE_INLINE __m128i _mm_cmpgt_epi16(__m128i a, __m128i b) { - return vreinterpret_m64_s64( - vsub_s64(vreinterpret_s64_m64(a), vreinterpret_s64_m64(b))); + return vreinterpretq_m128i_u16( + vcgtq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); } -// Subtracts the 8 unsigned 16-bit integers of bfrom the 8 unsigned 16-bit -// integers of a and saturates.. -// https://technet.microsoft.com/en-us/subscriptions/index/f44y0s19(v=vs.90).aspx -FORCE_INLINE __m128i _mm_subs_epu16(__m128i a, __m128i b) +// Compares the 4 signed 32-bit integers in a and the 4 signed 32-bit integers +// in b for greater than. +// https://msdn.microsoft.com/en-us/library/vstudio/1s9f2z0y(v=vs.100).aspx +FORCE_INLINE __m128i _mm_cmpgt_epi32(__m128i a, __m128i b) { - return vreinterpretq_m128i_u16( - vqsubq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b))); + return vreinterpretq_m128i_u32( + vcgtq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); } -// Subtracts the 16 unsigned 8-bit integers of b from the 16 unsigned 8-bit -// integers of a and saturates. +// Compares the 16 signed 8-bit integers in a and the 16 signed 8-bit integers +// in b for greater than. // -// r0 := UnsignedSaturate(a0 - b0) -// r1 := UnsignedSaturate(a1 - b1) +// r0 := (a0 > b0) ? 0xff : 0x0 +// r1 := (a1 > b1) ? 0xff : 0x0 // ... -// r15 := UnsignedSaturate(a15 - b15) +// r15 := (a15 > b15) ? 0xff : 0x0 // -// https://technet.microsoft.com/en-us/subscriptions/yadkxc18(v=vs.90) -FORCE_INLINE __m128i _mm_subs_epu8(__m128i a, __m128i b) +// https://msdn.microsoft.com/zh-tw/library/wf45zt2b(v=vs.100).aspx +FORCE_INLINE __m128i _mm_cmpgt_epi8(__m128i a, __m128i b) { return vreinterpretq_m128i_u8( - vqsubq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b))); + vcgtq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); } -// Subtracts the 16 signed 8-bit integers of b from the 16 signed 8-bit integers -// of a and saturates. -// -// r0 := SignedSaturate(a0 - b0) -// r1 := SignedSaturate(a1 - b1) -// ... -// r15 := SignedSaturate(a15 - b15) -// -// https://technet.microsoft.com/en-us/subscriptions/by7kzks1(v=vs.90) -FORCE_INLINE __m128i _mm_subs_epi8(__m128i a, __m128i b) +// Compare packed double-precision (64-bit) floating-point elements in a and b +// for greater-than, and store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpgt_pd +FORCE_INLINE __m128d _mm_cmpgt_pd(__m128d a, __m128d b) { - return vreinterpretq_m128i_s8( - vqsubq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); +#if defined(__aarch64__) + return vreinterpretq_m128d_u64( + vcgtq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); +#else + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b)); + uint64_t d[2]; + d[0] = (*(double *) &a0) > (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0); + d[1] = (*(double *) &a1) > (*(double *) &b1) ? 
~UINT64_C(0) : UINT64_C(0); + + return vreinterpretq_m128d_u64(vld1q_u64(d)); +#endif } -// Subtracts the 8 signed 16-bit integers of b from the 8 signed 16-bit integers -// of a and saturates. -// -// r0 := SignedSaturate(a0 - b0) -// r1 := SignedSaturate(a1 - b1) -// ... -// r7 := SignedSaturate(a7 - b7) -// -// https://technet.microsoft.com/en-us/subscriptions/3247z5b8(v=vs.90) -FORCE_INLINE __m128i _mm_subs_epi16(__m128i a, __m128i b) +// Compare the lower double-precision (64-bit) floating-point elements in a and +// b for greater-than, store the result in the lower element of dst, and copy +// the upper element from a to the upper element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpgt_sd +FORCE_INLINE __m128d _mm_cmpgt_sd(__m128d a, __m128d b) { - return vreinterpretq_m128i_s16( - vqsubq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); +#if defined(__aarch64__) + return _mm_move_sd(a, _mm_cmpgt_pd(a, b)); +#else + // expand "_mm_cmpge_pd()" to reduce unnecessary operations + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + uint64_t d[2]; + d[0] = (*(double *) &a0) > (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0); + d[1] = a1; + + return vreinterpretq_m128d_u64(vld1q_u64(d)); +#endif } -// Subtract packed double-precision (64-bit) floating-point elements in b from -// packed double-precision (64-bit) floating-point elements in a, and store the -// results in dst. -// -// FOR j := 0 to 1 -// i := j*64 -// dst[i+63:i] := a[i+63:i] - b[i+63:i] -// ENDFOR -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_sub_pd -FORCE_INLINE __m128d _mm_sub_pd(__m128d a, __m128d b) +// Compare packed double-precision (64-bit) floating-point elements in a and b +// for less-than-or-equal, and store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmple_pd +FORCE_INLINE __m128d _mm_cmple_pd(__m128d a, __m128d b) { #if defined(__aarch64__) - return vreinterpretq_m128d_f64( - vsubq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); + return vreinterpretq_m128d_u64( + vcleq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); #else - double *da = (double *) &a; - double *db = (double *) &b; - double c[2]; - c[0] = da[0] - db[0]; - c[1] = da[1] - db[1]; - return vld1q_f32((float32_t *) c); + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b)); + uint64_t d[2]; + d[0] = (*(double *) &a0) <= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0); + d[1] = (*(double *) &a1) <= (*(double *) &b1) ? ~UINT64_C(0) : UINT64_C(0); + + return vreinterpretq_m128d_u64(vld1q_u64(d)); #endif } -// Subtract the lower double-precision (64-bit) floating-point element in b from -// the lower double-precision (64-bit) floating-point element in a, store the -// result in the lower element of dst, and copy the upper element from a to the -// upper element of dst. 
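The ARMv7 fallbacks above all follow the same per-lane pattern: pull each 64-bit lane out as an integer, reinterpret it as a double, compare in scalar code, and widen the boolean to a full-lane mask. A stand-alone sketch of that pattern (the helper name is made up; illustration only, not part of the patch):

#include <stdint.h>
#include <string.h>

// Scalar model of one lane of the ARMv7 comparison fallbacks.
static uint64_t cmpgt_lane_mask(uint64_t a_bits, uint64_t b_bits)
{
    double a, b;
    memcpy(&a, &a_bits, sizeof(a));  // same reinterpretation as *(double *) &a0
    memcpy(&b, &b_bits, sizeof(b));
    return (a > b) ? ~UINT64_C(0) : UINT64_C(0);  // all ones if true, else all zeros
}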
-// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_sd -FORCE_INLINE __m128d _mm_sub_sd(__m128d a, __m128d b) +// Compare the lower double-precision (64-bit) floating-point elements in a and +// b for less-than-or-equal, store the result in the lower element of dst, and +// copy the upper element from a to the upper element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmple_sd +FORCE_INLINE __m128d _mm_cmple_sd(__m128d a, __m128d b) { - return _mm_move_sd(a, _mm_sub_pd(a, b)); +#if defined(__aarch64__) + return _mm_move_sd(a, _mm_cmple_pd(a, b)); +#else + // expand "_mm_cmpge_pd()" to reduce unnecessary operations + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + uint64_t d[2]; + d[0] = (*(double *) &a0) <= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0); + d[1] = a1; + + return vreinterpretq_m128d_u64(vld1q_u64(d)); +#endif } -// Add packed unsigned 16-bit integers in a and b using saturation, and store -// the results in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_adds_epu16 -FORCE_INLINE __m128i _mm_adds_epu16(__m128i a, __m128i b) +// Compares the 8 signed 16-bit integers in a and the 8 signed 16-bit integers +// in b for less than. +// +// r0 := (a0 < b0) ? 0xffff : 0x0 +// r1 := (a1 < b1) ? 0xffff : 0x0 +// ... +// r7 := (a7 < b7) ? 0xffff : 0x0 +// +// https://technet.microsoft.com/en-us/library/t863edb2(v=vs.100).aspx +FORCE_INLINE __m128i _mm_cmplt_epi16(__m128i a, __m128i b) { return vreinterpretq_m128i_u16( - vqaddq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b))); + vcltq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); } -// Negate packed 8-bit integers in a when the corresponding signed -// 8-bit integer in b is negative, and store the results in dst. -// Element in dst are zeroed out when the corresponding element -// in b is zero. -// -// for i in 0..15 -// if b[i] < 0 -// r[i] := -a[i] -// else if b[i] == 0 -// r[i] := 0 -// else -// r[i] := a[i] -// fi -// done -FORCE_INLINE __m128i _mm_sign_epi8(__m128i _a, __m128i _b) + +// Compares the 4 signed 32-bit integers in a and the 4 signed 32-bit integers +// in b for less than. +// https://msdn.microsoft.com/en-us/library/vstudio/4ak0bf5d(v=vs.100).aspx +FORCE_INLINE __m128i _mm_cmplt_epi32(__m128i a, __m128i b) { - int8x16_t a = vreinterpretq_s8_m128i(_a); - int8x16_t b = vreinterpretq_s8_m128i(_b); + return vreinterpretq_m128i_u32( + vcltq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); +} - // signed shift right: faster than vclt - // (b < 0) ? 0xFF : 0 - uint8x16_t ltMask = vreinterpretq_u8_s8(vshrq_n_s8(b, 7)); +// Compares the 16 signed 8-bit integers in a and the 16 signed 8-bit integers +// in b for lesser than. +// https://msdn.microsoft.com/en-us/library/windows/desktop/9s46csht(v=vs.90).aspx +FORCE_INLINE __m128i _mm_cmplt_epi8(__m128i a, __m128i b) +{ + return vreinterpretq_m128i_u8( + vcltq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); +} - // (b == 0) ? 0xFF : 0 +// Compare packed double-precision (64-bit) floating-point elements in a and b +// for less-than, and store the results in dst. 
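The all-ones/all-zeros masks returned by the integer compares above combine naturally with the bitwise helpers (_mm_and_si128, _mm_andnot_si128, _mm_or_si128) provided elsewhere in this header, e.g. a branchless per-lane minimum (illustrative sketch only, not part of the patch):

static __m128i min_epi32_sketch(__m128i a, __m128i b)  // hypothetical helper
{
    __m128i lt = _mm_cmplt_epi32(a, b);           // 0xFFFFFFFF where a < b
    return _mm_or_si128(_mm_and_si128(lt, a),     // keep a where a < b
                        _mm_andnot_si128(lt, b)); // keep b elsewhere
}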
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmplt_pd +FORCE_INLINE __m128d _mm_cmplt_pd(__m128d a, __m128d b) +{ #if defined(__aarch64__) - int8x16_t zeroMask = vreinterpretq_s8_u8(vceqzq_s8(b)); + return vreinterpretq_m128d_u64( + vcltq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); #else - int8x16_t zeroMask = vreinterpretq_s8_u8(vceqq_s8(b, vdupq_n_s8(0))); + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b)); + uint64_t d[2]; + d[0] = (*(double *) &a0) < (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0); + d[1] = (*(double *) &a1) < (*(double *) &b1) ? ~UINT64_C(0) : UINT64_C(0); + + return vreinterpretq_m128d_u64(vld1q_u64(d)); #endif - - // bitwise select either a or nagative 'a' (vnegq_s8(a) return nagative 'a') - // based on ltMask - int8x16_t masked = vbslq_s8(ltMask, vnegq_s8(a), a); - // res = masked & (~zeroMask) - int8x16_t res = vbicq_s8(masked, zeroMask); - - return vreinterpretq_m128i_s8(res); } -// Negate packed 16-bit integers in a when the corresponding signed -// 16-bit integer in b is negative, and store the results in dst. -// Element in dst are zeroed out when the corresponding element -// in b is zero. -// -// for i in 0..7 -// if b[i] < 0 -// r[i] := -a[i] -// else if b[i] == 0 -// r[i] := 0 -// else -// r[i] := a[i] -// fi -// done -FORCE_INLINE __m128i _mm_sign_epi16(__m128i _a, __m128i _b) +// Compare the lower double-precision (64-bit) floating-point elements in a and +// b for less-than, store the result in the lower element of dst, and copy the +// upper element from a to the upper element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmplt_sd +FORCE_INLINE __m128d _mm_cmplt_sd(__m128d a, __m128d b) { - int16x8_t a = vreinterpretq_s16_m128i(_a); - int16x8_t b = vreinterpretq_s16_m128i(_b); - - // signed shift right: faster than vclt - // (b < 0) ? 0xFFFF : 0 - uint16x8_t ltMask = vreinterpretq_u16_s16(vshrq_n_s16(b, 15)); - // (b == 0) ? 0xFFFF : 0 #if defined(__aarch64__) - int16x8_t zeroMask = vreinterpretq_s16_u16(vceqzq_s16(b)); + return _mm_move_sd(a, _mm_cmplt_pd(a, b)); #else - int16x8_t zeroMask = vreinterpretq_s16_u16(vceqq_s16(b, vdupq_n_s16(0))); + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + uint64_t d[2]; + d[0] = (*(double *) &a0) < (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0); + d[1] = a1; + + return vreinterpretq_m128d_u64(vld1q_u64(d)); #endif - - // bitwise select either a or negative 'a' (vnegq_s16(a) equals to negative - // 'a') based on ltMask - int16x8_t masked = vbslq_s16(ltMask, vnegq_s16(a), a); - // res = masked & (~zeroMask) - int16x8_t res = vbicq_s16(masked, zeroMask); - return vreinterpretq_m128i_s16(res); } -// Negate packed 32-bit integers in a when the corresponding signed -// 32-bit integer in b is negative, and store the results in dst. -// Element in dst are zeroed out when the corresponding element -// in b is zero. 
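Like most of the *_sd wrappers added in this patch, _mm_cmplt_sd above only produces a mask in the low lane and passes the high lane of a through via _mm_move_sd; a quick sketch (illustrative only, not part of the patch; assumes sse2neon.h is included):

#include <stdio.h>

static void cmplt_sd_demo(void)  // hypothetical name, illustration only
{
    __m128d a = _mm_set_pd(7.0, 1.0);   // lane1 = 7.0, lane0 = 1.0
    __m128d b = _mm_set_pd(0.0, 2.0);
    double out[2];
    _mm_storeu_pd(out, _mm_cmplt_sd(a, b));
    // out[0] holds the all-ones mask (1.0 < 2.0); out[1] is a's upper lane.
    printf("%g\n", out[1]);  // 7
}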
-// -// for i in 0..3 -// if b[i] < 0 -// r[i] := -a[i] -// else if b[i] == 0 -// r[i] := 0 -// else -// r[i] := a[i] -// fi -// done -FORCE_INLINE __m128i _mm_sign_epi32(__m128i _a, __m128i _b) +// Compare packed double-precision (64-bit) floating-point elements in a and b +// for not-equal, and store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpneq_pd +FORCE_INLINE __m128d _mm_cmpneq_pd(__m128d a, __m128d b) { - int32x4_t a = vreinterpretq_s32_m128i(_a); - int32x4_t b = vreinterpretq_s32_m128i(_b); - - // signed shift right: faster than vclt - // (b < 0) ? 0xFFFFFFFF : 0 - uint32x4_t ltMask = vreinterpretq_u32_s32(vshrq_n_s32(b, 31)); - - // (b == 0) ? 0xFFFFFFFF : 0 #if defined(__aarch64__) - int32x4_t zeroMask = vreinterpretq_s32_u32(vceqzq_s32(b)); + return vreinterpretq_m128d_s32(vmvnq_s32(vreinterpretq_s32_u64( + vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))))); #else - int32x4_t zeroMask = vreinterpretq_s32_u32(vceqq_s32(b, vdupq_n_s32(0))); + // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi) + uint32x4_t cmp = + vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(b)); + uint32x4_t swapped = vrev64q_u32(cmp); + return vreinterpretq_m128d_u32(vmvnq_u32(vandq_u32(cmp, swapped))); #endif - - // bitwise select either a or negative 'a' (vnegq_s32(a) equals to negative - // 'a') based on ltMask - int32x4_t masked = vbslq_s32(ltMask, vnegq_s32(a), a); - // res = masked & (~zeroMask) - int32x4_t res = vbicq_s32(masked, zeroMask); - return vreinterpretq_m128i_s32(res); } -// Negate packed 16-bit integers in a when the corresponding signed 16-bit -// integer in b is negative, and store the results in dst. Element in dst are -// zeroed out when the corresponding element in b is zero. -// -// FOR j := 0 to 3 -// i := j*16 -// IF b[i+15:i] < 0 -// dst[i+15:i] := -(a[i+15:i]) -// ELSE IF b[i+15:i] == 0 -// dst[i+15:i] := 0 -// ELSE -// dst[i+15:i] := a[i+15:i] -// FI -// ENDFOR -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sign_pi16 -FORCE_INLINE __m64 _mm_sign_pi16(__m64 _a, __m64 _b) +// Compare the lower double-precision (64-bit) floating-point elements in a and +// b for not-equal, store the result in the lower element of dst, and copy the +// upper element from a to the upper element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpneq_sd +FORCE_INLINE __m128d _mm_cmpneq_sd(__m128d a, __m128d b) { - int16x4_t a = vreinterpret_s16_m64(_a); - int16x4_t b = vreinterpret_s16_m64(_b); - - // signed shift right: faster than vclt - // (b < 0) ? 0xFFFF : 0 - uint16x4_t ltMask = vreinterpret_u16_s16(vshr_n_s16(b, 15)); + return _mm_move_sd(a, _mm_cmpneq_pd(a, b)); +} - // (b == 0) ? 0xFFFF : 0 +// Compare packed double-precision (64-bit) floating-point elements in a and b +// for not-greater-than-or-equal, and store the results in dst. 
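Because _mm_cmpneq_pd above is implemented as the bitwise NOT of the equality compare, a NaN lane (which is never equal, even to itself) reports as not-equal; a small check (illustrative only, not part of the patch; assumes sse2neon.h is included):

#include <math.h>
#include <stdio.h>

static void cmpneq_nan_demo(void)  // hypothetical name, illustration only
{
    __m128d a = _mm_set_pd(1.0, NAN);         // lane0 = NaN, lane1 = 1.0
    double out[2];
    _mm_storeu_pd(out, _mm_cmpneq_pd(a, a));
    // lane0 is all ones (reads back as NaN), lane1 is all zeros (reads as 0.0).
    printf("%d %d\n", out[0] != 0.0, out[1] != 0.0);  // 1 0
}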
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpnge_pd +FORCE_INLINE __m128d _mm_cmpnge_pd(__m128d a, __m128d b) +{ #if defined(__aarch64__) - int16x4_t zeroMask = vreinterpret_s16_u16(vceqz_s16(b)); + return vreinterpretq_m128d_u64(veorq_u64( + vcgeq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)), + vdupq_n_u64(UINT64_MAX))); #else - int16x4_t zeroMask = vreinterpret_s16_u16(vceq_s16(b, vdup_n_s16(0))); + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b)); + uint64_t d[2]; + d[0] = + !((*(double *) &a0) >= (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0); + d[1] = + !((*(double *) &a1) >= (*(double *) &b1)) ? ~UINT64_C(0) : UINT64_C(0); + + return vreinterpretq_m128d_u64(vld1q_u64(d)); #endif - - // bitwise select either a or nagative 'a' (vneg_s16(a) return nagative 'a') - // based on ltMask - int16x4_t masked = vbsl_s16(ltMask, vneg_s16(a), a); - // res = masked & (~zeroMask) - int16x4_t res = vbic_s16(masked, zeroMask); - - return vreinterpret_m64_s16(res); } -// Negate packed 32-bit integers in a when the corresponding signed 32-bit -// integer in b is negative, and store the results in dst. Element in dst are -// zeroed out when the corresponding element in b is zero. -// -// FOR j := 0 to 1 -// i := j*32 -// IF b[i+31:i] < 0 -// dst[i+31:i] := -(a[i+31:i]) -// ELSE IF b[i+31:i] == 0 -// dst[i+31:i] := 0 -// ELSE -// dst[i+31:i] := a[i+31:i] -// FI -// ENDFOR -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sign_pi32 -FORCE_INLINE __m64 _mm_sign_pi32(__m64 _a, __m64 _b) +// Compare the lower double-precision (64-bit) floating-point elements in a and +// b for not-greater-than-or-equal, store the result in the lower element of +// dst, and copy the upper element from a to the upper element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpnge_sd +FORCE_INLINE __m128d _mm_cmpnge_sd(__m128d a, __m128d b) { - int32x2_t a = vreinterpret_s32_m64(_a); - int32x2_t b = vreinterpret_s32_m64(_b); - - // signed shift right: faster than vclt - // (b < 0) ? 0xFFFFFFFF : 0 - uint32x2_t ltMask = vreinterpret_u32_s32(vshr_n_s32(b, 31)); + return _mm_move_sd(a, _mm_cmpnge_pd(a, b)); +} - // (b == 0) ? 0xFFFFFFFF : 0 +// Compare packed double-precision (64-bit) floating-point elements in a and b +// for not-greater-than, and store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_cmpngt_pd +FORCE_INLINE __m128d _mm_cmpngt_pd(__m128d a, __m128d b) +{ #if defined(__aarch64__) - int32x2_t zeroMask = vreinterpret_s32_u32(vceqz_s32(b)); + return vreinterpretq_m128d_u64(veorq_u64( + vcgtq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)), + vdupq_n_u64(UINT64_MAX))); #else - int32x2_t zeroMask = vreinterpret_s32_u32(vceq_s32(b, vdup_n_s32(0))); + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b)); + uint64_t d[2]; + d[0] = + !((*(double *) &a0) > (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0); + d[1] = + !((*(double *) &a1) > (*(double *) &b1)) ? 
~UINT64_C(0) : UINT64_C(0); + + return vreinterpretq_m128d_u64(vld1q_u64(d)); #endif - - // bitwise select either a or nagative 'a' (vneg_s32(a) return nagative 'a') - // based on ltMask - int32x2_t masked = vbsl_s32(ltMask, vneg_s32(a), a); - // res = masked & (~zeroMask) - int32x2_t res = vbic_s32(masked, zeroMask); - - return vreinterpret_m64_s32(res); } -// Negate packed 8-bit integers in a when the corresponding signed 8-bit integer -// in b is negative, and store the results in dst. Element in dst are zeroed out -// when the corresponding element in b is zero. -// -// FOR j := 0 to 7 -// i := j*8 -// IF b[i+7:i] < 0 -// dst[i+7:i] := -(a[i+7:i]) -// ELSE IF b[i+7:i] == 0 -// dst[i+7:i] := 0 -// ELSE -// dst[i+7:i] := a[i+7:i] -// FI -// ENDFOR -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sign_pi8 -FORCE_INLINE __m64 _mm_sign_pi8(__m64 _a, __m64 _b) +// Compare the lower double-precision (64-bit) floating-point elements in a and +// b for not-greater-than, store the result in the lower element of dst, and +// copy the upper element from a to the upper element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpngt_sd +FORCE_INLINE __m128d _mm_cmpngt_sd(__m128d a, __m128d b) { - int8x8_t a = vreinterpret_s8_m64(_a); - int8x8_t b = vreinterpret_s8_m64(_b); - - // signed shift right: faster than vclt - // (b < 0) ? 0xFF : 0 - uint8x8_t ltMask = vreinterpret_u8_s8(vshr_n_s8(b, 7)); + return _mm_move_sd(a, _mm_cmpngt_pd(a, b)); +} - // (b == 0) ? 0xFF : 0 +// Compare packed double-precision (64-bit) floating-point elements in a and b +// for not-less-than-or-equal, and store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpnle_pd +FORCE_INLINE __m128d _mm_cmpnle_pd(__m128d a, __m128d b) +{ #if defined(__aarch64__) - int8x8_t zeroMask = vreinterpret_s8_u8(vceqz_s8(b)); + return vreinterpretq_m128d_u64(veorq_u64( + vcleq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)), + vdupq_n_u64(UINT64_MAX))); #else - int8x8_t zeroMask = vreinterpret_s8_u8(vceq_s8(b, vdup_n_s8(0))); + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b)); + uint64_t d[2]; + d[0] = + !((*(double *) &a0) <= (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0); + d[1] = + !((*(double *) &a1) <= (*(double *) &b1)) ? ~UINT64_C(0) : UINT64_C(0); + + return vreinterpretq_m128d_u64(vld1q_u64(d)); #endif - - // bitwise select either a or nagative 'a' (vneg_s8(a) return nagative 'a') - // based on ltMask - int8x8_t masked = vbsl_s8(ltMask, vneg_s8(a), a); - // res = masked & (~zeroMask) - int8x8_t res = vbic_s8(masked, zeroMask); - - return vreinterpret_m64_s8(res); } -// Average packed unsigned 16-bit integers in a and b, and store the results in -// dst. -// -// FOR j := 0 to 3 -// i := j*16 -// dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 -// ENDFOR -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_avg_pu16 -FORCE_INLINE __m64 _mm_avg_pu16(__m64 a, __m64 b) +// Compare the lower double-precision (64-bit) floating-point elements in a and +// b for not-less-than-or-equal, store the result in the lower element of dst, +// and copy the upper element from a to the upper element of dst. 
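The cmpn*_pd family above is built by XORing the positive compare with all ones, which matches SSE semantics for NaN: an unordered lane makes the ordered compare false, so its negation comes out true. A small contrast (illustrative only, not part of the patch; assumes sse2neon.h is included):

#include <math.h>
#include <stdio.h>

static void cmpnge_nan_demo(void)  // hypothetical name, illustration only
{
    __m128d a = _mm_set_pd(1.0, NAN);   // lane0 = NaN
    __m128d b = _mm_set_pd(2.0, 0.0);
    double lt[2], nge[2];
    _mm_storeu_pd(lt, _mm_cmplt_pd(a, b));    // NaN lane -> false (all zeros)
    _mm_storeu_pd(nge, _mm_cmpnge_pd(a, b));  // NaN lane -> true  (all ones)
    printf("%d %d\n", lt[0] != 0.0, nge[0] != 0.0);  // 0 1
}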
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpnle_sd +FORCE_INLINE __m128d _mm_cmpnle_sd(__m128d a, __m128d b) { - return vreinterpret_m64_u16( - vrhadd_u16(vreinterpret_u16_m64(a), vreinterpret_u16_m64(b))); + return _mm_move_sd(a, _mm_cmpnle_pd(a, b)); } -// Average packed unsigned 8-bit integers in a and b, and store the results in -// dst. -// -// FOR j := 0 to 7 -// i := j*8 -// dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 -// ENDFOR -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_avg_pu8 -FORCE_INLINE __m64 _mm_avg_pu8(__m64 a, __m64 b) +// Compare packed double-precision (64-bit) floating-point elements in a and b +// for not-less-than, and store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpnlt_pd +FORCE_INLINE __m128d _mm_cmpnlt_pd(__m128d a, __m128d b) { - return vreinterpret_m64_u8( - vrhadd_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b))); +#if defined(__aarch64__) + return vreinterpretq_m128d_u64(veorq_u64( + vcltq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)), + vdupq_n_u64(UINT64_MAX))); +#else + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b)); + uint64_t d[2]; + d[0] = + !((*(double *) &a0) < (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0); + d[1] = + !((*(double *) &a1) < (*(double *) &b1)) ? ~UINT64_C(0) : UINT64_C(0); + + return vreinterpretq_m128d_u64(vld1q_u64(d)); +#endif } -// Average packed unsigned 8-bit integers in a and b, and store the results in -// dst. -// -// FOR j := 0 to 7 -// i := j*8 -// dst[i+7:i] := (a[i+7:i] + b[i+7:i] + 1) >> 1 -// ENDFOR -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pavgb -#define _m_pavgb(a, b) _mm_avg_pu8(a, b) - -// Average packed unsigned 16-bit integers in a and b, and store the results in -// dst. -// -// FOR j := 0 to 3 -// i := j*16 -// dst[i+15:i] := (a[i+15:i] + b[i+15:i] + 1) >> 1 -// ENDFOR -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pavgw -#define _m_pavgw(a, b) _mm_avg_pu16(a, b) - -// Extract a 16-bit integer from a, selected with imm8, and store the result in -// the lower element of dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pextrw -#define _m_pextrw(a, imm) _mm_extract_pi16(a, imm) - -// Copy a to dst, and insert the 16-bit integer i into dst at the location -// specified by imm8. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=m_pinsrw -#define _m_pinsrw(a, i, imm) _mm_insert_pi16(a, i, imm) - -// Compare packed signed 16-bit integers in a and b, and store packed maximum -// values in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pmaxsw -#define _m_pmaxsw(a, b) _mm_max_pi16(a, b) - -// Compare packed unsigned 8-bit integers in a and b, and store packed maximum -// values in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pmaxub -#define _m_pmaxub(a, b) _mm_max_pu8(a, b) - -// Compare packed signed 16-bit integers in a and b, and store packed minimum -// values in dst. 
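The _mm_avg_* intrinsics touched above map to NEON's rounding halving adds (vrhadd), so per the pseudocode they compute (a + b + 1) >> 1 rather than a truncating average; a one-lane check (illustrative only, not part of the patch; assumes sse2neon.h is included):

#include <stdio.h>

static void avg_demo(void)  // hypothetical name, illustration only
{
    __m128i a = _mm_set1_epi8(1);
    __m128i b = _mm_set1_epi8(2);
    // (1 + 2 + 1) >> 1 = 2; a truncating average would give 1.
    printf("%d\n", _mm_cvtsi128_si32(_mm_avg_epu8(a, b)) & 0xFF);  // 2
}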
-// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pminsw -#define _m_pminsw(a, b) _mm_min_pi16(a, b) - -// Compare packed unsigned 8-bit integers in a and b, and store packed minimum -// values in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pminub -#define _m_pminub(a, b) _mm_min_pu8(a, b) - -// Computes the average of the 16 unsigned 8-bit integers in a and the 16 -// unsigned 8-bit integers in b and rounds. -// -// r0 := (a0 + b0) / 2 -// r1 := (a1 + b1) / 2 -// ... -// r15 := (a15 + b15) / 2 -// -// https://msdn.microsoft.com/en-us/library/vstudio/8zwh554a(v%3dvs.90).aspx -FORCE_INLINE __m128i _mm_avg_epu8(__m128i a, __m128i b) +// Compare the lower double-precision (64-bit) floating-point elements in a and +// b for not-less-than, store the result in the lower element of dst, and copy +// the upper element from a to the upper element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpnlt_sd +FORCE_INLINE __m128d _mm_cmpnlt_sd(__m128d a, __m128d b) { - return vreinterpretq_m128i_u8( - vrhaddq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b))); + return _mm_move_sd(a, _mm_cmpnlt_pd(a, b)); } -// Computes the average of the 8 unsigned 16-bit integers in a and the 8 -// unsigned 16-bit integers in b and rounds. -// -// r0 := (a0 + b0) / 2 -// r1 := (a1 + b1) / 2 -// ... -// r7 := (a7 + b7) / 2 -// -// https://msdn.microsoft.com/en-us/library/vstudio/y13ca3c8(v=vs.90).aspx -FORCE_INLINE __m128i _mm_avg_epu16(__m128i a, __m128i b) +// Compare packed double-precision (64-bit) floating-point elements in a and b +// to see if neither is NaN, and store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpord_pd +FORCE_INLINE __m128d _mm_cmpord_pd(__m128d a, __m128d b) { - return (__m128i) vrhaddq_u16(vreinterpretq_u16_m128i(a), - vreinterpretq_u16_m128i(b)); +#if defined(__aarch64__) + // Excluding NaNs, any two floating point numbers can be compared. + uint64x2_t not_nan_a = + vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(a)); + uint64x2_t not_nan_b = + vceqq_f64(vreinterpretq_f64_m128d(b), vreinterpretq_f64_m128d(b)); + return vreinterpretq_m128d_u64(vandq_u64(not_nan_a, not_nan_b)); +#else + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b)); + uint64_t d[2]; + d[0] = ((*(double *) &a0) == (*(double *) &a0) && + (*(double *) &b0) == (*(double *) &b0)) + ? ~UINT64_C(0) + : UINT64_C(0); + d[1] = ((*(double *) &a1) == (*(double *) &a1) && + (*(double *) &b1) == (*(double *) &b1)) + ? ~UINT64_C(0) + : UINT64_C(0); + + return vreinterpretq_m128d_u64(vld1q_u64(d)); +#endif } -// Adds the four single-precision, floating-point values of a and b. -// -// r0 := a0 + b0 -// r1 := a1 + b1 -// r2 := a2 + b2 -// r3 := a3 + b3 -// -// https://msdn.microsoft.com/en-us/library/vstudio/c9848chc(v=vs.100).aspx -FORCE_INLINE __m128 _mm_add_ps(__m128 a, __m128 b) +// Compare the lower double-precision (64-bit) floating-point elements in a and +// b to see if neither is NaN, store the result in the lower element of dst, and +// copy the upper element from a to the upper element of dst. 
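//
// Usage sketch (illustrative; assumes standard SSE2 semantics and a
// hypothetical helper name): cmpord/cmpunord are the usual way to build NaN
// masks, e.g. to replace NaN lanes with a default value.
//
//   #include <emmintrin.h>
//
//   static __m128d zero_out_nan_lanes(__m128d v)
//   {
//       __m128d ordered = _mm_cmpord_pd(v, v);  // all-ones where v is not NaN
//       return _mm_and_pd(v, ordered);          // NaN lanes become +0.0
//   }
//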
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpord_sd +FORCE_INLINE __m128d _mm_cmpord_sd(__m128d a, __m128d b) { - return vreinterpretq_m128_f32( - vaddq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); +#if defined(__aarch64__) + return _mm_move_sd(a, _mm_cmpord_pd(a, b)); +#else + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a)); + uint64_t d[2]; + d[0] = ((*(double *) &a0) == (*(double *) &a0) && + (*(double *) &b0) == (*(double *) &b0)) + ? ~UINT64_C(0) + : UINT64_C(0); + d[1] = a1; + + return vreinterpretq_m128d_u64(vld1q_u64(d)); +#endif } -// Add packed double-precision (64-bit) floating-point elements in a and b, and -// store the results in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_pd -FORCE_INLINE __m128d _mm_add_pd(__m128d a, __m128d b) +// Compare packed double-precision (64-bit) floating-point elements in a and b +// to see if either is NaN, and store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpunord_pd +FORCE_INLINE __m128d _mm_cmpunord_pd(__m128d a, __m128d b) { #if defined(__aarch64__) - return vreinterpretq_m128d_f64( - vaddq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); + // Two NaNs are not equal in comparison operation. + uint64x2_t not_nan_a = + vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(a)); + uint64x2_t not_nan_b = + vceqq_f64(vreinterpretq_f64_m128d(b), vreinterpretq_f64_m128d(b)); + return vreinterpretq_m128d_s32( + vmvnq_s32(vreinterpretq_s32_u64(vandq_u64(not_nan_a, not_nan_b)))); #else - double *da = (double *) &a; - double *db = (double *) &b; - double c[2]; - c[0] = da[0] + db[0]; - c[1] = da[1] + db[1]; - return vld1q_f32((float32_t *) c); + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b)); + uint64_t d[2]; + d[0] = ((*(double *) &a0) == (*(double *) &a0) && + (*(double *) &b0) == (*(double *) &b0)) + ? UINT64_C(0) + : ~UINT64_C(0); + d[1] = ((*(double *) &a1) == (*(double *) &a1) && + (*(double *) &b1) == (*(double *) &b1)) + ? UINT64_C(0) + : ~UINT64_C(0); + + return vreinterpretq_m128d_u64(vld1q_u64(d)); #endif } -// Add the lower double-precision (64-bit) floating-point element in a and b, -// store the result in the lower element of dst, and copy the upper element from -// a to the upper element of dst. -// -// dst[63:0] := a[63:0] + b[63:0] -// dst[127:64] := a[127:64] -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_sd -FORCE_INLINE __m128d _mm_add_sd(__m128d a, __m128d b) +// Compare the lower double-precision (64-bit) floating-point elements in a and +// b to see if either is NaN, store the result in the lower element of dst, and +// copy the upper element from a to the upper element of dst. 
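//
// Usage sketch (illustrative; assumes standard SSE2 semantics and a
// hypothetical helper name): unlike the cmp*_sd intrinsics above, which
// produce a per-lane mask, the comi* family returns a plain int, so it can
// feed an ordinary branch directly.
//
//   #include <emmintrin.h>
//
//   static int lower_is_less(__m128d a, __m128d b)
//   {
//       return _mm_comilt_sd(a, b);  // 1 if a[0] < b[0], else 0
//   }
//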
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpunord_sd +FORCE_INLINE __m128d _mm_cmpunord_sd(__m128d a, __m128d b) { #if defined(__aarch64__) - return _mm_move_sd(a, _mm_add_pd(a, b)); + return _mm_move_sd(a, _mm_cmpunord_pd(a, b)); #else - double *da = (double *) &a; - double *db = (double *) &b; - double c[2]; - c[0] = da[0] + db[0]; - c[1] = da[1]; - return vld1q_f32((float32_t *) c); + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a)); + uint64_t d[2]; + d[0] = ((*(double *) &a0) == (*(double *) &a0) && + (*(double *) &b0) == (*(double *) &b0)) + ? UINT64_C(0) + : ~UINT64_C(0); + d[1] = a1; + + return vreinterpretq_m128d_u64(vld1q_u64(d)); #endif } -// Add 64-bit integers a and b, and store the result in dst. -// -// dst[63:0] := a[63:0] + b[63:0] -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_add_si64 -FORCE_INLINE __m64 _mm_add_si64(__m64 a, __m64 b) +// Compare the lower double-precision (64-bit) floating-point element in a and b +// for greater-than-or-equal, and return the boolean result (0 or 1). +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comige_sd +FORCE_INLINE int _mm_comige_sd(__m128d a, __m128d b) { - return vreinterpret_m64_s64( - vadd_s64(vreinterpret_s64_m64(a), vreinterpret_s64_m64(b))); -} +#if defined(__aarch64__) + return vgetq_lane_u64(vcgeq_f64(a, b), 0) & 0x1; +#else + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); -// adds the scalar single-precision floating point values of a and b. -// https://msdn.microsoft.com/en-us/library/be94x2y6(v=vs.100).aspx -FORCE_INLINE __m128 _mm_add_ss(__m128 a, __m128 b) -{ - float32_t b0 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 0); - float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0); - // the upper values in the result must be the remnants of <a>. - return vreinterpretq_m128_f32(vaddq_f32(a, value)); + return (*(double *) &a0 >= *(double *) &b0); +#endif } -// Adds the 4 signed or unsigned 64-bit integers in a to the 4 signed or -// unsigned 32-bit integers in b. -// https://msdn.microsoft.com/en-us/library/vstudio/09xs4fkk(v=vs.100).aspx -FORCE_INLINE __m128i _mm_add_epi64(__m128i a, __m128i b) +// Compare the lower double-precision (64-bit) floating-point element in a and b +// for greater-than, and return the boolean result (0 or 1). +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comigt_sd +FORCE_INLINE int _mm_comigt_sd(__m128d a, __m128d b) { - return vreinterpretq_m128i_s64( - vaddq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b))); +#if defined(__aarch64__) + return vgetq_lane_u64(vcgtq_f64(a, b), 0) & 0x1; +#else + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + + return (*(double *) &a0 > *(double *) &b0); +#endif } -// Adds the 4 signed or unsigned 32-bit integers in a to the 4 signed or -// unsigned 32-bit integers in b. 
-// -// r0 := a0 + b0 -// r1 := a1 + b1 -// r2 := a2 + b2 -// r3 := a3 + b3 -// -// https://msdn.microsoft.com/en-us/library/vstudio/09xs4fkk(v=vs.100).aspx -FORCE_INLINE __m128i _mm_add_epi32(__m128i a, __m128i b) +// Compare the lower double-precision (64-bit) floating-point element in a and b +// for less-than-or-equal, and return the boolean result (0 or 1). +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comile_sd +FORCE_INLINE int _mm_comile_sd(__m128d a, __m128d b) { - return vreinterpretq_m128i_s32( - vaddq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); +#if defined(__aarch64__) + return vgetq_lane_u64(vcleq_f64(a, b), 0) & 0x1; +#else + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + + return (*(double *) &a0 <= *(double *) &b0); +#endif } -// Adds the 8 signed or unsigned 16-bit integers in a to the 8 signed or -// unsigned 16-bit integers in b. -// https://msdn.microsoft.com/en-us/library/fceha5k4(v=vs.100).aspx -FORCE_INLINE __m128i _mm_add_epi16(__m128i a, __m128i b) +// Compare the lower double-precision (64-bit) floating-point element in a and b +// for less-than, and return the boolean result (0 or 1). +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comilt_sd +FORCE_INLINE int _mm_comilt_sd(__m128d a, __m128d b) { - return vreinterpretq_m128i_s16( - vaddq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); +#if defined(__aarch64__) + return vgetq_lane_u64(vcltq_f64(a, b), 0) & 0x1; +#else + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + + return (*(double *) &a0 < *(double *) &b0); +#endif } -// Adds the 16 signed or unsigned 8-bit integers in a to the 16 signed or -// unsigned 8-bit integers in b. -// https://technet.microsoft.com/en-us/subscriptions/yc7tcyzs(v=vs.90) -FORCE_INLINE __m128i _mm_add_epi8(__m128i a, __m128i b) +// Compare the lower double-precision (64-bit) floating-point element in a and b +// for equality, and return the boolean result (0 or 1). +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comieq_sd +FORCE_INLINE int _mm_comieq_sd(__m128d a, __m128d b) { - return vreinterpretq_m128i_s8( - vaddq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); +#if defined(__aarch64__) + return vgetq_lane_u64(vceqq_f64(a, b), 0) & 0x1; +#else + uint32x4_t a_not_nan = + vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(a)); + uint32x4_t b_not_nan = + vceqq_u32(vreinterpretq_u32_m128d(b), vreinterpretq_u32_m128d(b)); + uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); + uint32x4_t a_eq_b = + vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(b)); + uint64x2_t and_results = vandq_u64(vreinterpretq_u64_u32(a_and_b_not_nan), + vreinterpretq_u64_u32(a_eq_b)); + return vgetq_lane_u64(and_results, 0) & 0x1; +#endif } -// Adds the 8 signed 16-bit integers in a to the 8 signed 16-bit integers in b -// and saturates. -// -// r0 := SignedSaturate(a0 + b0) -// r1 := SignedSaturate(a1 + b1) -// ... -// r7 := SignedSaturate(a7 + b7) -// -// https://msdn.microsoft.com/en-us/library/1a306ef8(v=vs.100).aspx -FORCE_INLINE __m128i _mm_adds_epi16(__m128i a, __m128i b) +// Compare the lower double-precision (64-bit) floating-point element in a and b +// for not-equal, and return the boolean result (0 or 1). 
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_comineq_sd +FORCE_INLINE int _mm_comineq_sd(__m128d a, __m128d b) { - return vreinterpretq_m128i_s16( - vqaddq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); + return !_mm_comieq_sd(a, b); } -// Add packed signed 8-bit integers in a and b using saturation, and store the -// results in dst. +// Convert packed signed 32-bit integers in a to packed double-precision +// (64-bit) floating-point elements, and store the results in dst. // -// FOR j := 0 to 15 -// i := j*8 -// dst[i+7:i] := Saturate8( a[i+7:i] + b[i+7:i] ) +// FOR j := 0 to 1 +// i := j*32 +// m := j*64 +// dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) // ENDFOR // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_adds_epi8 -FORCE_INLINE __m128i _mm_adds_epi8(__m128i a, __m128i b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepi32_pd +FORCE_INLINE __m128d _mm_cvtepi32_pd(__m128i a) { - return vreinterpretq_m128i_s8( - vqaddq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); +#if defined(__aarch64__) + return vreinterpretq_m128d_f64( + vcvtq_f64_s64(vmovl_s32(vget_low_s32(vreinterpretq_s32_m128i(a))))); +#else + double a0 = (double) vgetq_lane_s32(vreinterpretq_s32_m128i(a), 0); + double a1 = (double) vgetq_lane_s32(vreinterpretq_s32_m128i(a), 1); + return _mm_set_pd(a1, a0); +#endif } -// Adds the 16 unsigned 8-bit integers in a to the 16 unsigned 8-bit integers in -// b and saturates.. -// https://msdn.microsoft.com/en-us/library/9hahyddy(v=vs.100).aspx -FORCE_INLINE __m128i _mm_adds_epu8(__m128i a, __m128i b) +// Converts the four signed 32-bit integer values of a to single-precision, +// floating-point values +// https://msdn.microsoft.com/en-us/library/vstudio/36bwxcx5(v=vs.100).aspx +FORCE_INLINE __m128 _mm_cvtepi32_ps(__m128i a) { - return vreinterpretq_m128i_u8( - vqaddq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b))); + return vreinterpretq_m128_f32(vcvtq_f32_s32(vreinterpretq_s32_m128i(a))); } -// Multiplies the 8 signed or unsigned 16-bit integers from a by the 8 signed or -// unsigned 16-bit integers from b. +// Convert packed double-precision (64-bit) floating-point elements in a to +// packed 32-bit integers, and store the results in dst. // -// r0 := (a0 * b0)[15:0] -// r1 := (a1 * b1)[15:0] -// ... -// r7 := (a7 * b7)[15:0] +// FOR j := 0 to 1 +// i := 32*j +// k := 64*j +// dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k]) +// ENDFOR // -// https://msdn.microsoft.com/en-us/library/vstudio/9ks1472s(v=vs.100).aspx -FORCE_INLINE __m128i _mm_mullo_epi16(__m128i a, __m128i b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpd_epi32 +FORCE_INLINE __m128i _mm_cvtpd_epi32(__m128d a) { - return vreinterpretq_m128i_s16( - vmulq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); + __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION); + double d0 = ((double *) &rnd)[0]; + double d1 = ((double *) &rnd)[1]; + return _mm_set_epi32(0, 0, (int32_t) d1, (int32_t) d0); } -// Multiplies the 4 signed or unsigned 32-bit integers from a by the 4 signed or -// unsigned 32-bit integers from b. -// https://msdn.microsoft.com/en-us/library/vstudio/bb531409(v=vs.100).aspx -FORCE_INLINE __m128i _mm_mullo_epi32(__m128i a, __m128i b) +// Convert packed double-precision (64-bit) floating-point elements in a to +// packed 32-bit integers, and store the results in dst. 
+// +// FOR j := 0 to 1 +// i := 32*j +// k := 64*j +// dst[i+31:i] := Convert_FP64_To_Int32(a[k+63:k]) +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpd_pi32 +FORCE_INLINE __m64 _mm_cvtpd_pi32(__m128d a) { - return vreinterpretq_m128i_s32( - vmulq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); + __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION); + double d0 = ((double *) &rnd)[0]; + double d1 = ((double *) &rnd)[1]; + int32_t ALIGN_STRUCT(16) data[2] = {(int32_t) d0, (int32_t) d1}; + return vreinterpret_m64_s32(vld1_s32(data)); } -// Multiply the packed unsigned 16-bit integers in a and b, producing -// intermediate 32-bit integers, and store the high 16 bits of the intermediate -// integers in dst. +// Convert packed double-precision (64-bit) floating-point elements in a to +// packed single-precision (32-bit) floating-point elements, and store the +// results in dst. // -// FOR j := 0 to 3 -// i := j*16 -// tmp[31:0] := a[i+15:i] * b[i+15:i] -// dst[i+15:i] := tmp[31:16] +// FOR j := 0 to 1 +// i := 32*j +// k := 64*j +// dst[i+31:i] := Convert_FP64_To_FP32(a[k+64:k]) // ENDFOR +// dst[127:64] := 0 // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_pmulhuw -#define _m_pmulhuw(a, b) _mm_mulhi_pu16(a, b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpd_ps +FORCE_INLINE __m128 _mm_cvtpd_ps(__m128d a) +{ +#if defined(__aarch64__) + float32x2_t tmp = vcvt_f32_f64(vreinterpretq_f64_m128d(a)); + return vreinterpretq_m128_f32(vcombine_f32(tmp, vdup_n_f32(0))); +#else + float a0 = (float) ((double *) &a)[0]; + float a1 = (float) ((double *) &a)[1]; + return _mm_set_ps(0, 0, a1, a0); +#endif +} -// Multiplies the four single-precision, floating-point values of a and b. +// Convert packed signed 32-bit integers in a to packed double-precision +// (64-bit) floating-point elements, and store the results in dst. // -// r0 := a0 * b0 -// r1 := a1 * b1 -// r2 := a2 * b2 -// r3 := a3 * b3 +// FOR j := 0 to 1 +// i := j*32 +// m := j*64 +// dst[m+63:m] := Convert_Int32_To_FP64(a[i+31:i]) +// ENDFOR // -// https://msdn.microsoft.com/en-us/library/vstudio/22kbk6t9(v=vs.100).aspx -FORCE_INLINE __m128 _mm_mul_ps(__m128 a, __m128 b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpi32_pd +FORCE_INLINE __m128d _mm_cvtpi32_pd(__m64 a) { - return vreinterpretq_m128_f32( - vmulq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); +#if defined(__aarch64__) + return vreinterpretq_m128d_f64( + vcvtq_f64_s64(vmovl_s32(vreinterpret_s32_m64(a)))); +#else + double a0 = (double) vget_lane_s32(vreinterpret_s32_m64(a), 0); + double a1 = (double) vget_lane_s32(vreinterpret_s32_m64(a), 1); + return _mm_set_pd(a1, a0); +#endif } -// Multiply packed double-precision (64-bit) floating-point elements in a and b, -// and store the results in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_pd -FORCE_INLINE __m128d _mm_mul_pd(__m128d a, __m128d b) +// Converts the four single-precision, floating-point values of a to signed +// 32-bit integer values. +// +// r0 := (int) a0 +// r1 := (int) a1 +// r2 := (int) a2 +// r3 := (int) a3 +// +// https://msdn.microsoft.com/en-us/library/vstudio/xdc42k5e(v=vs.100).aspx +// *NOTE*. The default rounding mode on SSE is 'round to even', which ARMv7-A +// does not support! It is supported on ARMv8-A however. 
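//
// Worked example of the tie-to-even behaviour the fallback below emulates
// (assuming the default _MM_ROUND_NEAREST mode):
//
//   _mm_cvtps_epi32(_mm_set_ps(2.5f, 1.5f, 0.5f, -0.5f))  ->  {0, 0, 2, 2}
//
// -0.5f and 0.5f round to 0 while 1.5f and 2.5f both round to 2, because
// ties go to the even integer; a plain "add 0.5 and truncate" would give 3
// for 2.5f, which is why the r_even/is_delta_half correction is needed.
//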
+FORCE_INLINE __m128i _mm_cvtps_epi32(__m128 a) { #if defined(__aarch64__) - return vreinterpretq_m128d_f64( - vmulq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); + switch (_MM_GET_ROUNDING_MODE()) { + case _MM_ROUND_NEAREST: + return vreinterpretq_m128i_s32(vcvtnq_s32_f32(a)); + case _MM_ROUND_DOWN: + return vreinterpretq_m128i_s32(vcvtmq_s32_f32(a)); + case _MM_ROUND_UP: + return vreinterpretq_m128i_s32(vcvtpq_s32_f32(a)); + default: // _MM_ROUND_TOWARD_ZERO + return vreinterpretq_m128i_s32(vcvtq_s32_f32(a)); + } #else - double *da = (double *) &a; - double *db = (double *) &b; - double c[2]; - c[0] = da[0] * db[0]; - c[1] = da[1] * db[1]; - return vld1q_f32((float32_t *) c); + float *f = (float *) &a; + switch (_MM_GET_ROUNDING_MODE()) { + case _MM_ROUND_NEAREST: { + uint32x4_t signmask = vdupq_n_u32(0x80000000); + float32x4_t half = vbslq_f32(signmask, vreinterpretq_f32_m128(a), + vdupq_n_f32(0.5f)); /* +/- 0.5 */ + int32x4_t r_normal = vcvtq_s32_f32(vaddq_f32( + vreinterpretq_f32_m128(a), half)); /* round to integer: [a + 0.5]*/ + int32x4_t r_trunc = vcvtq_s32_f32( + vreinterpretq_f32_m128(a)); /* truncate to integer: [a] */ + int32x4_t plusone = vreinterpretq_s32_u32(vshrq_n_u32( + vreinterpretq_u32_s32(vnegq_s32(r_trunc)), 31)); /* 1 or 0 */ + int32x4_t r_even = vbicq_s32(vaddq_s32(r_trunc, plusone), + vdupq_n_s32(1)); /* ([a] + {0,1}) & ~1 */ + float32x4_t delta = vsubq_f32( + vreinterpretq_f32_m128(a), + vcvtq_f32_s32(r_trunc)); /* compute delta: delta = (a - [a]) */ + uint32x4_t is_delta_half = + vceqq_f32(delta, half); /* delta == +/- 0.5 */ + return vreinterpretq_m128i_s32( + vbslq_s32(is_delta_half, r_even, r_normal)); + } + case _MM_ROUND_DOWN: + return _mm_set_epi32(floorf(f[3]), floorf(f[2]), floorf(f[1]), + floorf(f[0])); + case _MM_ROUND_UP: + return _mm_set_epi32(ceilf(f[3]), ceilf(f[2]), ceilf(f[1]), + ceilf(f[0])); + default: // _MM_ROUND_TOWARD_ZERO + return _mm_set_epi32((int32_t) f[3], (int32_t) f[2], (int32_t) f[1], + (int32_t) f[0]); + } #endif } -// Multiply the lower double-precision (64-bit) floating-point element in a and -// b, store the result in the lower element of dst, and copy the upper element -// from a to the upper element of dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mul_sd -FORCE_INLINE __m128d _mm_mul_sd(__m128d a, __m128d b) +// Convert packed single-precision (32-bit) floating-point elements in a to +// packed double-precision (64-bit) floating-point elements, and store the +// results in dst. +// +// FOR j := 0 to 1 +// i := 64*j +// k := 32*j +// dst[i+63:i] := Convert_FP32_To_FP64(a[k+31:k]) +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtps_pd +FORCE_INLINE __m128d _mm_cvtps_pd(__m128 a) { - return _mm_move_sd(a, _mm_mul_pd(a, b)); +#if defined(__aarch64__) + return vreinterpretq_m128d_f64( + vcvt_f64_f32(vget_low_f32(vreinterpretq_f32_m128(a)))); +#else + double a0 = (double) vgetq_lane_f32(vreinterpretq_f32_m128(a), 0); + double a1 = (double) vgetq_lane_f32(vreinterpretq_f32_m128(a), 1); + return _mm_set_pd(a1, a0); +#endif } -// Multiply the lower single-precision (32-bit) floating-point element in a and -// b, store the result in the lower element of dst, and copy the upper 3 packed -// elements from a to the upper elements of dst. +// Copy the lower double-precision (64-bit) floating-point element of a to dst. 
// -// dst[31:0] := a[31:0] * b[31:0] -// dst[127:32] := a[127:32] +// dst[63:0] := a[63:0] // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_ss -FORCE_INLINE __m128 _mm_mul_ss(__m128 a, __m128 b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsd_f64 +FORCE_INLINE double _mm_cvtsd_f64(__m128d a) { - return _mm_move_ss(a, _mm_mul_ps(a, b)); +#if defined(__aarch64__) + return (double) vgetq_lane_f64(vreinterpretq_f64_m128d(a), 0); +#else + return ((double *) &a)[0]; +#endif } -// Multiply the low unsigned 32-bit integers from each packed 64-bit element in -// a and b, and store the unsigned 64-bit results in dst. +// Convert the lower double-precision (64-bit) floating-point element in a to a +// 32-bit integer, and store the result in dst. // -// r0 := (a0 & 0xFFFFFFFF) * (b0 & 0xFFFFFFFF) -// r1 := (a2 & 0xFFFFFFFF) * (b2 & 0xFFFFFFFF) -FORCE_INLINE __m128i _mm_mul_epu32(__m128i a, __m128i b) +// dst[31:0] := Convert_FP64_To_Int32(a[63:0]) +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsd_si32 +FORCE_INLINE int32_t _mm_cvtsd_si32(__m128d a) { - // vmull_u32 upcasts instead of masking, so we downcast. - uint32x2_t a_lo = vmovn_u64(vreinterpretq_u64_m128i(a)); - uint32x2_t b_lo = vmovn_u64(vreinterpretq_u64_m128i(b)); - return vreinterpretq_m128i_u64(vmull_u32(a_lo, b_lo)); +#if defined(__aarch64__) + return (int32_t) vgetq_lane_f64(vrndiq_f64(vreinterpretq_f64_m128d(a)), 0); +#else + __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION); + double ret = ((double *) &rnd)[0]; + return (int32_t) ret; +#endif } -// Multiply the low unsigned 32-bit integers from a and b, and store the -// unsigned 64-bit result in dst. +// Convert the lower double-precision (64-bit) floating-point element in a to a +// 64-bit integer, and store the result in dst. // -// dst[63:0] := a[31:0] * b[31:0] +// dst[63:0] := Convert_FP64_To_Int64(a[63:0]) // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_su32 -FORCE_INLINE __m64 _mm_mul_su32(__m64 a, __m64 b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsd_si64 +FORCE_INLINE int64_t _mm_cvtsd_si64(__m128d a) { - return vreinterpret_m64_u64(vget_low_u64( - vmull_u32(vreinterpret_u32_m64(a), vreinterpret_u32_m64(b)))); +#if defined(__aarch64__) + return (int64_t) vgetq_lane_f64(vrndiq_f64(vreinterpretq_f64_m128d(a)), 0); +#else + __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION); + double ret = ((double *) &rnd)[0]; + return (int64_t) ret; +#endif } -// Multiply the low signed 32-bit integers from each packed 64-bit element in -// a and b, and store the signed 64-bit results in dst. +// Convert the lower double-precision (64-bit) floating-point element in a to a +// 64-bit integer, and store the result in dst. // -// r0 := (int64_t)(int32_t)a0 * (int64_t)(int32_t)b0 -// r1 := (int64_t)(int32_t)a2 * (int64_t)(int32_t)b2 -FORCE_INLINE __m128i _mm_mul_epi32(__m128i a, __m128i b) +// dst[63:0] := Convert_FP64_To_Int64(a[63:0]) +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsd_si64x +#define _mm_cvtsd_si64x _mm_cvtsd_si64 + +// Convert the lower double-precision (64-bit) floating-point element in b to a +// single-precision (32-bit) floating-point element, store the result in the +// lower element of dst, and copy the upper 3 packed elements from a to the +// upper elements of dst. 
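//
// Usage sketch (illustrative; assumes standard SSE2/SSE semantics and a
// hypothetical helper name): only lane 0 is converted, lanes 1-3 come from
// the first argument.
//
//   #include <emmintrin.h>
//
//   static float narrow_low_lane(__m128d d)
//   {
//       __m128 rest = _mm_set1_ps(0.0f);              // lanes 1..3 of dst
//       return _mm_cvtss_f32(_mm_cvtsd_ss(rest, d));  // (float) of d's low lane
//   }
//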
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsd_ss +FORCE_INLINE __m128 _mm_cvtsd_ss(__m128 a, __m128d b) { - // vmull_s32 upcasts instead of masking, so we downcast. - int32x2_t a_lo = vmovn_s64(vreinterpretq_s64_m128i(a)); - int32x2_t b_lo = vmovn_s64(vreinterpretq_s64_m128i(b)); - return vreinterpretq_m128i_s64(vmull_s32(a_lo, b_lo)); +#if defined(__aarch64__) + return vreinterpretq_m128_f32(vsetq_lane_f32( + vget_lane_f32(vcvt_f32_f64(vreinterpretq_f64_m128d(b)), 0), + vreinterpretq_f32_m128(a), 0)); +#else + return vreinterpretq_m128_f32(vsetq_lane_f32((float) ((double *) &b)[0], + vreinterpretq_f32_m128(a), 0)); +#endif } -// Multiplies the 8 signed 16-bit integers from a by the 8 signed 16-bit -// integers from b. +// Copy the lower 32-bit integer in a to dst. // -// r0 := (a0 * b0) + (a1 * b1) -// r1 := (a2 * b2) + (a3 * b3) -// r2 := (a4 * b4) + (a5 * b5) -// r3 := (a6 * b6) + (a7 * b7) -// https://msdn.microsoft.com/en-us/library/yht36sa6(v=vs.90).aspx -FORCE_INLINE __m128i _mm_madd_epi16(__m128i a, __m128i b) +// dst[31:0] := a[31:0] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si32 +FORCE_INLINE int _mm_cvtsi128_si32(__m128i a) { - int32x4_t low = vmull_s16(vget_low_s16(vreinterpretq_s16_m128i(a)), - vget_low_s16(vreinterpretq_s16_m128i(b))); - int32x4_t high = vmull_s16(vget_high_s16(vreinterpretq_s16_m128i(a)), - vget_high_s16(vreinterpretq_s16_m128i(b))); - - int32x2_t low_sum = vpadd_s32(vget_low_s32(low), vget_high_s32(low)); - int32x2_t high_sum = vpadd_s32(vget_low_s32(high), vget_high_s32(high)); - - return vreinterpretq_m128i_s32(vcombine_s32(low_sum, high_sum)); + return vgetq_lane_s32(vreinterpretq_s32_m128i(a), 0); } -// Multiply packed signed 16-bit integers in a and b, producing intermediate -// signed 32-bit integers. Shift right by 15 bits while rounding up, and store -// the packed 16-bit integers in dst. +// Copy the lower 64-bit integer in a to dst. // -// r0 := Round(((int32_t)a0 * (int32_t)b0) >> 15) -// r1 := Round(((int32_t)a1 * (int32_t)b1) >> 15) -// r2 := Round(((int32_t)a2 * (int32_t)b2) >> 15) -// ... -// r7 := Round(((int32_t)a7 * (int32_t)b7) >> 15) -FORCE_INLINE __m128i _mm_mulhrs_epi16(__m128i a, __m128i b) +// dst[63:0] := a[63:0] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si64 +FORCE_INLINE int64_t _mm_cvtsi128_si64(__m128i a) { - // Has issues due to saturation - // return vreinterpretq_m128i_s16(vqrdmulhq_s16(a, b)); - - // Multiply - int32x4_t mul_lo = vmull_s16(vget_low_s16(vreinterpretq_s16_m128i(a)), - vget_low_s16(vreinterpretq_s16_m128i(b))); - int32x4_t mul_hi = vmull_s16(vget_high_s16(vreinterpretq_s16_m128i(a)), - vget_high_s16(vreinterpretq_s16_m128i(b))); - - // Rounding narrowing shift right - // narrow = (int16_t)((mul + 16384) >> 15); - int16x4_t narrow_lo = vrshrn_n_s32(mul_lo, 15); - int16x4_t narrow_hi = vrshrn_n_s32(mul_hi, 15); - - // Join together - return vreinterpretq_m128i_s16(vcombine_s16(narrow_lo, narrow_hi)); + return vgetq_lane_s64(vreinterpretq_s64_m128i(a), 0); } -// Vertically multiply each unsigned 8-bit integer from a with the corresponding -// signed 8-bit integer from b, producing intermediate signed 16-bit integers. -// Horizontally add adjacent pairs of intermediate signed 16-bit integers, -// and pack the saturated results in dst. 
-// -// FOR j := 0 to 7 -// i := j*16 -// dst[i+15:i] := Saturate_To_Int16( a[i+15:i+8]*b[i+15:i+8] + -// a[i+7:i]*b[i+7:i] ) -// ENDFOR -FORCE_INLINE __m128i _mm_maddubs_epi16(__m128i _a, __m128i _b) +// Copy the lower 64-bit integer in a to dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si64x +#define _mm_cvtsi128_si64x(a) _mm_cvtsi128_si64(a) + +// Convert the signed 32-bit integer b to a double-precision (64-bit) +// floating-point element, store the result in the lower element of dst, and +// copy the upper element from a to the upper element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi32_sd +FORCE_INLINE __m128d _mm_cvtsi32_sd(__m128d a, int32_t b) { #if defined(__aarch64__) - uint8x16_t a = vreinterpretq_u8_m128i(_a); - int8x16_t b = vreinterpretq_s8_m128i(_b); - int16x8_t tl = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(a))), - vmovl_s8(vget_low_s8(b))); - int16x8_t th = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(a))), - vmovl_s8(vget_high_s8(b))); - return vreinterpretq_m128i_s16( - vqaddq_s16(vuzp1q_s16(tl, th), vuzp2q_s16(tl, th))); + return vreinterpretq_m128d_f64( + vsetq_lane_f64((double) b, vreinterpretq_f64_m128d(a), 0)); #else - // This would be much simpler if x86 would choose to zero extend OR sign - // extend, not both. This could probably be optimized better. - uint16x8_t a = vreinterpretq_u16_m128i(_a); - int16x8_t b = vreinterpretq_s16_m128i(_b); - - // Zero extend a - int16x8_t a_odd = vreinterpretq_s16_u16(vshrq_n_u16(a, 8)); - int16x8_t a_even = vreinterpretq_s16_u16(vbicq_u16(a, vdupq_n_u16(0xff00))); - - // Sign extend by shifting left then shifting right. - int16x8_t b_even = vshrq_n_s16(vshlq_n_s16(b, 8), 8); - int16x8_t b_odd = vshrq_n_s16(b, 8); - - // multiply - int16x8_t prod1 = vmulq_s16(a_even, b_even); - int16x8_t prod2 = vmulq_s16(a_odd, b_odd); - - // saturated add - return vreinterpretq_m128i_s16(vqaddq_s16(prod1, prod2)); + double bf = (double) b; + return vreinterpretq_m128d_s64( + vsetq_lane_s64(*(int64_t *) &bf, vreinterpretq_s64_m128d(a), 0)); #endif } -// Computes the fused multiple add product of 32-bit floating point numbers. +// Copy the lower 64-bit integer in a to dst. // -// Return Value -// Multiplies A and B, and adds C to the temporary result before returning it. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_fmadd -FORCE_INLINE __m128 _mm_fmadd_ps(__m128 a, __m128 b, __m128 c) +// dst[63:0] := a[63:0] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si64x +#define _mm_cvtsi128_si64x(a) _mm_cvtsi128_si64(a) + +// Moves 32-bit integer a to the least significant 32 bits of an __m128 object, +// zero extending the upper bits. +// +// r0 := a +// r1 := 0x0 +// r2 := 0x0 +// r3 := 0x0 +// +// https://msdn.microsoft.com/en-us/library/ct3539ha%28v=vs.90%29.aspx +FORCE_INLINE __m128i _mm_cvtsi32_si128(int a) +{ + return vreinterpretq_m128i_s32(vsetq_lane_s32(a, vdupq_n_s32(0), 0)); +} + +// Convert the signed 64-bit integer b to a double-precision (64-bit) +// floating-point element, store the result in the lower element of dst, and +// copy the upper element from a to the upper element of dst. 
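//
// Usage sketch (illustrative; assumes standard SSE2 semantics, a 64-bit
// target, and a hypothetical helper name): only the low lane is written, so
// the upper double keeps whatever a already held.
//
//   #include <emmintrin.h>
//   #include <stdint.h>
//
//   static __m128d with_low_lane_from_int(__m128d a, int64_t v)
//   {
//       return _mm_cvtsi64_sd(a, v);  // { (double) v, a[1] }
//   }
//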
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi64_sd +FORCE_INLINE __m128d _mm_cvtsi64_sd(__m128d a, int64_t b) { #if defined(__aarch64__) - return vreinterpretq_m128_f32(vfmaq_f32(vreinterpretq_f32_m128(c), - vreinterpretq_f32_m128(b), - vreinterpretq_f32_m128(a))); + return vreinterpretq_m128d_f64( + vsetq_lane_f64((double) b, vreinterpretq_f64_m128d(a), 0)); #else - return _mm_add_ps(_mm_mul_ps(a, b), c); + double bf = (double) b; + return vreinterpretq_m128d_s64( + vsetq_lane_s64(*(int64_t *) &bf, vreinterpretq_s64_m128d(a), 0)); #endif } -// Alternatively add and subtract packed single-precision (32-bit) -// floating-point elements in a to/from packed elements in b, and store the -// results in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=addsub_ps -FORCE_INLINE __m128 _mm_addsub_ps(__m128 a, __m128 b) +// Moves 64-bit integer a to the least significant 64 bits of an __m128 object, +// zero extending the upper bits. +// +// r0 := a +// r1 := 0x0 +FORCE_INLINE __m128i _mm_cvtsi64_si128(int64_t a) { - __m128 mask = {-1.0f, 1.0f, -1.0f, 1.0f}; - return _mm_fmadd_ps(b, mask, a); + return vreinterpretq_m128i_s64(vsetq_lane_s64(a, vdupq_n_s64(0), 0)); } -// Horizontally add adjacent pairs of double-precision (64-bit) floating-point -// elements in a and b, and pack the results in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hadd_pd -FORCE_INLINE __m128d _mm_hadd_pd(__m128d a, __m128d b) +// Copy 64-bit integer a to the lower element of dst, and zero the upper +// element. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi64x_si128 +#define _mm_cvtsi64x_si128(a) _mm_cvtsi64_si128(a) + +// Convert the signed 64-bit integer b to a double-precision (64-bit) +// floating-point element, store the result in the lower element of dst, and +// copy the upper element from a to the upper element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi64x_sd +#define _mm_cvtsi64x_sd(a, b) _mm_cvtsi64_sd(a, b) + +// Convert the lower single-precision (32-bit) floating-point element in b to a +// double-precision (64-bit) floating-point element, store the result in the +// lower element of dst, and copy the upper element from a to the upper element +// of dst. +// +// dst[63:0] := Convert_FP32_To_FP64(b[31:0]) +// dst[127:64] := a[127:64] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtss_sd +FORCE_INLINE __m128d _mm_cvtss_sd(__m128d a, __m128 b) { + double d = (double) vgetq_lane_f32(vreinterpretq_f32_m128(b), 0); #if defined(__aarch64__) return vreinterpretq_m128d_f64( - vpaddq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); + vsetq_lane_f64(d, vreinterpretq_f64_m128d(a), 0)); #else - double *da = (double *) &a; - double *db = (double *) &b; - double c[] = {da[0] + da[1], db[0] + db[1]}; - return vreinterpretq_m128d_u64(vld1q_u64((uint64_t *) c)); + return vreinterpretq_m128d_s64( + vsetq_lane_s64(*(int64_t *) &d, vreinterpretq_s64_m128d(a), 0)); #endif } -// Compute the absolute differences of packed unsigned 8-bit integers in a and -// b, then horizontally sum each consecutive 8 differences to produce two -// unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low -// 16 bits of 64-bit elements in dst. 
-// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sad_epu8 -FORCE_INLINE __m128i _mm_sad_epu8(__m128i a, __m128i b) +// Convert packed double-precision (64-bit) floating-point elements in a to +// packed 32-bit integers with truncation, and store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttpd_epi32 +FORCE_INLINE __m128i _mm_cvttpd_epi32(__m128d a) { - uint16x8_t t = vpaddlq_u8(vabdq_u8((uint8x16_t) a, (uint8x16_t) b)); - uint16_t r0 = t[0] + t[1] + t[2] + t[3]; - uint16_t r4 = t[4] + t[5] + t[6] + t[7]; - uint16x8_t r = vsetq_lane_u16(r0, vdupq_n_u16(0), 0); - return (__m128i) vsetq_lane_u16(r4, r, 4); + double a0 = ((double *) &a)[0]; + double a1 = ((double *) &a)[1]; + return _mm_set_epi32(0, 0, (int32_t) a1, (int32_t) a0); } -// Compute the absolute differences of packed unsigned 8-bit integers in a and -// b, then horizontally sum each consecutive 8 differences to produce four -// unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low -// 16 bits of dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sad_pu8 -FORCE_INLINE __m64 _mm_sad_pu8(__m64 a, __m64 b) +// Convert packed double-precision (64-bit) floating-point elements in a to +// packed 32-bit integers with truncation, and store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttpd_pi32 +FORCE_INLINE __m64 _mm_cvttpd_pi32(__m128d a) { - uint16x4_t t = - vpaddl_u8(vabd_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b))); - uint16_t r0 = t[0] + t[1] + t[2] + t[3]; - return vreinterpret_m64_u16(vset_lane_u16(r0, vdup_n_u16(0), 0)); + double a0 = ((double *) &a)[0]; + double a1 = ((double *) &a)[1]; + int32_t ALIGN_STRUCT(16) data[2] = {(int32_t) a0, (int32_t) a1}; + return vreinterpret_m64_s32(vld1_s32(data)); } -// Compute the absolute differences of packed unsigned 8-bit integers in a and -// b, then horizontally sum each consecutive 8 differences to produce four -// unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low -// 16 bits of dst. +// Converts the four single-precision, floating-point values of a to signed +// 32-bit integer values using truncate. +// https://msdn.microsoft.com/en-us/library/vstudio/1h005y6x(v=vs.100).aspx +FORCE_INLINE __m128i _mm_cvttps_epi32(__m128 a) +{ + return vreinterpretq_m128i_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a))); +} + +// Convert the lower double-precision (64-bit) floating-point element in a to a +// 32-bit integer with truncation, and store the result in dst. // -// FOR j := 0 to 7 -// i := j*8 -// tmp[i+7:i] := ABS(a[i+7:i] - b[i+7:i]) -// ENDFOR -// dst[15:0] := tmp[7:0] + tmp[15:8] + tmp[23:16] + tmp[31:24] + tmp[39:32] + -// tmp[47:40] + tmp[55:48] + tmp[63:56] dst[63:16] := 0 +// dst[63:0] := Convert_FP64_To_Int32_Truncate(a[63:0]) // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_m_psadbw -#define _m_psadbw(a, b) _mm_sad_pu8(a, b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsd_si32 +FORCE_INLINE int32_t _mm_cvttsd_si32(__m128d a) +{ + double ret = *((double *) &a); + return (int32_t) ret; +} -// Divides the four single-precision, floating-point values of a and b. +// Convert the lower double-precision (64-bit) floating-point element in a to a +// 64-bit integer with truncation, and store the result in dst. 
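//
// Worked example (assuming the default round-to-nearest mode): the "tt"
// variants truncate towards zero, while cvtsd_si32/si64 honour the current
// rounding mode.
//
//   _mm_cvtsd_si32(_mm_set_sd(2.7))    ->  3
//   _mm_cvttsd_si32(_mm_set_sd(2.7))   ->  2
//   _mm_cvttsd_si32(_mm_set_sd(-2.7))  -> -2
//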
// -// r0 := a0 / b0 -// r1 := a1 / b1 -// r2 := a2 / b2 -// r3 := a3 / b3 +// dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0]) // -// https://msdn.microsoft.com/en-us/library/edaw8147(v=vs.100).aspx -FORCE_INLINE __m128 _mm_div_ps(__m128 a, __m128 b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsd_si64 +FORCE_INLINE int64_t _mm_cvttsd_si64(__m128d a) { -#if defined(__aarch64__) && !SSE2NEON_PRECISE_DIV - return vreinterpretq_m128_f32( - vdivq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); +#if defined(__aarch64__) + return vgetq_lane_s64(vcvtq_s64_f64(vreinterpretq_f64_m128d(a)), 0); #else - float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(b)); - recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(b))); -#if SSE2NEON_PRECISE_DIV - // Additional Netwon-Raphson iteration for accuracy - recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(b))); -#endif - return vreinterpretq_m128_f32(vmulq_f32(vreinterpretq_f32_m128(a), recip)); + double ret = *((double *) &a); + return (int64_t) ret; #endif } -// Divides the scalar single-precision floating point value of a by b. -// https://msdn.microsoft.com/en-us/library/4y73xa49(v=vs.100).aspx -FORCE_INLINE __m128 _mm_div_ss(__m128 a, __m128 b) -{ - float32_t value = - vgetq_lane_f32(vreinterpretq_f32_m128(_mm_div_ps(a, b)), 0); - return vreinterpretq_m128_f32( - vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0)); -} +// Convert the lower double-precision (64-bit) floating-point element in a to a +// 64-bit integer with truncation, and store the result in dst. +// +// dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0]) +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsd_si64x +#define _mm_cvttsd_si64x(a) _mm_cvttsd_si64(a) // Divide packed double-precision (64-bit) floating-point elements in a by // packed elements in b, and store the results in dst. @@ -3875,266 +4340,230 @@ FORCE_INLINE __m128d _mm_div_sd(__m128d a, __m128d b) #endif } -// Compute the approximate reciprocal of packed single-precision (32-bit) -// floating-point elements in a, and store the results in dst. The maximum -// relative error for this approximation is less than 1.5*2^-12. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp_ps -FORCE_INLINE __m128 _mm_rcp_ps(__m128 in) +// Extracts the selected signed or unsigned 16-bit integer from a and zero +// extends. +// https://msdn.microsoft.com/en-us/library/6dceta0c(v=vs.100).aspx +// FORCE_INLINE int _mm_extract_epi16(__m128i a, __constrange(0,8) int imm) +#define _mm_extract_epi16(a, imm) \ + vgetq_lane_u16(vreinterpretq_u16_m128i(a), (imm)) + +// Inserts the least significant 16 bits of b into the selected 16-bit integer +// of a. +// https://msdn.microsoft.com/en-us/library/kaze8hz1%28v=vs.100%29.aspx +// FORCE_INLINE __m128i _mm_insert_epi16(__m128i a, int b, +// __constrange(0,8) int imm) +#define _mm_insert_epi16(a, b, imm) \ + __extension__({ \ + vreinterpretq_m128i_s16( \ + vsetq_lane_s16((b), vreinterpretq_s16_m128i(a), (imm))); \ + }) + +// Loads two double-precision from 16-byte aligned memory, floating-point +// values. 
+// +// dst[127:0] := MEM[mem_addr+127:mem_addr] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_pd +FORCE_INLINE __m128d _mm_load_pd(const double *p) { - float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(in)); - recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(in))); -#if SSE2NEON_PRECISE_DIV - // Additional Netwon-Raphson iteration for accuracy - recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(in))); +#if defined(__aarch64__) + return vreinterpretq_m128d_f64(vld1q_f64(p)); +#else + const float *fp = (const float *) p; + float ALIGN_STRUCT(16) data[4] = {fp[0], fp[1], fp[2], fp[3]}; + return vreinterpretq_m128d_f32(vld1q_f32(data)); #endif - return vreinterpretq_m128_f32(recip); } -// Compute the approximate reciprocal of the lower single-precision (32-bit) -// floating-point element in a, store the result in the lower element of dst, -// and copy the upper 3 packed elements from a to the upper elements of dst. The -// maximum relative error for this approximation is less than 1.5*2^-12. +// Load a double-precision (64-bit) floating-point element from memory into both +// elements of dst. // -// dst[31:0] := (1.0 / a[31:0]) -// dst[127:32] := a[127:32] +// dst[63:0] := MEM[mem_addr+63:mem_addr] +// dst[127:64] := MEM[mem_addr+63:mem_addr] // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rcp_ss -FORCE_INLINE __m128 _mm_rcp_ss(__m128 a) -{ - return _mm_move_ss(a, _mm_rcp_ps(a)); -} +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_pd1 +#define _mm_load_pd1 _mm_load1_pd -// Computes the approximations of square roots of the four single-precision, -// floating-point values of a. First computes reciprocal square roots and then -// reciprocals of the four values. +// Load a double-precision (64-bit) floating-point element from memory into the +// lower of dst, and zero the upper element. mem_addr does not need to be +// aligned on any particular boundary. // -// r0 := sqrt(a0) -// r1 := sqrt(a1) -// r2 := sqrt(a2) -// r3 := sqrt(a3) +// dst[63:0] := MEM[mem_addr+63:mem_addr] +// dst[127:64] := 0 // -// https://msdn.microsoft.com/en-us/library/vstudio/8z67bwwk(v=vs.100).aspx -FORCE_INLINE __m128 _mm_sqrt_ps(__m128 in) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_sd +FORCE_INLINE __m128d _mm_load_sd(const double *p) { -#if SSE2NEON_PRECISE_SQRT - float32x4_t recip = vrsqrteq_f32(vreinterpretq_f32_m128(in)); - - // Test for vrsqrteq_f32(0) -> positive infinity case. - // Change to zero, so that s * 1/sqrt(s) result is zero too. 
- const uint32x4_t pos_inf = vdupq_n_u32(0x7F800000); - const uint32x4_t div_by_zero = - vceqq_u32(pos_inf, vreinterpretq_u32_f32(recip)); - recip = vreinterpretq_f32_u32( - vandq_u32(vmvnq_u32(div_by_zero), vreinterpretq_u32_f32(recip))); - - // Additional Netwon-Raphson iteration for accuracy - recip = vmulq_f32( - vrsqrtsq_f32(vmulq_f32(recip, recip), vreinterpretq_f32_m128(in)), - recip); - recip = vmulq_f32( - vrsqrtsq_f32(vmulq_f32(recip, recip), vreinterpretq_f32_m128(in)), - recip); - - // sqrt(s) = s * 1/sqrt(s) - return vreinterpretq_m128_f32(vmulq_f32(vreinterpretq_f32_m128(in), recip)); -#elif defined(__aarch64__) - return vreinterpretq_m128_f32(vsqrtq_f32(vreinterpretq_f32_m128(in))); +#if defined(__aarch64__) + return vreinterpretq_m128d_f64(vsetq_lane_f64(*p, vdupq_n_f64(0), 0)); #else - float32x4_t recipsq = vrsqrteq_f32(vreinterpretq_f32_m128(in)); - float32x4_t sq = vrecpeq_f32(recipsq); - return vreinterpretq_m128_f32(sq); + const float *fp = (const float *) p; + float ALIGN_STRUCT(16) data[4] = {fp[0], fp[1], 0, 0}; + return vreinterpretq_m128d_f32(vld1q_f32(data)); #endif } -// Computes the approximation of the square root of the scalar single-precision -// floating point value of in. -// https://msdn.microsoft.com/en-us/library/ahfsc22d(v=vs.100).aspx -FORCE_INLINE __m128 _mm_sqrt_ss(__m128 in) +// Loads 128-bit value. : +// https://msdn.microsoft.com/en-us/library/atzzad1h(v=vs.80).aspx +FORCE_INLINE __m128i _mm_load_si128(const __m128i *p) { - float32_t value = - vgetq_lane_f32(vreinterpretq_f32_m128(_mm_sqrt_ps(in)), 0); - return vreinterpretq_m128_f32( - vsetq_lane_f32(value, vreinterpretq_f32_m128(in), 0)); + return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *) p)); } -// Computes the approximations of the reciprocal square roots of the four -// single-precision floating point values of in. -// https://msdn.microsoft.com/en-us/library/22hfsh53(v=vs.100).aspx -FORCE_INLINE __m128 _mm_rsqrt_ps(__m128 in) +// Load a double-precision (64-bit) floating-point element from memory into both +// elements of dst. +// +// dst[63:0] := MEM[mem_addr+63:mem_addr] +// dst[127:64] := MEM[mem_addr+63:mem_addr] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load1_pd +FORCE_INLINE __m128d _mm_load1_pd(const double *p) { - float32x4_t out = vrsqrteq_f32(vreinterpretq_f32_m128(in)); -#if SSE2NEON_PRECISE_RSQRT - // Additional Netwon-Raphson iteration for accuracy - out = vmulq_f32( - out, vrsqrtsq_f32(vmulq_f32(vreinterpretq_f32_m128(in), out), out)); - out = vmulq_f32( - out, vrsqrtsq_f32(vmulq_f32(vreinterpretq_f32_m128(in), out), out)); +#if defined(__aarch64__) + return vreinterpretq_m128d_f64(vld1q_dup_f64(p)); +#else + return vreinterpretq_m128d_s64(vdupq_n_s64(*(const int64_t *) p)); #endif - return vreinterpretq_m128_f32(out); } -// Compute the approximate reciprocal square root of the lower single-precision -// (32-bit) floating-point element in a, store the result in the lower element -// of dst, and copy the upper 3 packed elements from a to the upper elements of -// dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rsqrt_ss -FORCE_INLINE __m128 _mm_rsqrt_ss(__m128 in) +// Load a double-precision (64-bit) floating-point element from memory into the +// upper element of dst, and copy the lower element from a to dst. mem_addr does +// not need to be aligned on any particular boundary. 
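//
// Usage sketch (illustrative; assumes standard SSE2 semantics and a
// hypothetical helper name): load_sd/loadh_pd make it easy to assemble a
// vector from two unrelated scalars in memory.
//
//   #include <emmintrin.h>
//
//   static __m128d gather_pair(const double *lo, const double *hi)
//   {
//       __m128d v = _mm_load_sd(lo);  // { *lo, 0.0 }
//       return _mm_loadh_pd(v, hi);   // { *lo, *hi }
//   }
//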
+// +// dst[63:0] := a[63:0] +// dst[127:64] := MEM[mem_addr+63:mem_addr] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadh_pd +FORCE_INLINE __m128d _mm_loadh_pd(__m128d a, const double *p) { - return vsetq_lane_f32(vgetq_lane_f32(_mm_rsqrt_ps(in), 0), in, 0); +#if defined(__aarch64__) + return vreinterpretq_m128d_f64( + vcombine_f64(vget_low_f64(vreinterpretq_f64_m128d(a)), vld1_f64(p))); +#else + return vreinterpretq_m128d_f32(vcombine_f32( + vget_low_f32(vreinterpretq_f32_m128d(a)), vld1_f32((const float *) p))); +#endif } -// Compare packed signed 16-bit integers in a and b, and store packed maximum -// values in dst. -// -// FOR j := 0 to 3 -// i := j*16 -// dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) -// ENDFOR -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_pi16 -FORCE_INLINE __m64 _mm_max_pi16(__m64 a, __m64 b) +// Load 64-bit integer from memory into the first element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadl_epi64 +FORCE_INLINE __m128i _mm_loadl_epi64(__m128i const *p) { - return vreinterpret_m64_s16( - vmax_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b))); + /* Load the lower 64 bits of the value pointed to by p into the + * lower 64 bits of the result, zeroing the upper 64 bits of the result. + */ + return vreinterpretq_m128i_s32( + vcombine_s32(vld1_s32((int32_t const *) p), vcreate_s32(0))); } -// Compare packed signed 16-bit integers in a and b, and store packed maximum -// values in dst. +// Load a double-precision (64-bit) floating-point element from memory into the +// lower element of dst, and copy the upper element from a to dst. mem_addr does +// not need to be aligned on any particular boundary. // -// FOR j := 0 to 3 -// i := j*16 -// dst[i+15:i] := MAX(a[i+15:i], b[i+15:i]) -// ENDFOR +// dst[63:0] := MEM[mem_addr+63:mem_addr] +// dst[127:64] := a[127:64] // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_pi16 -#define _m_pmaxsw(a, b) _mm_max_pi16(a, b) - -// Computes the maximums of the four single-precision, floating-point values of -// a and b. -// https://msdn.microsoft.com/en-us/library/vstudio/ff5d607a(v=vs.100).aspx -FORCE_INLINE __m128 _mm_max_ps(__m128 a, __m128 b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadl_pd +FORCE_INLINE __m128d _mm_loadl_pd(__m128d a, const double *p) { -#if SSE2NEON_PRECISE_MINMAX - float32x4_t _a = vreinterpretq_f32_m128(a); - float32x4_t _b = vreinterpretq_f32_m128(b); - return vbslq_f32(vcltq_f32(_b, _a), _a, _b); +#if defined(__aarch64__) + return vreinterpretq_m128d_f64( + vcombine_f64(vld1_f64(p), vget_high_f64(vreinterpretq_f64_m128d(a)))); #else - return vreinterpretq_m128_f32( - vmaxq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); + return vreinterpretq_m128d_f32( + vcombine_f32(vld1_f32((const float *) p), + vget_high_f32(vreinterpretq_f32_m128d(a)))); #endif } -// Compare packed unsigned 8-bit integers in a and b, and store packed maximum -// values in dst. +// Load 2 double-precision (64-bit) floating-point elements from memory into dst +// in reverse order. mem_addr must be aligned on a 16-byte boundary or a +// general-protection exception may be generated. 
// -// FOR j := 0 to 7 -// i := j*8 -// dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) -// ENDFOR +// dst[63:0] := MEM[mem_addr+127:mem_addr+64] +// dst[127:64] := MEM[mem_addr+63:mem_addr] // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_pu8 -FORCE_INLINE __m64 _mm_max_pu8(__m64 a, __m64 b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadr_pd +FORCE_INLINE __m128d _mm_loadr_pd(const double *p) { - return vreinterpret_m64_u8( - vmax_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b))); +#if defined(__aarch64__) + float64x2_t v = vld1q_f64(p); + return vreinterpretq_m128d_f64(vextq_f64(v, v, 1)); +#else + int64x2_t v = vld1q_s64((const int64_t *) p); + return vreinterpretq_m128d_s64(vextq_s64(v, v, 1)); +#endif } -// Compare packed unsigned 8-bit integers in a and b, and store packed maximum -// values in dst. -// -// FOR j := 0 to 7 -// i := j*8 -// dst[i+7:i] := MAX(a[i+7:i], b[i+7:i]) -// ENDFOR -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_pu8 -#define _m_pmaxub(a, b) _mm_max_pu8(a, b) - -// Compare packed signed 16-bit integers in a and b, and store packed minimum -// values in dst. -// -// FOR j := 0 to 3 -// i := j*16 -// dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) -// ENDFOR -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_pi16 -FORCE_INLINE __m64 _mm_min_pi16(__m64 a, __m64 b) +// Loads two double-precision from unaligned memory, floating-point values. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_pd +FORCE_INLINE __m128d _mm_loadu_pd(const double *p) { - return vreinterpret_m64_s16( - vmin_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b))); + return _mm_load_pd(p); } -// Compare packed signed 16-bit integers in a and b, and store packed minimum -// values in dst. -// -// FOR j := 0 to 3 -// i := j*16 -// dst[i+15:i] := MIN(a[i+15:i], b[i+15:i]) -// ENDFOR -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_pi16 -#define _m_pminsw(a, b) _mm_min_pi16(a, b) - -// Computes the minima of the four single-precision, floating-point values of a -// and b. -// https://msdn.microsoft.com/en-us/library/vstudio/wh13kadz(v=vs.100).aspx -FORCE_INLINE __m128 _mm_min_ps(__m128 a, __m128 b) +// Loads 128-bit value. : +// https://msdn.microsoft.com/zh-cn/library/f4k12ae8(v=vs.90).aspx +FORCE_INLINE __m128i _mm_loadu_si128(const __m128i *p) { -#if SSE2NEON_PRECISE_MINMAX - float32x4_t _a = vreinterpretq_f32_m128(a); - float32x4_t _b = vreinterpretq_f32_m128(b); - return vbslq_f32(vcltq_f32(_a, _b), _a, _b); -#else - return vreinterpretq_m128_f32( - vminq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); -#endif + return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *) p)); } -// Compare packed unsigned 8-bit integers in a and b, and store packed minimum -// values in dst. +// Load unaligned 32-bit integer from memory into the first element of dst. 
// -// FOR j := 0 to 7 -// i := j*8 -// dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) -// ENDFOR +// dst[31:0] := MEM[mem_addr+31:mem_addr] +// dst[MAX:32] := 0 // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_pu8 -FORCE_INLINE __m64 _mm_min_pu8(__m64 a, __m64 b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_si32 +FORCE_INLINE __m128i _mm_loadu_si32(const void *p) { - return vreinterpret_m64_u8( - vmin_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b))); + return vreinterpretq_m128i_s32( + vsetq_lane_s32(*(const int32_t *) p, vdupq_n_s32(0), 0)); } -// Compare packed unsigned 8-bit integers in a and b, and store packed minimum -// values in dst. -// -// FOR j := 0 to 7 -// i := j*8 -// dst[i+7:i] := MIN(a[i+7:i], b[i+7:i]) -// ENDFOR +// Multiplies the 8 signed 16-bit integers from a by the 8 signed 16-bit +// integers from b. // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_pu8 -#define _m_pminub(a, b) _mm_min_pu8(a, b) +// r0 := (a0 * b0) + (a1 * b1) +// r1 := (a2 * b2) + (a3 * b3) +// r2 := (a4 * b4) + (a5 * b5) +// r3 := (a6 * b6) + (a7 * b7) +// https://msdn.microsoft.com/en-us/library/yht36sa6(v=vs.90).aspx +FORCE_INLINE __m128i _mm_madd_epi16(__m128i a, __m128i b) +{ + int32x4_t low = vmull_s16(vget_low_s16(vreinterpretq_s16_m128i(a)), + vget_low_s16(vreinterpretq_s16_m128i(b))); + int32x4_t high = vmull_s16(vget_high_s16(vreinterpretq_s16_m128i(a)), + vget_high_s16(vreinterpretq_s16_m128i(b))); -// Computes the maximum of the two lower scalar single-precision floating point -// values of a and b. -// https://msdn.microsoft.com/en-us/library/s6db5esz(v=vs.100).aspx -FORCE_INLINE __m128 _mm_max_ss(__m128 a, __m128 b) + int32x2_t low_sum = vpadd_s32(vget_low_s32(low), vget_high_s32(low)); + int32x2_t high_sum = vpadd_s32(vget_low_s32(high), vget_high_s32(high)); + + return vreinterpretq_m128i_s32(vcombine_s32(low_sum, high_sum)); +} + +// Conditionally store 8-bit integer elements from a into memory using mask +// (elements are not stored when the highest bit is not set in the corresponding +// element) and a non-temporal memory hint. mem_addr does not need to be aligned +// on any particular boundary. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskmoveu_si128 +FORCE_INLINE void _mm_maskmoveu_si128(__m128i a, __m128i mask, char *mem_addr) { - float32_t value = vgetq_lane_f32(_mm_max_ps(a, b), 0); - return vreinterpretq_m128_f32( - vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0)); + int8x16_t shr_mask = vshrq_n_s8(vreinterpretq_s8_m128i(mask), 7); + __m128 b = _mm_load_ps((const float *) mem_addr); + int8x16_t masked = + vbslq_s8(vreinterpretq_u8_s8(shr_mask), vreinterpretq_s8_m128i(a), + vreinterpretq_s8_m128(b)); + vst1q_s8((int8_t *) mem_addr, masked); } -// Computes the minimum of the two lower scalar single-precision floating point -// values of a and b. -// https://msdn.microsoft.com/en-us/library/0a9y7xaa(v=vs.100).aspx -FORCE_INLINE __m128 _mm_min_ss(__m128 a, __m128 b) +// Computes the pairwise maxima of the 8 signed 16-bit integers from a and the 8 +// signed 16-bit integers from b. 
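//
// Usage sketch (illustrative; assumes standard SSE2 semantics and a
// hypothetical helper name): the signed 16-bit max/min pair is the usual
// branch-free way to clamp lanes to a range.
//
//   #include <emmintrin.h>
//
//   static __m128i clamp_epi16(__m128i v, short lo, short hi)
//   {
//       v = _mm_max_epi16(v, _mm_set1_epi16(lo));
//       return _mm_min_epi16(v, _mm_set1_epi16(hi));
//   }
//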
+// https://msdn.microsoft.com/en-us/LIBRary/3x060h7c(v=vs.100).aspx +FORCE_INLINE __m128i _mm_max_epi16(__m128i a, __m128i b) { - float32_t value = vgetq_lane_f32(_mm_min_ps(a, b), 0); - return vreinterpretq_m128_f32( - vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0)); + return vreinterpretq_m128i_s16( + vmaxq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); } // Computes the pairwise maxima of the 16 unsigned 8-bit integers from a and the @@ -4146,13 +4575,41 @@ FORCE_INLINE __m128i _mm_max_epu8(__m128i a, __m128i b) vmaxq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b))); } -// Computes the pairwise minima of the 16 unsigned 8-bit integers from a and the -// 16 unsigned 8-bit integers from b. -// https://msdn.microsoft.com/ko-kr/library/17k8cf58(v=vs.100).aspxx -FORCE_INLINE __m128i _mm_min_epu8(__m128i a, __m128i b) +// Compare packed double-precision (64-bit) floating-point elements in a and b, +// and store packed maximum values in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_pd +FORCE_INLINE __m128d _mm_max_pd(__m128d a, __m128d b) { - return vreinterpretq_m128i_u8( - vminq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b))); +#if defined(__aarch64__) + return vreinterpretq_m128d_f64( + vmaxq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); +#else + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b)); + uint64_t d[2]; + d[0] = (*(double *) &a0) > (*(double *) &b0) ? a0 : b0; + d[1] = (*(double *) &a1) > (*(double *) &b1) ? a1 : b1; + + return vreinterpretq_m128d_u64(vld1q_u64(d)); +#endif +} + +// Compare the lower double-precision (64-bit) floating-point elements in a and +// b, store the maximum value in the lower element of dst, and copy the upper +// element from a to the upper element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_sd +FORCE_INLINE __m128d _mm_max_sd(__m128d a, __m128d b) +{ +#if defined(__aarch64__) + return _mm_move_sd(a, _mm_max_pd(a, b)); +#else + double *da = (double *) &a; + double *db = (double *) &b; + double c[2] = {fmax(da[0], db[0]), da[1]}; + return vld1q_f32((float32_t *) c); +#endif } // Computes the pairwise minima of the 8 signed 16-bit integers from a and the 8 @@ -4164,110 +4621,246 @@ FORCE_INLINE __m128i _mm_min_epi16(__m128i a, __m128i b) vminq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); } -// Compare packed signed 8-bit integers in a and b, and store packed maximum -// values in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epi8 -FORCE_INLINE __m128i _mm_max_epi8(__m128i a, __m128i b) +// Computes the pairwise minima of the 16 unsigned 8-bit integers from a and the +// 16 unsigned 8-bit integers from b. +// https://msdn.microsoft.com/ko-kr/library/17k8cf58(v=vs.100).aspxx +FORCE_INLINE __m128i _mm_min_epu8(__m128i a, __m128i b) { - return vreinterpretq_m128i_s8( - vmaxq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); + return vreinterpretq_m128i_u8( + vminq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b))); } -// Compare packed unsigned 16-bit integers in a and b, and store packed maximum -// values in dst. 
-// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epu16 -FORCE_INLINE __m128i _mm_max_epu16(__m128i a, __m128i b) +// Compare packed double-precision (64-bit) floating-point elements in a and b, +// and store packed minimum values in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_pd +FORCE_INLINE __m128d _mm_min_pd(__m128d a, __m128d b) { - return vreinterpretq_m128i_u16( - vmaxq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b))); +#if defined(__aarch64__) + return vreinterpretq_m128d_f64( + vminq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); +#else + uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a)); + uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a)); + uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b)); + uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b)); + uint64_t d[2]; + d[0] = (*(double *) &a0) < (*(double *) &b0) ? a0 : b0; + d[1] = (*(double *) &a1) < (*(double *) &b1) ? a1 : b1; + return vreinterpretq_m128d_u64(vld1q_u64(d)); +#endif } -// Compare packed signed 8-bit integers in a and b, and store packed minimum -// values in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_epi8 -FORCE_INLINE __m128i _mm_min_epi8(__m128i a, __m128i b) +// Compare the lower double-precision (64-bit) floating-point elements in a and +// b, store the minimum value in the lower element of dst, and copy the upper +// element from a to the upper element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_sd +FORCE_INLINE __m128d _mm_min_sd(__m128d a, __m128d b) { - return vreinterpretq_m128i_s8( - vminq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); +#if defined(__aarch64__) + return _mm_move_sd(a, _mm_min_pd(a, b)); +#else + double *da = (double *) &a; + double *db = (double *) &b; + double c[2] = {fmin(da[0], db[0]), da[1]}; + return vld1q_f32((float32_t *) c); +#endif } -// Compare packed unsigned 16-bit integers in a and b, and store packed minimum -// values in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_epu16 -FORCE_INLINE __m128i _mm_min_epu16(__m128i a, __m128i b) +// Copy the lower 64-bit integer in a to the lower element of dst, and zero the +// upper element. +// +// dst[63:0] := a[63:0] +// dst[127:64] := 0 +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_move_epi64 +FORCE_INLINE __m128i _mm_move_epi64(__m128i a) { - return vreinterpretq_m128i_u16( - vminq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b))); + return vreinterpretq_m128i_s64( + vsetq_lane_s64(0, vreinterpretq_s64_m128i(a), 1)); } -// Computes the pairwise maxima of the 8 signed 16-bit integers from a and the 8 -// signed 16-bit integers from b. -// https://msdn.microsoft.com/en-us/LIBRary/3x060h7c(v=vs.100).aspx -FORCE_INLINE __m128i _mm_max_epi16(__m128i a, __m128i b) +// Move the lower double-precision (64-bit) floating-point element from b to the +// lower element of dst, and copy the upper element from a to the upper element +// of dst. 
+// +// dst[63:0] := b[63:0] +// dst[127:64] := a[127:64] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_move_sd +FORCE_INLINE __m128d _mm_move_sd(__m128d a, __m128d b) { - return vreinterpretq_m128i_s16( - vmaxq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); + return vreinterpretq_m128d_f32( + vcombine_f32(vget_low_f32(vreinterpretq_f32_m128d(b)), + vget_high_f32(vreinterpretq_f32_m128d(a)))); } -// epi versions of min/max -// Computes the pariwise maximums of the four signed 32-bit integer values of a -// and b. +// NEON does not provide a version of this function. +// Creates a 16-bit mask from the most significant bits of the 16 signed or +// unsigned 8-bit integers in a and zero extends the upper bits. +// https://msdn.microsoft.com/en-us/library/vstudio/s090c8fk(v=vs.100).aspx +FORCE_INLINE int _mm_movemask_epi8(__m128i a) +{ + // Use increasingly wide shifts+adds to collect the sign bits + // together. + // Since the widening shifts would be rather confusing to follow in little + // endian, everything will be illustrated in big endian order instead. This + // has a different result - the bits would actually be reversed on a big + // endian machine. + + // Starting input (only half the elements are shown): + // 89 ff 1d c0 00 10 99 33 + uint8x16_t input = vreinterpretq_u8_m128i(a); + + // Shift out everything but the sign bits with an unsigned shift right. + // + // Bytes of the vector:: + // 89 ff 1d c0 00 10 99 33 + // \ \ \ \ \ \ \ \ high_bits = (uint16x4_t)(input >> 7) + // | | | | | | | | + // 01 01 00 01 00 00 01 00 + // + // Bits of first important lane(s): + // 10001001 (89) + // \______ + // | + // 00000001 (01) + uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(input, 7)); + + // Merge the even lanes together with a 16-bit unsigned shift right + add. + // 'xx' represents garbage data which will be ignored in the final result. + // In the important bytes, the add functions like a binary OR. + // + // 01 01 00 01 00 00 01 00 + // \_ | \_ | \_ | \_ | paired16 = (uint32x4_t)(input + (input >> 7)) + // \| \| \| \| + // xx 03 xx 01 xx 00 xx 02 + // + // 00000001 00000001 (01 01) + // \_______ | + // \| + // xxxxxxxx xxxxxx11 (xx 03) + uint32x4_t paired16 = + vreinterpretq_u32_u16(vsraq_n_u16(high_bits, high_bits, 7)); + + // Repeat with a wider 32-bit shift + add. + // xx 03 xx 01 xx 00 xx 02 + // \____ | \____ | paired32 = (uint64x1_t)(paired16 + (paired16 >> + // 14)) + // \| \| + // xx xx xx 0d xx xx xx 02 + // + // 00000011 00000001 (03 01) + // \\_____ || + // '----.\|| + // xxxxxxxx xxxx1101 (xx 0d) + uint64x2_t paired32 = + vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14)); + + // Last, an even wider 64-bit shift + add to get our result in the low 8 bit + // lanes. xx xx xx 0d xx xx xx 02 + // \_________ | paired64 = (uint8x8_t)(paired32 + (paired32 >> + // 28)) + // \| + // xx xx xx xx xx xx xx d2 + // + // 00001101 00000010 (0d 02) + // \ \___ | | + // '---. \| | + // xxxxxxxx 11010010 (xx d2) + uint8x16_t paired64 = + vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28)); + + // Extract the low 8 bits from each 64-bit lane with 2 8-bit extracts. + // xx xx xx xx xx xx xx d2 + // || return paired64[0] + // d2 + // Note: Little endian would return the correct value 4b (01001011) instead. 
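+    // Lane 0 of paired64 now holds the mask bits collected from input bytes
+    // 0-7 and lane 8 holds the bits from input bytes 8-15; OR-ing the second
+    // byte shifted left by 8 assembles the final 16-bit movemask.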
+ return vgetq_lane_u8(paired64, 0) | ((int) vgetq_lane_u8(paired64, 8) << 8); +} + +// Set each bit of mask dst based on the most significant bit of the +// corresponding packed double-precision (64-bit) floating-point element in a. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movemask_pd +FORCE_INLINE int _mm_movemask_pd(__m128d a) +{ + uint64x2_t input = vreinterpretq_u64_m128d(a); + uint64x2_t high_bits = vshrq_n_u64(input, 63); + return vgetq_lane_u64(high_bits, 0) | (vgetq_lane_u64(high_bits, 1) << 1); +} + +// Copy the lower 64-bit integer in a to dst. // -// A 128-bit parameter that can be defined with the following equations: -// r0 := (a0 > b0) ? a0 : b0 -// r1 := (a1 > b1) ? a1 : b1 -// r2 := (a2 > b2) ? a2 : b2 -// r3 := (a3 > b3) ? a3 : b3 +// dst[63:0] := a[63:0] // -// https://msdn.microsoft.com/en-us/library/vstudio/bb514055(v=vs.100).aspx -FORCE_INLINE __m128i _mm_max_epi32(__m128i a, __m128i b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movepi64_pi64 +FORCE_INLINE __m64 _mm_movepi64_pi64(__m128i a) { - return vreinterpretq_m128i_s32( - vmaxq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); + return vreinterpret_m64_s64(vget_low_s64(vreinterpretq_s64_m128i(a))); } -// Computes the pariwise minima of the four signed 32-bit integer values of a -// and b. +// Copy the 64-bit integer a to the lower element of dst, and zero the upper +// element. // -// A 128-bit parameter that can be defined with the following equations: -// r0 := (a0 < b0) ? a0 : b0 -// r1 := (a1 < b1) ? a1 : b1 -// r2 := (a2 < b2) ? a2 : b2 -// r3 := (a3 < b3) ? a3 : b3 +// dst[63:0] := a[63:0] +// dst[127:64] := 0 // -// https://msdn.microsoft.com/en-us/library/vstudio/bb531476(v=vs.100).aspx -FORCE_INLINE __m128i _mm_min_epi32(__m128i a, __m128i b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movpi64_epi64 +FORCE_INLINE __m128i _mm_movpi64_epi64(__m64 a) { - return vreinterpretq_m128i_s32( - vminq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); + return vreinterpretq_m128i_s64( + vcombine_s64(vreinterpret_s64_m64(a), vdup_n_s64(0))); } -// Compare packed unsigned 32-bit integers in a and b, and store packed maximum -// values in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epu32 -FORCE_INLINE __m128i _mm_max_epu32(__m128i a, __m128i b) +// Multiply the low unsigned 32-bit integers from each packed 64-bit element in +// a and b, and store the unsigned 64-bit results in dst. +// +// r0 := (a0 & 0xFFFFFFFF) * (b0 & 0xFFFFFFFF) +// r1 := (a2 & 0xFFFFFFFF) * (b2 & 0xFFFFFFFF) +FORCE_INLINE __m128i _mm_mul_epu32(__m128i a, __m128i b) { - return vreinterpretq_m128i_u32( - vmaxq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b))); + // vmull_u32 upcasts instead of masking, so we downcast. + uint32x2_t a_lo = vmovn_u64(vreinterpretq_u64_m128i(a)); + uint32x2_t b_lo = vmovn_u64(vreinterpretq_u64_m128i(b)); + return vreinterpretq_m128i_u64(vmull_u32(a_lo, b_lo)); } -// Compare packed unsigned 32-bit integers in a and b, and store packed minimum -// values in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epu32 -FORCE_INLINE __m128i _mm_min_epu32(__m128i a, __m128i b) +// Multiply packed double-precision (64-bit) floating-point elements in a and b, +// and store the results in dst. 
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_pd +FORCE_INLINE __m128d _mm_mul_pd(__m128d a, __m128d b) { - return vreinterpretq_m128i_u32( - vminq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b))); +#if defined(__aarch64__) + return vreinterpretq_m128d_f64( + vmulq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); +#else + double *da = (double *) &a; + double *db = (double *) &b; + double c[2]; + c[0] = da[0] * db[0]; + c[1] = da[1] * db[1]; + return vld1q_f32((float32_t *) c); +#endif } -// Multiply the packed unsigned 16-bit integers in a and b, producing -// intermediate 32-bit integers, and store the high 16 bits of the intermediate -// integers in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mulhi_pu16 -FORCE_INLINE __m64 _mm_mulhi_pu16(__m64 a, __m64 b) +// Multiply the lower double-precision (64-bit) floating-point element in a and +// b, store the result in the lower element of dst, and copy the upper element +// from a to the upper element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_mul_sd +FORCE_INLINE __m128d _mm_mul_sd(__m128d a, __m128d b) { - return vreinterpret_m64_u16(vshrn_n_u32( - vmull_u16(vreinterpret_u16_m64(a), vreinterpret_u16_m64(b)), 16)); + return _mm_move_sd(a, _mm_mul_pd(a, b)); +} + +// Multiply the low unsigned 32-bit integers from a and b, and store the +// unsigned 64-bit result in dst. +// +// dst[63:0] := a[31:0] * b[31:0] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mul_su32 +FORCE_INLINE __m64 _mm_mul_su32(__m64 a, __m64 b) +{ + return vreinterpret_m64_u64(vget_low_u64( + vmull_u32(vreinterpret_u32_m64(a), vreinterpret_u32_m64(b)))); } // Multiplies the 8 signed 16-bit integers from a by the 8 signed 16-bit @@ -4321,1341 +4914,2413 @@ FORCE_INLINE __m128i _mm_mulhi_epu16(__m128i a, __m128i b) #endif } -// Computes pairwise add of each argument as single-precision, floating-point -// values a and b. -// https://msdn.microsoft.com/en-us/library/yd9wecaa.aspx -FORCE_INLINE __m128 _mm_hadd_ps(__m128 a, __m128 b) +// Multiplies the 8 signed or unsigned 16-bit integers from a by the 8 signed or +// unsigned 16-bit integers from b. +// +// r0 := (a0 * b0)[15:0] +// r1 := (a1 * b1)[15:0] +// ... +// r7 := (a7 * b7)[15:0] +// +// https://msdn.microsoft.com/en-us/library/vstudio/9ks1472s(v=vs.100).aspx +FORCE_INLINE __m128i _mm_mullo_epi16(__m128i a, __m128i b) { -#if defined(__aarch64__) - return vreinterpretq_m128_f32( - vpaddq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); -#else - float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a)); - float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a)); - float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b)); - float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b)); - return vreinterpretq_m128_f32( - vcombine_f32(vpadd_f32(a10, a32), vpadd_f32(b10, b32))); -#endif + return vreinterpretq_m128i_s16( + vmulq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); } -// Computes pairwise add of each argument as a 16-bit signed or unsigned integer -// values a and b. -FORCE_INLINE __m128i _mm_hadd_epi16(__m128i _a, __m128i _b) +// Compute the bitwise OR of packed double-precision (64-bit) floating-point +// elements in a and b, and store the results in dst. 
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_or_pd +FORCE_INLINE __m128d _mm_or_pd(__m128d a, __m128d b) { - int16x8_t a = vreinterpretq_s16_m128i(_a); - int16x8_t b = vreinterpretq_s16_m128i(_b); -#if defined(__aarch64__) - return vreinterpretq_m128i_s16(vpaddq_s16(a, b)); -#else - return vreinterpretq_m128i_s16( - vcombine_s16(vpadd_s16(vget_low_s16(a), vget_high_s16(a)), - vpadd_s16(vget_low_s16(b), vget_high_s16(b)))); -#endif + return vreinterpretq_m128d_s64( + vorrq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b))); } -// Horizontally substract adjacent pairs of single-precision (32-bit) -// floating-point elements in a and b, and pack the results in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hsub_ps -FORCE_INLINE __m128 _mm_hsub_ps(__m128 _a, __m128 _b) +// Computes the bitwise OR of the 128-bit value in a and the 128-bit value in b. +// +// r := a | b +// +// https://msdn.microsoft.com/en-us/library/vstudio/ew8ty0db(v=vs.100).aspx +FORCE_INLINE __m128i _mm_or_si128(__m128i a, __m128i b) { -#if defined(__aarch64__) - return vreinterpretq_m128_f32(vsubq_f32( - vuzp1q_f32(vreinterpretq_f32_m128(_a), vreinterpretq_f32_m128(_b)), - vuzp2q_f32(vreinterpretq_f32_m128(_a), vreinterpretq_f32_m128(_b)))); -#else - float32x4x2_t c = - vuzpq_f32(vreinterpretq_f32_m128(_a), vreinterpretq_f32_m128(_b)); - return vreinterpretq_m128_f32(vsubq_f32(c.val[0], c.val[1])); -#endif + return vreinterpretq_m128i_s32( + vorrq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); } -// Horizontally add adjacent pairs of 16-bit integers in a and b, and pack the -// signed 16-bit results in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hadd_pi16 -FORCE_INLINE __m64 _mm_hadd_pi16(__m64 a, __m64 b) +// Packs the 16 signed 16-bit integers from a and b into 8-bit integers and +// saturates. +// https://msdn.microsoft.com/en-us/library/k4y4f7w5%28v=vs.90%29.aspx +FORCE_INLINE __m128i _mm_packs_epi16(__m128i a, __m128i b) { - return vreinterpret_m64_s16( - vpadd_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b))); + return vreinterpretq_m128i_s8( + vcombine_s8(vqmovn_s16(vreinterpretq_s16_m128i(a)), + vqmovn_s16(vreinterpretq_s16_m128i(b)))); } -// Horizontally add adjacent pairs of 32-bit integers in a and b, and pack the -// signed 32-bit results in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hadd_pi32 -FORCE_INLINE __m64 _mm_hadd_pi32(__m64 a, __m64 b) +// Packs the 8 signed 32-bit integers from a and b into signed 16-bit integers +// and saturates. +// +// r0 := SignedSaturate(a0) +// r1 := SignedSaturate(a1) +// r2 := SignedSaturate(a2) +// r3 := SignedSaturate(a3) +// r4 := SignedSaturate(b0) +// r5 := SignedSaturate(b1) +// r6 := SignedSaturate(b2) +// r7 := SignedSaturate(b3) +// +// https://msdn.microsoft.com/en-us/library/393t56f9%28v=vs.90%29.aspx +FORCE_INLINE __m128i _mm_packs_epi32(__m128i a, __m128i b) { - return vreinterpret_m64_s32( - vpadd_s32(vreinterpret_s32_m64(a), vreinterpret_s32_m64(b))); + return vreinterpretq_m128i_s16( + vcombine_s16(vqmovn_s32(vreinterpretq_s32_m128i(a)), + vqmovn_s32(vreinterpretq_s32_m128i(b)))); } -// Computes pairwise difference of each argument as a 16-bit signed or unsigned -// integer values a and b. -FORCE_INLINE __m128i _mm_hsub_epi16(__m128i _a, __m128i _b) +// Packs the 16 signed 16 - bit integers from a and b into 8 - bit unsigned +// integers and saturates. 
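+// As an illustration (values assumed): a 16-bit input of -12 saturates to 0
+// and an input of 300 saturates to 255, while values already in the 0..255
+// range are copied through unchanged.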
+//
+// r0 := UnsignedSaturate(a0)
+// r1 := UnsignedSaturate(a1)
+// ...
+// r7 := UnsignedSaturate(a7)
+// r8 := UnsignedSaturate(b0)
+// r9 := UnsignedSaturate(b1)
+// ...
+// r15 := UnsignedSaturate(b7)
+//
+// https://msdn.microsoft.com/en-us/library/07ad1wx4(v=vs.100).aspx
+FORCE_INLINE __m128i _mm_packus_epi16(const __m128i a, const __m128i b)
{
-    int32x4_t a = vreinterpretq_s32_m128i(_a);
-    int32x4_t b = vreinterpretq_s32_m128i(_b);
-    // Interleave using vshrn/vmovn
-    // [a0|a2|a4|a6|b0|b2|b4|b6]
-    // [a1|a3|a5|a7|b1|b3|b5|b7]
-    int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b));
-    int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(a, 16), vshrn_n_s32(b, 16));
-    // Subtract
-    return vreinterpretq_m128i_s16(vsubq_s16(ab0246, ab1357));
+    return vreinterpretq_m128i_u8(
+        vcombine_u8(vqmovun_s16(vreinterpretq_s16_m128i(a)),
+                    vqmovun_s16(vreinterpretq_s16_m128i(b))));
}
 
-// Computes saturated pairwise sub of each argument as a 16-bit signed
-// integer values a and b.
-FORCE_INLINE __m128i _mm_hadds_epi16(__m128i _a, __m128i _b)
+// Pause the processor. This is typically used in spin-wait loops; depending
+// on the x86 processor, the typical delay is in the 40-100 cycle range. The
+// 'yield' instruction isn't a good fit because it's effectively a nop on most
+// Arm cores. Experience with several databases has shown an 'isb' is a
+// reasonable approximation.
+FORCE_INLINE void _mm_pause()
{
-#if defined(__aarch64__)
-    int16x8_t a = vreinterpretq_s16_m128i(_a);
-    int16x8_t b = vreinterpretq_s16_m128i(_b);
-    return vreinterpretq_s64_s16(
-        vqaddq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b)));
-#else
-    int32x4_t a = vreinterpretq_s32_m128i(_a);
-    int32x4_t b = vreinterpretq_s32_m128i(_b);
-    // Interleave using vshrn/vmovn
-    // [a0|a2|a4|a6|b0|b2|b4|b6]
-    // [a1|a3|a5|a7|b1|b3|b5|b7]
-    int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b));
-    int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(a, 16), vshrn_n_s32(b, 16));
-    // Saturated add
-    return vreinterpretq_m128i_s16(vqaddq_s16(ab0246, ab1357));
-#endif
+    __asm__ __volatile__("isb\n");
}
 
-// Computes saturated pairwise difference of each argument as a 16-bit signed
-// integer values a and b.
-// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hsubs_epi16
-FORCE_INLINE __m128i _mm_hsubs_epi16(__m128i _a, __m128i _b)
+// Compute the absolute differences of packed unsigned 8-bit integers in a and
+// b, then horizontally sum each consecutive 8 differences to produce two
+// unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
+// 16 bits of 64-bit elements in dst.
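+// Illustrative usage sketch, assuming the usual SSE2 accessors
+// _mm_cvtsi128_si32 and _mm_extract_epi16 (not shown in this hunk); the helper
+// name is hypothetical. It sums |p[i] - q[i]| over one 16-byte block:
+//
+//     static inline int sad16_u8(const uint8_t *p, const uint8_t *q)
+//     {
+//         __m128i d = _mm_sad_epu8(_mm_loadu_si128((const __m128i *) p),
+//                                  _mm_loadu_si128((const __m128i *) q));
+//         // low 64-bit lane covers bytes 0..7, high lane covers bytes 8..15
+//         return _mm_cvtsi128_si32(d) + _mm_extract_epi16(d, 4);
+//     }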
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sad_epu8 +FORCE_INLINE __m128i _mm_sad_epu8(__m128i a, __m128i b) { -#if defined(__aarch64__) - int16x8_t a = vreinterpretq_s16_m128i(_a); - int16x8_t b = vreinterpretq_s16_m128i(_b); - return vreinterpretq_s64_s16( - vqsubq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b))); -#else - int32x4_t a = vreinterpretq_s32_m128i(_a); - int32x4_t b = vreinterpretq_s32_m128i(_b); - // Interleave using vshrn/vmovn - // [a0|a2|a4|a6|b0|b2|b4|b6] - // [a1|a3|a5|a7|b1|b3|b5|b7] - int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b)); - int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(a, 16), vshrn_n_s32(b, 16)); - // Saturated subtract - return vreinterpretq_m128i_s16(vqsubq_s16(ab0246, ab1357)); -#endif + uint16x8_t t = vpaddlq_u8(vabdq_u8((uint8x16_t) a, (uint8x16_t) b)); + return vreinterpretq_m128i_u64(vpaddlq_u32(vpaddlq_u16(t))); } -// Computes pairwise add of each argument as a 32-bit signed or unsigned integer -// values a and b. -FORCE_INLINE __m128i _mm_hadd_epi32(__m128i _a, __m128i _b) +// Sets the 8 signed 16-bit integer values. +// https://msdn.microsoft.com/en-au/library/3e0fek84(v=vs.90).aspx +FORCE_INLINE __m128i _mm_set_epi16(short i7, + short i6, + short i5, + short i4, + short i3, + short i2, + short i1, + short i0) { - int32x4_t a = vreinterpretq_s32_m128i(_a); - int32x4_t b = vreinterpretq_s32_m128i(_b); - return vreinterpretq_m128i_s32( - vcombine_s32(vpadd_s32(vget_low_s32(a), vget_high_s32(a)), - vpadd_s32(vget_low_s32(b), vget_high_s32(b)))); + int16_t ALIGN_STRUCT(16) data[8] = {i0, i1, i2, i3, i4, i5, i6, i7}; + return vreinterpretq_m128i_s16(vld1q_s16(data)); } -// Computes pairwise difference of each argument as a 32-bit signed or unsigned -// integer values a and b. -FORCE_INLINE __m128i _mm_hsub_epi32(__m128i _a, __m128i _b) +// Sets the 4 signed 32-bit integer values. +// https://msdn.microsoft.com/en-us/library/vstudio/019beekt(v=vs.100).aspx +FORCE_INLINE __m128i _mm_set_epi32(int i3, int i2, int i1, int i0) { - int64x2_t a = vreinterpretq_s64_m128i(_a); - int64x2_t b = vreinterpretq_s64_m128i(_b); - // Interleave using vshrn/vmovn - // [a0|a2|b0|b2] - // [a1|a2|b1|b3] - int32x4_t ab02 = vcombine_s32(vmovn_s64(a), vmovn_s64(b)); - int32x4_t ab13 = vcombine_s32(vshrn_n_s64(a, 32), vshrn_n_s64(b, 32)); - // Subtract - return vreinterpretq_m128i_s32(vsubq_s32(ab02, ab13)); + int32_t ALIGN_STRUCT(16) data[4] = {i0, i1, i2, i3}; + return vreinterpretq_m128i_s32(vld1q_s32(data)); } -// Kahan summation for accurate summation of floating-point numbers. -// http://blog.zachbjornson.com/2019/08/11/fast-float-summation.html -FORCE_INLINE void _sse2neon_kadd_f32(float *sum, float *c, float y) +// Returns the __m128i structure with its two 64-bit integer values +// initialized to the values of the two 64-bit integers passed in. +// https://msdn.microsoft.com/en-us/library/dk2sdw0h(v=vs.120).aspx +FORCE_INLINE __m128i _mm_set_epi64(__m64 i1, __m64 i2) { - y -= *c; - float t = *sum + y; - *c = (t - *sum) - y; - *sum = t; + return _mm_set_epi64x((int64_t) i1, (int64_t) i2); } -// Conditionally multiply the packed single-precision (32-bit) floating-point -// elements in a and b using the high 4 bits in imm8, sum the four products, -// and conditionally store the sum in dst using the low 4 bits of imm. 
-// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_dp_ps -FORCE_INLINE __m128 _mm_dp_ps(__m128 a, __m128 b, const int imm) +// Returns the __m128i structure with its two 64-bit integer values +// initialized to the values of the two 64-bit integers passed in. +// https://msdn.microsoft.com/en-us/library/dk2sdw0h(v=vs.120).aspx +FORCE_INLINE __m128i _mm_set_epi64x(int64_t i1, int64_t i2) { -#if defined(__aarch64__) - /* shortcuts */ - if (imm == 0xFF) { - return _mm_set1_ps(vaddvq_f32(_mm_mul_ps(a, b))); - } - if (imm == 0x7F) { - float32x4_t m = _mm_mul_ps(a, b); - m[3] = 0; - return _mm_set1_ps(vaddvq_f32(m)); - } -#endif - - float s = 0, c = 0; - float32x4_t f32a = vreinterpretq_f32_m128(a); - float32x4_t f32b = vreinterpretq_f32_m128(b); - - /* To improve the accuracy of floating-point summation, Kahan algorithm - * is used for each operation. - */ - if (imm & (1 << 4)) - _sse2neon_kadd_f32(&s, &c, f32a[0] * f32b[0]); - if (imm & (1 << 5)) - _sse2neon_kadd_f32(&s, &c, f32a[1] * f32b[1]); - if (imm & (1 << 6)) - _sse2neon_kadd_f32(&s, &c, f32a[2] * f32b[2]); - if (imm & (1 << 7)) - _sse2neon_kadd_f32(&s, &c, f32a[3] * f32b[3]); - s += c; - - float32x4_t res = { - (imm & 0x1) ? s : 0, - (imm & 0x2) ? s : 0, - (imm & 0x4) ? s : 0, - (imm & 0x8) ? s : 0, - }; - return vreinterpretq_m128_f32(res); + return vreinterpretq_m128i_s64( + vcombine_s64(vcreate_s64(i2), vcreate_s64(i1))); } -/* Compare operations */ +// Sets the 16 signed 8-bit integer values. +// https://msdn.microsoft.com/en-us/library/x0cx8zd3(v=vs.90).aspx +FORCE_INLINE __m128i _mm_set_epi8(signed char b15, + signed char b14, + signed char b13, + signed char b12, + signed char b11, + signed char b10, + signed char b9, + signed char b8, + signed char b7, + signed char b6, + signed char b5, + signed char b4, + signed char b3, + signed char b2, + signed char b1, + signed char b0) +{ + int8_t ALIGN_STRUCT(16) + data[16] = {(int8_t) b0, (int8_t) b1, (int8_t) b2, (int8_t) b3, + (int8_t) b4, (int8_t) b5, (int8_t) b6, (int8_t) b7, + (int8_t) b8, (int8_t) b9, (int8_t) b10, (int8_t) b11, + (int8_t) b12, (int8_t) b13, (int8_t) b14, (int8_t) b15}; + return (__m128i) vld1q_s8(data); +} -// Compares for less than -// https://msdn.microsoft.com/en-us/library/vstudio/f330yhc8(v=vs.100).aspx -FORCE_INLINE __m128 _mm_cmplt_ps(__m128 a, __m128 b) +// Set packed double-precision (64-bit) floating-point elements in dst with the +// supplied values. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_pd +FORCE_INLINE __m128d _mm_set_pd(double e1, double e0) { - return vreinterpretq_m128_u32( - vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); + double ALIGN_STRUCT(16) data[2] = {e0, e1}; +#if defined(__aarch64__) + return vreinterpretq_m128d_f64(vld1q_f64((float64_t *) data)); +#else + return vreinterpretq_m128d_f32(vld1q_f32((float32_t *) data)); +#endif } -// Compares for less than -// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/fy94wye7(v=vs.100) -FORCE_INLINE __m128 _mm_cmplt_ss(__m128 a, __m128 b) +// Broadcast double-precision (64-bit) floating-point value a to all elements of +// dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_pd1 +#define _mm_set_pd1 _mm_set1_pd + +// Copy double-precision (64-bit) floating-point element a to the lower element +// of dst, and zero the upper element. 
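+// For example, _mm_set_sd(3.0) yields dst = { 3.0, 0.0 } (3.0 in the lower
+// element, upper element zeroed), matching the _mm_set_pd(0, a) call below.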
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_sd +FORCE_INLINE __m128d _mm_set_sd(double a) { - return _mm_move_ss(a, _mm_cmplt_ps(a, b)); + return _mm_set_pd(0, a); } -// Compares for greater than. +// Sets the 8 signed 16-bit integer values to w. // -// r0 := (a0 > b0) ? 0xffffffff : 0x0 -// r1 := (a1 > b1) ? 0xffffffff : 0x0 -// r2 := (a2 > b2) ? 0xffffffff : 0x0 -// r3 := (a3 > b3) ? 0xffffffff : 0x0 +// r0 := w +// r1 := w +// ... +// r7 := w // -// https://msdn.microsoft.com/en-us/library/vstudio/11dy102s(v=vs.100).aspx -FORCE_INLINE __m128 _mm_cmpgt_ps(__m128 a, __m128 b) +// https://msdn.microsoft.com/en-us/library/k0ya3x0e(v=vs.90).aspx +FORCE_INLINE __m128i _mm_set1_epi16(short w) { - return vreinterpretq_m128_u32( - vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); + return vreinterpretq_m128i_s16(vdupq_n_s16(w)); } -// Compares for greater than. -// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/1xyyyy9e(v=vs.100) -FORCE_INLINE __m128 _mm_cmpgt_ss(__m128 a, __m128 b) +// Sets the 4 signed 32-bit integer values to i. +// +// r0 := i +// r1 := i +// r2 := i +// r3 := I +// +// https://msdn.microsoft.com/en-us/library/vstudio/h4xscxat(v=vs.100).aspx +FORCE_INLINE __m128i _mm_set1_epi32(int _i) { - return _mm_move_ss(a, _mm_cmpgt_ps(a, b)); + return vreinterpretq_m128i_s32(vdupq_n_s32(_i)); } -// Compares for greater than or equal. -// https://msdn.microsoft.com/en-us/library/vstudio/fs813y2t(v=vs.100).aspx -FORCE_INLINE __m128 _mm_cmpge_ps(__m128 a, __m128 b) +// Sets the 2 signed 64-bit integer values to i. +// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/whtfzhzk(v=vs.100) +FORCE_INLINE __m128i _mm_set1_epi64(__m64 _i) { - return vreinterpretq_m128_u32( - vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); + return vreinterpretq_m128i_s64(vdupq_n_s64((int64_t) _i)); } -// Compares for greater than or equal. -// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/kesh3ddc(v=vs.100) -FORCE_INLINE __m128 _mm_cmpge_ss(__m128 a, __m128 b) +// Sets the 2 signed 64-bit integer values to i. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_epi64x +FORCE_INLINE __m128i _mm_set1_epi64x(int64_t _i) { - return _mm_move_ss(a, _mm_cmpge_ps(a, b)); + return vreinterpretq_m128i_s64(vdupq_n_s64(_i)); } -// Compares for less than or equal. +// Sets the 16 signed 8-bit integer values to b. // -// r0 := (a0 <= b0) ? 0xffffffff : 0x0 -// r1 := (a1 <= b1) ? 0xffffffff : 0x0 -// r2 := (a2 <= b2) ? 0xffffffff : 0x0 -// r3 := (a3 <= b3) ? 0xffffffff : 0x0 +// r0 := b +// r1 := b +// ... +// r15 := b // -// https://msdn.microsoft.com/en-us/library/vstudio/1s75w83z(v=vs.100).aspx -FORCE_INLINE __m128 _mm_cmple_ps(__m128 a, __m128 b) +// https://msdn.microsoft.com/en-us/library/6e14xhyf(v=vs.100).aspx +FORCE_INLINE __m128i _mm_set1_epi8(signed char w) { - return vreinterpretq_m128_u32( - vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); + return vreinterpretq_m128i_s8(vdupq_n_s8(w)); } -// Compares for less than or equal. -// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/a7x0hbhw(v=vs.100) -FORCE_INLINE __m128 _mm_cmple_ss(__m128 a, __m128 b) +// Broadcast double-precision (64-bit) floating-point value a to all elements of +// dst. 
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_pd +FORCE_INLINE __m128d _mm_set1_pd(double d) { - return _mm_move_ss(a, _mm_cmple_ps(a, b)); +#if defined(__aarch64__) + return vreinterpretq_m128d_f64(vdupq_n_f64(d)); +#else + return vreinterpretq_m128d_s64(vdupq_n_s64(*(int64_t *) &d)); +#endif } -// Compares for equality. -// https://msdn.microsoft.com/en-us/library/vstudio/36aectz5(v=vs.100).aspx -FORCE_INLINE __m128 _mm_cmpeq_ps(__m128 a, __m128 b) +// Sets the 8 signed 16-bit integer values in reverse order. +// +// Return Value +// r0 := w0 +// r1 := w1 +// ... +// r7 := w7 +FORCE_INLINE __m128i _mm_setr_epi16(short w0, + short w1, + short w2, + short w3, + short w4, + short w5, + short w6, + short w7) { - return vreinterpretq_m128_u32( - vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); + int16_t ALIGN_STRUCT(16) data[8] = {w0, w1, w2, w3, w4, w5, w6, w7}; + return vreinterpretq_m128i_s16(vld1q_s16((int16_t *) data)); } -// Compares for equality. -// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/k423z28e(v=vs.100) -FORCE_INLINE __m128 _mm_cmpeq_ss(__m128 a, __m128 b) +// Sets the 4 signed 32-bit integer values in reverse order +// https://technet.microsoft.com/en-us/library/security/27yb3ee5(v=vs.90).aspx +FORCE_INLINE __m128i _mm_setr_epi32(int i3, int i2, int i1, int i0) { - return _mm_move_ss(a, _mm_cmpeq_ps(a, b)); + int32_t ALIGN_STRUCT(16) data[4] = {i3, i2, i1, i0}; + return vreinterpretq_m128i_s32(vld1q_s32(data)); } -// Compares for inequality. -// https://msdn.microsoft.com/en-us/library/sf44thbx(v=vs.100).aspx -FORCE_INLINE __m128 _mm_cmpneq_ps(__m128 a, __m128 b) +// Set packed 64-bit integers in dst with the supplied values in reverse order. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setr_epi64 +FORCE_INLINE __m128i _mm_setr_epi64(__m64 e1, __m64 e0) { - return vreinterpretq_m128_u32(vmvnq_u32( - vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)))); + return vreinterpretq_m128i_s64(vcombine_s64(e1, e0)); } -// Compares for inequality. -// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/ekya8fh4(v=vs.100) -FORCE_INLINE __m128 _mm_cmpneq_ss(__m128 a, __m128 b) +// Sets the 16 signed 8-bit integer values in reverse order. +// https://msdn.microsoft.com/en-us/library/2khb9c7k(v=vs.90).aspx +FORCE_INLINE __m128i _mm_setr_epi8(signed char b0, + signed char b1, + signed char b2, + signed char b3, + signed char b4, + signed char b5, + signed char b6, + signed char b7, + signed char b8, + signed char b9, + signed char b10, + signed char b11, + signed char b12, + signed char b13, + signed char b14, + signed char b15) { - return _mm_move_ss(a, _mm_cmpneq_ps(a, b)); + int8_t ALIGN_STRUCT(16) + data[16] = {(int8_t) b0, (int8_t) b1, (int8_t) b2, (int8_t) b3, + (int8_t) b4, (int8_t) b5, (int8_t) b6, (int8_t) b7, + (int8_t) b8, (int8_t) b9, (int8_t) b10, (int8_t) b11, + (int8_t) b12, (int8_t) b13, (int8_t) b14, (int8_t) b15}; + return (__m128i) vld1q_s8(data); } -// Compares for not greater than or equal. -// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/wsexys62(v=vs.100) -FORCE_INLINE __m128 _mm_cmpnge_ps(__m128 a, __m128 b) +// Set packed double-precision (64-bit) floating-point elements in dst with the +// supplied values in reverse order. 
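+// For example, _mm_setr_pd(1.0, 2.0) places 1.0 in the lower element and 2.0
+// in the upper element, i.e. it is equivalent to _mm_set_pd(2.0, 1.0).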
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setr_pd +FORCE_INLINE __m128d _mm_setr_pd(double e1, double e0) { - return _mm_cmplt_ps(a, b); + return _mm_set_pd(e0, e1); } -// Compares for not greater than or equal. -// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/fk2y80s8(v=vs.100) -FORCE_INLINE __m128 _mm_cmpnge_ss(__m128 a, __m128 b) +// Return vector of type __m128d with all elements set to zero. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_setzero_pd +FORCE_INLINE __m128d _mm_setzero_pd(void) { - return _mm_cmplt_ss(a, b); +#if defined(__aarch64__) + return vreinterpretq_m128d_f64(vdupq_n_f64(0)); +#else + return vreinterpretq_m128d_f32(vdupq_n_f32(0)); +#endif } -// Compares for not greater than. -// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/d0xh7w0s(v=vs.100) -FORCE_INLINE __m128 _mm_cmpngt_ps(__m128 a, __m128 b) +// Sets the 128-bit value to zero +// https://msdn.microsoft.com/en-us/library/vstudio/ys7dw0kh(v=vs.100).aspx +FORCE_INLINE __m128i _mm_setzero_si128(void) { - return _mm_cmple_ps(a, b); + return vreinterpretq_m128i_s32(vdupq_n_s32(0)); } -// Compares for not greater than. -// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/z7x9ydwh(v=vs.100) -FORCE_INLINE __m128 _mm_cmpngt_ss(__m128 a, __m128 b) +// Shuffles the 4 signed or unsigned 32-bit integers in a as specified by imm. +// https://msdn.microsoft.com/en-us/library/56f67xbk%28v=vs.90%29.aspx +// FORCE_INLINE __m128i _mm_shuffle_epi32(__m128i a, +// __constrange(0,255) int imm) +#if __has_builtin(__builtin_shufflevector) +#define _mm_shuffle_epi32(a, imm) \ + __extension__({ \ + int32x4_t _input = vreinterpretq_s32_m128i(a); \ + int32x4_t _shuf = __builtin_shufflevector( \ + _input, _input, (imm) & (0x3), ((imm) >> 2) & 0x3, \ + ((imm) >> 4) & 0x3, ((imm) >> 6) & 0x3); \ + vreinterpretq_m128i_s32(_shuf); \ + }) +#else // generic +#define _mm_shuffle_epi32(a, imm) \ + __extension__({ \ + __m128i ret; \ + switch (imm) { \ + case _MM_SHUFFLE(1, 0, 3, 2): \ + ret = _mm_shuffle_epi_1032((a)); \ + break; \ + case _MM_SHUFFLE(2, 3, 0, 1): \ + ret = _mm_shuffle_epi_2301((a)); \ + break; \ + case _MM_SHUFFLE(0, 3, 2, 1): \ + ret = _mm_shuffle_epi_0321((a)); \ + break; \ + case _MM_SHUFFLE(2, 1, 0, 3): \ + ret = _mm_shuffle_epi_2103((a)); \ + break; \ + case _MM_SHUFFLE(1, 0, 1, 0): \ + ret = _mm_shuffle_epi_1010((a)); \ + break; \ + case _MM_SHUFFLE(1, 0, 0, 1): \ + ret = _mm_shuffle_epi_1001((a)); \ + break; \ + case _MM_SHUFFLE(0, 1, 0, 1): \ + ret = _mm_shuffle_epi_0101((a)); \ + break; \ + case _MM_SHUFFLE(2, 2, 1, 1): \ + ret = _mm_shuffle_epi_2211((a)); \ + break; \ + case _MM_SHUFFLE(0, 1, 2, 2): \ + ret = _mm_shuffle_epi_0122((a)); \ + break; \ + case _MM_SHUFFLE(3, 3, 3, 2): \ + ret = _mm_shuffle_epi_3332((a)); \ + break; \ + case _MM_SHUFFLE(0, 0, 0, 0): \ + ret = _mm_shuffle_epi32_splat((a), 0); \ + break; \ + case _MM_SHUFFLE(1, 1, 1, 1): \ + ret = _mm_shuffle_epi32_splat((a), 1); \ + break; \ + case _MM_SHUFFLE(2, 2, 2, 2): \ + ret = _mm_shuffle_epi32_splat((a), 2); \ + break; \ + case _MM_SHUFFLE(3, 3, 3, 3): \ + ret = _mm_shuffle_epi32_splat((a), 3); \ + break; \ + default: \ + ret = _mm_shuffle_epi32_default((a), (imm)); \ + break; \ + } \ + ret; \ + }) +#endif + +// Shuffle double-precision (64-bit) floating-point elements using the control +// in imm8, and store the results in dst. +// +// dst[63:0] := (imm8[0] == 0) ? 
a[63:0] : a[127:64] +// dst[127:64] := (imm8[1] == 0) ? b[63:0] : b[127:64] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shuffle_pd +#if __has_builtin(__builtin_shufflevector) +#define _mm_shuffle_pd(a, b, imm8) \ + vreinterpretq_m128d_s64(__builtin_shufflevector( \ + vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b), imm8 & 0x1, \ + ((imm8 & 0x2) >> 1) + 2)) +#else +#define _mm_shuffle_pd(a, b, imm8) \ + _mm_castsi128_pd(_mm_set_epi64x( \ + vgetq_lane_s64(vreinterpretq_s64_m128d(b), (imm8 & 0x2) >> 1), \ + vgetq_lane_s64(vreinterpretq_s64_m128d(a), imm8 & 0x1))) +#endif + +// FORCE_INLINE __m128i _mm_shufflehi_epi16(__m128i a, +// __constrange(0,255) int imm) +#if __has_builtin(__builtin_shufflevector) +#define _mm_shufflehi_epi16(a, imm) \ + __extension__({ \ + int16x8_t _input = vreinterpretq_s16_m128i(a); \ + int16x8_t _shuf = __builtin_shufflevector( \ + _input, _input, 0, 1, 2, 3, ((imm) & (0x3)) + 4, \ + (((imm) >> 2) & 0x3) + 4, (((imm) >> 4) & 0x3) + 4, \ + (((imm) >> 6) & 0x3) + 4); \ + vreinterpretq_m128i_s16(_shuf); \ + }) +#else // generic +#define _mm_shufflehi_epi16(a, imm) _mm_shufflehi_epi16_function((a), (imm)) +#endif + +// FORCE_INLINE __m128i _mm_shufflelo_epi16(__m128i a, +// __constrange(0,255) int imm) +#if __has_builtin(__builtin_shufflevector) +#define _mm_shufflelo_epi16(a, imm) \ + __extension__({ \ + int16x8_t _input = vreinterpretq_s16_m128i(a); \ + int16x8_t _shuf = __builtin_shufflevector( \ + _input, _input, ((imm) & (0x3)), (((imm) >> 2) & 0x3), \ + (((imm) >> 4) & 0x3), (((imm) >> 6) & 0x3), 4, 5, 6, 7); \ + vreinterpretq_m128i_s16(_shuf); \ + }) +#else // generic +#define _mm_shufflelo_epi16(a, imm) _mm_shufflelo_epi16_function((a), (imm)) +#endif + +// Shift packed 16-bit integers in a left by count while shifting in zeros, and +// store the results in dst. +// +// FOR j := 0 to 7 +// i := j*16 +// IF count[63:0] > 15 +// dst[i+15:i] := 0 +// ELSE +// dst[i+15:i] := ZeroExtend16(a[i+15:i] << count[63:0]) +// FI +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sll_epi16 +FORCE_INLINE __m128i _mm_sll_epi16(__m128i a, __m128i count) { - return _mm_cmple_ss(a, b); + uint64_t c = vreinterpretq_nth_u64_m128i(count, 0); + if (_sse2neon_unlikely(c & ~15)) + return _mm_setzero_si128(); + + int16x8_t vc = vdupq_n_s16((int16_t) c); + return vreinterpretq_m128i_s16(vshlq_s16(vreinterpretq_s16_m128i(a), vc)); } -// Compares for not less than or equal. -// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/6a330kxw(v=vs.100) -FORCE_INLINE __m128 _mm_cmpnle_ps(__m128 a, __m128 b) +// Shift packed 32-bit integers in a left by count while shifting in zeros, and +// store the results in dst. +// +// FOR j := 0 to 3 +// i := j*32 +// IF count[63:0] > 31 +// dst[i+31:i] := 0 +// ELSE +// dst[i+31:i] := ZeroExtend32(a[i+31:i] << count[63:0]) +// FI +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sll_epi32 +FORCE_INLINE __m128i _mm_sll_epi32(__m128i a, __m128i count) { - return _mm_cmpgt_ps(a, b); + uint64_t c = vreinterpretq_nth_u64_m128i(count, 0); + if (_sse2neon_unlikely(c & ~31)) + return _mm_setzero_si128(); + + int32x4_t vc = vdupq_n_s32((int32_t) c); + return vreinterpretq_m128i_s32(vshlq_s32(vreinterpretq_s32_m128i(a), vc)); } -// Compares for not less than or equal. 
-// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/z7x9ydwh(v=vs.100) -FORCE_INLINE __m128 _mm_cmpnle_ss(__m128 a, __m128 b) +// Shift packed 64-bit integers in a left by count while shifting in zeros, and +// store the results in dst. +// +// FOR j := 0 to 1 +// i := j*64 +// IF count[63:0] > 63 +// dst[i+63:i] := 0 +// ELSE +// dst[i+63:i] := ZeroExtend64(a[i+63:i] << count[63:0]) +// FI +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sll_epi64 +FORCE_INLINE __m128i _mm_sll_epi64(__m128i a, __m128i count) { - return _mm_cmpgt_ss(a, b); + uint64_t c = vreinterpretq_nth_u64_m128i(count, 0); + if (_sse2neon_unlikely(c & ~63)) + return _mm_setzero_si128(); + + int64x2_t vc = vdupq_n_s64((int64_t) c); + return vreinterpretq_m128i_s64(vshlq_s64(vreinterpretq_s64_m128i(a), vc)); } -// Compares for not less than. -// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/4686bbdw(v=vs.100) -FORCE_INLINE __m128 _mm_cmpnlt_ps(__m128 a, __m128 b) +// Shift packed 16-bit integers in a left by imm8 while shifting in zeros, and +// store the results in dst. +// +// FOR j := 0 to 7 +// i := j*16 +// IF imm8[7:0] > 15 +// dst[i+15:i] := 0 +// ELSE +// dst[i+15:i] := ZeroExtend16(a[i+15:i] << imm8[7:0]) +// FI +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_slli_epi16 +FORCE_INLINE __m128i _mm_slli_epi16(__m128i a, int imm) { - return _mm_cmpge_ps(a, b); + if (_sse2neon_unlikely(imm & ~15)) + return _mm_setzero_si128(); + return vreinterpretq_m128i_s16( + vshlq_s16(vreinterpretq_s16_m128i(a), vdupq_n_s16(imm))); } -// Compares for not less than. -// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/56b9z2wf(v=vs.100) -FORCE_INLINE __m128 _mm_cmpnlt_ss(__m128 a, __m128 b) +// Shift packed 32-bit integers in a left by imm8 while shifting in zeros, and +// store the results in dst. +// +// FOR j := 0 to 3 +// i := j*32 +// IF imm8[7:0] > 31 +// dst[i+31:i] := 0 +// ELSE +// dst[i+31:i] := ZeroExtend32(a[i+31:i] << imm8[7:0]) +// FI +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_slli_epi32 +FORCE_INLINE __m128i _mm_slli_epi32(__m128i a, int imm) { - return _mm_cmpge_ss(a, b); + if (_sse2neon_unlikely(imm & ~31)) + return _mm_setzero_si128(); + return vreinterpretq_m128i_s32( + vshlq_s32(vreinterpretq_s32_m128i(a), vdupq_n_s32(imm))); } -// Compares the 16 signed or unsigned 8-bit integers in a and the 16 signed or -// unsigned 8-bit integers in b for equality. -// https://msdn.microsoft.com/en-us/library/windows/desktop/bz5xk21a(v=vs.90).aspx -FORCE_INLINE __m128i _mm_cmpeq_epi8(__m128i a, __m128i b) +// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and +// store the results in dst. +// +// FOR j := 0 to 1 +// i := j*64 +// IF imm8[7:0] > 63 +// dst[i+63:i] := 0 +// ELSE +// dst[i+63:i] := ZeroExtend64(a[i+63:i] << imm8[7:0]) +// FI +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_slli_epi64 +FORCE_INLINE __m128i _mm_slli_epi64(__m128i a, int imm) +{ + if (_sse2neon_unlikely(imm & ~63)) + return _mm_setzero_si128(); + return vreinterpretq_m128i_s64( + vshlq_s64(vreinterpretq_s64_m128i(a), vdupq_n_s64(imm))); +} + +// Shift a left by imm8 bytes while shifting in zeros, and store the results in +// dst. 
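+// For example, _mm_slli_si128(a, 4) moves byte k of a to byte k+4 of dst and
+// zero-fills bytes 0-3, shifting every 32-bit lane one lane toward the high
+// end of the register.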
+// +// tmp := imm8[7:0] +// IF tmp > 15 +// tmp := 16 +// FI +// dst[127:0] := a[127:0] << (tmp*8) +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_slli_si128 +FORCE_INLINE __m128i _mm_slli_si128(__m128i a, int imm) { + if (_sse2neon_unlikely(imm & ~15)) + return _mm_setzero_si128(); + uint8x16_t tmp[2] = {vdupq_n_u8(0), vreinterpretq_u8_m128i(a)}; return vreinterpretq_m128i_u8( - vceqq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); + vld1q_u8(((uint8_t const *) tmp) + (16 - imm))); } -// Compare packed double-precision (64-bit) floating-point elements in a and b -// for equality, and store the results in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmpeq_pd -FORCE_INLINE __m128d _mm_cmpeq_pd(__m128d a, __m128d b) +// Compute the square root of packed double-precision (64-bit) floating-point +// elements in a, and store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sqrt_pd +FORCE_INLINE __m128d _mm_sqrt_pd(__m128d a) { #if defined(__aarch64__) - return vreinterpretq_m128d_u64( - vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); + return vreinterpretq_m128d_f64(vsqrtq_f64(vreinterpretq_f64_m128d(a))); #else - // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi) - uint32x4_t cmp = - vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(b)); - uint32x4_t swapped = vrev64q_u32(cmp); - return vreinterpretq_m128d_u32(vandq_u32(cmp, swapped)); + double a0 = sqrt(((double *) &a)[0]); + double a1 = sqrt(((double *) &a)[1]); + return _mm_set_pd(a1, a0); #endif } -// Compares the 8 signed or unsigned 16-bit integers in a and the 8 signed or -// unsigned 16-bit integers in b for equality. -// https://msdn.microsoft.com/en-us/library/2ay060te(v=vs.100).aspx -FORCE_INLINE __m128i _mm_cmpeq_epi16(__m128i a, __m128i b) +// Compute the square root of the lower double-precision (64-bit) floating-point +// element in b, store the result in the lower element of dst, and copy the +// upper element from a to the upper element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sqrt_sd +FORCE_INLINE __m128d _mm_sqrt_sd(__m128d a, __m128d b) { - return vreinterpretq_m128i_u16( - vceqq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); +#if defined(__aarch64__) + return _mm_move_sd(a, _mm_sqrt_pd(b)); +#else + return _mm_set_pd(((double *) &a)[1], sqrt(((double *) &b)[0])); +#endif } -// Compare packed 32-bit integers in a and b for equality, and store the results -// in dst -FORCE_INLINE __m128i _mm_cmpeq_epi32(__m128i a, __m128i b) +// Shift packed 16-bit integers in a right by count while shifting in sign bits, +// and store the results in dst. +// +// FOR j := 0 to 7 +// i := j*16 +// IF count[63:0] > 15 +// dst[i+15:i] := (a[i+15] ? 
0xFFFF : 0x0) +// ELSE +// dst[i+15:i] := SignExtend16(a[i+15:i] >> count[63:0]) +// FI +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sra_epi16 +FORCE_INLINE __m128i _mm_sra_epi16(__m128i a, __m128i count) { - return vreinterpretq_m128i_u32( - vceqq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); + int64_t c = (int64_t) vget_low_s64((int64x2_t) count); + if (_sse2neon_unlikely(c & ~15)) + return _mm_cmplt_epi16(a, _mm_setzero_si128()); + return vreinterpretq_m128i_s16(vshlq_s16((int16x8_t) a, vdupq_n_s16(-c))); } -// Compare packed 64-bit integers in a and b for equality, and store the results -// in dst -FORCE_INLINE __m128i _mm_cmpeq_epi64(__m128i a, __m128i b) +// Shift packed 32-bit integers in a right by count while shifting in sign bits, +// and store the results in dst. +// +// FOR j := 0 to 3 +// i := j*32 +// IF count[63:0] > 31 +// dst[i+31:i] := (a[i+31] ? 0xFFFFFFFF : 0x0) +// ELSE +// dst[i+31:i] := SignExtend32(a[i+31:i] >> count[63:0]) +// FI +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sra_epi32 +FORCE_INLINE __m128i _mm_sra_epi32(__m128i a, __m128i count) { -#if defined(__aarch64__) - return vreinterpretq_m128i_u64( - vceqq_u64(vreinterpretq_u64_m128i(a), vreinterpretq_u64_m128i(b))); -#else - // ARMv7 lacks vceqq_u64 - // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi) - uint32x4_t cmp = - vceqq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b)); - uint32x4_t swapped = vrev64q_u32(cmp); - return vreinterpretq_m128i_u32(vandq_u32(cmp, swapped)); -#endif + int64_t c = (int64_t) vget_low_s64((int64x2_t) count); + if (_sse2neon_unlikely(c & ~31)) + return _mm_cmplt_epi32(a, _mm_setzero_si128()); + return vreinterpretq_m128i_s32(vshlq_s32((int32x4_t) a, vdupq_n_s32(-c))); } -// Compares the 16 signed 8-bit integers in a and the 16 signed 8-bit integers -// in b for lesser than. -// https://msdn.microsoft.com/en-us/library/windows/desktop/9s46csht(v=vs.90).aspx -FORCE_INLINE __m128i _mm_cmplt_epi8(__m128i a, __m128i b) +// Shift packed 16-bit integers in a right by imm8 while shifting in sign +// bits, and store the results in dst. +// +// FOR j := 0 to 7 +// i := j*16 +// IF imm8[7:0] > 15 +// dst[i+15:i] := (a[i+15] ? 0xFFFF : 0x0) +// ELSE +// dst[i+15:i] := SignExtend16(a[i+15:i] >> imm8[7:0]) +// FI +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srai_epi16 +FORCE_INLINE __m128i _mm_srai_epi16(__m128i a, int imm) { - return vreinterpretq_m128i_u8( - vcltq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); + const int count = (imm & ~15) ? 15 : imm; + return (__m128i) vshlq_s16((int16x8_t) a, vdupq_n_s16(-count)); } -// Compares the 16 signed 8-bit integers in a and the 16 signed 8-bit integers -// in b for greater than. +// Shift packed 32-bit integers in a right by imm8 while shifting in sign bits, +// and store the results in dst. // -// r0 := (a0 > b0) ? 0xff : 0x0 -// r1 := (a1 > b1) ? 0xff : 0x0 -// ... -// r15 := (a15 > b15) ? 0xff : 0x0 +// FOR j := 0 to 3 +// i := j*32 +// IF imm8[7:0] > 31 +// dst[i+31:i] := (a[i+31] ? 
0xFFFFFFFF : 0x0) +// ELSE +// dst[i+31:i] := SignExtend32(a[i+31:i] >> imm8[7:0]) +// FI +// ENDFOR // -// https://msdn.microsoft.com/zh-tw/library/wf45zt2b(v=vs.100).aspx -FORCE_INLINE __m128i _mm_cmpgt_epi8(__m128i a, __m128i b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srai_epi32 +// FORCE_INLINE __m128i _mm_srai_epi32(__m128i a, __constrange(0,255) int imm) +#define _mm_srai_epi32(a, imm) \ + __extension__({ \ + __m128i ret; \ + if (_sse2neon_unlikely((imm) == 0)) { \ + ret = a; \ + } else if (_sse2neon_likely(0 < (imm) && (imm) < 32)) { \ + ret = vreinterpretq_m128i_s32( \ + vshlq_s32(vreinterpretq_s32_m128i(a), vdupq_n_s32(-imm))); \ + } else { \ + ret = vreinterpretq_m128i_s32( \ + vshrq_n_s32(vreinterpretq_s32_m128i(a), 31)); \ + } \ + ret; \ + }) + +// Shift packed 16-bit integers in a right by count while shifting in zeros, and +// store the results in dst. +// +// FOR j := 0 to 7 +// i := j*16 +// IF count[63:0] > 15 +// dst[i+15:i] := 0 +// ELSE +// dst[i+15:i] := ZeroExtend16(a[i+15:i] >> count[63:0]) +// FI +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srl_epi16 +FORCE_INLINE __m128i _mm_srl_epi16(__m128i a, __m128i count) { - return vreinterpretq_m128i_u8( - vcgtq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); + uint64_t c = vreinterpretq_nth_u64_m128i(count, 0); + if (_sse2neon_unlikely(c & ~15)) + return _mm_setzero_si128(); + + int16x8_t vc = vdupq_n_s16(-(int16_t) c); + return vreinterpretq_m128i_u16(vshlq_u16(vreinterpretq_u16_m128i(a), vc)); } -// Compares the 8 signed 16-bit integers in a and the 8 signed 16-bit integers -// in b for less than. +// Shift packed 32-bit integers in a right by count while shifting in zeros, and +// store the results in dst. // -// r0 := (a0 < b0) ? 0xffff : 0x0 -// r1 := (a1 < b1) ? 0xffff : 0x0 -// ... -// r7 := (a7 < b7) ? 0xffff : 0x0 +// FOR j := 0 to 3 +// i := j*32 +// IF count[63:0] > 31 +// dst[i+31:i] := 0 +// ELSE +// dst[i+31:i] := ZeroExtend32(a[i+31:i] >> count[63:0]) +// FI +// ENDFOR // -// https://technet.microsoft.com/en-us/library/t863edb2(v=vs.100).aspx -FORCE_INLINE __m128i _mm_cmplt_epi16(__m128i a, __m128i b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srl_epi32 +FORCE_INLINE __m128i _mm_srl_epi32(__m128i a, __m128i count) { - return vreinterpretq_m128i_u16( - vcltq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); + uint64_t c = vreinterpretq_nth_u64_m128i(count, 0); + if (_sse2neon_unlikely(c & ~31)) + return _mm_setzero_si128(); + + int32x4_t vc = vdupq_n_s32(-(int32_t) c); + return vreinterpretq_m128i_u32(vshlq_u32(vreinterpretq_u32_m128i(a), vc)); } -// Compares the 8 signed 16-bit integers in a and the 8 signed 16-bit integers -// in b for greater than. +// Shift packed 64-bit integers in a right by count while shifting in zeros, and +// store the results in dst. // -// r0 := (a0 > b0) ? 0xffff : 0x0 -// r1 := (a1 > b1) ? 0xffff : 0x0 -// ... -// r7 := (a7 > b7) ? 
0xffff : 0x0 +// FOR j := 0 to 1 +// i := j*64 +// IF count[63:0] > 63 +// dst[i+63:i] := 0 +// ELSE +// dst[i+63:i] := ZeroExtend64(a[i+63:i] >> count[63:0]) +// FI +// ENDFOR // -// https://technet.microsoft.com/en-us/library/xd43yfsa(v=vs.100).aspx -FORCE_INLINE __m128i _mm_cmpgt_epi16(__m128i a, __m128i b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srl_epi64 +FORCE_INLINE __m128i _mm_srl_epi64(__m128i a, __m128i count) { - return vreinterpretq_m128i_u16( - vcgtq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); + uint64_t c = vreinterpretq_nth_u64_m128i(count, 0); + if (_sse2neon_unlikely(c & ~63)) + return _mm_setzero_si128(); + + int64x2_t vc = vdupq_n_s64(-(int64_t) c); + return vreinterpretq_m128i_u64(vshlq_u64(vreinterpretq_u64_m128i(a), vc)); } +// Shift packed 16-bit integers in a right by imm8 while shifting in zeros, and +// store the results in dst. +// +// FOR j := 0 to 7 +// i := j*16 +// IF imm8[7:0] > 15 +// dst[i+15:i] := 0 +// ELSE +// dst[i+15:i] := ZeroExtend16(a[i+15:i] >> imm8[7:0]) +// FI +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srli_epi16 +#define _mm_srli_epi16(a, imm) \ + __extension__({ \ + __m128i ret; \ + if (_sse2neon_unlikely(imm & ~15)) { \ + ret = _mm_setzero_si128(); \ + } else { \ + ret = vreinterpretq_m128i_u16( \ + vshlq_u16(vreinterpretq_u16_m128i(a), vdupq_n_s16(-imm))); \ + } \ + ret; \ + }) + +// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and +// store the results in dst. +// +// FOR j := 0 to 3 +// i := j*32 +// IF imm8[7:0] > 31 +// dst[i+31:i] := 0 +// ELSE +// dst[i+31:i] := ZeroExtend32(a[i+31:i] >> imm8[7:0]) +// FI +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srli_epi32 +// FORCE_INLINE __m128i _mm_srli_epi32(__m128i a, __constrange(0,255) int imm) +#define _mm_srli_epi32(a, imm) \ + __extension__({ \ + __m128i ret; \ + if (_sse2neon_unlikely(imm & ~31)) { \ + ret = _mm_setzero_si128(); \ + } else { \ + ret = vreinterpretq_m128i_u32( \ + vshlq_u32(vreinterpretq_u32_m128i(a), vdupq_n_s32(-imm))); \ + } \ + ret; \ + }) -// Compares the 4 signed 32-bit integers in a and the 4 signed 32-bit integers -// in b for less than. -// https://msdn.microsoft.com/en-us/library/vstudio/4ak0bf5d(v=vs.100).aspx -FORCE_INLINE __m128i _mm_cmplt_epi32(__m128i a, __m128i b) +// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and +// store the results in dst. +// +// FOR j := 0 to 1 +// i := j*64 +// IF imm8[7:0] > 63 +// dst[i+63:i] := 0 +// ELSE +// dst[i+63:i] := ZeroExtend64(a[i+63:i] >> imm8[7:0]) +// FI +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srli_epi64 +#define _mm_srli_epi64(a, imm) \ + __extension__({ \ + __m128i ret; \ + if (_sse2neon_unlikely(imm & ~63)) { \ + ret = _mm_setzero_si128(); \ + } else { \ + ret = vreinterpretq_m128i_u64( \ + vshlq_u64(vreinterpretq_u64_m128i(a), vdupq_n_s64(-imm))); \ + } \ + ret; \ + }) + +// Shift a right by imm8 bytes while shifting in zeros, and store the results in +// dst. 
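+// For example, _mm_srli_si128(a, 8) moves the upper 64 bits of a into the
+// lower 64 bits of dst and zeroes the upper half, a common way to bring the
+// high lane down for scalar extraction.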
+// +// tmp := imm8[7:0] +// IF tmp > 15 +// tmp := 16 +// FI +// dst[127:0] := a[127:0] >> (tmp*8) +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_srli_si128 +FORCE_INLINE __m128i _mm_srli_si128(__m128i a, int imm) { - return vreinterpretq_m128i_u32( - vcltq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); + if (_sse2neon_unlikely(imm & ~15)) + return _mm_setzero_si128(); + uint8x16_t tmp[2] = {vreinterpretq_u8_m128i(a), vdupq_n_u8(0)}; + return vreinterpretq_m128i_u8(vld1q_u8(((uint8_t const *) tmp) + imm)); } -// Compares the 4 signed 32-bit integers in a and the 4 signed 32-bit integers -// in b for greater than. -// https://msdn.microsoft.com/en-us/library/vstudio/1s9f2z0y(v=vs.100).aspx -FORCE_INLINE __m128i _mm_cmpgt_epi32(__m128i a, __m128i b) +// Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point +// elements) from a into memory. mem_addr must be aligned on a 16-byte boundary +// or a general-protection exception may be generated. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_pd +FORCE_INLINE void _mm_store_pd(double *mem_addr, __m128d a) { - return vreinterpretq_m128i_u32( - vcgtq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); +#if defined(__aarch64__) + vst1q_f64((float64_t *) mem_addr, vreinterpretq_f64_m128d(a)); +#else + vst1q_f32((float32_t *) mem_addr, vreinterpretq_f32_m128d(a)); +#endif } -// Compares the 2 signed 64-bit integers in a and the 2 signed 64-bit integers -// in b for greater than. -FORCE_INLINE __m128i _mm_cmpgt_epi64(__m128i a, __m128i b) +// Store the lower double-precision (64-bit) floating-point element from a into +// 2 contiguous elements in memory. mem_addr must be aligned on a 16-byte +// boundary or a general-protection exception may be generated. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_store_pd1 +FORCE_INLINE void _mm_store_pd1(double *mem_addr, __m128d a) { #if defined(__aarch64__) - return vreinterpretq_m128i_u64( - vcgtq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b))); + float64x1_t a_low = vget_low_f64(vreinterpretq_f64_m128d(a)); + vst1q_f64((float64_t *) mem_addr, + vreinterpretq_f64_m128d(vcombine_f64(a_low, a_low))); #else - // ARMv7 lacks vcgtq_s64. - // This is based off of Clang's SSE2 polyfill: - // (a > b) -> ((a_hi > b_hi) || (a_lo > b_lo && a_hi == b_hi)) - - // Mask the sign bit out since we need a signed AND an unsigned comparison - // and it is ugly to try and split them. 
- int32x4_t mask = vreinterpretq_s32_s64(vdupq_n_s64(0x80000000ull)); - int32x4_t a_mask = veorq_s32(vreinterpretq_s32_m128i(a), mask); - int32x4_t b_mask = veorq_s32(vreinterpretq_s32_m128i(b), mask); - // Check if a > b - int64x2_t greater = vreinterpretq_s64_u32(vcgtq_s32(a_mask, b_mask)); - // Copy upper mask to lower mask - // a_hi > b_hi - int64x2_t gt_hi = vshrq_n_s64(greater, 63); - // Copy lower mask to upper mask - // a_lo > b_lo - int64x2_t gt_lo = vsliq_n_s64(greater, greater, 32); - // Compare for equality - int64x2_t equal = vreinterpretq_s64_u32(vceqq_s32(a_mask, b_mask)); - // Copy upper mask to lower mask - // a_hi == b_hi - int64x2_t eq_hi = vshrq_n_s64(equal, 63); - // a_hi > b_hi || (a_lo > b_lo && a_hi == b_hi) - int64x2_t ret = vorrq_s64(gt_hi, vandq_s64(gt_lo, eq_hi)); - return vreinterpretq_m128i_s64(ret); + float32x2_t a_low = vget_low_f32(vreinterpretq_f32_m128d(a)); + vst1q_f32((float32_t *) mem_addr, + vreinterpretq_f32_m128d(vcombine_f32(a_low, a_low))); #endif } -// Compares the four 32-bit floats in a and b to check if any values are NaN. -// Ordered compare between each value returns true for "orderable" and false for -// "not orderable" (NaN). -// https://msdn.microsoft.com/en-us/library/vstudio/0h9w00fx(v=vs.100).aspx see -// also: -// http://stackoverflow.com/questions/8627331/what-does-ordered-unordered-comparison-mean -// http://stackoverflow.com/questions/29349621/neon-isnanval-intrinsics -FORCE_INLINE __m128 _mm_cmpord_ps(__m128 a, __m128 b) +// Store the lower double-precision (64-bit) floating-point element from a into +// memory. mem_addr does not need to be aligned on any particular boundary. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_store_sd +FORCE_INLINE void _mm_store_sd(double *mem_addr, __m128d a) { - // Note: NEON does not have ordered compare builtin - // Need to compare a eq a and b eq b to check for NaN - // Do AND of results to get final - uint32x4_t ceqaa = - vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a)); - uint32x4_t ceqbb = - vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b)); - return vreinterpretq_m128_u32(vandq_u32(ceqaa, ceqbb)); +#if defined(__aarch64__) + vst1_f64((float64_t *) mem_addr, vget_low_f64(vreinterpretq_f64_m128d(a))); +#else + vst1_u64((uint64_t *) mem_addr, vget_low_u64(vreinterpretq_u64_m128d(a))); +#endif } -// Compares for ordered. -// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/343t62da(v=vs.100) -FORCE_INLINE __m128 _mm_cmpord_ss(__m128 a, __m128 b) +// Stores four 32-bit integer values as (as a __m128i value) at the address p. +// https://msdn.microsoft.com/en-us/library/vstudio/edk11s13(v=vs.100).aspx +FORCE_INLINE void _mm_store_si128(__m128i *p, __m128i a) { - return _mm_move_ss(a, _mm_cmpord_ps(a, b)); + vst1q_s32((int32_t *) p, vreinterpretq_s32_m128i(a)); } -// Compares for unordered. -// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/khy6fk1t(v=vs.100) -FORCE_INLINE __m128 _mm_cmpunord_ps(__m128 a, __m128 b) +// Store the lower double-precision (64-bit) floating-point element from a into +// 2 contiguous elements in memory. mem_addr must be aligned on a 16-byte +// boundary or a general-protection exception may be generated. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=9,526,5601&text=_mm_store1_pd +#define _mm_store1_pd _mm_store_pd1 + +// Store the upper double-precision (64-bit) floating-point element from a into +// memory. 
+// +// MEM[mem_addr+63:mem_addr] := a[127:64] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeh_pd +FORCE_INLINE void _mm_storeh_pd(double *mem_addr, __m128d a) { - uint32x4_t f32a = - vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a)); - uint32x4_t f32b = - vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b)); - return vreinterpretq_m128_u32(vmvnq_u32(vandq_u32(f32a, f32b))); +#if defined(__aarch64__) + vst1_f64((float64_t *) mem_addr, vget_high_f64(vreinterpretq_f64_m128d(a))); +#else + vst1_f32((float32_t *) mem_addr, vget_high_f32(vreinterpretq_f32_m128d(a))); +#endif } -// Compares for unordered. -// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/2as2387b(v=vs.100) -FORCE_INLINE __m128 _mm_cmpunord_ss(__m128 a, __m128 b) +// Reads the lower 64 bits of b and stores them into the lower 64 bits of a. +// https://msdn.microsoft.com/en-us/library/hhwf428f%28v=vs.90%29.aspx +FORCE_INLINE void _mm_storel_epi64(__m128i *a, __m128i b) { - return _mm_move_ss(a, _mm_cmpunord_ps(a, b)); + uint64x1_t hi = vget_high_u64(vreinterpretq_u64_m128i(*a)); + uint64x1_t lo = vget_low_u64(vreinterpretq_u64_m128i(b)); + *a = vreinterpretq_m128i_u64(vcombine_u64(lo, hi)); } -// Compares the lower single-precision floating point scalar values of a and b -// using a less than operation. : -// https://msdn.microsoft.com/en-us/library/2kwe606b(v=vs.90).aspx Important -// note!! The documentation on MSDN is incorrect! If either of the values is a -// NAN the docs say you will get a one, but in fact, it will return a zero!! -FORCE_INLINE int _mm_comilt_ss(__m128 a, __m128 b) +// Store the lower double-precision (64-bit) floating-point element from a into +// memory. +// +// MEM[mem_addr+63:mem_addr] := a[63:0] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storel_pd +FORCE_INLINE void _mm_storel_pd(double *mem_addr, __m128d a) { - uint32x4_t a_not_nan = - vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a)); - uint32x4_t b_not_nan = - vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b)); - uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); - uint32x4_t a_lt_b = - vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)); - return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_lt_b), 0) != 0) ? 1 : 0; +#if defined(__aarch64__) + vst1_f64((float64_t *) mem_addr, vget_low_f64(vreinterpretq_f64_m128d(a))); +#else + vst1_f32((float32_t *) mem_addr, vget_low_f32(vreinterpretq_f32_m128d(a))); +#endif } -// Compares the lower single-precision floating point scalar values of a and b -// using a greater than operation. : -// https://msdn.microsoft.com/en-us/library/b0738e0t(v=vs.100).aspx -FORCE_INLINE int _mm_comigt_ss(__m128 a, __m128 b) +// Store 2 double-precision (64-bit) floating-point elements from a into memory +// in reverse order. mem_addr must be aligned on a 16-byte boundary or a +// general-protection exception may be generated. 
+// +// MEM[mem_addr+63:mem_addr] := a[127:64] +// MEM[mem_addr+127:mem_addr+64] := a[63:0] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storer_pd +FORCE_INLINE void _mm_storer_pd(double *mem_addr, __m128d a) { - // return vgetq_lane_u32(vcgtq_f32(vreinterpretq_f32_m128(a), - // vreinterpretq_f32_m128(b)), 0); - uint32x4_t a_not_nan = - vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a)); - uint32x4_t b_not_nan = - vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b)); - uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); - uint32x4_t a_gt_b = - vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)); - return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0) ? 1 : 0; + float32x4_t f = vreinterpretq_f32_m128d(a); + _mm_store_pd(mem_addr, vreinterpretq_m128d_f32(vextq_f32(f, f, 2))); } -// Compares the lower single-precision floating point scalar values of a and b -// using a less than or equal operation. : -// https://msdn.microsoft.com/en-us/library/1w4t7c57(v=vs.90).aspx -FORCE_INLINE int _mm_comile_ss(__m128 a, __m128 b) +// Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point +// elements) from a into memory. mem_addr does not need to be aligned on any +// particular boundary. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeu_pd +FORCE_INLINE void _mm_storeu_pd(double *mem_addr, __m128d a) { - // return vgetq_lane_u32(vcleq_f32(vreinterpretq_f32_m128(a), - // vreinterpretq_f32_m128(b)), 0); - uint32x4_t a_not_nan = - vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a)); - uint32x4_t b_not_nan = - vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b)); - uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); - uint32x4_t a_le_b = - vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)); - return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_le_b), 0) != 0) ? 1 : 0; + _mm_store_pd(mem_addr, a); } -// Compares the lower single-precision floating point scalar values of a and b -// using a greater than or equal operation. : -// https://msdn.microsoft.com/en-us/library/8t80des6(v=vs.100).aspx -FORCE_INLINE int _mm_comige_ss(__m128 a, __m128 b) +// Stores 128-bits of integer data a at the address p. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeu_si128 +FORCE_INLINE void _mm_storeu_si128(__m128i *p, __m128i a) { - // return vgetq_lane_u32(vcgeq_f32(vreinterpretq_f32_m128(a), - // vreinterpretq_f32_m128(b)), 0); - uint32x4_t a_not_nan = - vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a)); - uint32x4_t b_not_nan = - vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b)); - uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); - uint32x4_t a_ge_b = - vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)); - return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0) ? 1 : 0; + vst1q_s32((int32_t *) p, vreinterpretq_s32_m128i(a)); } -// Compares the lower single-precision floating point scalar values of a and b -// using an equality operation. : -// https://msdn.microsoft.com/en-us/library/93yx2h2b(v=vs.100).aspx -FORCE_INLINE int _mm_comieq_ss(__m128 a, __m128 b) +// Stores 32-bits of integer data a at the address p. 
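The double-precision store helpers defined above differ only in which half of the register they write and in what order. A small illustrative round trip follows; the include name, demo function and test values are assumptions, while ALIGN_STRUCT is the header's own alignment macro.

#include <assert.h>
#include "sse2neon.h" /* assumed include path for this header */

static void demo_pd_stores(void)
{
    __m128d v = _mm_set_pd(2.0, 1.0); /* element 0 = 1.0, element 1 = 2.0 */
    double lo_hi[2];
    double ALIGN_STRUCT(16) rev[2];

    _mm_storel_pd(&lo_hi[0], v); /* low element  -> 1.0 */
    _mm_storeh_pd(&lo_hi[1], v); /* high element -> 2.0 */
    assert(lo_hi[0] == 1.0 && lo_hi[1] == 2.0);

    _mm_storer_pd(rev, v); /* both elements, reversed -> {2.0, 1.0} */
    assert(rev[0] == 2.0 && rev[1] == 1.0);
}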
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_storeu_si32 +FORCE_INLINE void _mm_storeu_si32(void *p, __m128i a) { - // return vgetq_lane_u32(vceqq_f32(vreinterpretq_f32_m128(a), - // vreinterpretq_f32_m128(b)), 0); - uint32x4_t a_not_nan = - vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a)); - uint32x4_t b_not_nan = - vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b)); - uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); - uint32x4_t a_eq_b = - vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)); - return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_eq_b), 0) != 0) ? 1 : 0; + vst1q_lane_s32((int32_t *) p, vreinterpretq_s32_m128i(a), 0); } -// Compares the lower single-precision floating point scalar values of a and b -// using an inequality operation. : -// https://msdn.microsoft.com/en-us/library/bafh5e0a(v=vs.90).aspx -FORCE_INLINE int _mm_comineq_ss(__m128 a, __m128 b) +// Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point +// elements) from a into memory using a non-temporal memory hint. mem_addr must +// be aligned on a 16-byte boundary or a general-protection exception may be +// generated. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_stream_pd +FORCE_INLINE void _mm_stream_pd(double *p, __m128d a) { - // return !vgetq_lane_u32(vceqq_f32(vreinterpretq_f32_m128(a), - // vreinterpretq_f32_m128(b)), 0); - uint32x4_t a_not_nan = - vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a)); - uint32x4_t b_not_nan = - vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b)); - uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan)); - uint32x4_t a_neq_b = vmvnq_u32( - vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); - return (vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_neq_b), 0) != 0) ? 1 : 0; +#if __has_builtin(__builtin_nontemporal_store) + __builtin_nontemporal_store(a, (float32x4_t *) p); +#elif defined(__aarch64__) + vst1q_f64(p, vreinterpretq_f64_m128d(a)); +#else + vst1q_s64((int64_t *) p, vreinterpretq_s64_m128d(a)); +#endif } -// according to the documentation, these intrinsics behave the same as the -// non-'u' versions. We'll just alias them here. -#define _mm_ucomieq_ss _mm_comieq_ss -#define _mm_ucomige_ss _mm_comige_ss -#define _mm_ucomigt_ss _mm_comigt_ss -#define _mm_ucomile_ss _mm_comile_ss -#define _mm_ucomilt_ss _mm_comilt_ss -#define _mm_ucomineq_ss _mm_comineq_ss +// Stores the data in a to the address p without polluting the caches. If the +// cache line containing address p is already in the cache, the cache will be +// updated. +// https://msdn.microsoft.com/en-us/library/ba08y07y%28v=vs.90%29.aspx +FORCE_INLINE void _mm_stream_si128(__m128i *p, __m128i a) +{ +#if __has_builtin(__builtin_nontemporal_store) + __builtin_nontemporal_store(a, p); +#else + vst1q_s64((int64_t *) p, vreinterpretq_s64_m128i(a)); +#endif +} -/* Conversions */ +// Store 32-bit integer a into memory using a non-temporal hint to minimize +// cache pollution. If the cache line containing address mem_addr is already in +// the cache, the cache will be updated. 
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_stream_si32 +FORCE_INLINE void _mm_stream_si32(int *p, int a) +{ + vst1q_lane_s32((int32_t *) p, vdupq_n_s32(a), 0); +} -// Convert packed signed 32-bit integers in b to packed single-precision -// (32-bit) floating-point elements, store the results in the lower 2 elements -// of dst, and copy the upper 2 packed elements from a to the upper elements of -// dst. +// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and +// store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_epi16 +FORCE_INLINE __m128i _mm_sub_epi16(__m128i a, __m128i b) +{ + return vreinterpretq_m128i_s16( + vsubq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); +} + +// Subtracts the 4 signed or unsigned 32-bit integers of b from the 4 signed or +// unsigned 32-bit integers of a. // -// dst[31:0] := Convert_Int32_To_FP32(b[31:0]) -// dst[63:32] := Convert_Int32_To_FP32(b[63:32]) -// dst[95:64] := a[95:64] -// dst[127:96] := a[127:96] +// r0 := a0 - b0 +// r1 := a1 - b1 +// r2 := a2 - b2 +// r3 := a3 - b3 // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_pi2ps -FORCE_INLINE __m128 _mm_cvt_pi2ps(__m128 a, __m64 b) +// https://msdn.microsoft.com/en-us/library/vstudio/fhh866h0(v=vs.100).aspx +FORCE_INLINE __m128i _mm_sub_epi32(__m128i a, __m128i b) { - return vreinterpretq_m128_f32( - vcombine_f32(vcvt_f32_s32(vreinterpret_s32_m64(b)), - vget_high_f32(vreinterpretq_f32_m128(a)))); + return vreinterpretq_m128i_s32( + vsubq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); } -// Convert the signed 32-bit integer b to a single-precision (32-bit) -// floating-point element, store the result in the lower element of dst, and -// copy the upper 3 packed elements from a to the upper elements of dst. +// Subtract 2 packed 64-bit integers in b from 2 packed 64-bit integers in a, +// and store the results in dst. +// r0 := a0 - b0 +// r1 := a1 - b1 +FORCE_INLINE __m128i _mm_sub_epi64(__m128i a, __m128i b) +{ + return vreinterpretq_m128i_s64( + vsubq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b))); +} + +// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and +// store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_epi8 +FORCE_INLINE __m128i _mm_sub_epi8(__m128i a, __m128i b) +{ + return vreinterpretq_m128i_s8( + vsubq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); +} + +// Subtract packed double-precision (64-bit) floating-point elements in b from +// packed double-precision (64-bit) floating-point elements in a, and store the +// results in dst. 
// -// dst[31:0] := Convert_Int32_To_FP32(b[31:0]) -// dst[127:32] := a[127:32] +// FOR j := 0 to 1 +// i := j*64 +// dst[i+63:i] := a[i+63:i] - b[i+63:i] +// ENDFOR // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_si2ss -FORCE_INLINE __m128 _mm_cvt_si2ss(__m128 a, int b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_sub_pd +FORCE_INLINE __m128d _mm_sub_pd(__m128d a, __m128d b) { - return vreinterpretq_m128_f32( - vsetq_lane_f32((float) b, vreinterpretq_f32_m128(a), 0)); +#if defined(__aarch64__) + return vreinterpretq_m128d_f64( + vsubq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); +#else + double *da = (double *) &a; + double *db = (double *) &b; + double c[2]; + c[0] = da[0] - db[0]; + c[1] = da[1] - db[1]; + return vld1q_f32((float32_t *) c); +#endif } -// Convert the signed 32-bit integer b to a single-precision (32-bit) -// floating-point element, store the result in the lower element of dst, and -// copy the upper 3 packed elements from a to the upper elements of dst. +// Subtract the lower double-precision (64-bit) floating-point element in b from +// the lower double-precision (64-bit) floating-point element in a, store the +// result in the lower element of dst, and copy the upper element from a to the +// upper element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_sd +FORCE_INLINE __m128d _mm_sub_sd(__m128d a, __m128d b) +{ + return _mm_move_sd(a, _mm_sub_pd(a, b)); +} + +// Subtract 64-bit integer b from 64-bit integer a, and store the result in dst. // -// dst[31:0] := Convert_Int32_To_FP32(b[31:0]) -// dst[127:32] := a[127:32] +// dst[63:0] := a[63:0] - b[63:0] // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi32_ss -#define _mm_cvtsi32_ss(a, b) _mm_cvt_si2ss(a, b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sub_si64 +FORCE_INLINE __m64 _mm_sub_si64(__m64 a, __m64 b) +{ + return vreinterpret_m64_s64( + vsub_s64(vreinterpret_s64_m64(a), vreinterpret_s64_m64(b))); +} -// Convert the signed 64-bit integer b to a single-precision (32-bit) -// floating-point element, store the result in the lower element of dst, and -// copy the upper 3 packed elements from a to the upper elements of dst. +// Subtracts the 8 signed 16-bit integers of b from the 8 signed 16-bit integers +// of a and saturates. // -// dst[31:0] := Convert_Int64_To_FP32(b[63:0]) -// dst[127:32] := a[127:32] +// r0 := SignedSaturate(a0 - b0) +// r1 := SignedSaturate(a1 - b1) +// ... +// r7 := SignedSaturate(a7 - b7) // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi64_ss -FORCE_INLINE __m128 _mm_cvtsi64_ss(__m128 a, int64_t b) +// https://technet.microsoft.com/en-us/subscriptions/3247z5b8(v=vs.90) +FORCE_INLINE __m128i _mm_subs_epi16(__m128i a, __m128i b) { - return vreinterpretq_m128_f32( - vsetq_lane_f32((float) b, vreinterpretq_f32_m128(a), 0)); + return vreinterpretq_m128i_s16( + vqsubq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); } -// Convert the lower single-precision (32-bit) floating-point element in a to a -// 32-bit integer, and store the result in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_ss2si -FORCE_INLINE int _mm_cvt_ss2si(__m128 a) +// Subtracts the 16 signed 8-bit integers of b from the 16 signed 8-bit integers +// of a and saturates. +// +// r0 := SignedSaturate(a0 - b0) +// r1 := SignedSaturate(a1 - b1) +// ... 
+// r15 := SignedSaturate(a15 - b15)
+//
+// https://technet.microsoft.com/en-us/subscriptions/by7kzks1(v=vs.90)
+FORCE_INLINE __m128i _mm_subs_epi8(__m128i a, __m128i b)
{
-#if defined(__aarch64__)
-    return vgetq_lane_s32(vcvtnq_s32_f32(vreinterpretq_f32_m128(a)), 0);
-#else
-    float32_t data = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
-    float32_t diff = data - floor(data);
-    if (diff > 0.5)
-        return (int32_t) ceil(data);
-    if (unlikely(diff == 0.5)) {
-        int32_t f = (int32_t) floor(data);
-        int32_t c = (int32_t) ceil(data);
-        return c & 1 ? f : c;
-    }
-    return (int32_t) floor(data);
-#endif
+    return vreinterpretq_m128i_s8(
+        vqsubq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
}

-// Convert packed 16-bit integers in a to packed single-precision (32-bit)
-// floating-point elements, and store the results in dst.
+// Subtracts the 8 unsigned 16-bit integers of b from the 8 unsigned 16-bit
+// integers of a and saturates.
+// https://technet.microsoft.com/en-us/subscriptions/index/f44y0s19(v=vs.90).aspx
+FORCE_INLINE __m128i _mm_subs_epu16(__m128i a, __m128i b)
+{
+    return vreinterpretq_m128i_u16(
+        vqsubq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
+}
+
+// Subtracts the 16 unsigned 8-bit integers of b from the 16 unsigned 8-bit
+// integers of a and saturates.
//
-// FOR j := 0 to 3
-//   i := j*16
-//   m := j*32
-//   dst[m+31:m] := Convert_Int16_To_FP32(a[i+15:i])
-// ENDFOR
+// r0 := UnsignedSaturate(a0 - b0)
+// r1 := UnsignedSaturate(a1 - b1)
+// ...
+// r15 := UnsignedSaturate(a15 - b15)
//
-// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpi16_ps
-FORCE_INLINE __m128 _mm_cvtpi16_ps(__m64 a)
+// https://technet.microsoft.com/en-us/subscriptions/yadkxc18(v=vs.90)
+FORCE_INLINE __m128i _mm_subs_epu8(__m128i a, __m128i b)
{
-    return vreinterpretq_m128_f32(
-        vcvtq_f32_s32(vmovl_s16(vreinterpret_s16_m64(a))));
+    return vreinterpretq_m128i_u8(
+        vqsubq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
}

-// Convert packed 32-bit integers in b to packed single-precision (32-bit)
-// floating-point elements, store the results in the lower 2 elements of dst,
-// and copy the upper 2 packed elements from a to the upper elements of dst.
+#define _mm_ucomieq_sd _mm_comieq_sd
+#define _mm_ucomige_sd _mm_comige_sd
+#define _mm_ucomigt_sd _mm_comigt_sd
+#define _mm_ucomile_sd _mm_comile_sd
+#define _mm_ucomilt_sd _mm_comilt_sd
+#define _mm_ucomineq_sd _mm_comineq_sd
+
+// Return vector of type __m128d with undefined elements.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_undefined_pd
+FORCE_INLINE __m128d _mm_undefined_pd(void)
+{
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wuninitialized"
+#endif
+    __m128d a;
+    return a;
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+// Interleaves the upper 4 signed or unsigned 16-bit integers in a with the
+// upper 4 signed or unsigned 16-bit integers in b.
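The _mm_subs_* family above maps directly onto NEON's saturating subtracts (vqsub*), so results clamp at the type's limits instead of wrapping. A short illustrative check follows; the include name, demo function and values are assumptions, not part of the diff.

#include <assert.h>
#include <stdint.h>
#include "sse2neon.h" /* assumed include path for this header */

static void demo_saturating_sub(void)
{
    /* unsigned: 5 - 10 clamps to 0 rather than wrapping to 251 */
    __m128i u = _mm_subs_epu8(_mm_set1_epi8(5), _mm_set1_epi8(10));
    assert((_mm_cvtsi128_si32(u) & 0xff) == 0);

    /* signed 16-bit: INT16_MIN - 1 clamps to INT16_MIN */
    __m128i s = _mm_subs_epi16(_mm_set1_epi16(-32768), _mm_set1_epi16(1));
    assert((int16_t) _mm_extract_epi16(s, 0) == -32768);
}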
// -// dst[31:0] := Convert_Int32_To_FP32(b[31:0]) -// dst[63:32] := Convert_Int32_To_FP32(b[63:32]) -// dst[95:64] := a[95:64] -// dst[127:96] := a[127:96] +// r0 := a4 +// r1 := b4 +// r2 := a5 +// r3 := b5 +// r4 := a6 +// r5 := b6 +// r6 := a7 +// r7 := b7 // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpi32_ps -FORCE_INLINE __m128 _mm_cvtpi32_ps(__m128 a, __m64 b) +// https://msdn.microsoft.com/en-us/library/03196cz7(v=vs.100).aspx +FORCE_INLINE __m128i _mm_unpackhi_epi16(__m128i a, __m128i b) { - return vreinterpretq_m128_f32( - vcombine_f32(vcvt_f32_s32(vreinterpret_s32_m64(b)), - vget_high_f32(vreinterpretq_f32_m128(a)))); +#if defined(__aarch64__) + return vreinterpretq_m128i_s16( + vzip2q_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); +#else + int16x4_t a1 = vget_high_s16(vreinterpretq_s16_m128i(a)); + int16x4_t b1 = vget_high_s16(vreinterpretq_s16_m128i(b)); + int16x4x2_t result = vzip_s16(a1, b1); + return vreinterpretq_m128i_s16(vcombine_s16(result.val[0], result.val[1])); +#endif } -// Convert packed signed 32-bit integers in a to packed single-precision -// (32-bit) floating-point elements, store the results in the lower 2 elements -// of dst, then covert the packed signed 32-bit integers in b to -// single-precision (32-bit) floating-point element, and store the results in -// the upper 2 elements of dst. +// Interleaves the upper 2 signed or unsigned 32-bit integers in a with the +// upper 2 signed or unsigned 32-bit integers in b. +// https://msdn.microsoft.com/en-us/library/65sa7cbs(v=vs.100).aspx +FORCE_INLINE __m128i _mm_unpackhi_epi32(__m128i a, __m128i b) +{ +#if defined(__aarch64__) + return vreinterpretq_m128i_s32( + vzip2q_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); +#else + int32x2_t a1 = vget_high_s32(vreinterpretq_s32_m128i(a)); + int32x2_t b1 = vget_high_s32(vreinterpretq_s32_m128i(b)); + int32x2x2_t result = vzip_s32(a1, b1); + return vreinterpretq_m128i_s32(vcombine_s32(result.val[0], result.val[1])); +#endif +} + +// Interleaves the upper signed or unsigned 64-bit integer in a with the +// upper signed or unsigned 64-bit integer in b. // -// dst[31:0] := Convert_Int32_To_FP32(a[31:0]) -// dst[63:32] := Convert_Int32_To_FP32(a[63:32]) -// dst[95:64] := Convert_Int32_To_FP32(b[31:0]) -// dst[127:96] := Convert_Int32_To_FP32(b[63:32]) +// r0 := a1 +// r1 := b1 +FORCE_INLINE __m128i _mm_unpackhi_epi64(__m128i a, __m128i b) +{ + int64x1_t a_h = vget_high_s64(vreinterpretq_s64_m128i(a)); + int64x1_t b_h = vget_high_s64(vreinterpretq_s64_m128i(b)); + return vreinterpretq_m128i_s64(vcombine_s64(a_h, b_h)); +} + +// Interleaves the upper 8 signed or unsigned 8-bit integers in a with the upper +// 8 signed or unsigned 8-bit integers in b. // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpi32x2_ps -FORCE_INLINE __m128 _mm_cvtpi32x2_ps(__m64 a, __m64 b) +// r0 := a8 +// r1 := b8 +// r2 := a9 +// r3 := b9 +// ... 
+// r14 := a15 +// r15 := b15 +// +// https://msdn.microsoft.com/en-us/library/t5h7783k(v=vs.100).aspx +FORCE_INLINE __m128i _mm_unpackhi_epi8(__m128i a, __m128i b) { - return vreinterpretq_m128_f32(vcvtq_f32_s32( - vcombine_s32(vreinterpret_s32_m64(a), vreinterpret_s32_m64(b)))); +#if defined(__aarch64__) + return vreinterpretq_m128i_s8( + vzip2q_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); +#else + int8x8_t a1 = + vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(a))); + int8x8_t b1 = + vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(b))); + int8x8x2_t result = vzip_s8(a1, b1); + return vreinterpretq_m128i_s8(vcombine_s8(result.val[0], result.val[1])); +#endif } -// Convert the lower packed 8-bit integers in a to packed single-precision -// (32-bit) floating-point elements, and store the results in dst. +// Unpack and interleave double-precision (64-bit) floating-point elements from +// the high half of a and b, and store the results in dst. // -// FOR j := 0 to 3 -// i := j*8 -// m := j*32 -// dst[m+31:m] := Convert_Int8_To_FP32(a[i+7:i]) -// ENDFOR +// DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) { +// dst[63:0] := src1[127:64] +// dst[127:64] := src2[127:64] +// RETURN dst[127:0] +// } +// dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpi8_ps -FORCE_INLINE __m128 _mm_cvtpi8_ps(__m64 a) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpackhi_pd +FORCE_INLINE __m128d _mm_unpackhi_pd(__m128d a, __m128d b) { - return vreinterpretq_m128_f32(vcvtq_f32_s32( - vmovl_s16(vget_low_s16(vmovl_s8(vreinterpret_s8_m64(a)))))); +#if defined(__aarch64__) + return vreinterpretq_m128d_f64( + vzip2q_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); +#else + return vreinterpretq_m128d_s64( + vcombine_s64(vget_high_s64(vreinterpretq_s64_m128d(a)), + vget_high_s64(vreinterpretq_s64_m128d(b)))); +#endif } -// Convert packed unsigned 16-bit integers in a to packed single-precision -// (32-bit) floating-point elements, and store the results in dst. +// Interleaves the lower 4 signed or unsigned 16-bit integers in a with the +// lower 4 signed or unsigned 16-bit integers in b. // -// FOR j := 0 to 3 -// i := j*16 -// m := j*32 -// dst[m+31:m] := Convert_UInt16_To_FP32(a[i+15:i]) -// ENDFOR +// r0 := a0 +// r1 := b0 +// r2 := a1 +// r3 := b1 +// r4 := a2 +// r5 := b2 +// r6 := a3 +// r7 := b3 // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpu16_ps -FORCE_INLINE __m128 _mm_cvtpu16_ps(__m64 a) +// https://msdn.microsoft.com/en-us/library/btxb17bw%28v=vs.90%29.aspx +FORCE_INLINE __m128i _mm_unpacklo_epi16(__m128i a, __m128i b) { - return vreinterpretq_m128_f32( - vcvtq_f32_u32(vmovl_u16(vreinterpret_u16_m64(a)))); +#if defined(__aarch64__) + return vreinterpretq_m128i_s16( + vzip1q_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); +#else + int16x4_t a1 = vget_low_s16(vreinterpretq_s16_m128i(a)); + int16x4_t b1 = vget_low_s16(vreinterpretq_s16_m128i(b)); + int16x4x2_t result = vzip_s16(a1, b1); + return vreinterpretq_m128i_s16(vcombine_s16(result.val[0], result.val[1])); +#endif } -// Convert the lower packed unsigned 8-bit integers in a to packed -// single-precision (32-bit) floating-point elements, and store the results in -// dst. +// Interleaves the lower 2 signed or unsigned 32 - bit integers in a with the +// lower 2 signed or unsigned 32 - bit integers in b. 
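The unpack family above is implemented with vzip1q/vzip2q on AArch64 and with a vzip plus vcombine pair elsewhere; either way the result alternates one lane from a with one lane from b. An illustrative check of the low-half ordering (include name, demo function and values are assumptions):

#include <assert.h>
#include "sse2neon.h" /* assumed include path for this header */

static void demo_unpacklo_epi16(void)
{
    __m128i a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
    __m128i b = _mm_setr_epi16(10, 11, 12, 13, 14, 15, 16, 17);
    __m128i lo = _mm_unpacklo_epi16(a, b); /* a0,b0,a1,b1,a2,b2,a3,b3 */

    assert(_mm_extract_epi16(lo, 0) == 0);
    assert(_mm_extract_epi16(lo, 1) == 10);
    assert(_mm_extract_epi16(lo, 2) == 1);
    assert(_mm_extract_epi16(lo, 3) == 11);
}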
// -// FOR j := 0 to 3 -// i := j*8 -// m := j*32 -// dst[m+31:m] := Convert_UInt8_To_FP32(a[i+7:i]) -// ENDFOR +// r0 := a0 +// r1 := b0 +// r2 := a1 +// r3 := b1 // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpu8_ps -FORCE_INLINE __m128 _mm_cvtpu8_ps(__m64 a) +// https://msdn.microsoft.com/en-us/library/x8atst9d(v=vs.100).aspx +FORCE_INLINE __m128i _mm_unpacklo_epi32(__m128i a, __m128i b) { - return vreinterpretq_m128_f32(vcvtq_f32_u32( - vmovl_u16(vget_low_u16(vmovl_u8(vreinterpret_u8_m64(a)))))); +#if defined(__aarch64__) + return vreinterpretq_m128i_s32( + vzip1q_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); +#else + int32x2_t a1 = vget_low_s32(vreinterpretq_s32_m128i(a)); + int32x2_t b1 = vget_low_s32(vreinterpretq_s32_m128i(b)); + int32x2x2_t result = vzip_s32(a1, b1); + return vreinterpretq_m128i_s32(vcombine_s32(result.val[0], result.val[1])); +#endif } -// Converts the four single-precision, floating-point values of a to signed -// 32-bit integer values using truncate. -// https://msdn.microsoft.com/en-us/library/vstudio/1h005y6x(v=vs.100).aspx -FORCE_INLINE __m128i _mm_cvttps_epi32(__m128 a) +FORCE_INLINE __m128i _mm_unpacklo_epi64(__m128i a, __m128i b) { - return vreinterpretq_m128i_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a))); + int64x1_t a_l = vget_low_s64(vreinterpretq_s64_m128i(a)); + int64x1_t b_l = vget_low_s64(vreinterpretq_s64_m128i(b)); + return vreinterpretq_m128i_s64(vcombine_s64(a_l, b_l)); } -// Convert the lower double-precision (64-bit) floating-point element in a to a -// 64-bit integer with truncation, and store the result in dst. +// Interleaves the lower 8 signed or unsigned 8-bit integers in a with the lower +// 8 signed or unsigned 8-bit integers in b. // -// dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0]) +// r0 := a0 +// r1 := b0 +// r2 := a1 +// r3 := b1 +// ... +// r14 := a7 +// r15 := b7 // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsd_si64 -FORCE_INLINE int64_t _mm_cvttsd_si64(__m128d a) +// https://msdn.microsoft.com/en-us/library/xf7k860c%28v=vs.90%29.aspx +FORCE_INLINE __m128i _mm_unpacklo_epi8(__m128i a, __m128i b) { #if defined(__aarch64__) - return vgetq_lane_s64(vcvtq_s64_f64(vreinterpretq_f64_m128d(a)), 0); + return vreinterpretq_m128i_s8( + vzip1q_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); #else - double ret = *((double *) &a); - return (int64_t) ret; + int8x8_t a1 = vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(a))); + int8x8_t b1 = vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(b))); + int8x8x2_t result = vzip_s8(a1, b1); + return vreinterpretq_m128i_s8(vcombine_s8(result.val[0], result.val[1])); #endif } -// Convert the lower double-precision (64-bit) floating-point element in a to a -// 64-bit integer with truncation, and store the result in dst. +// Unpack and interleave double-precision (64-bit) floating-point elements from +// the low half of a and b, and store the results in dst. 
// -// dst[63:0] := Convert_FP64_To_Int64_Truncate(a[63:0]) +// DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { +// dst[63:0] := src1[63:0] +// dst[127:64] := src2[63:0] +// RETURN dst[127:0] +// } +// dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvttsd_si64x -#define _mm_cvttsd_si64x(a) _mm_cvttsd_si64(a) - -// Converts the four signed 32-bit integer values of a to single-precision, -// floating-point values -// https://msdn.microsoft.com/en-us/library/vstudio/36bwxcx5(v=vs.100).aspx -FORCE_INLINE __m128 _mm_cvtepi32_ps(__m128i a) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpacklo_pd +FORCE_INLINE __m128d _mm_unpacklo_pd(__m128d a, __m128d b) { - return vreinterpretq_m128_f32(vcvtq_f32_s32(vreinterpretq_s32_m128i(a))); +#if defined(__aarch64__) + return vreinterpretq_m128d_f64( + vzip1q_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); +#else + return vreinterpretq_m128d_s64( + vcombine_s64(vget_low_s64(vreinterpretq_s64_m128d(a)), + vget_low_s64(vreinterpretq_s64_m128d(b)))); +#endif } -// Converts the four unsigned 8-bit integers in the lower 16 bits to four -// unsigned 32-bit integers. -FORCE_INLINE __m128i _mm_cvtepu8_epi16(__m128i a) +// Compute the bitwise XOR of packed double-precision (64-bit) floating-point +// elements in a and b, and store the results in dst. +// +// FOR j := 0 to 1 +// i := j*64 +// dst[i+63:i] := a[i+63:i] XOR b[i+63:i] +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_xor_pd +FORCE_INLINE __m128d _mm_xor_pd(__m128d a, __m128d b) { - uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx DCBA */ - uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0D0C 0B0A */ - return vreinterpretq_m128i_u16(u16x8); + return vreinterpretq_m128d_s64( + veorq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b))); } -// Converts the four unsigned 8-bit integers in the lower 32 bits to four -// unsigned 32-bit integers. -// https://msdn.microsoft.com/en-us/library/bb531467%28v=vs.100%29.aspx -FORCE_INLINE __m128i _mm_cvtepu8_epi32(__m128i a) +// Computes the bitwise XOR of the 128-bit value in a and the 128-bit value in +// b. https://msdn.microsoft.com/en-us/library/fzt08www(v=vs.100).aspx +FORCE_INLINE __m128i _mm_xor_si128(__m128i a, __m128i b) { - uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx DCBA */ - uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0D0C 0B0A */ - uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000D 000C 000B 000A */ - return vreinterpretq_m128i_u32(u32x4); + return vreinterpretq_m128i_s32( + veorq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); } -// Converts the two unsigned 8-bit integers in the lower 16 bits to two -// unsigned 64-bit integers. -FORCE_INLINE __m128i _mm_cvtepu8_epi64(__m128i a) +/* SSE3 */ + +// Alternatively add and subtract packed double-precision (64-bit) +// floating-point elements in a to/from packed elements in b, and store the +// results in dst. 
+// +// FOR j := 0 to 1 +// i := j*64 +// IF ((j & 1) == 0) +// dst[i+63:i] := a[i+63:i] - b[i+63:i] +// ELSE +// dst[i+63:i] := a[i+63:i] + b[i+63:i] +// FI +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_addsub_pd +FORCE_INLINE __m128d _mm_addsub_pd(__m128d a, __m128d b) { - uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx xxBA */ - uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0x0x 0B0A */ - uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */ - uint64x2_t u64x2 = vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */ - return vreinterpretq_m128i_u64(u64x2); + __m128d mask = _mm_set_pd(1.0f, -1.0f); +#if defined(__aarch64__) + return vreinterpretq_m128d_f64(vfmaq_f64(vreinterpretq_f64_m128d(a), + vreinterpretq_f64_m128d(b), + vreinterpretq_f64_m128d(mask))); +#else + return _mm_add_pd(_mm_mul_pd(b, mask), a); +#endif } -// Converts the four unsigned 8-bit integers in the lower 16 bits to four -// unsigned 32-bit integers. -FORCE_INLINE __m128i _mm_cvtepi8_epi16(__m128i a) +// Alternatively add and subtract packed single-precision (32-bit) +// floating-point elements in a to/from packed elements in b, and store the +// results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=addsub_ps +FORCE_INLINE __m128 _mm_addsub_ps(__m128 a, __m128 b) { - int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx DCBA */ - int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0D0C 0B0A */ - return vreinterpretq_m128i_s16(s16x8); + __m128 mask = {-1.0f, 1.0f, -1.0f, 1.0f}; +#if defined(__aarch64__) || defined(__ARM_FEATURE_FMA) /* VFPv4+ */ + return vreinterpretq_m128_f32(vfmaq_f32(vreinterpretq_f32_m128(a), + vreinterpretq_f32_m128(mask), + vreinterpretq_f32_m128(b))); +#else + return _mm_add_ps(_mm_mul_ps(b, mask), a); +#endif } -// Converts the four unsigned 8-bit integers in the lower 32 bits to four -// unsigned 32-bit integers. -FORCE_INLINE __m128i _mm_cvtepi8_epi32(__m128i a) +// Horizontally add adjacent pairs of double-precision (64-bit) floating-point +// elements in a and b, and pack the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hadd_pd +FORCE_INLINE __m128d _mm_hadd_pd(__m128d a, __m128d b) { - int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx DCBA */ - int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0D0C 0B0A */ - int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000D 000C 000B 000A */ - return vreinterpretq_m128i_s32(s32x4); +#if defined(__aarch64__) + return vreinterpretq_m128d_f64( + vpaddq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); +#else + double *da = (double *) &a; + double *db = (double *) &b; + double c[] = {da[0] + da[1], db[0] + db[1]}; + return vreinterpretq_m128d_u64(vld1q_u64((uint64_t *) c)); +#endif } -// Converts the two signed 8-bit integers in the lower 32 bits to four -// signed 64-bit integers. -FORCE_INLINE __m128i _mm_cvtepi8_epi64(__m128i a) +// Computes pairwise add of each argument as single-precision, floating-point +// values a and b. 
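_mm_addsub_pd and _mm_addsub_ps above fold the subtract-in-even-lanes, add-in-odd-lanes pattern into one fused multiply-add with a constant ±1 mask where FMA is available, and fall back to a separate multiply and add otherwise; the result is the same either way. A small illustrative check (include name, demo function and values are assumptions):

#include <assert.h>
#include "sse2neon.h" /* assumed include path for this header */

static void demo_addsub_ps(void)
{
    __m128 a = _mm_setr_ps(10.f, 10.f, 10.f, 10.f);
    __m128 b = _mm_setr_ps(1.f, 2.f, 3.f, 4.f);
    __m128 r = _mm_addsub_ps(a, b); /* {10-1, 10+2, 10-3, 10+4} */

    float ALIGN_STRUCT(16) out[4];
    _mm_store_ps(out, r);
    assert(out[0] == 9.f && out[1] == 12.f && out[2] == 7.f && out[3] == 14.f);
}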
+// https://msdn.microsoft.com/en-us/library/yd9wecaa.aspx
+FORCE_INLINE __m128 _mm_hadd_ps(__m128 a, __m128 b)
{
-    int8x16_t s8x16 = vreinterpretq_s8_m128i(a);      /* xxxx xxxx xxxx xxBA */
-    int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16));   /* 0x0x 0x0x 0x0x 0B0A */
-    int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */
-    int64x2_t s64x2 = vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */
-    return vreinterpretq_m128i_s64(s64x2);
+#if defined(__aarch64__)
+    return vreinterpretq_m128_f32(
+        vpaddq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+#else
+    float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
+    float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
+    float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
+    float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
+    return vreinterpretq_m128_f32(
+        vcombine_f32(vpadd_f32(a10, a32), vpadd_f32(b10, b32)));
+#endif
}

-// Converts the four signed 16-bit integers in the lower 64 bits to four signed
-// 32-bit integers.
-FORCE_INLINE __m128i _mm_cvtepi16_epi32(__m128i a)
+// Horizontally subtract adjacent pairs of double-precision (64-bit)
+// floating-point elements in a and b, and pack the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hsub_pd
+FORCE_INLINE __m128d _mm_hsub_pd(__m128d _a, __m128d _b)
{
-    return vreinterpretq_m128i_s32(
-        vmovl_s16(vget_low_s16(vreinterpretq_s16_m128i(a))));
+#if defined(__aarch64__)
+    return vreinterpretq_m128d_f64(vsubq_f64(
+        vuzp1q_f64(vreinterpretq_f64_m128d(_a), vreinterpretq_f64_m128d(_b)),
+        vuzp2q_f64(vreinterpretq_f64_m128d(_a), vreinterpretq_f64_m128d(_b))));
+#else
+    double *da = (double *) &_a;
+    double *db = (double *) &_b;
+    double c[] = {da[0] - da[1], db[0] - db[1]};
+    return vreinterpretq_m128d_u64(vld1q_u64((uint64_t *) c));
+#endif
}

-// Converts the two signed 16-bit integers in the lower 32 bits two signed
-// 32-bit integers.
-FORCE_INLINE __m128i _mm_cvtepi16_epi64(__m128i a)
+// Horizontally subtract adjacent pairs of single-precision (32-bit)
+// floating-point elements in a and b, and pack the results in dst.
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hsub_ps
+FORCE_INLINE __m128 _mm_hsub_ps(__m128 _a, __m128 _b)
{
-    int16x8_t s16x8 = vreinterpretq_s16_m128i(a);     /* xxxx xxxx xxxx 0B0A */
-    int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */
-    int64x2_t s64x2 = vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */
-    return vreinterpretq_m128i_s64(s64x2);
+#if defined(__aarch64__)
+    return vreinterpretq_m128_f32(vsubq_f32(
+        vuzp1q_f32(vreinterpretq_f32_m128(_a), vreinterpretq_f32_m128(_b)),
+        vuzp2q_f32(vreinterpretq_f32_m128(_a), vreinterpretq_f32_m128(_b))));
+#else
+    float32x4x2_t c =
+        vuzpq_f32(vreinterpretq_f32_m128(_a), vreinterpretq_f32_m128(_b));
+    return vreinterpretq_m128_f32(vsubq_f32(c.val[0], c.val[1]));
+#endif
}

-// Converts the four unsigned 16-bit integers in the lower 64 bits to four
-// unsigned 32-bit integers.
-FORCE_INLINE __m128i _mm_cvtepu16_epi32(__m128i a)
+// Load 128-bits of integer data from unaligned memory into dst. This intrinsic
+// may perform better than _mm_loadu_si128 when the data crosses a cache line
+// boundary.
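The horizontal add/subtract helpers above pack the pairwise results of a into the low half of dst and the pairwise results of b into the high half. An illustrative check for _mm_hadd_ps (include name, demo function and values are assumptions):

#include <assert.h>
#include "sse2neon.h" /* assumed include path for this header */

static void demo_hadd_ps(void)
{
    __m128 a = _mm_setr_ps(1.f, 2.f, 3.f, 4.f);
    __m128 b = _mm_setr_ps(10.f, 20.f, 30.f, 40.f);
    __m128 r = _mm_hadd_ps(a, b); /* {1+2, 3+4, 10+20, 30+40} */

    float ALIGN_STRUCT(16) out[4];
    _mm_store_ps(out, r);
    assert(out[0] == 3.f && out[1] == 7.f && out[2] == 30.f && out[3] == 70.f);
}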
+// +// dst[127:0] := MEM[mem_addr+127:mem_addr] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_lddqu_si128 +#define _mm_lddqu_si128 _mm_loadu_si128 + +// Load a double-precision (64-bit) floating-point element from memory into both +// elements of dst. +// +// dst[63:0] := MEM[mem_addr+63:mem_addr] +// dst[127:64] := MEM[mem_addr+63:mem_addr] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loaddup_pd +#define _mm_loaddup_pd _mm_load1_pd + +// Duplicate the low double-precision (64-bit) floating-point element from a, +// and store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movedup_pd +FORCE_INLINE __m128d _mm_movedup_pd(__m128d a) { - return vreinterpretq_m128i_u32( - vmovl_u16(vget_low_u16(vreinterpretq_u16_m128i(a)))); +#if (__aarch64__) + return vreinterpretq_m128d_f64( + vdupq_laneq_f64(vreinterpretq_f64_m128d(a), 0)); +#else + return vreinterpretq_m128d_u64( + vdupq_n_u64(vgetq_lane_u64(vreinterpretq_u64_m128d(a), 0))); +#endif } -// Converts the two unsigned 16-bit integers in the lower 32 bits to two -// unsigned 64-bit integers. -FORCE_INLINE __m128i _mm_cvtepu16_epi64(__m128i a) +// Duplicate odd-indexed single-precision (32-bit) floating-point elements +// from a, and store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movehdup_ps +FORCE_INLINE __m128 _mm_movehdup_ps(__m128 a) { - uint16x8_t u16x8 = vreinterpretq_u16_m128i(a); /* xxxx xxxx xxxx 0B0A */ - uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */ - uint64x2_t u64x2 = vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */ - return vreinterpretq_m128i_u64(u64x2); +#if __has_builtin(__builtin_shufflevector) + return vreinterpretq_m128_f32(__builtin_shufflevector( + vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 1, 1, 3, 3)); +#else + float32_t a1 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 1); + float32_t a3 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 3); + float ALIGN_STRUCT(16) data[4] = {a1, a1, a3, a3}; + return vreinterpretq_m128_f32(vld1q_f32(data)); +#endif } -// Converts the two unsigned 32-bit integers in the lower 64 bits to two -// unsigned 64-bit integers. -FORCE_INLINE __m128i _mm_cvtepu32_epi64(__m128i a) +// Duplicate even-indexed single-precision (32-bit) floating-point elements +// from a, and store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_moveldup_ps +FORCE_INLINE __m128 _mm_moveldup_ps(__m128 a) { - return vreinterpretq_m128i_u64( - vmovl_u32(vget_low_u32(vreinterpretq_u32_m128i(a)))); +#if __has_builtin(__builtin_shufflevector) + return vreinterpretq_m128_f32(__builtin_shufflevector( + vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 0, 0, 2, 2)); +#else + float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0); + float32_t a2 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 2); + float ALIGN_STRUCT(16) data[4] = {a0, a0, a2, a2}; + return vreinterpretq_m128_f32(vld1q_f32(data)); +#endif } -// Converts the two signed 32-bit integers in the lower 64 bits to two signed -// 64-bit integers. -FORCE_INLINE __m128i _mm_cvtepi32_epi64(__m128i a) +/* SSSE3 */ + +// Compute the absolute value of packed signed 16-bit integers in a, and store +// the unsigned results in dst. 
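_mm_movedup_pd, _mm_movehdup_ps and _mm_moveldup_ps above only rearrange lanes, so their effect is easiest to see on distinct inputs. An illustrative check (include name, demo function and values are assumptions):

#include <assert.h>
#include "sse2neon.h" /* assumed include path for this header */

static void demo_dup_ps(void)
{
    __m128 v = _mm_setr_ps(1.f, 2.f, 3.f, 4.f);
    float ALIGN_STRUCT(16) out[4];

    _mm_store_ps(out, _mm_moveldup_ps(v)); /* even lanes duplicated: {1,1,3,3} */
    assert(out[0] == 1.f && out[1] == 1.f && out[2] == 3.f && out[3] == 3.f);

    _mm_store_ps(out, _mm_movehdup_ps(v)); /* odd lanes duplicated: {2,2,4,4} */
    assert(out[0] == 2.f && out[1] == 2.f && out[2] == 4.f && out[3] == 4.f);
}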
+// +// FOR j := 0 to 7 +// i := j*16 +// dst[i+15:i] := ABS(a[i+15:i]) +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_epi16 +FORCE_INLINE __m128i _mm_abs_epi16(__m128i a) { - return vreinterpretq_m128i_s64( - vmovl_s32(vget_low_s32(vreinterpretq_s32_m128i(a)))); + return vreinterpretq_m128i_s16(vabsq_s16(vreinterpretq_s16_m128i(a))); } -// Converts the four single-precision, floating-point values of a to signed -// 32-bit integer values. +// Compute the absolute value of packed signed 32-bit integers in a, and store +// the unsigned results in dst. // -// r0 := (int) a0 -// r1 := (int) a1 -// r2 := (int) a2 -// r3 := (int) a3 +// FOR j := 0 to 3 +// i := j*32 +// dst[i+31:i] := ABS(a[i+31:i]) +// ENDFOR // -// https://msdn.microsoft.com/en-us/library/vstudio/xdc42k5e(v=vs.100).aspx -// *NOTE*. The default rounding mode on SSE is 'round to even', which ARMv7-A -// does not support! It is supported on ARMv8-A however. -FORCE_INLINE __m128i _mm_cvtps_epi32(__m128 a) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_epi32 +FORCE_INLINE __m128i _mm_abs_epi32(__m128i a) { -#if defined(__aarch64__) - return vreinterpretq_m128i_s32(vcvtnq_s32_f32(a)); -#else - uint32x4_t signmask = vdupq_n_u32(0x80000000); - float32x4_t half = vbslq_f32(signmask, vreinterpretq_f32_m128(a), - vdupq_n_f32(0.5f)); /* +/- 0.5 */ - int32x4_t r_normal = vcvtq_s32_f32(vaddq_f32( - vreinterpretq_f32_m128(a), half)); /* round to integer: [a + 0.5]*/ - int32x4_t r_trunc = - vcvtq_s32_f32(vreinterpretq_f32_m128(a)); /* truncate to integer: [a] */ - int32x4_t plusone = vreinterpretq_s32_u32(vshrq_n_u32( - vreinterpretq_u32_s32(vnegq_s32(r_trunc)), 31)); /* 1 or 0 */ - int32x4_t r_even = vbicq_s32(vaddq_s32(r_trunc, plusone), - vdupq_n_s32(1)); /* ([a] + {0,1}) & ~1 */ - float32x4_t delta = vsubq_f32( - vreinterpretq_f32_m128(a), - vcvtq_f32_s32(r_trunc)); /* compute delta: delta = (a - [a]) */ - uint32x4_t is_delta_half = vceqq_f32(delta, half); /* delta == +/- 0.5 */ - return vreinterpretq_m128i_s32(vbslq_s32(is_delta_half, r_even, r_normal)); -#endif + return vreinterpretq_m128i_s32(vabsq_s32(vreinterpretq_s32_m128i(a))); } -// Convert packed single-precision (32-bit) floating-point elements in a to -// packed 16-bit integers, and store the results in dst. Note: this intrinsic -// will generate 0x7FFF, rather than 0x8000, for input values between 0x7FFF and -// 0x7FFFFFFF. +// Compute the absolute value of packed signed 8-bit integers in a, and store +// the unsigned results in dst. // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtps_pi16 -FORCE_INLINE __m64 _mm_cvtps_pi16(__m128 a) +// FOR j := 0 to 15 +// i := j*8 +// dst[i+7:i] := ABS(a[i+7:i]) +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_epi8 +FORCE_INLINE __m128i _mm_abs_epi8(__m128i a) { - return vreinterpret_m64_s16( - vmovn_s32(vreinterpretq_s32_m128i(_mm_cvtps_epi32(a)))); + return vreinterpretq_m128i_s8(vabsq_s8(vreinterpretq_s8_m128i(a))); } -// Copy the lower 32-bit integer in a to dst. +// Compute the absolute value of packed signed 16-bit integers in a, and store +// the unsigned results in dst. 
// -// dst[31:0] := a[31:0] +// FOR j := 0 to 3 +// i := j*16 +// dst[i+15:i] := ABS(a[i+15:i]) +// ENDFOR // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si32 -FORCE_INLINE int _mm_cvtsi128_si32(__m128i a) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_pi16 +FORCE_INLINE __m64 _mm_abs_pi16(__m64 a) { - return vgetq_lane_s32(vreinterpretq_s32_m128i(a), 0); + return vreinterpret_m64_s16(vabs_s16(vreinterpret_s16_m64(a))); } -// Copy the lower 64-bit integer in a to dst. +// Compute the absolute value of packed signed 32-bit integers in a, and store +// the unsigned results in dst. // -// dst[63:0] := a[63:0] +// FOR j := 0 to 1 +// i := j*32 +// dst[i+31:i] := ABS(a[i+31:i]) +// ENDFOR // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si64 -FORCE_INLINE int64_t _mm_cvtsi128_si64(__m128i a) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_pi32 +FORCE_INLINE __m64 _mm_abs_pi32(__m64 a) { - return vgetq_lane_s64(vreinterpretq_s64_m128i(a), 0); + return vreinterpret_m64_s32(vabs_s32(vreinterpret_s32_m64(a))); } -// Copy the lower 64-bit integer in a to dst. +// Compute the absolute value of packed signed 8-bit integers in a, and store +// the unsigned results in dst. // -// dst[63:0] := a[63:0] +// FOR j := 0 to 7 +// i := j*8 +// dst[i+7:i] := ABS(a[i+7:i]) +// ENDFOR // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsi128_si64x -#define _mm_cvtsi128_si64x(a) _mm_cvtsi128_si64(a) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_abs_pi8 +FORCE_INLINE __m64 _mm_abs_pi8(__m64 a) +{ + return vreinterpret_m64_s8(vabs_s8(vreinterpret_s8_m64(a))); +} -// Moves 32-bit integer a to the least significant 32 bits of an __m128 object, -// zero extending the upper bits. +// Concatenate 16-byte blocks in a and b into a 32-byte temporary result, shift +// the result right by imm8 bytes, and store the low 16 bytes in dst. // -// r0 := a -// r1 := 0x0 -// r2 := 0x0 -// r3 := 0x0 +// tmp[255:0] := ((a[127:0] << 128)[255:0] OR b[127:0]) >> (imm8*8) +// dst[127:0] := tmp[127:0] // -// https://msdn.microsoft.com/en-us/library/ct3539ha%28v=vs.90%29.aspx -FORCE_INLINE __m128i _mm_cvtsi32_si128(int a) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_alignr_epi8 +FORCE_INLINE __m128i _mm_alignr_epi8(__m128i a, __m128i b, int imm) { - return vreinterpretq_m128i_s32(vsetq_lane_s32(a, vdupq_n_s32(0), 0)); + if (_sse2neon_unlikely(imm & ~31)) + return _mm_setzero_si128(); + int idx; + uint8x16_t tmp[2]; + if (imm >= 16) { + idx = imm - 16; + tmp[0] = vreinterpretq_u8_m128i(a); + tmp[1] = vdupq_n_u8(0); + } else { + idx = imm; + tmp[0] = vreinterpretq_u8_m128i(b); + tmp[1] = vreinterpretq_u8_m128i(a); + } + return vreinterpretq_m128i_u8(vld1q_u8(((uint8_t const *) tmp) + idx)); } -// Moves 64-bit integer a to the least significant 64 bits of an __m128 object, -// zero extending the upper bits. +// Concatenate 8-byte blocks in a and b into a 16-byte temporary result, shift +// the result right by imm8 bytes, and store the low 8 bytes in dst. 
// -// r0 := a -// r1 := 0x0 -FORCE_INLINE __m128i _mm_cvtsi64_si128(int64_t a) +// tmp[127:0] := ((a[63:0] << 64)[127:0] OR b[63:0]) >> (imm8*8) +// dst[63:0] := tmp[63:0] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_alignr_pi8 +#define _mm_alignr_pi8(a, b, imm) \ + __extension__({ \ + __m64 ret; \ + if (_sse2neon_unlikely((imm) >= 16)) { \ + ret = vreinterpret_m64_s8(vdup_n_s8(0)); \ + } else { \ + uint8x8_t tmp_low, tmp_high; \ + if (imm >= 8) { \ + const int idx = imm - 8; \ + tmp_low = vreinterpret_u8_m64(a); \ + tmp_high = vdup_n_u8(0); \ + ret = vreinterpret_m64_u8(vext_u8(tmp_low, tmp_high, idx)); \ + } else { \ + const int idx = imm; \ + tmp_low = vreinterpret_u8_m64(b); \ + tmp_high = vreinterpret_u8_m64(a); \ + ret = vreinterpret_m64_u8(vext_u8(tmp_low, tmp_high, idx)); \ + } \ + } \ + ret; \ + }) + +// Computes pairwise add of each argument as a 16-bit signed or unsigned integer +// values a and b. +FORCE_INLINE __m128i _mm_hadd_epi16(__m128i _a, __m128i _b) { - return vreinterpretq_m128i_s64(vsetq_lane_s64(a, vdupq_n_s64(0), 0)); + int16x8_t a = vreinterpretq_s16_m128i(_a); + int16x8_t b = vreinterpretq_s16_m128i(_b); +#if defined(__aarch64__) + return vreinterpretq_m128i_s16(vpaddq_s16(a, b)); +#else + return vreinterpretq_m128i_s16( + vcombine_s16(vpadd_s16(vget_low_s16(a), vget_high_s16(a)), + vpadd_s16(vget_low_s16(b), vget_high_s16(b)))); +#endif } -// Cast vector of type __m128 to type __m128d. This intrinsic is only used for -// compilation and does not generate any instructions, thus it has zero latency. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castps_pd -FORCE_INLINE __m128d _mm_castps_pd(__m128 a) +// Computes pairwise add of each argument as a 32-bit signed or unsigned integer +// values a and b. +FORCE_INLINE __m128i _mm_hadd_epi32(__m128i _a, __m128i _b) { - return vreinterpretq_m128d_s32(vreinterpretq_s32_m128(a)); + int32x4_t a = vreinterpretq_s32_m128i(_a); + int32x4_t b = vreinterpretq_s32_m128i(_b); + return vreinterpretq_m128i_s32( + vcombine_s32(vpadd_s32(vget_low_s32(a), vget_high_s32(a)), + vpadd_s32(vget_low_s32(b), vget_high_s32(b)))); } -// Applies a type cast to reinterpret four 32-bit floating point values passed -// in as a 128-bit parameter as packed 32-bit integers. -// https://msdn.microsoft.com/en-us/library/bb514099.aspx -FORCE_INLINE __m128i _mm_castps_si128(__m128 a) +// Horizontally add adjacent pairs of 16-bit integers in a and b, and pack the +// signed 16-bit results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hadd_pi16 +FORCE_INLINE __m64 _mm_hadd_pi16(__m64 a, __m64 b) { - return vreinterpretq_m128i_s32(vreinterpretq_s32_m128(a)); + return vreinterpret_m64_s16( + vpadd_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b))); } -// Cast vector of type __m128i to type __m128d. This intrinsic is only used for -// compilation and does not generate any instructions, thus it has zero latency. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castsi128_pd -FORCE_INLINE __m128d _mm_castsi128_pd(__m128i a) +// Horizontally add adjacent pairs of 32-bit integers in a and b, and pack the +// signed 32-bit results in dst. 
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hadd_pi32 +FORCE_INLINE __m64 _mm_hadd_pi32(__m64 a, __m64 b) +{ + return vreinterpret_m64_s32( + vpadd_s32(vreinterpret_s32_m64(a), vreinterpret_s32_m64(b))); +} + +// Computes saturated pairwise sub of each argument as a 16-bit signed +// integer values a and b. +FORCE_INLINE __m128i _mm_hadds_epi16(__m128i _a, __m128i _b) { #if defined(__aarch64__) - return vreinterpretq_m128d_f64(vreinterpretq_f64_m128i(a)); + int16x8_t a = vreinterpretq_s16_m128i(_a); + int16x8_t b = vreinterpretq_s16_m128i(_b); + return vreinterpretq_s64_s16( + vqaddq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b))); #else - return vreinterpretq_m128d_f32(vreinterpretq_f32_m128i(a)); + int32x4_t a = vreinterpretq_s32_m128i(_a); + int32x4_t b = vreinterpretq_s32_m128i(_b); + // Interleave using vshrn/vmovn + // [a0|a2|a4|a6|b0|b2|b4|b6] + // [a1|a3|a5|a7|b1|b3|b5|b7] + int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b)); + int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(a, 16), vshrn_n_s32(b, 16)); + // Saturated add + return vreinterpretq_m128i_s16(vqaddq_s16(ab0246, ab1357)); #endif } -// Applies a type cast to reinterpret four 32-bit integers passed in as a -// 128-bit parameter as packed 32-bit floating point values. -// https://msdn.microsoft.com/en-us/library/bb514029.aspx -FORCE_INLINE __m128 _mm_castsi128_ps(__m128i a) +// Horizontally add adjacent pairs of signed 16-bit integers in a and b using +// saturation, and pack the signed 16-bit results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hadds_pi16 +FORCE_INLINE __m64 _mm_hadds_pi16(__m64 _a, __m64 _b) { - return vreinterpretq_m128_s32(vreinterpretq_s32_m128i(a)); + int16x4_t a = vreinterpret_s16_m64(_a); + int16x4_t b = vreinterpret_s16_m64(_b); +#if defined(__aarch64__) + return vreinterpret_s64_s16(vqadd_s16(vuzp1_s16(a, b), vuzp2_s16(a, b))); +#else + int16x4x2_t res = vuzp_s16(a, b); + return vreinterpret_s64_s16(vqadd_s16(res.val[0], res.val[1])); +#endif } -// Loads 128-bit value. : -// https://msdn.microsoft.com/en-us/library/atzzad1h(v=vs.80).aspx -FORCE_INLINE __m128i _mm_load_si128(const __m128i *p) +// Computes pairwise difference of each argument as a 16-bit signed or unsigned +// integer values a and b. +FORCE_INLINE __m128i _mm_hsub_epi16(__m128i _a, __m128i _b) { - return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *) p)); + int32x4_t a = vreinterpretq_s32_m128i(_a); + int32x4_t b = vreinterpretq_s32_m128i(_b); + // Interleave using vshrn/vmovn + // [a0|a2|a4|a6|b0|b2|b4|b6] + // [a1|a3|a5|a7|b1|b3|b5|b7] + int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b)); + int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(a, 16), vshrn_n_s32(b, 16)); + // Subtract + return vreinterpretq_m128i_s16(vsubq_s16(ab0246, ab1357)); } -// Load a double-precision (64-bit) floating-point element from memory into both -// elements of dst. -// -// dst[63:0] := MEM[mem_addr+63:mem_addr] -// dst[127:64] := MEM[mem_addr+63:mem_addr] -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load1_pd -FORCE_INLINE __m128d _mm_load1_pd(const double *p) +// Computes pairwise difference of each argument as a 32-bit signed or unsigned +// integer values a and b. 
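// The ARMv7 fallbacks above (in _mm_hadds_epi16 and _mm_hsub_epi16) split an
// 8-lane 16-bit vector into its even- and odd-indexed elements by
// reinterpreting lane pairs as 32-bit words: narrowing keeps the even lane,
// an arithmetic shift-narrow by 16 keeps the odd lane. A scalar sketch of
// that trick, assuming little-endian lane order; editorial reference only.
#include <stdint.h>

static void split_even_odd_s16(const int32_t w[4], int16_t even[4], int16_t odd[4])
{
    for (int i = 0; i < 4; i++) {
        even[i] = (int16_t)(w[i] & 0xFFFF);  /* vmovn_s32: low half of each word */
        odd[i]  = (int16_t)(w[i] >> 16);     /* vshrn_n_s32(w, 16): high half    */
    }
}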
+FORCE_INLINE __m128i _mm_hsub_epi32(__m128i _a, __m128i _b) +{ + int64x2_t a = vreinterpretq_s64_m128i(_a); + int64x2_t b = vreinterpretq_s64_m128i(_b); + // Interleave using vshrn/vmovn + // [a0|a2|b0|b2] + // [a1|a2|b1|b3] + int32x4_t ab02 = vcombine_s32(vmovn_s64(a), vmovn_s64(b)); + int32x4_t ab13 = vcombine_s32(vshrn_n_s64(a, 32), vshrn_n_s64(b, 32)); + // Subtract + return vreinterpretq_m128i_s32(vsubq_s32(ab02, ab13)); +} + +// Horizontally subtract adjacent pairs of 16-bit integers in a and b, and pack +// the signed 16-bit results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hsub_pi16 +FORCE_INLINE __m64 _mm_hsub_pi16(__m64 _a, __m64 _b) +{ + int32x4_t ab = + vcombine_s32(vreinterpret_s32_m64(_a), vreinterpret_s32_m64(_b)); + + int16x4_t ab_low_bits = vmovn_s32(ab); + int16x4_t ab_high_bits = vshrn_n_s32(ab, 16); + + return vreinterpret_m64_s16(vsub_s16(ab_low_bits, ab_high_bits)); +} + +// Horizontally subtract adjacent pairs of 32-bit integers in a and b, and pack +// the signed 32-bit results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_hsub_pi32 +FORCE_INLINE __m64 _mm_hsub_pi32(__m64 _a, __m64 _b) { #if defined(__aarch64__) - return vreinterpretq_m128d_f64(vld1q_dup_f64(p)); + int32x2_t a = vreinterpret_s32_m64(_a); + int32x2_t b = vreinterpret_s32_m64(_b); + return vreinterpret_m64_s32(vsub_s32(vtrn1_s32(a, b), vtrn2_s32(a, b))); #else - return vreinterpretq_m128d_s64(vdupq_n_s64(*(const int64_t *) p)); + int32x2x2_t trn_ab = + vtrn_s32(vreinterpret_s32_m64(_a), vreinterpret_s32_m64(_b)); + return vreinterpret_m64_s32(vsub_s32(trn_ab.val[0], trn_ab.val[1])); #endif } -// Load a double-precision (64-bit) floating-point element from memory into both -// elements of dst. -// -// dst[63:0] := MEM[mem_addr+63:mem_addr] -// dst[127:64] := MEM[mem_addr+63:mem_addr] -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_pd1 -#define _mm_load_pd1 _mm_load1_pd +// Computes saturated pairwise difference of each argument as a 16-bit signed +// integer values a and b. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hsubs_epi16 +FORCE_INLINE __m128i _mm_hsubs_epi16(__m128i _a, __m128i _b) +{ +#if defined(__aarch64__) + int16x8_t a = vreinterpretq_s16_m128i(_a); + int16x8_t b = vreinterpretq_s16_m128i(_b); + return vreinterpretq_s64_s16( + vqsubq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b))); +#else + int32x4_t a = vreinterpretq_s32_m128i(_a); + int32x4_t b = vreinterpretq_s32_m128i(_b); + // Interleave using vshrn/vmovn + // [a0|a2|a4|a6|b0|b2|b4|b6] + // [a1|a3|a5|a7|b1|b3|b5|b7] + int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b)); + int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(a, 16), vshrn_n_s32(b, 16)); + // Saturated subtract + return vreinterpretq_m128i_s16(vqsubq_s16(ab0246, ab1357)); +#endif +} -// Load a double-precision (64-bit) floating-point element from memory into the -// upper element of dst, and copy the lower element from a to dst. mem_addr does -// not need to be aligned on any particular boundary. -// -// dst[63:0] := a[63:0] -// dst[127:64] := MEM[mem_addr+63:mem_addr] -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadh_pd -FORCE_INLINE __m128d _mm_loadh_pd(__m128d a, const double *p) +// Horizontally subtract adjacent pairs of signed 16-bit integers in a and b +// using saturation, and pack the signed 16-bit results in dst. 
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_hsubs_pi16 +FORCE_INLINE __m64 _mm_hsubs_pi16(__m64 _a, __m64 _b) { + int16x4_t a = vreinterpret_s16_m64(_a); + int16x4_t b = vreinterpret_s16_m64(_b); #if defined(__aarch64__) - return vreinterpretq_m128d_f64( - vcombine_f64(vget_low_f64(vreinterpretq_f64_m128d(a)), vld1_f64(p))); + return vreinterpret_s64_s16(vqsub_s16(vuzp1_s16(a, b), vuzp2_s16(a, b))); #else - return vreinterpretq_m128d_f32(vcombine_f32( - vget_low_f32(vreinterpretq_f32_m128d(a)), vld1_f32((const float *) p))); + int16x4x2_t res = vuzp_s16(a, b); + return vreinterpret_s64_s16(vqsub_s16(res.val[0], res.val[1])); #endif } -// Load a double-precision (64-bit) floating-point element from memory into both -// elements of dst. -// -// dst[63:0] := MEM[mem_addr+63:mem_addr] -// dst[127:64] := MEM[mem_addr+63:mem_addr] +// Vertically multiply each unsigned 8-bit integer from a with the corresponding +// signed 8-bit integer from b, producing intermediate signed 16-bit integers. +// Horizontally add adjacent pairs of intermediate signed 16-bit integers, +// and pack the saturated results in dst. // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_load_pd1 -#define _mm_load_pd1 _mm_load1_pd +// FOR j := 0 to 7 +// i := j*16 +// dst[i+15:i] := Saturate_To_Int16( a[i+15:i+8]*b[i+15:i+8] + +// a[i+7:i]*b[i+7:i] ) +// ENDFOR +FORCE_INLINE __m128i _mm_maddubs_epi16(__m128i _a, __m128i _b) +{ +#if defined(__aarch64__) + uint8x16_t a = vreinterpretq_u8_m128i(_a); + int8x16_t b = vreinterpretq_s8_m128i(_b); + int16x8_t tl = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(a))), + vmovl_s8(vget_low_s8(b))); + int16x8_t th = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(a))), + vmovl_s8(vget_high_s8(b))); + return vreinterpretq_m128i_s16( + vqaddq_s16(vuzp1q_s16(tl, th), vuzp2q_s16(tl, th))); +#else + // This would be much simpler if x86 would choose to zero extend OR sign + // extend, not both. This could probably be optimized better. + uint16x8_t a = vreinterpretq_u16_m128i(_a); + int16x8_t b = vreinterpretq_s16_m128i(_b); -// Load a double-precision (64-bit) floating-point element from memory into both -// elements of dst. -// -// dst[63:0] := MEM[mem_addr+63:mem_addr] -// dst[127:64] := MEM[mem_addr+63:mem_addr] -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loaddup_pd -#define _mm_loaddup_pd _mm_load1_pd + // Zero extend a + int16x8_t a_odd = vreinterpretq_s16_u16(vshrq_n_u16(a, 8)); + int16x8_t a_even = vreinterpretq_s16_u16(vbicq_u16(a, vdupq_n_u16(0xff00))); -// Loads 128-bit value. : -// https://msdn.microsoft.com/zh-cn/library/f4k12ae8(v=vs.90).aspx -FORCE_INLINE __m128i _mm_loadu_si128(const __m128i *p) + // Sign extend by shifting left then shifting right. + int16x8_t b_even = vshrq_n_s16(vshlq_n_s16(b, 8), 8); + int16x8_t b_odd = vshrq_n_s16(b, 8); + + // multiply + int16x8_t prod1 = vmulq_s16(a_even, b_even); + int16x8_t prod2 = vmulq_s16(a_odd, b_odd); + + // saturated add + return vreinterpretq_m128i_s16(vqaddq_s16(prod1, prod2)); +#endif +} + +// Vertically multiply each unsigned 8-bit integer from a with the corresponding +// signed 8-bit integer from b, producing intermediate signed 16-bit integers. +// Horizontally add adjacent pairs of intermediate signed 16-bit integers, and +// pack the saturated results in dst. 
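// Scalar reference for the _mm_maddubs_epi16 behaviour implemented above:
// unsigned bytes of a are multiplied by the corresponding signed bytes of b,
// and adjacent products are summed with signed 16-bit saturation. Editorial
// sketch for cross-checking only, not part of the patch.
#include <stdint.h>

static int16_t sat_s16(int32_t v)
{
    return (int16_t)(v > 32767 ? 32767 : (v < -32768 ? -32768 : v));
}

static void maddubs_epi16_ref(int16_t dst[8], const uint8_t a[16], const int8_t b[16])
{
    for (int j = 0; j < 8; j++) {
        int32_t lo = (int32_t)a[2 * j] * b[2 * j];
        int32_t hi = (int32_t)a[2 * j + 1] * b[2 * j + 1];
        dst[j] = sat_s16(lo + hi);
    }
}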
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maddubs_pi16 +FORCE_INLINE __m64 _mm_maddubs_pi16(__m64 _a, __m64 _b) { - return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *) p)); + uint16x4_t a = vreinterpret_u16_m64(_a); + int16x4_t b = vreinterpret_s16_m64(_b); + + // Zero extend a + int16x4_t a_odd = vreinterpret_s16_u16(vshr_n_u16(a, 8)); + int16x4_t a_even = vreinterpret_s16_u16(vand_u16(a, vdup_n_u16(0xff))); + + // Sign extend by shifting left then shifting right. + int16x4_t b_even = vshr_n_s16(vshl_n_s16(b, 8), 8); + int16x4_t b_odd = vshr_n_s16(b, 8); + + // multiply + int16x4_t prod1 = vmul_s16(a_even, b_even); + int16x4_t prod2 = vmul_s16(a_odd, b_odd); + + // saturated add + return vreinterpret_m64_s16(vqadd_s16(prod1, prod2)); } -// Load unaligned 32-bit integer from memory into the first element of dst. -// -// dst[31:0] := MEM[mem_addr+31:mem_addr] -// dst[MAX:32] := 0 +// Multiply packed signed 16-bit integers in a and b, producing intermediate +// signed 32-bit integers. Shift right by 15 bits while rounding up, and store +// the packed 16-bit integers in dst. // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_loadu_si32 -FORCE_INLINE __m128i _mm_loadu_si32(const void *p) +// r0 := Round(((int32_t)a0 * (int32_t)b0) >> 15) +// r1 := Round(((int32_t)a1 * (int32_t)b1) >> 15) +// r2 := Round(((int32_t)a2 * (int32_t)b2) >> 15) +// ... +// r7 := Round(((int32_t)a7 * (int32_t)b7) >> 15) +FORCE_INLINE __m128i _mm_mulhrs_epi16(__m128i a, __m128i b) { - return vreinterpretq_m128i_s32( - vsetq_lane_s32(*(const int32_t *) p, vdupq_n_s32(0), 0)); + // Has issues due to saturation + // return vreinterpretq_m128i_s16(vqrdmulhq_s16(a, b)); + + // Multiply + int32x4_t mul_lo = vmull_s16(vget_low_s16(vreinterpretq_s16_m128i(a)), + vget_low_s16(vreinterpretq_s16_m128i(b))); + int32x4_t mul_hi = vmull_s16(vget_high_s16(vreinterpretq_s16_m128i(a)), + vget_high_s16(vreinterpretq_s16_m128i(b))); + + // Rounding narrowing shift right + // narrow = (int16_t)((mul + 16384) >> 15); + int16x4_t narrow_lo = vrshrn_n_s32(mul_lo, 15); + int16x4_t narrow_hi = vrshrn_n_s32(mul_hi, 15); + + // Join together + return vreinterpretq_m128i_s16(vcombine_s16(narrow_lo, narrow_hi)); } -// Convert packed double-precision (64-bit) floating-point elements in a to -// packed single-precision (32-bit) floating-point elements, and store the -// results in dst. +// Multiply packed signed 16-bit integers in a and b, producing intermediate +// signed 32-bit integers. Truncate each intermediate integer to the 18 most +// significant bits, round by adding 1, and store bits [16:1] to dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mulhrs_pi16 +FORCE_INLINE __m64 _mm_mulhrs_pi16(__m64 a, __m64 b) +{ + int32x4_t mul_extend = + vmull_s16((vreinterpret_s16_m64(a)), (vreinterpret_s16_m64(b))); + + // Rounding narrowing shift right + return vreinterpret_m64_s16(vrshrn_n_s32(mul_extend, 15)); +} + +// Shuffle packed 8-bit integers in a according to shuffle control mask in the +// corresponding 8-bit element of b, and store the results in dst. 
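// Scalar model of the rounding performed by _mm_mulhrs_epi16 above:
// (a*b + 0x4000) >> 15, truncated to 16 bits. It also shows why the
// commented-out vqrdmulhq_s16 path was rejected: for a == b == -32768 the
// exact result is 0x8000, which vqrdmulh saturates to 0x7FFF while SSSE3
// wraps to -32768. Editorial sketch only.
#include <stdint.h>

static int16_t mulhrs_ref(int16_t a, int16_t b)
{
    int32_t p = (int32_t)a * (int32_t)b;   /* full 32-bit product            */
    return (int16_t)((p + 0x4000) >> 15);  /* round to nearest, keep 16 bits */
}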
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shuffle_epi8 +FORCE_INLINE __m128i _mm_shuffle_epi8(__m128i a, __m128i b) +{ + int8x16_t tbl = vreinterpretq_s8_m128i(a); // input a + uint8x16_t idx = vreinterpretq_u8_m128i(b); // input b + uint8x16_t idx_masked = + vandq_u8(idx, vdupq_n_u8(0x8F)); // avoid using meaningless bits +#if defined(__aarch64__) + return vreinterpretq_m128i_s8(vqtbl1q_s8(tbl, idx_masked)); +#elif defined(__GNUC__) + int8x16_t ret; + // %e and %f represent the even and odd D registers + // respectively. + __asm__ __volatile__( + "vtbl.8 %e[ret], {%e[tbl], %f[tbl]}, %e[idx]\n" + "vtbl.8 %f[ret], {%e[tbl], %f[tbl]}, %f[idx]\n" + : [ret] "=&w"(ret) + : [tbl] "w"(tbl), [idx] "w"(idx_masked)); + return vreinterpretq_m128i_s8(ret); +#else + // use this line if testing on aarch64 + int8x8x2_t a_split = {vget_low_s8(tbl), vget_high_s8(tbl)}; + return vreinterpretq_m128i_s8( + vcombine_s8(vtbl2_s8(a_split, vget_low_u8(idx_masked)), + vtbl2_s8(a_split, vget_high_u8(idx_masked)))); +#endif +} + +// Shuffle packed 8-bit integers in a according to shuffle control mask in the +// corresponding 8-bit element of b, and store the results in dst. // -// FOR j := 0 to 1 -// i := 32*j -// k := 64*j -// dst[i+31:i] := Convert_FP64_To_FP32(a[k+64:k]) +// FOR j := 0 to 7 +// i := j*8 +// IF b[i+7] == 1 +// dst[i+7:i] := 0 +// ELSE +// index[2:0] := b[i+2:i] +// dst[i+7:i] := a[index*8+7:index*8] +// FI // ENDFOR -// dst[127:64] := 0 // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtpd_ps -FORCE_INLINE __m128 _mm_cvtpd_ps(__m128d a) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shuffle_pi8 +FORCE_INLINE __m64 _mm_shuffle_pi8(__m64 a, __m64 b) +{ + const int8x8_t controlMask = + vand_s8(vreinterpret_s8_m64(b), vdup_n_s8((int8_t)(0x1 << 7 | 0x07))); + int8x8_t res = vtbl1_s8(vreinterpret_s8_m64(a), controlMask); + return vreinterpret_m64_s8(res); +} + +// Negate packed 16-bit integers in a when the corresponding signed +// 16-bit integer in b is negative, and store the results in dst. +// Element in dst are zeroed out when the corresponding element +// in b is zero. +// +// for i in 0..7 +// if b[i] < 0 +// r[i] := -a[i] +// else if b[i] == 0 +// r[i] := 0 +// else +// r[i] := a[i] +// fi +// done +FORCE_INLINE __m128i _mm_sign_epi16(__m128i _a, __m128i _b) { + int16x8_t a = vreinterpretq_s16_m128i(_a); + int16x8_t b = vreinterpretq_s16_m128i(_b); + + // signed shift right: faster than vclt + // (b < 0) ? 0xFFFF : 0 + uint16x8_t ltMask = vreinterpretq_u16_s16(vshrq_n_s16(b, 15)); + // (b == 0) ? 0xFFFF : 0 #if defined(__aarch64__) - float32x2_t tmp = vcvt_f32_f64(vreinterpretq_f64_m128d(a)); - return vreinterpretq_m128_f32(vcombine_f32(tmp, vdup_n_f32(0))); + int16x8_t zeroMask = vreinterpretq_s16_u16(vceqzq_s16(b)); #else - float a0 = (float) ((double *) &a)[0]; - float a1 = (float) ((double *) &a)[1]; - return _mm_set_ps(0, 0, a1, a0); + int16x8_t zeroMask = vreinterpretq_s16_u16(vceqq_s16(b, vdupq_n_s16(0))); #endif + + // bitwise select either a or negative 'a' (vnegq_s16(a) equals to negative + // 'a') based on ltMask + int16x8_t masked = vbslq_s16(ltMask, vnegq_s16(a), a); + // res = masked & (~zeroMask) + int16x8_t res = vbicq_s16(masked, zeroMask); + return vreinterpretq_m128i_s16(res); } -// Copy the lower double-precision (64-bit) floating-point element of a to dst. 
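// Scalar reference for the _mm_shuffle_epi8 lookup implemented above: a set
// bit 7 in a control byte yields zero, otherwise its low four bits index a
// byte of a (hence the 0x8F mask applied before the NEON table lookup).
// Editorial sketch only.
#include <stdint.h>

static void shuffle_epi8_ref(uint8_t dst[16], const uint8_t a[16], const uint8_t b[16])
{
    for (int i = 0; i < 16; i++)
        dst[i] = (b[i] & 0x80) ? 0 : a[b[i] & 0x0F];
}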
+// Negate packed 32-bit integers in a when the corresponding signed +// 32-bit integer in b is negative, and store the results in dst. +// Element in dst are zeroed out when the corresponding element +// in b is zero. // -// dst[63:0] := a[63:0] +// for i in 0..3 +// if b[i] < 0 +// r[i] := -a[i] +// else if b[i] == 0 +// r[i] := 0 +// else +// r[i] := a[i] +// fi +// done +FORCE_INLINE __m128i _mm_sign_epi32(__m128i _a, __m128i _b) +{ + int32x4_t a = vreinterpretq_s32_m128i(_a); + int32x4_t b = vreinterpretq_s32_m128i(_b); + + // signed shift right: faster than vclt + // (b < 0) ? 0xFFFFFFFF : 0 + uint32x4_t ltMask = vreinterpretq_u32_s32(vshrq_n_s32(b, 31)); + + // (b == 0) ? 0xFFFFFFFF : 0 +#if defined(__aarch64__) + int32x4_t zeroMask = vreinterpretq_s32_u32(vceqzq_s32(b)); +#else + int32x4_t zeroMask = vreinterpretq_s32_u32(vceqq_s32(b, vdupq_n_s32(0))); +#endif + + // bitwise select either a or negative 'a' (vnegq_s32(a) equals to negative + // 'a') based on ltMask + int32x4_t masked = vbslq_s32(ltMask, vnegq_s32(a), a); + // res = masked & (~zeroMask) + int32x4_t res = vbicq_s32(masked, zeroMask); + return vreinterpretq_m128i_s32(res); +} + +// Negate packed 8-bit integers in a when the corresponding signed +// 8-bit integer in b is negative, and store the results in dst. +// Element in dst are zeroed out when the corresponding element +// in b is zero. // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtsd_f64 -FORCE_INLINE double _mm_cvtsd_f64(__m128d a) +// for i in 0..15 +// if b[i] < 0 +// r[i] := -a[i] +// else if b[i] == 0 +// r[i] := 0 +// else +// r[i] := a[i] +// fi +// done +FORCE_INLINE __m128i _mm_sign_epi8(__m128i _a, __m128i _b) { + int8x16_t a = vreinterpretq_s8_m128i(_a); + int8x16_t b = vreinterpretq_s8_m128i(_b); + + // signed shift right: faster than vclt + // (b < 0) ? 0xFF : 0 + uint8x16_t ltMask = vreinterpretq_u8_s8(vshrq_n_s8(b, 7)); + + // (b == 0) ? 0xFF : 0 #if defined(__aarch64__) - return (double) vgetq_lane_f64(vreinterpretq_f64_m128d(a), 0); + int8x16_t zeroMask = vreinterpretq_s8_u8(vceqzq_s8(b)); #else - return ((double *) &a)[0]; + int8x16_t zeroMask = vreinterpretq_s8_u8(vceqq_s8(b, vdupq_n_s8(0))); #endif + + // bitwise select either a or nagative 'a' (vnegq_s8(a) return nagative 'a') + // based on ltMask + int8x16_t masked = vbslq_s8(ltMask, vnegq_s8(a), a); + // res = masked & (~zeroMask) + int8x16_t res = vbicq_s8(masked, zeroMask); + + return vreinterpretq_m128i_s8(res); } -// Convert packed single-precision (32-bit) floating-point elements in a to -// packed double-precision (64-bit) floating-point elements, and store the -// results in dst. +// Negate packed 16-bit integers in a when the corresponding signed 16-bit +// integer in b is negative, and store the results in dst. Element in dst are +// zeroed out when the corresponding element in b is zero. 
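// Scalar reference for the _mm_sign_* family implemented above: each lane of
// a is negated, zeroed, or passed through depending on the sign of the
// matching lane of b (shown for the 8-bit variant; -128 wraps to -128, as on
// x86). Editorial sketch only.
#include <stdint.h>

static void sign_epi8_ref(int8_t dst[16], const int8_t a[16], const int8_t b[16])
{
    for (int i = 0; i < 16; i++)
        dst[i] = (int8_t)((b[i] < 0) ? -a[i] : (b[i] == 0 ? 0 : a[i]));
}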
// -// FOR j := 0 to 1 -// i := 64*j -// k := 32*j -// dst[i+63:i] := Convert_FP32_To_FP64(a[k+31:k]) +// FOR j := 0 to 3 +// i := j*16 +// IF b[i+15:i] < 0 +// dst[i+15:i] := -(a[i+15:i]) +// ELSE IF b[i+15:i] == 0 +// dst[i+15:i] := 0 +// ELSE +// dst[i+15:i] := a[i+15:i] +// FI // ENDFOR // -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtps_pd -FORCE_INLINE __m128d _mm_cvtps_pd(__m128 a) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sign_pi16 +FORCE_INLINE __m64 _mm_sign_pi16(__m64 _a, __m64 _b) { + int16x4_t a = vreinterpret_s16_m64(_a); + int16x4_t b = vreinterpret_s16_m64(_b); + + // signed shift right: faster than vclt + // (b < 0) ? 0xFFFF : 0 + uint16x4_t ltMask = vreinterpret_u16_s16(vshr_n_s16(b, 15)); + + // (b == 0) ? 0xFFFF : 0 #if defined(__aarch64__) - return vreinterpretq_m128d_f64( - vcvt_f64_f32(vget_low_f32(vreinterpretq_f32_m128(a)))); + int16x4_t zeroMask = vreinterpret_s16_u16(vceqz_s16(b)); #else - double a0 = (double) vgetq_lane_f32(vreinterpretq_f32_m128(a), 0); - double a1 = (double) vgetq_lane_f32(vreinterpretq_f32_m128(a), 1); - return _mm_set_pd(a1, a0); + int16x4_t zeroMask = vreinterpret_s16_u16(vceq_s16(b, vdup_n_s16(0))); #endif -} -// Cast vector of type __m128d to type __m128i. This intrinsic is only used for -// compilation and does not generate any instructions, thus it has zero latency. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castpd_si128 -FORCE_INLINE __m128i _mm_castpd_si128(__m128d a) -{ - return vreinterpretq_m128i_s64(vreinterpretq_s64_m128d(a)); + // bitwise select either a or nagative 'a' (vneg_s16(a) return nagative 'a') + // based on ltMask + int16x4_t masked = vbsl_s16(ltMask, vneg_s16(a), a); + // res = masked & (~zeroMask) + int16x4_t res = vbic_s16(masked, zeroMask); + + return vreinterpret_m64_s16(res); } -// Cast vector of type __m128d to type __m128. This intrinsic is only used for -// compilation and does not generate any instructions, thus it has zero latency. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_castpd_ps -FORCE_INLINE __m128 _mm_castpd_ps(__m128d a) +// Negate packed 32-bit integers in a when the corresponding signed 32-bit +// integer in b is negative, and store the results in dst. Element in dst are +// zeroed out when the corresponding element in b is zero. +// +// FOR j := 0 to 1 +// i := j*32 +// IF b[i+31:i] < 0 +// dst[i+31:i] := -(a[i+31:i]) +// ELSE IF b[i+31:i] == 0 +// dst[i+31:i] := 0 +// ELSE +// dst[i+31:i] := a[i+31:i] +// FI +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sign_pi32 +FORCE_INLINE __m64 _mm_sign_pi32(__m64 _a, __m64 _b) { - return vreinterpretq_m128_s64(vreinterpretq_s64_m128d(a)); + int32x2_t a = vreinterpret_s32_m64(_a); + int32x2_t b = vreinterpret_s32_m64(_b); + + // signed shift right: faster than vclt + // (b < 0) ? 0xFFFFFFFF : 0 + uint32x2_t ltMask = vreinterpret_u32_s32(vshr_n_s32(b, 31)); + + // (b == 0) ? 
0xFFFFFFFF : 0 +#if defined(__aarch64__) + int32x2_t zeroMask = vreinterpret_s32_u32(vceqz_s32(b)); +#else + int32x2_t zeroMask = vreinterpret_s32_u32(vceq_s32(b, vdup_n_s32(0))); +#endif + + // bitwise select either a or nagative 'a' (vneg_s32(a) return nagative 'a') + // based on ltMask + int32x2_t masked = vbsl_s32(ltMask, vneg_s32(a), a); + // res = masked & (~zeroMask) + int32x2_t res = vbic_s32(masked, zeroMask); + + return vreinterpret_m64_s32(res); } -// Blend packed single-precision (32-bit) floating-point elements from a and b -// using mask, and store the results in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_blendv_ps -FORCE_INLINE __m128 _mm_blendv_ps(__m128 _a, __m128 _b, __m128 _mask) +// Negate packed 8-bit integers in a when the corresponding signed 8-bit integer +// in b is negative, and store the results in dst. Element in dst are zeroed out +// when the corresponding element in b is zero. +// +// FOR j := 0 to 7 +// i := j*8 +// IF b[i+7:i] < 0 +// dst[i+7:i] := -(a[i+7:i]) +// ELSE IF b[i+7:i] == 0 +// dst[i+7:i] := 0 +// ELSE +// dst[i+7:i] := a[i+7:i] +// FI +// ENDFOR +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_sign_pi8 +FORCE_INLINE __m64 _mm_sign_pi8(__m64 _a, __m64 _b) { - // Use a signed shift right to create a mask with the sign bit - uint32x4_t mask = - vreinterpretq_u32_s32(vshrq_n_s32(vreinterpretq_s32_m128(_mask), 31)); - float32x4_t a = vreinterpretq_f32_m128(_a); - float32x4_t b = vreinterpretq_f32_m128(_b); - return vreinterpretq_m128_f32(vbslq_f32(mask, b, a)); + int8x8_t a = vreinterpret_s8_m64(_a); + int8x8_t b = vreinterpret_s8_m64(_b); + + // signed shift right: faster than vclt + // (b < 0) ? 0xFF : 0 + uint8x8_t ltMask = vreinterpret_u8_s8(vshr_n_s8(b, 7)); + + // (b == 0) ? 0xFF : 0 +#if defined(__aarch64__) + int8x8_t zeroMask = vreinterpret_s8_u8(vceqz_s8(b)); +#else + int8x8_t zeroMask = vreinterpret_s8_u8(vceq_s8(b, vdup_n_s8(0))); +#endif + + // bitwise select either a or nagative 'a' (vneg_s8(a) return nagative 'a') + // based on ltMask + int8x8_t masked = vbsl_s8(ltMask, vneg_s8(a), a); + // res = masked & (~zeroMask) + int8x8_t res = vbic_s8(masked, zeroMask); + + return vreinterpret_m64_s8(res); } +/* SSE4.1 */ + +// Blend packed 16-bit integers from a and b using control mask imm8, and store +// the results in dst. +// +// FOR j := 0 to 7 +// i := j*16 +// IF imm8[j] +// dst[i+15:i] := b[i+15:i] +// ELSE +// dst[i+15:i] := a[i+15:i] +// FI +// ENDFOR +// FORCE_INLINE __m128i _mm_blend_epi16(__m128i a, __m128i b, +// __constrange(0,255) int imm) +#define _mm_blend_epi16(a, b, imm) \ + __extension__({ \ + const uint16_t ones = 0xffff; \ + const uint16_t zeros = 0x0000; \ + const uint16_t _mask[8] = {((imm) & (1 << 0)) ? ones : zeros, \ + ((imm) & (1 << 1)) ? ones : zeros, \ + ((imm) & (1 << 2)) ? ones : zeros, \ + ((imm) & (1 << 3)) ? ones : zeros, \ + ((imm) & (1 << 4)) ? ones : zeros, \ + ((imm) & (1 << 5)) ? ones : zeros, \ + ((imm) & (1 << 6)) ? ones : zeros, \ + ((imm) & (1 << 7)) ? ones : zeros}; \ + uint16x8_t _mask_vec = vld1q_u16(_mask); \ + uint16x8_t _a = vreinterpretq_u16_m128i(a); \ + uint16x8_t _b = vreinterpretq_u16_m128i(b); \ + vreinterpretq_m128i_u16(vbslq_u16(_mask_vec, _b, _a)); \ + }) + +// Blend packed double-precision (64-bit) floating-point elements from a and b +// using control mask imm8, and store the results in dst. 
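// Example use of the _mm_blend_epi16 macro defined above. imm must be a
// compile-time constant, since it is expanded into the initializer of the
// lane-select mask. Editorial sketch; the include path is an assumption, not
// taken from the patch.
#include "sse2neon.h"

static __m128i blend_low_half(__m128i a, __m128i b)
{
    /* imm = 0x0F: take lanes 0-3 from b, keep lanes 4-7 from a */
    return _mm_blend_epi16(a, b, 0x0F);
}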
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_blend_pd +#define _mm_blend_pd(a, b, imm) \ + __extension__({ \ + const uint64_t _mask[2] = { \ + ((imm) & (1 << 0)) ? ~UINT64_C(0) : UINT64_C(0), \ + ((imm) & (1 << 1)) ? ~UINT64_C(0) : UINT64_C(0)}; \ + uint64x2_t _mask_vec = vld1q_u64(_mask); \ + uint64x2_t _a = vreinterpretq_u64_m128d(a); \ + uint64x2_t _b = vreinterpretq_u64_m128d(b); \ + vreinterpretq_m128d_u64(vbslq_u64(_mask_vec, _b, _a)); \ + }) + // Blend packed single-precision (32-bit) floating-point elements from a and b // using mask, and store the results in dst. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_blend_ps @@ -5672,6 +7337,27 @@ FORCE_INLINE __m128 _mm_blend_ps(__m128 _a, __m128 _b, const char imm8) return vreinterpretq_m128_f32(vbslq_f32(mask, b, a)); } +// Blend packed 8-bit integers from a and b using mask, and store the results in +// dst. +// +// FOR j := 0 to 15 +// i := j*8 +// IF mask[i+7] +// dst[i+7:i] := b[i+7:i] +// ELSE +// dst[i+7:i] := a[i+7:i] +// FI +// ENDFOR +FORCE_INLINE __m128i _mm_blendv_epi8(__m128i _a, __m128i _b, __m128i _mask) +{ + // Use a signed shift right to create a mask with the sign bit + uint8x16_t mask = + vreinterpretq_u8_s8(vshrq_n_s8(vreinterpretq_s8_m128i(_mask), 7)); + uint8x16_t a = vreinterpretq_u8_m128i(_a); + uint8x16_t b = vreinterpretq_u8_m128i(_b); + return vreinterpretq_m128i_u8(vbslq_u8(mask, b, a)); +} + // Blend packed double-precision (64-bit) floating-point elements from a and b // using mask, and store the results in dst. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_blendv_pd @@ -5690,154 +7376,55 @@ FORCE_INLINE __m128d _mm_blendv_pd(__m128d _a, __m128d _b, __m128d _mask) #endif } -typedef struct { - uint16_t res0; - uint8_t res1 : 6; - uint8_t bit22 : 1; - uint8_t bit23 : 1; - uint8_t res2; -#if defined(__aarch64__) - uint32_t res3; -#endif -} fpcr_bitfield; - -// Macro: Set the rounding mode bits of the MXCSR control and status register to -// the value in unsigned 32-bit integer a. The rounding mode may contain any of -// the following flags: _MM_ROUND_NEAREST, _MM_ROUND_DOWN, _MM_ROUND_UP, -// _MM_ROUND_TOWARD_ZERO -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_MM_SET_ROUNDING_MODE -FORCE_INLINE void _MM_SET_ROUNDING_MODE(int rounding) -{ - union { - fpcr_bitfield field; -#if defined(__aarch64__) - uint64_t value; -#else - uint32_t value; -#endif - } r; - -#if defined(__aarch64__) - asm volatile("mrs %0, FPCR" : "=r"(r.value)); /* read */ -#else - asm volatile("vmrs %0, FPSCR" : "=r"(r.value)); /* read */ -#endif - - switch (rounding) { - case _MM_ROUND_TOWARD_ZERO: - r.field.bit22 = 1; - r.field.bit23 = 1; - break; - case _MM_ROUND_DOWN: - r.field.bit22 = 0; - r.field.bit23 = 1; - break; - case _MM_ROUND_UP: - r.field.bit22 = 1; - r.field.bit23 = 0; - break; - default: //_MM_ROUND_NEAREST - r.field.bit22 = 0; - r.field.bit23 = 0; - } - -#if defined(__aarch64__) - asm volatile("msr FPCR, %0" ::"r"(r)); /* write */ -#else - asm volatile("vmsr FPSCR, %0" ::"r"(r)); /* write */ -#endif -} - -FORCE_INLINE void _mm_setcsr(unsigned int a) +// Blend packed single-precision (32-bit) floating-point elements from a and b +// using mask, and store the results in dst. 
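// Scalar model of _mm_blendv_epi8 above: only the sign bit of each mask byte
// matters, which is why the NEON path broadcasts it across the byte with a
// signed shift right by 7 before vbslq_u8. Editorial sketch only.
#include <stdint.h>

static void blendv_epi8_ref(uint8_t dst[16], const uint8_t a[16],
                            const uint8_t b[16], const uint8_t mask[16])
{
    for (int i = 0; i < 16; i++)
        dst[i] = (mask[i] & 0x80) ? b[i] : a[i];
}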
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_blendv_ps +FORCE_INLINE __m128 _mm_blendv_ps(__m128 _a, __m128 _b, __m128 _mask) { - _MM_SET_ROUNDING_MODE(a); + // Use a signed shift right to create a mask with the sign bit + uint32x4_t mask = + vreinterpretq_u32_s32(vshrq_n_s32(vreinterpretq_s32_m128(_mask), 31)); + float32x4_t a = vreinterpretq_f32_m128(_a); + float32x4_t b = vreinterpretq_f32_m128(_b); + return vreinterpretq_m128_f32(vbslq_f32(mask, b, a)); } -// Round the packed single-precision (32-bit) floating-point elements in a using -// the rounding parameter, and store the results as packed single-precision +// Round the packed double-precision (64-bit) floating-point elements in a up +// to an integer value, and store the results as packed double-precision // floating-point elements in dst. -// software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_round_ps -FORCE_INLINE __m128 _mm_round_ps(__m128 a, int rounding) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ceil_pd +FORCE_INLINE __m128d _mm_ceil_pd(__m128d a) { #if defined(__aarch64__) - switch (rounding) { - case (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC): - return vreinterpretq_m128_f32(vrndnq_f32(vreinterpretq_f32_m128(a))); - case (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC): - return vreinterpretq_m128_f32(vrndmq_f32(vreinterpretq_f32_m128(a))); - case (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC): - return vreinterpretq_m128_f32(vrndpq_f32(vreinterpretq_f32_m128(a))); - case (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC): - return vreinterpretq_m128_f32(vrndq_f32(vreinterpretq_f32_m128(a))); - default: //_MM_FROUND_CUR_DIRECTION - return vreinterpretq_m128_f32(vrndiq_f32(vreinterpretq_f32_m128(a))); - } + return vreinterpretq_m128d_f64(vrndpq_f64(vreinterpretq_f64_m128d(a))); #else - float *v_float = (float *) &a; - __m128 zero, neg_inf, pos_inf; - - switch (rounding) { - case (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC): - return _mm_cvtepi32_ps(_mm_cvtps_epi32(a)); - case (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC): - return (__m128){floorf(v_float[0]), floorf(v_float[1]), - floorf(v_float[2]), floorf(v_float[3])}; - case (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC): - return (__m128){ceilf(v_float[0]), ceilf(v_float[1]), ceilf(v_float[2]), - ceilf(v_float[3])}; - case (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC): - zero = _mm_set_ps(0.0f, 0.0f, 0.0f, 0.0f); - neg_inf = _mm_set_ps(floorf(v_float[0]), floorf(v_float[1]), - floorf(v_float[2]), floorf(v_float[3])); - pos_inf = _mm_set_ps(ceilf(v_float[0]), ceilf(v_float[1]), - ceilf(v_float[2]), ceilf(v_float[3])); - return _mm_blendv_ps(pos_inf, neg_inf, _mm_cmple_ps(a, zero)); - default: //_MM_FROUND_CUR_DIRECTION - return (__m128){roundf(v_float[0]), roundf(v_float[1]), - roundf(v_float[2]), roundf(v_float[3])}; - } + double *f = (double *) &a; + return _mm_set_pd(ceil(f[1]), ceil(f[0])); #endif } -// Convert packed single-precision (32-bit) floating-point elements in a to -// packed 32-bit integers, and store the results in dst. -// -// FOR j := 0 to 1 -// i := 32*j -// dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) -// ENDFOR -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvt_ps2pi -FORCE_INLINE __m64 _mm_cvt_ps2pi(__m128 a) +// Round the packed single-precision (32-bit) floating-point elements in a up to +// an integer value, and store the results as packed single-precision +// floating-point elements in dst. 
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ceil_ps +FORCE_INLINE __m128 _mm_ceil_ps(__m128 a) { #if defined(__aarch64__) - return vreinterpret_m64_s32( - vget_low_s32(vcvtnq_s32_f32(vreinterpretq_f32_m128(a)))); + return vreinterpretq_m128_f32(vrndpq_f32(vreinterpretq_f32_m128(a))); #else - return vreinterpret_m64_s32( - vcvt_s32_f32(vget_low_f32(vreinterpretq_f32_m128( - _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC))))); + float *f = (float *) &a; + return _mm_set_ps(ceilf(f[3]), ceilf(f[2]), ceilf(f[1]), ceilf(f[0])); #endif } -// Convert packed single-precision (32-bit) floating-point elements in a to -// packed 32-bit integers, and store the results in dst. -// -// FOR j := 0 to 1 -// i := 32*j -// dst[i+31:i] := Convert_FP32_To_Int32(a[i+31:i]) -// ENDFOR -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtps_pi32 -#define _mm_cvtps_pi32(a) _mm_cvt_ps2pi(a) - -// Round the packed single-precision (32-bit) floating-point elements in a up to -// an integer value, and store the results as packed single-precision -// floating-point elements in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ceil_ps -FORCE_INLINE __m128 _mm_ceil_ps(__m128 a) +// Round the lower double-precision (64-bit) floating-point element in b up to +// an integer value, store the result as a double-precision floating-point +// element in the lower element of dst, and copy the upper element from a to the +// upper element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ceil_sd +FORCE_INLINE __m128d _mm_ceil_sd(__m128d a, __m128d b) { - return _mm_round_ps(a, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC); + return _mm_move_sd(a, _mm_ceil_pd(b)); } // Round the lower single-precision (32-bit) floating-point element in b up to @@ -5851,396 +7438,442 @@ FORCE_INLINE __m128 _mm_ceil_ps(__m128 a) // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_ceil_ss FORCE_INLINE __m128 _mm_ceil_ss(__m128 a, __m128 b) { - return _mm_move_ss( - a, _mm_round_ps(b, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC)); + return _mm_move_ss(a, _mm_ceil_ps(b)); } -// Round the packed single-precision (32-bit) floating-point elements in a down -// to an integer value, and store the results as packed single-precision -// floating-point elements in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_floor_ps -FORCE_INLINE __m128 _mm_floor_ps(__m128 a) +// Compare packed 64-bit integers in a and b for equality, and store the results +// in dst +FORCE_INLINE __m128i _mm_cmpeq_epi64(__m128i a, __m128i b) { - return _mm_round_ps(a, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); +#if defined(__aarch64__) + return vreinterpretq_m128i_u64( + vceqq_u64(vreinterpretq_u64_m128i(a), vreinterpretq_u64_m128i(b))); +#else + // ARMv7 lacks vceqq_u64 + // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi) + uint32x4_t cmp = + vceqq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b)); + uint32x4_t swapped = vrev64q_u32(cmp); + return vreinterpretq_m128i_u32(vandq_u32(cmp, swapped)); +#endif } -// Round the lower single-precision (32-bit) floating-point element in b down to -// an integer value, store the result as a single-precision floating-point -// element in the lower element of dst, and copy the upper 3 packed elements -// from a to the upper elements of dst. 
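// Scalar model of the ARMv7 _mm_cmpeq_epi64 fallback above: a 64-bit lane is
// equal exactly when both of its 32-bit halves are equal, so the 32-bit
// compare result is ANDed with a copy whose halves are swapped within each
// 64-bit lane (vrev64q_u32). Editorial sketch for a single lane.
#include <stdint.h>

static uint64_t cmpeq_epi64_lane_ref(uint64_t a, uint64_t b)
{
    uint32_t lo = ((uint32_t)a == (uint32_t)b) ? 0xFFFFFFFFu : 0u;
    uint32_t hi = ((uint32_t)(a >> 32) == (uint32_t)(b >> 32)) ? 0xFFFFFFFFu : 0u;
    uint32_t both = lo & hi;               /* cmp AND swapped(cmp)          */
    return ((uint64_t)both << 32) | both;  /* replicate into the whole lane */
}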
-// -// dst[31:0] := FLOOR(b[31:0]) -// dst[127:32] := a[127:32] -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_floor_ss -FORCE_INLINE __m128 _mm_floor_ss(__m128 a, __m128 b) +// Converts the four signed 16-bit integers in the lower 64 bits to four signed +// 32-bit integers. +FORCE_INLINE __m128i _mm_cvtepi16_epi32(__m128i a) { - return _mm_move_ss( - a, _mm_round_ps(b, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC)); + return vreinterpretq_m128i_s32( + vmovl_s16(vget_low_s16(vreinterpretq_s16_m128i(a)))); } -// Load 128-bits of integer data from unaligned memory into dst. This intrinsic -// may perform better than _mm_loadu_si128 when the data crosses a cache line -// boundary. -// -// dst[127:0] := MEM[mem_addr+127:mem_addr] -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_lddqu_si128 -#define _mm_lddqu_si128 _mm_loadu_si128 +// Converts the two signed 16-bit integers in the lower 32 bits two signed +// 32-bit integers. +FORCE_INLINE __m128i _mm_cvtepi16_epi64(__m128i a) +{ + int16x8_t s16x8 = vreinterpretq_s16_m128i(a); /* xxxx xxxx xxxx 0B0A */ + int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */ + int64x2_t s64x2 = vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */ + return vreinterpretq_m128i_s64(s64x2); +} -/* Miscellaneous Operations */ +// Converts the two signed 32-bit integers in the lower 64 bits to two signed +// 64-bit integers. +FORCE_INLINE __m128i _mm_cvtepi32_epi64(__m128i a) +{ + return vreinterpretq_m128i_s64( + vmovl_s32(vget_low_s32(vreinterpretq_s32_m128i(a)))); +} -// Shifts the 8 signed 16-bit integers in a right by count bits while shifting -// in the sign bit. -// -// r0 := a0 >> count -// r1 := a1 >> count -// ... -// r7 := a7 >> count -// -// https://msdn.microsoft.com/en-us/library/3c9997dk(v%3dvs.90).aspx -FORCE_INLINE __m128i _mm_sra_epi16(__m128i a, __m128i count) +// Converts the four unsigned 8-bit integers in the lower 16 bits to four +// unsigned 32-bit integers. +FORCE_INLINE __m128i _mm_cvtepi8_epi16(__m128i a) { - int64_t c = (int64_t) vget_low_s64((int64x2_t) count); - if (unlikely(c > 15)) - return _mm_cmplt_epi16(a, _mm_setzero_si128()); - return vreinterpretq_m128i_s16(vshlq_s16((int16x8_t) a, vdupq_n_s16(-c))); + int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx DCBA */ + int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0D0C 0B0A */ + return vreinterpretq_m128i_s16(s16x8); } -// Shifts the 4 signed 32-bit integers in a right by count bits while shifting -// in the sign bit. -// -// r0 := a0 >> count -// r1 := a1 >> count -// r2 := a2 >> count -// r3 := a3 >> count -// -// https://msdn.microsoft.com/en-us/library/ce40009e(v%3dvs.100).aspx -FORCE_INLINE __m128i _mm_sra_epi32(__m128i a, __m128i count) +// Converts the four unsigned 8-bit integers in the lower 32 bits to four +// unsigned 32-bit integers. +FORCE_INLINE __m128i _mm_cvtepi8_epi32(__m128i a) { - int64_t c = (int64_t) vget_low_s64((int64x2_t) count); - if (unlikely(c > 31)) - return _mm_cmplt_epi32(a, _mm_setzero_si128()); - return vreinterpretq_m128i_s32(vshlq_s32((int32x4_t) a, vdupq_n_s32(-c))); + int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx DCBA */ + int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0D0C 0B0A */ + int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000D 000C 000B 000A */ + return vreinterpretq_m128i_s32(s32x4); } -// Packs the 16 signed 16-bit integers from a and b into 8-bit integers and -// saturates. 
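// Scalar reference for _mm_cvtepi8_epi16 above: the low eight signed 8-bit
// integers of a are sign-extended to 16 bits, which is what vmovl_s8 of the
// low half performs, even though the comment above describes it in unsigned
// terms. Editorial sketch only.
#include <stdint.h>

static void cvtepi8_epi16_ref(int16_t dst[8], const int8_t a_low[8])
{
    for (int i = 0; i < 8; i++)
        dst[i] = (int16_t)a_low[i];   /* sign extension */
}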
-// https://msdn.microsoft.com/en-us/library/k4y4f7w5%28v=vs.90%29.aspx -FORCE_INLINE __m128i _mm_packs_epi16(__m128i a, __m128i b) +// Converts the two signed 8-bit integers in the lower 32 bits to four +// signed 64-bit integers. +FORCE_INLINE __m128i _mm_cvtepi8_epi64(__m128i a) { - return vreinterpretq_m128i_s8( - vcombine_s8(vqmovn_s16(vreinterpretq_s16_m128i(a)), - vqmovn_s16(vreinterpretq_s16_m128i(b)))); + int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx xxBA */ + int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0x0x 0B0A */ + int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */ + int64x2_t s64x2 = vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */ + return vreinterpretq_m128i_s64(s64x2); } -// Packs the 16 signed 16 - bit integers from a and b into 8 - bit unsigned -// integers and saturates. -// -// r0 := UnsignedSaturate(a0) -// r1 := UnsignedSaturate(a1) -// ... -// r7 := UnsignedSaturate(a7) -// r8 := UnsignedSaturate(b0) -// r9 := UnsignedSaturate(b1) -// ... -// r15 := UnsignedSaturate(b7) -// -// https://msdn.microsoft.com/en-us/library/07ad1wx4(v=vs.100).aspx -FORCE_INLINE __m128i _mm_packus_epi16(const __m128i a, const __m128i b) +// Converts the four unsigned 16-bit integers in the lower 64 bits to four +// unsigned 32-bit integers. +FORCE_INLINE __m128i _mm_cvtepu16_epi32(__m128i a) { - return vreinterpretq_m128i_u8( - vcombine_u8(vqmovun_s16(vreinterpretq_s16_m128i(a)), - vqmovun_s16(vreinterpretq_s16_m128i(b)))); + return vreinterpretq_m128i_u32( + vmovl_u16(vget_low_u16(vreinterpretq_u16_m128i(a)))); } -// Packs the 8 signed 32-bit integers from a and b into signed 16-bit integers -// and saturates. -// -// r0 := SignedSaturate(a0) -// r1 := SignedSaturate(a1) -// r2 := SignedSaturate(a2) -// r3 := SignedSaturate(a3) -// r4 := SignedSaturate(b0) -// r5 := SignedSaturate(b1) -// r6 := SignedSaturate(b2) -// r7 := SignedSaturate(b3) -// -// https://msdn.microsoft.com/en-us/library/393t56f9%28v=vs.90%29.aspx -FORCE_INLINE __m128i _mm_packs_epi32(__m128i a, __m128i b) +// Converts the two unsigned 16-bit integers in the lower 32 bits to two +// unsigned 64-bit integers. +FORCE_INLINE __m128i _mm_cvtepu16_epi64(__m128i a) { - return vreinterpretq_m128i_s16( - vcombine_s16(vqmovn_s32(vreinterpretq_s32_m128i(a)), - vqmovn_s32(vreinterpretq_s32_m128i(b)))); + uint16x8_t u16x8 = vreinterpretq_u16_m128i(a); /* xxxx xxxx xxxx 0B0A */ + uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */ + uint64x2_t u64x2 = vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */ + return vreinterpretq_m128i_u64(u64x2); } -// Packs the 8 unsigned 32-bit integers from a and b into unsigned 16-bit -// integers and saturates. -// -// r0 := UnsignedSaturate(a0) -// r1 := UnsignedSaturate(a1) -// r2 := UnsignedSaturate(a2) -// r3 := UnsignedSaturate(a3) -// r4 := UnsignedSaturate(b0) -// r5 := UnsignedSaturate(b1) -// r6 := UnsignedSaturate(b2) -// r7 := UnsignedSaturate(b3) -FORCE_INLINE __m128i _mm_packus_epi32(__m128i a, __m128i b) +// Converts the two unsigned 32-bit integers in the lower 64 bits to two +// unsigned 64-bit integers. 
+FORCE_INLINE __m128i _mm_cvtepu32_epi64(__m128i a) { - return vreinterpretq_m128i_u16( - vcombine_u16(vqmovun_s32(vreinterpretq_s32_m128i(a)), - vqmovun_s32(vreinterpretq_s32_m128i(b)))); + return vreinterpretq_m128i_u64( + vmovl_u32(vget_low_u32(vreinterpretq_u32_m128i(a)))); } -// Interleaves the lower 8 signed or unsigned 8-bit integers in a with the lower -// 8 signed or unsigned 8-bit integers in b. -// -// r0 := a0 -// r1 := b0 -// r2 := a1 -// r3 := b1 -// ... -// r14 := a7 -// r15 := b7 -// -// https://msdn.microsoft.com/en-us/library/xf7k860c%28v=vs.90%29.aspx -FORCE_INLINE __m128i _mm_unpacklo_epi8(__m128i a, __m128i b) +// Zero extend packed unsigned 8-bit integers in a to packed 16-bit integers, +// and store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtepu8_epi16 +FORCE_INLINE __m128i _mm_cvtepu8_epi16(__m128i a) { -#if defined(__aarch64__) - return vreinterpretq_m128i_s8( - vzip1q_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); -#else - int8x8_t a1 = vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(a))); - int8x8_t b1 = vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(b))); - int8x8x2_t result = vzip_s8(a1, b1); - return vreinterpretq_m128i_s8(vcombine_s8(result.val[0], result.val[1])); -#endif + uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx HGFE DCBA */ + uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0H0G 0F0E 0D0C 0B0A */ + return vreinterpretq_m128i_u16(u16x8); } -// Interleaves the lower 4 signed or unsigned 16-bit integers in a with the -// lower 4 signed or unsigned 16-bit integers in b. -// -// r0 := a0 -// r1 := b0 -// r2 := a1 -// r3 := b1 -// r4 := a2 -// r5 := b2 -// r6 := a3 -// r7 := b3 -// -// https://msdn.microsoft.com/en-us/library/btxb17bw%28v=vs.90%29.aspx -FORCE_INLINE __m128i _mm_unpacklo_epi16(__m128i a, __m128i b) +// Converts the four unsigned 8-bit integers in the lower 32 bits to four +// unsigned 32-bit integers. +// https://msdn.microsoft.com/en-us/library/bb531467%28v=vs.100%29.aspx +FORCE_INLINE __m128i _mm_cvtepu8_epi32(__m128i a) { -#if defined(__aarch64__) - return vreinterpretq_m128i_s16( - vzip1q_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); -#else - int16x4_t a1 = vget_low_s16(vreinterpretq_s16_m128i(a)); - int16x4_t b1 = vget_low_s16(vreinterpretq_s16_m128i(b)); - int16x4x2_t result = vzip_s16(a1, b1); - return vreinterpretq_m128i_s16(vcombine_s16(result.val[0], result.val[1])); -#endif + uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx DCBA */ + uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0D0C 0B0A */ + uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000D 000C 000B 000A */ + return vreinterpretq_m128i_u32(u32x4); } -// Interleaves the lower 2 signed or unsigned 32 - bit integers in a with the -// lower 2 signed or unsigned 32 - bit integers in b. -// -// r0 := a0 -// r1 := b0 -// r2 := a1 -// r3 := b1 -// -// https://msdn.microsoft.com/en-us/library/x8atst9d(v=vs.100).aspx -FORCE_INLINE __m128i _mm_unpacklo_epi32(__m128i a, __m128i b) +// Converts the two unsigned 8-bit integers in the lower 16 bits to two +// unsigned 64-bit integers. 
+FORCE_INLINE __m128i _mm_cvtepu8_epi64(__m128i a) +{ + uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx xxBA */ + uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0x0x 0B0A */ + uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */ + uint64x2_t u64x2 = vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */ + return vreinterpretq_m128i_u64(u64x2); +} + +// Conditionally multiply the packed double-precision (64-bit) floating-point +// elements in a and b using the high 4 bits in imm8, sum the four products, and +// conditionally store the sum in dst using the low 4 bits of imm8. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_dp_pd +FORCE_INLINE __m128d _mm_dp_pd(__m128d a, __m128d b, const int imm) { + // Generate mask value from constant immediate bit value + const int64_t bit0Mask = imm & 0x01 ? UINT64_MAX : 0; + const int64_t bit1Mask = imm & 0x02 ? UINT64_MAX : 0; +#if !SSE2NEON_PRECISE_DP + const int64_t bit4Mask = imm & 0x10 ? UINT64_MAX : 0; + const int64_t bit5Mask = imm & 0x20 ? UINT64_MAX : 0; +#endif + // Conditional multiplication +#if !SSE2NEON_PRECISE_DP + __m128d mul = _mm_mul_pd(a, b); + const __m128d mulMask = + _mm_castsi128_pd(_mm_set_epi64x(bit5Mask, bit4Mask)); + __m128d tmp = _mm_and_pd(mul, mulMask); +#else #if defined(__aarch64__) - return vreinterpretq_m128i_s32( - vzip1q_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); + double d0 = (imm & 0x10) ? vgetq_lane_f64(vreinterpretq_f64_m128d(a), 0) * + vgetq_lane_f64(vreinterpretq_f64_m128d(b), 0) + : 0; + double d1 = (imm & 0x20) ? vgetq_lane_f64(vreinterpretq_f64_m128d(a), 1) * + vgetq_lane_f64(vreinterpretq_f64_m128d(b), 1) + : 0; #else - int32x2_t a1 = vget_low_s32(vreinterpretq_s32_m128i(a)); - int32x2_t b1 = vget_low_s32(vreinterpretq_s32_m128i(b)); - int32x2x2_t result = vzip_s32(a1, b1); - return vreinterpretq_m128i_s32(vcombine_s32(result.val[0], result.val[1])); + double d0 = (imm & 0x10) ? ((double *) &a)[0] * ((double *) &b)[0] : 0; + double d1 = (imm & 0x20) ? ((double *) &a)[1] * ((double *) &b)[1] : 0; +#endif + __m128d tmp = _mm_set_pd(d1, d0); #endif + // Sum the products +#if defined(__aarch64__) + double sum = vpaddd_f64(vreinterpretq_f64_m128d(tmp)); +#else + double sum = *((double *) &tmp) + *(((double *) &tmp) + 1); +#endif + // Conditionally store the sum + const __m128d sumMask = + _mm_castsi128_pd(_mm_set_epi64x(bit1Mask, bit0Mask)); + __m128d res = _mm_and_pd(_mm_set_pd1(sum), sumMask); + return res; } -FORCE_INLINE __m128i _mm_unpacklo_epi64(__m128i a, __m128i b) +// Conditionally multiply the packed single-precision (32-bit) floating-point +// elements in a and b using the high 4 bits in imm8, sum the four products, +// and conditionally store the sum in dst using the low 4 bits of imm. 
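// Scalar reference for the imm8 handling in _mm_dp_pd above: bits 4 and 5
// choose which products contribute to the sum, bits 0 and 1 choose which
// result lanes receive it. Editorial sketch for cross-checking only.
static void dp_pd_ref(double dst[2], const double a[2], const double b[2], int imm)
{
    double sum = 0.0;
    if (imm & 0x10) sum += a[0] * b[0];
    if (imm & 0x20) sum += a[1] * b[1];
    dst[0] = (imm & 0x01) ? sum : 0.0;
    dst[1] = (imm & 0x02) ? sum : 0.0;
}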
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_dp_ps +FORCE_INLINE __m128 _mm_dp_ps(__m128 a, __m128 b, const int imm) { - int64x1_t a_l = vget_low_s64(vreinterpretq_s64_m128i(a)); - int64x1_t b_l = vget_low_s64(vreinterpretq_s64_m128i(b)); - return vreinterpretq_m128i_s64(vcombine_s64(a_l, b_l)); +#if defined(__aarch64__) + /* shortcuts */ + if (imm == 0xFF) { + return _mm_set1_ps(vaddvq_f32(_mm_mul_ps(a, b))); + } + if (imm == 0x7F) { + float32x4_t m = _mm_mul_ps(a, b); + m[3] = 0; + return _mm_set1_ps(vaddvq_f32(m)); + } +#endif + + float s = 0, c = 0; + float32x4_t f32a = vreinterpretq_f32_m128(a); + float32x4_t f32b = vreinterpretq_f32_m128(b); + + /* To improve the accuracy of floating-point summation, Kahan algorithm + * is used for each operation. + */ + if (imm & (1 << 4)) + _sse2neon_kadd_f32(&s, &c, f32a[0] * f32b[0]); + if (imm & (1 << 5)) + _sse2neon_kadd_f32(&s, &c, f32a[1] * f32b[1]); + if (imm & (1 << 6)) + _sse2neon_kadd_f32(&s, &c, f32a[2] * f32b[2]); + if (imm & (1 << 7)) + _sse2neon_kadd_f32(&s, &c, f32a[3] * f32b[3]); + s += c; + + float32x4_t res = { + (imm & 0x1) ? s : 0, + (imm & 0x2) ? s : 0, + (imm & 0x4) ? s : 0, + (imm & 0x8) ? s : 0, + }; + return vreinterpretq_m128_f32(res); } -// Selects and interleaves the lower two single-precision, floating-point values -// from a and b. -// -// r0 := a0 -// r1 := b0 -// r2 := a1 -// r3 := b1 -// -// https://msdn.microsoft.com/en-us/library/25st103b%28v=vs.90%29.aspx -FORCE_INLINE __m128 _mm_unpacklo_ps(__m128 a, __m128 b) +// Extracts the selected signed or unsigned 32-bit integer from a and zero +// extends. +// FORCE_INLINE int _mm_extract_epi32(__m128i a, __constrange(0,4) int imm) +#define _mm_extract_epi32(a, imm) \ + vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm)) + +// Extracts the selected signed or unsigned 64-bit integer from a and zero +// extends. +// FORCE_INLINE __int64 _mm_extract_epi64(__m128i a, __constrange(0,2) int imm) +#define _mm_extract_epi64(a, imm) \ + vgetq_lane_s64(vreinterpretq_s64_m128i(a), (imm)) + +// Extracts the selected signed or unsigned 8-bit integer from a and zero +// extends. +// FORCE_INLINE int _mm_extract_epi8(__m128i a, __constrange(0,16) int imm) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_extract_epi8 +#define _mm_extract_epi8(a, imm) vgetq_lane_u8(vreinterpretq_u8_m128i(a), (imm)) + +// Extracts the selected single-precision (32-bit) floating-point from a. +// FORCE_INLINE int _mm_extract_ps(__m128 a, __constrange(0,4) int imm) +#define _mm_extract_ps(a, imm) vgetq_lane_s32(vreinterpretq_s32_m128(a), (imm)) + +// Round the packed double-precision (64-bit) floating-point elements in a down +// to an integer value, and store the results as packed double-precision +// floating-point elements in dst. 
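// _mm_dp_ps above accumulates the selected products through the
// _sse2neon_kadd_f32 helper, which is defined elsewhere in the header. The
// snippet below is only an illustrative model of one compensated (Kahan)
// accumulation step, not the header's actual implementation.
static void kahan_add_step(float *sum, float *comp, float x)
{
    float y = x - *comp;       /* remove the previously lost low-order bits */
    float t = *sum + y;        /* this add may again lose low-order bits    */
    *comp = (t - *sum) - y;    /* capture what was lost for the next step   */
    *sum = t;
}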
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_floor_pd +FORCE_INLINE __m128d _mm_floor_pd(__m128d a) { #if defined(__aarch64__) - return vreinterpretq_m128_f32( - vzip1q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); + return vreinterpretq_m128d_f64(vrndmq_f64(vreinterpretq_f64_m128d(a))); #else - float32x2_t a1 = vget_low_f32(vreinterpretq_f32_m128(a)); - float32x2_t b1 = vget_low_f32(vreinterpretq_f32_m128(b)); - float32x2x2_t result = vzip_f32(a1, b1); - return vreinterpretq_m128_f32(vcombine_f32(result.val[0], result.val[1])); + double *f = (double *) &a; + return _mm_set_pd(floor(f[1]), floor(f[0])); #endif } -// Unpack and interleave double-precision (64-bit) floating-point elements from -// the low half of a and b, and store the results in dst. -// -// DEFINE INTERLEAVE_QWORDS(src1[127:0], src2[127:0]) { -// dst[63:0] := src1[63:0] -// dst[127:64] := src2[63:0] -// RETURN dst[127:0] -// } -// dst[127:0] := INTERLEAVE_QWORDS(a[127:0], b[127:0]) -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpacklo_pd -FORCE_INLINE __m128d _mm_unpacklo_pd(__m128d a, __m128d b) +// Round the packed single-precision (32-bit) floating-point elements in a down +// to an integer value, and store the results as packed single-precision +// floating-point elements in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_floor_ps +FORCE_INLINE __m128 _mm_floor_ps(__m128 a) { #if defined(__aarch64__) - return vreinterpretq_m128d_f64( - vzip1q_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); + return vreinterpretq_m128_f32(vrndmq_f32(vreinterpretq_f32_m128(a))); #else - return vreinterpretq_m128d_s64( - vcombine_s64(vget_low_s64(vreinterpretq_s64_m128d(a)), - vget_low_s64(vreinterpretq_s64_m128d(b)))); + float *f = (float *) &a; + return _mm_set_ps(floorf(f[3]), floorf(f[2]), floorf(f[1]), floorf(f[0])); #endif } -// Unpack and interleave double-precision (64-bit) floating-point elements from -// the high half of a and b, and store the results in dst. -// -// DEFINE INTERLEAVE_HIGH_QWORDS(src1[127:0], src2[127:0]) { -// dst[63:0] := src1[127:64] -// dst[127:64] := src2[127:64] -// RETURN dst[127:0] -// } -// dst[127:0] := INTERLEAVE_HIGH_QWORDS(a[127:0], b[127:0]) -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_unpackhi_pd -FORCE_INLINE __m128d _mm_unpackhi_pd(__m128d a, __m128d b) +// Round the lower double-precision (64-bit) floating-point element in b down to +// an integer value, store the result as a double-precision floating-point +// element in the lower element of dst, and copy the upper element from a to the +// upper element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_floor_sd +FORCE_INLINE __m128d _mm_floor_sd(__m128d a, __m128d b) { -#if defined(__aarch64__) - return vreinterpretq_m128d_f64( - vzip2q_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b))); -#else - return vreinterpretq_m128d_s64( - vcombine_s64(vget_high_s64(vreinterpretq_s64_m128d(a)), - vget_high_s64(vreinterpretq_s64_m128d(b)))); -#endif + return _mm_move_sd(a, _mm_floor_pd(b)); } -// Selects and interleaves the upper two single-precision, floating-point values -// from a and b. 
+// Round the lower single-precision (32-bit) floating-point element in b down to +// an integer value, store the result as a single-precision floating-point +// element in the lower element of dst, and copy the upper 3 packed elements +// from a to the upper elements of dst. // -// r0 := a2 -// r1 := b2 -// r2 := a3 -// r3 := b3 +// dst[31:0] := FLOOR(b[31:0]) +// dst[127:32] := a[127:32] // -// https://msdn.microsoft.com/en-us/library/skccxx7d%28v=vs.90%29.aspx -FORCE_INLINE __m128 _mm_unpackhi_ps(__m128 a, __m128 b) +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_floor_ss +FORCE_INLINE __m128 _mm_floor_ss(__m128 a, __m128 b) { -#if defined(__aarch64__) - return vreinterpretq_m128_f32( - vzip2q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))); -#else - float32x2_t a1 = vget_high_f32(vreinterpretq_f32_m128(a)); - float32x2_t b1 = vget_high_f32(vreinterpretq_f32_m128(b)); - float32x2x2_t result = vzip_f32(a1, b1); - return vreinterpretq_m128_f32(vcombine_f32(result.val[0], result.val[1])); -#endif + return _mm_move_ss(a, _mm_floor_ps(b)); } -// Interleaves the upper 8 signed or unsigned 8-bit integers in a with the upper -// 8 signed or unsigned 8-bit integers in b. +// Inserts the least significant 32 bits of b into the selected 32-bit integer +// of a. +// FORCE_INLINE __m128i _mm_insert_epi32(__m128i a, int b, +// __constrange(0,4) int imm) +#define _mm_insert_epi32(a, b, imm) \ + __extension__({ \ + vreinterpretq_m128i_s32( \ + vsetq_lane_s32((b), vreinterpretq_s32_m128i(a), (imm))); \ + }) + +// Inserts the least significant 64 bits of b into the selected 64-bit integer +// of a. +// FORCE_INLINE __m128i _mm_insert_epi64(__m128i a, __int64 b, +// __constrange(0,2) int imm) +#define _mm_insert_epi64(a, b, imm) \ + __extension__({ \ + vreinterpretq_m128i_s64( \ + vsetq_lane_s64((b), vreinterpretq_s64_m128i(a), (imm))); \ + }) + +// Inserts the least significant 8 bits of b into the selected 8-bit integer +// of a. +// FORCE_INLINE __m128i _mm_insert_epi8(__m128i a, int b, +// __constrange(0,16) int imm) +#define _mm_insert_epi8(a, b, imm) \ + __extension__({ \ + vreinterpretq_m128i_s8( \ + vsetq_lane_s8((b), vreinterpretq_s8_m128i(a), (imm))); \ + }) + +// Copy a to tmp, then insert a single-precision (32-bit) floating-point +// element from b into tmp using the control in imm8. Store tmp to dst using +// the mask in imm8 (elements are zeroed out when the corresponding bit is set). +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=insert_ps +#define _mm_insert_ps(a, b, imm8) \ + __extension__({ \ + float32x4_t tmp1 = \ + vsetq_lane_f32(vgetq_lane_f32(b, (imm8 >> 6) & 0x3), \ + vreinterpretq_f32_m128(a), 0); \ + float32x4_t tmp2 = \ + vsetq_lane_f32(vgetq_lane_f32(tmp1, 0), vreinterpretq_f32_m128(a), \ + ((imm8 >> 4) & 0x3)); \ + const uint32_t data[4] = {((imm8) & (1 << 0)) ? UINT32_MAX : 0, \ + ((imm8) & (1 << 1)) ? UINT32_MAX : 0, \ + ((imm8) & (1 << 2)) ? UINT32_MAX : 0, \ + ((imm8) & (1 << 3)) ? UINT32_MAX : 0}; \ + uint32x4_t mask = vld1q_u32(data); \ + float32x4_t all_zeros = vdupq_n_f32(0); \ + \ + vreinterpretq_m128_f32( \ + vbslq_f32(mask, all_zeros, vreinterpretq_f32_m128(tmp2))); \ + }) + +// epi versions of min/max +// Computes the pariwise maximums of the four signed 32-bit integer values of a +// and b. // -// r0 := a8 -// r1 := b8 -// r2 := a9 -// r3 := b9 -// ... -// r14 := a15 -// r15 := b15 +// A 128-bit parameter that can be defined with the following equations: +// r0 := (a0 > b0) ? 
a0 : b0 +// r1 := (a1 > b1) ? a1 : b1 +// r2 := (a2 > b2) ? a2 : b2 +// r3 := (a3 > b3) ? a3 : b3 // -// https://msdn.microsoft.com/en-us/library/t5h7783k(v=vs.100).aspx -FORCE_INLINE __m128i _mm_unpackhi_epi8(__m128i a, __m128i b) +// https://msdn.microsoft.com/en-us/library/vstudio/bb514055(v=vs.100).aspx +FORCE_INLINE __m128i _mm_max_epi32(__m128i a, __m128i b) +{ + return vreinterpretq_m128i_s32( + vmaxq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); +} + +// Compare packed signed 8-bit integers in a and b, and store packed maximum +// values in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epi8 +FORCE_INLINE __m128i _mm_max_epi8(__m128i a, __m128i b) { -#if defined(__aarch64__) return vreinterpretq_m128i_s8( - vzip2q_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); -#else - int8x8_t a1 = - vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(a))); - int8x8_t b1 = - vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(b))); - int8x8x2_t result = vzip_s8(a1, b1); - return vreinterpretq_m128i_s8(vcombine_s8(result.val[0], result.val[1])); -#endif + vmaxq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); } -// Interleaves the upper 4 signed or unsigned 16-bit integers in a with the -// upper 4 signed or unsigned 16-bit integers in b. +// Compare packed unsigned 16-bit integers in a and b, and store packed maximum +// values in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epu16 +FORCE_INLINE __m128i _mm_max_epu16(__m128i a, __m128i b) +{ + return vreinterpretq_m128i_u16( + vmaxq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b))); +} + +// Compare packed unsigned 32-bit integers in a and b, and store packed maximum +// values in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epu32 +FORCE_INLINE __m128i _mm_max_epu32(__m128i a, __m128i b) +{ + return vreinterpretq_m128i_u32( + vmaxq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b))); +} + +// Computes the pariwise minima of the four signed 32-bit integer values of a +// and b. // -// r0 := a4 -// r1 := b4 -// r2 := a5 -// r3 := b5 -// r4 := a6 -// r5 := b6 -// r6 := a7 -// r7 := b7 +// A 128-bit parameter that can be defined with the following equations: +// r0 := (a0 < b0) ? a0 : b0 +// r1 := (a1 < b1) ? a1 : b1 +// r2 := (a2 < b2) ? a2 : b2 +// r3 := (a3 < b3) ? a3 : b3 // -// https://msdn.microsoft.com/en-us/library/03196cz7(v=vs.100).aspx -FORCE_INLINE __m128i _mm_unpackhi_epi16(__m128i a, __m128i b) +// https://msdn.microsoft.com/en-us/library/vstudio/bb531476(v=vs.100).aspx +FORCE_INLINE __m128i _mm_min_epi32(__m128i a, __m128i b) { -#if defined(__aarch64__) - return vreinterpretq_m128i_s16( - vzip2q_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b))); -#else - int16x4_t a1 = vget_high_s16(vreinterpretq_s16_m128i(a)); - int16x4_t b1 = vget_high_s16(vreinterpretq_s16_m128i(b)); - int16x4x2_t result = vzip_s16(a1, b1); - return vreinterpretq_m128i_s16(vcombine_s16(result.val[0], result.val[1])); -#endif + return vreinterpretq_m128i_s32( + vminq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); } -// Interleaves the upper 2 signed or unsigned 32-bit integers in a with the -// upper 2 signed or unsigned 32-bit integers in b. 
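// Illustrative usage sketch (not part of the vendored header): the SSE4.1-style
// _mm_min_epi32/_mm_max_epi32 emulations above map directly onto vminq_s32/vmaxq_s32,
// so a per-lane clamp composes the two in the usual way.
static inline __m128i clamp_epi32_example(__m128i v, __m128i lo, __m128i hi)
{
    return _mm_min_epi32(_mm_max_epi32(v, lo), hi); /* lane-wise min(max(v, lo), hi) */
}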
-// https://msdn.microsoft.com/en-us/library/65sa7cbs(v=vs.100).aspx -FORCE_INLINE __m128i _mm_unpackhi_epi32(__m128i a, __m128i b) +// Compare packed signed 8-bit integers in a and b, and store packed minimum +// values in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_epi8 +FORCE_INLINE __m128i _mm_min_epi8(__m128i a, __m128i b) { -#if defined(__aarch64__) - return vreinterpretq_m128i_s32( - vzip2q_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); -#else - int32x2_t a1 = vget_high_s32(vreinterpretq_s32_m128i(a)); - int32x2_t b1 = vget_high_s32(vreinterpretq_s32_m128i(b)); - int32x2x2_t result = vzip_s32(a1, b1); - return vreinterpretq_m128i_s32(vcombine_s32(result.val[0], result.val[1])); -#endif + return vreinterpretq_m128i_s8( + vminq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b))); } -// Interleaves the upper signed or unsigned 64-bit integer in a with the -// upper signed or unsigned 64-bit integer in b. -// -// r0 := a1 -// r1 := b1 -FORCE_INLINE __m128i _mm_unpackhi_epi64(__m128i a, __m128i b) +// Compare packed unsigned 16-bit integers in a and b, and store packed minimum +// values in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_min_epu16 +FORCE_INLINE __m128i _mm_min_epu16(__m128i a, __m128i b) { - int64x1_t a_h = vget_high_s64(vreinterpretq_s64_m128i(a)); - int64x1_t b_h = vget_high_s64(vreinterpretq_s64_m128i(b)); - return vreinterpretq_m128i_s64(vcombine_s64(a_h, b_h)); + return vreinterpretq_m128i_u16( + vminq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b))); +} + +// Compare packed unsigned 32-bit integers in a and b, and store packed minimum +// values in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_max_epu32 +FORCE_INLINE __m128i _mm_min_epu32(__m128i a, __m128i b) +{ + return vreinterpretq_m128i_u32( + vminq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b))); } // Horizontally compute the minimum amongst the packed unsigned 16-bit integers @@ -6296,6 +7929,339 @@ FORCE_INLINE __m128i _mm_minpos_epu16(__m128i a) return dst; } +// Compute the sum of absolute differences (SADs) of quadruplets of unsigned +// 8-bit integers in a compared to those in b, and store the 16-bit results in +// dst. Eight SADs are performed using one quadruplet from b and eight +// quadruplets from a. One quadruplet is selected from b starting at on the +// offset specified in imm8. Eight quadruplets are formed from sequential 8-bit +// integers selected from a starting at the offset specified in imm8. 
+// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mpsadbw_epu8 +FORCE_INLINE __m128i _mm_mpsadbw_epu8(__m128i a, __m128i b, const int imm) +{ + uint8x16_t _a, _b; + + switch (imm & 0x4) { + case 0: + // do nothing + _a = vreinterpretq_u8_m128i(a); + break; + case 4: + _a = vreinterpretq_u8_u32(vextq_u32(vreinterpretq_u32_m128i(a), + vreinterpretq_u32_m128i(a), 1)); + break; + default: +#if defined(__GNUC__) || defined(__clang__) + __builtin_unreachable(); +#endif + break; + } + + switch (imm & 0x3) { + case 0: + _b = vreinterpretq_u8_u32( + vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 0))); + break; + case 1: + _b = vreinterpretq_u8_u32( + vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 1))); + break; + case 2: + _b = vreinterpretq_u8_u32( + vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 2))); + break; + case 3: + _b = vreinterpretq_u8_u32( + vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 3))); + break; + default: +#if defined(__GNUC__) || defined(__clang__) + __builtin_unreachable(); +#endif + break; + } + + int16x8_t c04, c15, c26, c37; + uint8x8_t low_b = vget_low_u8(_b); + c04 = vabsq_s16(vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(_a), low_b))); + _a = vextq_u8(_a, _a, 1); + c15 = vabsq_s16(vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(_a), low_b))); + _a = vextq_u8(_a, _a, 1); + c26 = vabsq_s16(vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(_a), low_b))); + _a = vextq_u8(_a, _a, 1); + c37 = vabsq_s16(vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(_a), low_b))); +#if defined(__aarch64__) + // |0|4|2|6| + c04 = vpaddq_s16(c04, c26); + // |1|5|3|7| + c15 = vpaddq_s16(c15, c37); + + int32x4_t trn1_c = + vtrn1q_s32(vreinterpretq_s32_s16(c04), vreinterpretq_s32_s16(c15)); + int32x4_t trn2_c = + vtrn2q_s32(vreinterpretq_s32_s16(c04), vreinterpretq_s32_s16(c15)); + return vreinterpretq_m128i_s16(vpaddq_s16(vreinterpretq_s16_s32(trn1_c), + vreinterpretq_s16_s32(trn2_c))); +#else + int16x4_t c01, c23, c45, c67; + c01 = vpadd_s16(vget_low_s16(c04), vget_low_s16(c15)); + c23 = vpadd_s16(vget_low_s16(c26), vget_low_s16(c37)); + c45 = vpadd_s16(vget_high_s16(c04), vget_high_s16(c15)); + c67 = vpadd_s16(vget_high_s16(c26), vget_high_s16(c37)); + + return vreinterpretq_m128i_s16( + vcombine_s16(vpadd_s16(c01, c23), vpadd_s16(c45, c67))); +#endif +} + +// Multiply the low signed 32-bit integers from each packed 64-bit element in +// a and b, and store the signed 64-bit results in dst. +// +// r0 := (int64_t)(int32_t)a0 * (int64_t)(int32_t)b0 +// r1 := (int64_t)(int32_t)a2 * (int64_t)(int32_t)b2 +FORCE_INLINE __m128i _mm_mul_epi32(__m128i a, __m128i b) +{ + // vmull_s32 upcasts instead of masking, so we downcast. + int32x2_t a_lo = vmovn_s64(vreinterpretq_s64_m128i(a)); + int32x2_t b_lo = vmovn_s64(vreinterpretq_s64_m128i(b)); + return vreinterpretq_m128i_s64(vmull_s32(a_lo, b_lo)); +} + +// Multiplies the 4 signed or unsigned 32-bit integers from a by the 4 signed or +// unsigned 32-bit integers from b. +// https://msdn.microsoft.com/en-us/library/vstudio/bb531409(v=vs.100).aspx +FORCE_INLINE __m128i _mm_mullo_epi32(__m128i a, __m128i b) +{ + return vreinterpretq_m128i_s32( + vmulq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); +} + +// Packs the 8 unsigned 32-bit integers from a and b into unsigned 16-bit +// integers and saturates. 
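// Illustrative usage sketch (not part of the vendored header): _mm_mul_epi32 above
// only widens lanes 0 and 2, so a full 32x32->64 multiply of all four lanes also
// needs the odd lanes, which a 4-byte shift brings into even position.
static inline void mul_epi32_all_lanes_example(__m128i a, __m128i b,
                                               __m128i *even, __m128i *odd)
{
    *even = _mm_mul_epi32(a, b);                /* a0*b0, a2*b2 */
    *odd  = _mm_mul_epi32(_mm_srli_si128(a, 4), /* a1*b1, a3*b3 */
                          _mm_srli_si128(b, 4));
}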
+// +// r0 := UnsignedSaturate(a0) +// r1 := UnsignedSaturate(a1) +// r2 := UnsignedSaturate(a2) +// r3 := UnsignedSaturate(a3) +// r4 := UnsignedSaturate(b0) +// r5 := UnsignedSaturate(b1) +// r6 := UnsignedSaturate(b2) +// r7 := UnsignedSaturate(b3) +FORCE_INLINE __m128i _mm_packus_epi32(__m128i a, __m128i b) +{ + return vreinterpretq_m128i_u16( + vcombine_u16(vqmovun_s32(vreinterpretq_s32_m128i(a)), + vqmovun_s32(vreinterpretq_s32_m128i(b)))); +} + +// Round the packed double-precision (64-bit) floating-point elements in a using +// the rounding parameter, and store the results as packed double-precision +// floating-point elements in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_round_pd +FORCE_INLINE __m128d _mm_round_pd(__m128d a, int rounding) +{ +#if defined(__aarch64__) + switch (rounding) { + case (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC): + return vreinterpretq_m128d_f64(vrndnq_f64(vreinterpretq_f64_m128d(a))); + case (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC): + return _mm_floor_pd(a); + case (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC): + return _mm_ceil_pd(a); + case (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC): + return vreinterpretq_m128d_f64(vrndq_f64(vreinterpretq_f64_m128d(a))); + default: //_MM_FROUND_CUR_DIRECTION + return vreinterpretq_m128d_f64(vrndiq_f64(vreinterpretq_f64_m128d(a))); + } +#else + double *v_double = (double *) &a; + + if (rounding == (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) || + (rounding == _MM_FROUND_CUR_DIRECTION && + _MM_GET_ROUNDING_MODE() == _MM_ROUND_NEAREST)) { + double res[2], tmp; + for (int i = 0; i < 2; i++) { + tmp = (v_double[i] < 0) ? -v_double[i] : v_double[i]; + double roundDown = floor(tmp); // Round down value + double roundUp = ceil(tmp); // Round up value + double diffDown = tmp - roundDown; + double diffUp = roundUp - tmp; + if (diffDown < diffUp) { + /* If it's closer to the round down value, then use it */ + res[i] = roundDown; + } else if (diffDown > diffUp) { + /* If it's closer to the round up value, then use it */ + res[i] = roundUp; + } else { + /* If it's equidistant between round up and round down value, + * pick the one which is an even number */ + double half = roundDown / 2; + if (half != floor(half)) { + /* If the round down value is odd, return the round up value + */ + res[i] = roundUp; + } else { + /* If the round up value is odd, return the round down value + */ + res[i] = roundDown; + } + } + res[i] = (v_double[i] < 0) ? -res[i] : res[i]; + } + return _mm_set_pd(res[1], res[0]); + } else if (rounding == (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) || + (rounding == _MM_FROUND_CUR_DIRECTION && + _MM_GET_ROUNDING_MODE() == _MM_ROUND_DOWN)) { + return _mm_floor_pd(a); + } else if (rounding == (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) || + (rounding == _MM_FROUND_CUR_DIRECTION && + _MM_GET_ROUNDING_MODE() == _MM_ROUND_UP)) { + return _mm_ceil_pd(a); + } + return _mm_set_pd(v_double[1] > 0 ? floor(v_double[1]) : ceil(v_double[1]), + v_double[0] > 0 ? floor(v_double[0]) : ceil(v_double[0])); +#endif +} + +// Round the packed single-precision (32-bit) floating-point elements in a using +// the rounding parameter, and store the results as packed single-precision +// floating-point elements in dst. 
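// Illustrative usage sketch (not part of the vendored header), assuming the usual
// SSE2 helpers from this header (_mm_set_pd/_mm_storeu_pd): with
// _MM_FROUND_TO_NEAREST_INT the scalar fallback above resolves halfway cases to
// the even neighbour, matching SSE4.1 semantics.
static inline void round_pd_nearest_even_example(double out[2])
{
    __m128d v = _mm_set_pd(3.5, 2.5);   /* hi lane, lo lane */
    __m128d r = _mm_round_pd(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    _mm_storeu_pd(out, r);              /* out[0] == 2.0, out[1] == 4.0 */
}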
+// software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_round_ps +FORCE_INLINE __m128 _mm_round_ps(__m128 a, int rounding) +{ +#if defined(__aarch64__) + switch (rounding) { + case (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC): + return vreinterpretq_m128_f32(vrndnq_f32(vreinterpretq_f32_m128(a))); + case (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC): + return _mm_floor_ps(a); + case (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC): + return _mm_ceil_ps(a); + case (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC): + return vreinterpretq_m128_f32(vrndq_f32(vreinterpretq_f32_m128(a))); + default: //_MM_FROUND_CUR_DIRECTION + return vreinterpretq_m128_f32(vrndiq_f32(vreinterpretq_f32_m128(a))); + } +#else + float *v_float = (float *) &a; + + if (rounding == (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) || + (rounding == _MM_FROUND_CUR_DIRECTION && + _MM_GET_ROUNDING_MODE() == _MM_ROUND_NEAREST)) { + uint32x4_t signmask = vdupq_n_u32(0x80000000); + float32x4_t half = vbslq_f32(signmask, vreinterpretq_f32_m128(a), + vdupq_n_f32(0.5f)); /* +/- 0.5 */ + int32x4_t r_normal = vcvtq_s32_f32(vaddq_f32( + vreinterpretq_f32_m128(a), half)); /* round to integer: [a + 0.5]*/ + int32x4_t r_trunc = vcvtq_s32_f32( + vreinterpretq_f32_m128(a)); /* truncate to integer: [a] */ + int32x4_t plusone = vreinterpretq_s32_u32(vshrq_n_u32( + vreinterpretq_u32_s32(vnegq_s32(r_trunc)), 31)); /* 1 or 0 */ + int32x4_t r_even = vbicq_s32(vaddq_s32(r_trunc, plusone), + vdupq_n_s32(1)); /* ([a] + {0,1}) & ~1 */ + float32x4_t delta = vsubq_f32( + vreinterpretq_f32_m128(a), + vcvtq_f32_s32(r_trunc)); /* compute delta: delta = (a - [a]) */ + uint32x4_t is_delta_half = + vceqq_f32(delta, half); /* delta == +/- 0.5 */ + return vreinterpretq_m128_f32( + vcvtq_f32_s32(vbslq_s32(is_delta_half, r_even, r_normal))); + } else if (rounding == (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) || + (rounding == _MM_FROUND_CUR_DIRECTION && + _MM_GET_ROUNDING_MODE() == _MM_ROUND_DOWN)) { + return _mm_floor_ps(a); + } else if (rounding == (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) || + (rounding == _MM_FROUND_CUR_DIRECTION && + _MM_GET_ROUNDING_MODE() == _MM_ROUND_UP)) { + return _mm_ceil_ps(a); + } + return _mm_set_ps(v_float[3] > 0 ? floorf(v_float[3]) : ceilf(v_float[3]), + v_float[2] > 0 ? floorf(v_float[2]) : ceilf(v_float[2]), + v_float[1] > 0 ? floorf(v_float[1]) : ceilf(v_float[1]), + v_float[0] > 0 ? floorf(v_float[0]) : ceilf(v_float[0])); +#endif +} + +// Round the lower double-precision (64-bit) floating-point element in b using +// the rounding parameter, store the result as a double-precision floating-point +// element in the lower element of dst, and copy the upper element from a to the +// upper element of dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_round_sd +FORCE_INLINE __m128d _mm_round_sd(__m128d a, __m128d b, int rounding) +{ + return _mm_move_sd(a, _mm_round_pd(b, rounding)); +} + +// Round the lower single-precision (32-bit) floating-point element in b using +// the rounding parameter, store the result as a single-precision floating-point +// element in the lower element of dst, and copy the upper 3 packed elements +// from a to the upper elements of dst. 
Rounding is done according to the +// rounding[3:0] parameter, which can be one of: +// (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC) // round to nearest, and +// suppress exceptions +// (_MM_FROUND_TO_NEG_INF |_MM_FROUND_NO_EXC) // round down, and +// suppress exceptions +// (_MM_FROUND_TO_POS_INF |_MM_FROUND_NO_EXC) // round up, and suppress +// exceptions +// (_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC) // truncate, and suppress +// exceptions _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see +// _MM_SET_ROUNDING_MODE +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_round_ss +FORCE_INLINE __m128 _mm_round_ss(__m128 a, __m128 b, int rounding) +{ + return _mm_move_ss(a, _mm_round_ps(b, rounding)); +} + +// Load 128-bits of integer data from memory into dst using a non-temporal +// memory hint. mem_addr must be aligned on a 16-byte boundary or a +// general-protection exception may be generated. +// +// dst[127:0] := MEM[mem_addr+127:mem_addr] +// +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_stream_load_si128 +FORCE_INLINE __m128i _mm_stream_load_si128(__m128i *p) +{ +#if __has_builtin(__builtin_nontemporal_store) + return __builtin_nontemporal_load(p); +#else + return vreinterpretq_m128i_s64(vld1q_s64((int64_t *) p)); +#endif +} + +// Compute the bitwise NOT of a and then AND with a 128-bit vector containing +// all 1's, and return 1 if the result is zero, otherwise return 0. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_test_all_ones +FORCE_INLINE int _mm_test_all_ones(__m128i a) +{ + return (uint64_t)(vgetq_lane_s64(a, 0) & vgetq_lane_s64(a, 1)) == + ~(uint64_t) 0; +} + +// Compute the bitwise AND of 128 bits (representing integer data) in a and +// mask, and return 1 if the result is zero, otherwise return 0. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_test_all_zeros +FORCE_INLINE int _mm_test_all_zeros(__m128i a, __m128i mask) +{ + int64x2_t a_and_mask = + vandq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(mask)); + return !(vgetq_lane_s64(a_and_mask, 0) | vgetq_lane_s64(a_and_mask, 1)); +} + +// Compute the bitwise AND of 128 bits (representing integer data) in a and +// mask, and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute +// the bitwise NOT of a and then AND with mask, and set CF to 1 if the result is +// zero, otherwise set CF to 0. Return 1 if both the ZF and CF values are zero, +// otherwise return 0. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=mm_test_mix_ones_zero +FORCE_INLINE int _mm_test_mix_ones_zeros(__m128i a, __m128i mask) +{ + uint64x2_t zf = + vandq_u64(vreinterpretq_u64_m128i(mask), vreinterpretq_u64_m128i(a)); + uint64x2_t cf = + vbicq_u64(vreinterpretq_u64_m128i(mask), vreinterpretq_u64_m128i(a)); + uint64x2_t result = vandq_u64(zf, cf); + return !(vgetq_lane_u64(result, 0) | vgetq_lane_u64(result, 1)); +} + // Compute the bitwise AND of 128 bits (representing integer data) in a and b, // and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute the // bitwise NOT of a and then AND with b, and set CF to 1 if the result is zero, @@ -6312,6 +8278,14 @@ FORCE_INLINE int _mm_testc_si128(__m128i a, __m128i b) // Compute the bitwise AND of 128 bits (representing integer data) in a and b, // and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute the // bitwise NOT of a and then AND with b, and set CF to 1 if the result is zero, +// otherwise set CF to 0. 
Return 1 if both the ZF and CF values are zero, +// otherwise return 0. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testnzc_si128 +#define _mm_testnzc_si128(a, b) _mm_test_mix_ones_zeros(a, b) + +// Compute the bitwise AND of 128 bits (representing integer data) in a and b, +// and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute the +// bitwise NOT of a and then AND with b, and set CF to 1 if the result is zero, // otherwise set CF to 0. Return the ZF value. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testz_si128 FORCE_INLINE int _mm_testz_si128(__m128i a, __m128i b) @@ -6321,299 +8295,93 @@ FORCE_INLINE int _mm_testz_si128(__m128i a, __m128i b) return !(vgetq_lane_s64(s64, 0) | vgetq_lane_s64(s64, 1)); } -// Extracts the selected signed or unsigned 8-bit integer from a and zero -// extends. -// FORCE_INLINE int _mm_extract_epi8(__m128i a, __constrange(0,16) int imm) -#define _mm_extract_epi8(a, imm) vgetq_lane_u8(vreinterpretq_u8_m128i(a), (imm)) - -// Inserts the least significant 8 bits of b into the selected 8-bit integer -// of a. -// FORCE_INLINE __m128i _mm_insert_epi8(__m128i a, int b, -// __constrange(0,16) int imm) -#define _mm_insert_epi8(a, b, imm) \ - __extension__({ \ - vreinterpretq_m128i_s8( \ - vsetq_lane_s8((b), vreinterpretq_s8_m128i(a), (imm))); \ - }) - -// Extracts the selected signed or unsigned 16-bit integer from a and zero -// extends. -// https://msdn.microsoft.com/en-us/library/6dceta0c(v=vs.100).aspx -// FORCE_INLINE int _mm_extract_epi16(__m128i a, __constrange(0,8) int imm) -#define _mm_extract_epi16(a, imm) \ - vgetq_lane_u16(vreinterpretq_u16_m128i(a), (imm)) - -// Inserts the least significant 16 bits of b into the selected 16-bit integer -// of a. -// https://msdn.microsoft.com/en-us/library/kaze8hz1%28v=vs.100%29.aspx -// FORCE_INLINE __m128i _mm_insert_epi16(__m128i a, int b, -// __constrange(0,8) int imm) -#define _mm_insert_epi16(a, b, imm) \ - __extension__({ \ - vreinterpretq_m128i_s16( \ - vsetq_lane_s16((b), vreinterpretq_s16_m128i(a), (imm))); \ - }) - -// Copy a to dst, and insert the 16-bit integer i into dst at the location -// specified by imm8. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_insert_pi16 -#define _mm_insert_pi16(a, b, imm) \ - __extension__({ \ - vreinterpret_m64_s16( \ - vset_lane_s16((b), vreinterpret_s16_m64(a), (imm))); \ - }) - -// Extracts the selected signed or unsigned 32-bit integer from a and zero -// extends. -// FORCE_INLINE int _mm_extract_epi32(__m128i a, __constrange(0,4) int imm) -#define _mm_extract_epi32(a, imm) \ - vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm)) - -// Extracts the selected single-precision (32-bit) floating-point from a. -// FORCE_INLINE int _mm_extract_ps(__m128 a, __constrange(0,4) int imm) -#define _mm_extract_ps(a, imm) vgetq_lane_s32(vreinterpretq_s32_m128(a), (imm)) - -// Inserts the least significant 32 bits of b into the selected 32-bit integer -// of a. -// FORCE_INLINE __m128i _mm_insert_epi32(__m128i a, int b, -// __constrange(0,4) int imm) -#define _mm_insert_epi32(a, b, imm) \ - __extension__({ \ - vreinterpretq_m128i_s32( \ - vsetq_lane_s32((b), vreinterpretq_s32_m128i(a), (imm))); \ - }) - -// Extracts the selected signed or unsigned 64-bit integer from a and zero -// extends. 
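// Illustrative usage sketch (not part of the vendored header): the ptest-style
// helpers above return plain ints, so they can drive ordinary branches, e.g. a
// cheap "is every bit of this vector clear?" check.
static inline int is_all_zero_example(__m128i v)
{
    return _mm_testz_si128(v, v);   /* 1 iff v is all zero bits */
}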
-// FORCE_INLINE __int64 _mm_extract_epi64(__m128i a, __constrange(0,2) int imm) -#define _mm_extract_epi64(a, imm) \ - vgetq_lane_s64(vreinterpretq_s64_m128i(a), (imm)) - -// Inserts the least significant 64 bits of b into the selected 64-bit integer -// of a. -// FORCE_INLINE __m128i _mm_insert_epi64(__m128i a, __int64 b, -// __constrange(0,2) int imm) -#define _mm_insert_epi64(a, b, imm) \ - __extension__({ \ - vreinterpretq_m128i_s64( \ - vsetq_lane_s64((b), vreinterpretq_s64_m128i(a), (imm))); \ - }) +/* SSE4.2 */ -// Count the number of bits set to 1 in unsigned 32-bit integer a, and -// return that count in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_popcnt_u32 -FORCE_INLINE int _mm_popcnt_u32(unsigned int a) +// Compares the 2 signed 64-bit integers in a and the 2 signed 64-bit integers +// in b for greater than. +FORCE_INLINE __m128i _mm_cmpgt_epi64(__m128i a, __m128i b) { #if defined(__aarch64__) -#if __has_builtin(__builtin_popcount) - return __builtin_popcount(a); + return vreinterpretq_m128i_u64( + vcgtq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b))); #else - return (int) vaddlv_u8(vcnt_u8(vcreate_u8((uint64_t) a))); -#endif -#else - uint32_t count = 0; - uint8x8_t input_val, count8x8_val; - uint16x4_t count16x4_val; - uint32x2_t count32x2_val; - - input_val = vld1_u8((uint8_t *) &a); - count8x8_val = vcnt_u8(input_val); - count16x4_val = vpaddl_u8(count8x8_val); - count32x2_val = vpaddl_u16(count16x4_val); - - vst1_u32(&count, count32x2_val); - return count; + return vreinterpretq_m128i_s64(vshrq_n_s64( + vqsubq_s64(vreinterpretq_s64_m128i(b), vreinterpretq_s64_m128i(a)), + 63)); #endif } -// Count the number of bits set to 1 in unsigned 64-bit integer a, and -// return that count in dst. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_popcnt_u64 -FORCE_INLINE int64_t _mm_popcnt_u64(uint64_t a) +// Starting with the initial value in crc, accumulates a CRC32 value for +// unsigned 16-bit integer v. +// https://msdn.microsoft.com/en-us/library/bb531411(v=vs.100) +FORCE_INLINE uint32_t _mm_crc32_u16(uint32_t crc, uint16_t v) { -#if defined(__aarch64__) -#if __has_builtin(__builtin_popcountll) - return __builtin_popcountll(a); -#else - return (int64_t) vaddlv_u8(vcnt_u8(vcreate_u8(a))); -#endif +#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) + __asm__ __volatile__("crc32ch %w[c], %w[c], %w[v]\n\t" + : [c] "+r"(crc) + : [v] "r"(v)); #else - uint64_t count = 0; - uint8x8_t input_val, count8x8_val; - uint16x4_t count16x4_val; - uint32x2_t count32x2_val; - uint64x1_t count64x1_val; - - input_val = vld1_u8((uint8_t *) &a); - count8x8_val = vcnt_u8(input_val); - count16x4_val = vpaddl_u8(count8x8_val); - count32x2_val = vpaddl_u16(count16x4_val); - count64x1_val = vpaddl_u32(count32x2_val); - vst1_u64(&count, count64x1_val); - return count; + crc = _mm_crc32_u8(crc, v & 0xff); + crc = _mm_crc32_u8(crc, (v >> 8) & 0xff); #endif + return crc; } -// Macro: Transpose the 4x4 matrix formed by the 4 rows of single-precision -// (32-bit) floating-point elements in row0, row1, row2, and row3, and store the -// transposed matrix in these vectors (row0 now contains column 0, etc.). 
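// Illustrative scalar model (not part of the vendored header) of the ARMv7
// fallback used by _mm_cmpgt_epi64 above: a *saturating* b - a is negative exactly
// when a > b, even where a plain subtraction would wrap, and the vshrq_n_s64(..., 63)
// in the NEON code smears that sign bit into an all-ones lane mask.
static inline int64_t cmpgt_epi64_lane_model(int64_t a, int64_t b)
{
    int64_t d;
    if (__builtin_sub_overflow(b, a, &d))      /* GCC/Clang builtin, per the guards above */
        d = (b < 0) ? INT64_MIN : INT64_MAX;   /* saturate, keeping the sign of b - a */
    return (d < 0) ? (int64_t) -1 : 0;         /* -1 == all-ones, like the shifted lane */
}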
-// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=MM_TRANSPOSE4_PS -#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \ - do { \ - float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \ - float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \ - row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \ - vget_low_f32(ROW23.val[0])); \ - row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \ - vget_low_f32(ROW23.val[1])); \ - row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \ - vget_high_f32(ROW23.val[0])); \ - row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \ - vget_high_f32(ROW23.val[1])); \ - } while (0) - -/* Crypto Extensions */ - -#if defined(__ARM_FEATURE_CRYPTO) -// Wraps vmull_p64 -FORCE_INLINE uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b) -{ - poly64_t a = vget_lane_p64(vreinterpret_p64_u64(_a), 0); - poly64_t b = vget_lane_p64(vreinterpret_p64_u64(_b), 0); - return vreinterpretq_u64_p128(vmull_p64(a, b)); -} -#else // ARMv7 polyfill -// ARMv7/some A64 lacks vmull_p64, but it has vmull_p8. -// -// vmull_p8 calculates 8 8-bit->16-bit polynomial multiplies, but we need a -// 64-bit->128-bit polynomial multiply. -// -// It needs some work and is somewhat slow, but it is still faster than all -// known scalar methods. -// -// Algorithm adapted to C from -// https://www.workofard.com/2017/07/ghash-for-low-end-cores/, which is adapted -// from "Fast Software Polynomial Multiplication on ARM Processors Using the -// NEON Engine" by Danilo Camara, Conrado Gouvea, Julio Lopez and Ricardo Dahab -// (https://hal.inria.fr/hal-01506572) -static uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b) +// Starting with the initial value in crc, accumulates a CRC32 value for +// unsigned 32-bit integer v. +// https://msdn.microsoft.com/en-us/library/bb531394(v=vs.100) +FORCE_INLINE uint32_t _mm_crc32_u32(uint32_t crc, uint32_t v) { - poly8x8_t a = vreinterpret_p8_u64(_a); - poly8x8_t b = vreinterpret_p8_u64(_b); - - // Masks - uint8x16_t k48_32 = vcombine_u8(vcreate_u8(0x0000ffffffffffff), - vcreate_u8(0x00000000ffffffff)); - uint8x16_t k16_00 = vcombine_u8(vcreate_u8(0x000000000000ffff), - vcreate_u8(0x0000000000000000)); - - // Do the multiplies, rotating with vext to get all combinations - uint8x16_t d = vreinterpretq_u8_p16(vmull_p8(a, b)); // D = A0 * B0 - uint8x16_t e = - vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 1))); // E = A0 * B1 - uint8x16_t f = - vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 1), b)); // F = A1 * B0 - uint8x16_t g = - vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 2))); // G = A0 * B2 - uint8x16_t h = - vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 2), b)); // H = A2 * B0 - uint8x16_t i = - vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 3))); // I = A0 * B3 - uint8x16_t j = - vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 3), b)); // J = A3 * B0 - uint8x16_t k = - vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 4))); // L = A0 * B4 - - // Add cross products - uint8x16_t l = veorq_u8(e, f); // L = E + F - uint8x16_t m = veorq_u8(g, h); // M = G + H - uint8x16_t n = veorq_u8(i, j); // N = I + J - - // Interleave. Using vzip1 and vzip2 prevents Clang from emitting TBL - // instructions. 
-#if defined(__aarch64__) - uint8x16_t lm_p0 = vreinterpretq_u8_u64( - vzip1q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m))); - uint8x16_t lm_p1 = vreinterpretq_u8_u64( - vzip2q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m))); - uint8x16_t nk_p0 = vreinterpretq_u8_u64( - vzip1q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k))); - uint8x16_t nk_p1 = vreinterpretq_u8_u64( - vzip2q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k))); +#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) + __asm__ __volatile__("crc32cw %w[c], %w[c], %w[v]\n\t" + : [c] "+r"(crc) + : [v] "r"(v)); #else - uint8x16_t lm_p0 = vcombine_u8(vget_low_u8(l), vget_low_u8(m)); - uint8x16_t lm_p1 = vcombine_u8(vget_high_u8(l), vget_high_u8(m)); - uint8x16_t nk_p0 = vcombine_u8(vget_low_u8(n), vget_low_u8(k)); - uint8x16_t nk_p1 = vcombine_u8(vget_high_u8(n), vget_high_u8(k)); + crc = _mm_crc32_u16(crc, v & 0xffff); + crc = _mm_crc32_u16(crc, (v >> 16) & 0xffff); #endif - // t0 = (L) (P0 + P1) << 8 - // t1 = (M) (P2 + P3) << 16 - uint8x16_t t0t1_tmp = veorq_u8(lm_p0, lm_p1); - uint8x16_t t0t1_h = vandq_u8(lm_p1, k48_32); - uint8x16_t t0t1_l = veorq_u8(t0t1_tmp, t0t1_h); - - // t2 = (N) (P4 + P5) << 24 - // t3 = (K) (P6 + P7) << 32 - uint8x16_t t2t3_tmp = veorq_u8(nk_p0, nk_p1); - uint8x16_t t2t3_h = vandq_u8(nk_p1, k16_00); - uint8x16_t t2t3_l = veorq_u8(t2t3_tmp, t2t3_h); + return crc; +} - // De-interleave -#if defined(__aarch64__) - uint8x16_t t0 = vreinterpretq_u8_u64( - vuzp1q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h))); - uint8x16_t t1 = vreinterpretq_u8_u64( - vuzp2q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h))); - uint8x16_t t2 = vreinterpretq_u8_u64( - vuzp1q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h))); - uint8x16_t t3 = vreinterpretq_u8_u64( - vuzp2q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h))); +// Starting with the initial value in crc, accumulates a CRC32 value for +// unsigned 64-bit integer v. +// https://msdn.microsoft.com/en-us/library/bb514033(v=vs.100) +FORCE_INLINE uint64_t _mm_crc32_u64(uint64_t crc, uint64_t v) +{ +#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) + __asm__ __volatile__("crc32cx %w[c], %w[c], %x[v]\n\t" + : [c] "+r"(crc) + : [v] "r"(v)); #else - uint8x16_t t1 = vcombine_u8(vget_high_u8(t0t1_l), vget_high_u8(t0t1_h)); - uint8x16_t t0 = vcombine_u8(vget_low_u8(t0t1_l), vget_low_u8(t0t1_h)); - uint8x16_t t3 = vcombine_u8(vget_high_u8(t2t3_l), vget_high_u8(t2t3_h)); - uint8x16_t t2 = vcombine_u8(vget_low_u8(t2t3_l), vget_low_u8(t2t3_h)); + crc = _mm_crc32_u32((uint32_t)(crc), v & 0xffffffff); + crc = _mm_crc32_u32((uint32_t)(crc), (v >> 32) & 0xffffffff); #endif - // Shift the cross products - uint8x16_t t0_shift = vextq_u8(t0, t0, 15); // t0 << 8 - uint8x16_t t1_shift = vextq_u8(t1, t1, 14); // t1 << 16 - uint8x16_t t2_shift = vextq_u8(t2, t2, 13); // t2 << 24 - uint8x16_t t3_shift = vextq_u8(t3, t3, 12); // t3 << 32 - - // Accumulate the products - uint8x16_t cross1 = veorq_u8(t0_shift, t1_shift); - uint8x16_t cross2 = veorq_u8(t2_shift, t3_shift); - uint8x16_t mix = veorq_u8(d, cross1); - uint8x16_t r = veorq_u8(mix, cross2); - return vreinterpretq_u64_u8(r); + return crc; } -#endif // ARMv7 polyfill -// Perform a carry-less multiplication of two 64-bit integers, selected from a -// and b according to imm8, and store the results in dst. 
-// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_clmulepi64_si128 -FORCE_INLINE __m128i _mm_clmulepi64_si128(__m128i _a, __m128i _b, const int imm) +// Starting with the initial value in crc, accumulates a CRC32 value for +// unsigned 8-bit integer v. +// https://msdn.microsoft.com/en-us/library/bb514036(v=vs.100) +FORCE_INLINE uint32_t _mm_crc32_u8(uint32_t crc, uint8_t v) { - uint64x2_t a = vreinterpretq_u64_m128i(_a); - uint64x2_t b = vreinterpretq_u64_m128i(_b); - switch (imm & 0x11) { - case 0x00: - return vreinterpretq_m128i_u64( - _sse2neon_vmull_p64(vget_low_u64(a), vget_low_u64(b))); - case 0x01: - return vreinterpretq_m128i_u64( - _sse2neon_vmull_p64(vget_high_u64(a), vget_low_u64(b))); - case 0x10: - return vreinterpretq_m128i_u64( - _sse2neon_vmull_p64(vget_low_u64(a), vget_high_u64(b))); - case 0x11: - return vreinterpretq_m128i_u64( - _sse2neon_vmull_p64(vget_high_u64(a), vget_high_u64(b))); - default: - abort(); +#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) + __asm__ __volatile__("crc32cb %w[c], %w[c], %w[v]\n\t" + : [c] "+r"(crc) + : [v] "r"(v)); +#else + crc ^= v; + for (int bit = 0; bit < 8; bit++) { + if (crc & 1) + crc = (crc >> 1) ^ UINT32_C(0x82f63b78); + else + crc = (crc >> 1); } +#endif + return crc; } +/* AES */ + #if !defined(__ARM_FEATURE_CRYPTO) /* clang-format off */ #define SSE2NEON_AES_DATA(w) \ @@ -6752,22 +8520,22 @@ FORCE_INLINE __m128i _mm_aesenclast_si128(__m128i a, __m128i RoundKey) { /* FIXME: optimized for NEON */ uint8_t v[4][4] = { - [0] = {SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 0)], - SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 5)], - SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 10)], - SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 15)]}, - [1] = {SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 4)], - SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 9)], - SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 14)], - SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 3)]}, - [2] = {SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 8)], - SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 13)], - SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 2)], - SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 7)]}, - [3] = {SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 12)], - SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 1)], - SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 6)], - SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 11)]}, + {SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 0)], + SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 5)], + SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 10)], + SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 15)]}, + {SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 4)], + SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 9)], + SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 14)], + SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 3)]}, + {SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 8)], + SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 13)], + SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 2)], + SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 7)]}, + {SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 12)], + SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 1)], + SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 6)], + SSE2NEON_sbox[vreinterpretq_nth_u8_m128i(a, 11)]}, }; for (int i = 0; i < 16; i++) vreinterpretq_nth_u8_m128i(a, i) = @@ -6833,155 +8601,134 @@ FORCE_INLINE __m128i _mm_aeskeygenassist_si128(__m128i a, const int rcon) } #endif -/* Streaming Extensions */ +/* Others */ -// Guarantees that every preceding store is globally visible before any -// subsequent store. 
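// Illustrative usage sketch (not part of the vendored header): the _mm_crc32_u8/
// u16/u32/u64 helpers defined earlier in this hunk accumulate the CRC-32C
// (Castagnoli) polynomial; one common convention is to start from ~0 and invert
// the result at the end.
static inline uint32_t crc32c_buffer_example(const uint8_t *p, size_t n)
{
    uint32_t crc = 0xFFFFFFFFu;
    for (size_t i = 0; i < n; i++)
        crc = _mm_crc32_u8(crc, p[i]); /* crc32cb on AArch64+CRC, bitwise loop otherwise */
    return ~crc;
}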
-// https://msdn.microsoft.com/en-us/library/5h2w73d1%28v=vs.90%29.aspx -FORCE_INLINE void _mm_sfence(void) +// Perform a carry-less multiplication of two 64-bit integers, selected from a +// and b according to imm8, and store the results in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_clmulepi64_si128 +FORCE_INLINE __m128i _mm_clmulepi64_si128(__m128i _a, __m128i _b, const int imm) { - __sync_synchronize(); + uint64x2_t a = vreinterpretq_u64_m128i(_a); + uint64x2_t b = vreinterpretq_u64_m128i(_b); + switch (imm & 0x11) { + case 0x00: + return vreinterpretq_m128i_u64( + _sse2neon_vmull_p64(vget_low_u64(a), vget_low_u64(b))); + case 0x01: + return vreinterpretq_m128i_u64( + _sse2neon_vmull_p64(vget_high_u64(a), vget_low_u64(b))); + case 0x10: + return vreinterpretq_m128i_u64( + _sse2neon_vmull_p64(vget_low_u64(a), vget_high_u64(b))); + case 0x11: + return vreinterpretq_m128i_u64( + _sse2neon_vmull_p64(vget_high_u64(a), vget_high_u64(b))); + default: + abort(); + } } -// Store 128-bits (composed of 4 packed single-precision (32-bit) floating- -// point elements) from a into memory using a non-temporal memory hint. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_stream_ps -FORCE_INLINE void _mm_stream_ps(float *p, __m128 a) +FORCE_INLINE unsigned int _sse2neon_mm_get_denormals_zero_mode() { -#if __has_builtin(__builtin_nontemporal_store) - __builtin_nontemporal_store(a, (float32x4_t *) p); + union { + fpcr_bitfield field; +#if defined(__aarch64__) + uint64_t value; #else - vst1q_f32(p, vreinterpretq_f32_m128(a)); + uint32_t value; #endif -} + } r; -// Stores the data in a to the address p without polluting the caches. If the -// cache line containing address p is already in the cache, the cache will be -// updated. -// https://msdn.microsoft.com/en-us/library/ba08y07y%28v=vs.90%29.aspx -FORCE_INLINE void _mm_stream_si128(__m128i *p, __m128i a) -{ -#if __has_builtin(__builtin_nontemporal_store) - __builtin_nontemporal_store(a, p); +#if defined(__aarch64__) + asm volatile("mrs %0, FPCR" : "=r"(r.value)); /* read */ #else - vst1q_s64((int64_t *) p, vreinterpretq_s64_m128i(a)); + asm volatile("vmrs %0, FPSCR" : "=r"(r.value)); /* read */ #endif + + return r.field.bit24 ? _MM_DENORMALS_ZERO_ON : _MM_DENORMALS_ZERO_OFF; } -// Load 128-bits of integer data from memory into dst using a non-temporal -// memory hint. mem_addr must be aligned on a 16-byte boundary or a -// general-protection exception may be generated. -// -// dst[127:0] := MEM[mem_addr+127:mem_addr] -// -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_stream_load_si128 -FORCE_INLINE __m128i _mm_stream_load_si128(__m128i *p) +// Count the number of bits set to 1 in unsigned 32-bit integer a, and +// return that count in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_popcnt_u32 +FORCE_INLINE int _mm_popcnt_u32(unsigned int a) { -#if __has_builtin(__builtin_nontemporal_store) - return __builtin_nontemporal_load(p); +#if defined(__aarch64__) +#if __has_builtin(__builtin_popcount) + return __builtin_popcount(a); #else - return vreinterpretq_m128i_s64(vld1q_s64((int64_t *) p)); + return (int) vaddlv_u8(vcnt_u8(vcreate_u8((uint64_t) a))); #endif -} - -// Cache line containing p is flushed and invalidated from all caches in the -// coherency domain. : -// https://msdn.microsoft.com/en-us/library/ba08y07y(v=vs.100).aspx -FORCE_INLINE void _mm_clflush(void const *p) -{ - (void) p; - // no corollary for Neon? 
-} +#else + uint32_t count = 0; + uint8x8_t input_val, count8x8_val; + uint16x4_t count16x4_val; + uint32x2_t count32x2_val; -// Allocate aligned blocks of memory. -// https://software.intel.com/en-us/ -// cpp-compiler-developer-guide-and-reference-allocating-and-freeing-aligned-memory-blocks -FORCE_INLINE void *_mm_malloc(size_t size, size_t align) -{ - void *ptr; - if (align == 1) - return malloc(size); - if (align == 2 || (sizeof(void *) == 8 && align == 4)) - align = sizeof(void *); - if (!posix_memalign(&ptr, align, size)) - return ptr; - return NULL; -} + input_val = vld1_u8((uint8_t *) &a); + count8x8_val = vcnt_u8(input_val); + count16x4_val = vpaddl_u8(count8x8_val); + count32x2_val = vpaddl_u16(count16x4_val); -// Free aligned memory that was allocated with _mm_malloc. -// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_free -FORCE_INLINE void _mm_free(void *addr) -{ - free(addr); + vst1_u32(&count, count32x2_val); + return count; +#endif } -// Starting with the initial value in crc, accumulates a CRC32 value for -// unsigned 8-bit integer v. -// https://msdn.microsoft.com/en-us/library/bb514036(v=vs.100) -FORCE_INLINE uint32_t _mm_crc32_u8(uint32_t crc, uint8_t v) +// Count the number of bits set to 1 in unsigned 64-bit integer a, and +// return that count in dst. +// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_popcnt_u64 +FORCE_INLINE int64_t _mm_popcnt_u64(uint64_t a) { -#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) - __asm__ __volatile__("crc32cb %w[c], %w[c], %w[v]\n\t" - : [c] "+r"(crc) - : [v] "r"(v)); +#if defined(__aarch64__) +#if __has_builtin(__builtin_popcountll) + return __builtin_popcountll(a); #else - crc ^= v; - for (int bit = 0; bit < 8; bit++) { - if (crc & 1) - crc = (crc >> 1) ^ UINT32_C(0x82f63b78); - else - crc = (crc >> 1); - } + return (int64_t) vaddlv_u8(vcnt_u8(vcreate_u8(a))); +#endif +#else + uint64_t count = 0; + uint8x8_t input_val, count8x8_val; + uint16x4_t count16x4_val; + uint32x2_t count32x2_val; + uint64x1_t count64x1_val; + + input_val = vld1_u8((uint8_t *) &a); + count8x8_val = vcnt_u8(input_val); + count16x4_val = vpaddl_u8(count8x8_val); + count32x2_val = vpaddl_u16(count16x4_val); + count64x1_val = vpaddl_u32(count32x2_val); + vst1_u64(&count, count64x1_val); + return count; #endif - return crc; } -// Starting with the initial value in crc, accumulates a CRC32 value for -// unsigned 16-bit integer v. -// https://msdn.microsoft.com/en-us/library/bb531411(v=vs.100) -FORCE_INLINE uint32_t _mm_crc32_u16(uint32_t crc, uint16_t v) +FORCE_INLINE void _sse2neon_mm_set_denormals_zero_mode(unsigned int flag) { -#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) - __asm__ __volatile__("crc32ch %w[c], %w[c], %w[v]\n\t" - : [c] "+r"(crc) - : [v] "r"(v)); + // AArch32 Advanced SIMD arithmetic always uses the Flush-to-zero setting, + // regardless of the value of the FZ bit. + union { + fpcr_bitfield field; +#if defined(__aarch64__) + uint64_t value; #else - crc = _mm_crc32_u8(crc, v & 0xff); - crc = _mm_crc32_u8(crc, (v >> 8) & 0xff); + uint32_t value; #endif - return crc; -} + } r; -// Starting with the initial value in crc, accumulates a CRC32 value for -// unsigned 32-bit integer v. 
-// https://msdn.microsoft.com/en-us/library/bb531394(v=vs.100) -FORCE_INLINE uint32_t _mm_crc32_u32(uint32_t crc, uint32_t v) -{ -#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) - __asm__ __volatile__("crc32cw %w[c], %w[c], %w[v]\n\t" - : [c] "+r"(crc) - : [v] "r"(v)); +#if defined(__aarch64__) + asm volatile("mrs %0, FPCR" : "=r"(r.value)); /* read */ #else - crc = _mm_crc32_u16(crc, v & 0xffff); - crc = _mm_crc32_u16(crc, (v >> 16) & 0xffff); + asm volatile("vmrs %0, FPSCR" : "=r"(r.value)); /* read */ #endif - return crc; -} -// Starting with the initial value in crc, accumulates a CRC32 value for -// unsigned 64-bit integer v. -// https://msdn.microsoft.com/en-us/library/bb514033(v=vs.100) -FORCE_INLINE uint64_t _mm_crc32_u64(uint64_t crc, uint64_t v) -{ -#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) - __asm__ __volatile__("crc32cx %w[c], %w[c], %x[v]\n\t" - : [c] "+r"(crc) - : [v] "r"(v)); + r.field.bit24 = (flag & _MM_DENORMALS_ZERO_MASK) == _MM_DENORMALS_ZERO_ON; + +#if defined(__aarch64__) + asm volatile("msr FPCR, %0" ::"r"(r)); /* write */ #else - crc = _mm_crc32_u32((uint32_t)(crc), v & 0xffffffff); - crc = _mm_crc32_u32((uint32_t)(crc), (v >> 32) & 0xffffffff); + asm volatile("vmsr FPSCR, %0" ::"r"(r)); /* write */ #endif - return crc; } #if defined(__GNUC__) || defined(__clang__) @@ -6993,4 +8740,4 @@ FORCE_INLINE uint64_t _mm_crc32_u64(uint64_t crc, uint64_t v) #pragma GCC pop_options #endif -#endif +#endif
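// Illustrative usage sketch (not part of the vendored header): the FPCR/FPSCR
// accessors above expose the flush-to-zero bit through the same
// _MM_DENORMALS_ZERO_* constants the x86 API uses, so callers can toggle and
// query it symmetrically.
static inline void denormals_zero_example(void)
{
    _sse2neon_mm_set_denormals_zero_mode(_MM_DENORMALS_ZERO_ON); /* sets FZ (bit 24) */
    unsigned int mode = _sse2neon_mm_get_denormals_zero_mode();  /* reads it back */
    (void) mode; /* _MM_DENORMALS_ZERO_ON; AArch32 NEON flushes regardless, as noted above */
}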
\ No newline at end of file diff --git a/thirdparty/embree/common/simd/simd.h b/thirdparty/embree/common/simd/simd.h index 195506b530..34e37b08b1 100644 --- a/thirdparty/embree/common/simd/simd.h +++ b/thirdparty/embree/common/simd/simd.h @@ -6,7 +6,7 @@ #include "../math/math.h" /* include SSE wrapper classes */ -#if defined(__SSE__) +#if defined(__SSE__) || defined(__ARM_NEON) # include "sse.h" #endif diff --git a/thirdparty/embree/common/simd/sse.h b/thirdparty/embree/common/simd/sse.h index 1465fb4fb0..04d90533dd 100644 --- a/thirdparty/embree/common/simd/sse.h +++ b/thirdparty/embree/common/simd/sse.h @@ -11,7 +11,7 @@ namespace embree { -#if defined(__SSE4_1__) +#if defined(__aarch64__) || defined(__SSE4_1__) __forceinline __m128 blendv_ps(__m128 f, __m128 t, __m128 mask) { return _mm_blendv_ps(f,t,mask); } diff --git a/thirdparty/embree/common/simd/vboold4_avx.h b/thirdparty/embree/common/simd/vboold4_avx.h index 7db0d1c5c1..450bd7a4eb 100644 --- a/thirdparty/embree/common/simd/vboold4_avx.h +++ b/thirdparty/embree/common/simd/vboold4_avx.h @@ -62,7 +62,11 @@ namespace embree //////////////////////////////////////////////////////////////////////////////// __forceinline vboold(FalseTy) : v(_mm256_setzero_pd()) {} +#if !defined(__aarch64__) __forceinline vboold(TrueTy) : v(_mm256_cmp_pd(_mm256_setzero_pd(), _mm256_setzero_pd(), _CMP_EQ_OQ)) {} +#else + __forceinline vboold(TrueTy) : v(_mm256_cmpeq_pd(_mm256_setzero_pd(), _mm256_setzero_pd())) {} +#endif //////////////////////////////////////////////////////////////////////////////// /// Array Access @@ -107,9 +111,10 @@ namespace embree /// Movement/Shifting/Shuffling Functions //////////////////////////////////////////////////////////////////////////////// +#if !defined(__aarch64__) __forceinline vboold4 unpacklo(const vboold4& a, const vboold4& b) { return _mm256_unpacklo_pd(a, b); } __forceinline vboold4 unpackhi(const vboold4& a, const vboold4& b) { return _mm256_unpackhi_pd(a, b); } - +#endif #if defined(__AVX2__) template<int i0, int i1, int i2, int i3> diff --git a/thirdparty/embree/common/simd/vboolf16_avx512.h b/thirdparty/embree/common/simd/vboolf16_avx512.h index 19841dcea8..86b718f025 100644 --- a/thirdparty/embree/common/simd/vboolf16_avx512.h +++ b/thirdparty/embree/common/simd/vboolf16_avx512.h @@ -116,7 +116,7 @@ namespace embree __forceinline size_t popcnt (const vboolf16& a) { return popcnt(a.v); } //////////////////////////////////////////////////////////////////////////////// - /// Convertion Operations + /// Conversion Operations //////////////////////////////////////////////////////////////////////////////// __forceinline unsigned int toInt (const vboolf16& a) { return mm512_mask2int(a); } diff --git a/thirdparty/embree/common/simd/vboolf4_sse2.h b/thirdparty/embree/common/simd/vboolf4_sse2.h index fa84b1b6ee..9e0fdf5c6f 100644 --- a/thirdparty/embree/common/simd/vboolf4_sse2.h +++ b/thirdparty/embree/common/simd/vboolf4_sse2.h @@ -36,9 +36,11 @@ namespace embree __forceinline vboolf(__m128 input) : v(input) {} __forceinline operator const __m128&() const { return v; } + #if !defined(__EMSCRIPTEN__) __forceinline operator const __m128i() const { return _mm_castps_si128(v); } __forceinline operator const __m128d() const { return _mm_castps_pd(v); } - + #endif + __forceinline vboolf(bool a) : v(mm_lookupmask_ps[(size_t(a) << 3) | (size_t(a) << 2) | (size_t(a) << 1) | size_t(a)]) {} __forceinline vboolf(bool a, bool b) @@ -100,7 +102,7 @@ namespace embree __forceinline vboolf4 operator ==(const vboolf4& a, const 
vboolf4& b) { return _mm_castsi128_ps(_mm_cmpeq_epi32(a, b)); } __forceinline vboolf4 select(const vboolf4& m, const vboolf4& t, const vboolf4& f) { -#if defined(__SSE4_1__) +#if defined(__aarch64__) || defined(__SSE4_1__) return _mm_blendv_ps(f, t, m); #else return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f)); @@ -114,6 +116,17 @@ namespace embree __forceinline vboolf4 unpacklo(const vboolf4& a, const vboolf4& b) { return _mm_unpacklo_ps(a, b); } __forceinline vboolf4 unpackhi(const vboolf4& a, const vboolf4& b) { return _mm_unpackhi_ps(a, b); } +#if defined(__aarch64__) + template<int i0, int i1, int i2, int i3> + __forceinline vboolf4 shuffle(const vboolf4& v) { + return vreinterpretq_f32_u8(vqtbl1q_u8( vreinterpretq_u8_s32(v), _MN_SHUFFLE(i0, i1, i2, i3))); + } + + template<int i0, int i1, int i2, int i3> + __forceinline vboolf4 shuffle(const vboolf4& a, const vboolf4& b) { + return vreinterpretq_f32_u8(vqtbl2q_u8( (uint8x16x2_t){(uint8x16_t)a.v, (uint8x16_t)b.v}, _MF_SHUFFLE(i0, i1, i2, i3))); + } +#else template<int i0, int i1, int i2, int i3> __forceinline vboolf4 shuffle(const vboolf4& v) { return _mm_castsi128_ps(_mm_shuffle_epi32(v, _MM_SHUFFLE(i3, i2, i1, i0))); @@ -123,6 +136,7 @@ namespace embree __forceinline vboolf4 shuffle(const vboolf4& a, const vboolf4& b) { return _mm_shuffle_ps(a, b, _MM_SHUFFLE(i3, i2, i1, i0)); } +#endif template<int i0> __forceinline vboolf4 shuffle(const vboolf4& v) { @@ -135,7 +149,7 @@ namespace embree template<> __forceinline vboolf4 shuffle<0, 1, 0, 1>(const vboolf4& v) { return _mm_castpd_ps(_mm_movedup_pd(v)); } #endif -#if defined(__SSE4_1__) +#if defined(__SSE4_1__) && !defined(__aarch64__) template<int dst, int src, int clr> __forceinline vboolf4 insert(const vboolf4& a, const vboolf4& b) { return _mm_insert_ps(a, b, (dst << 4) | (src << 6) | clr); } template<int dst, int src> __forceinline vboolf4 insert(const vboolf4& a, const vboolf4& b) { return insert<dst, src, 0>(a, b); } template<int dst> __forceinline vboolf4 insert(const vboolf4& a, const bool b) { return insert<dst, 0>(a, vboolf4(b)); } @@ -157,7 +171,9 @@ namespace embree __forceinline bool none(const vboolf4& valid, const vboolf4& b) { return none(valid & b); } __forceinline size_t movemask(const vboolf4& a) { return _mm_movemask_ps(a); } -#if defined(__SSE4_2__) +#if defined(__aarch64__) + __forceinline size_t popcnt(const vboolf4& a) { return vaddvq_s32(vandq_u32(vreinterpretq_u32_f32(a.v),_mm_set1_epi32(1))); } +#elif defined(__SSE4_2__) __forceinline size_t popcnt(const vboolf4& a) { return popcnt((size_t)_mm_movemask_ps(a)); } #else __forceinline size_t popcnt(const vboolf4& a) { return bool(a[0])+bool(a[1])+bool(a[2])+bool(a[3]); } diff --git a/thirdparty/embree/common/simd/vboolf8_avx.h b/thirdparty/embree/common/simd/vboolf8_avx.h index ba77cc3c5e..18cede19c6 100644 --- a/thirdparty/embree/common/simd/vboolf8_avx.h +++ b/thirdparty/embree/common/simd/vboolf8_avx.h @@ -76,7 +76,7 @@ namespace embree //////////////////////////////////////////////////////////////////////////////// __forceinline vboolf(FalseTy) : v(_mm256_setzero_ps()) {} - __forceinline vboolf(TrueTy) : v(_mm256_cmp_ps(_mm256_setzero_ps(), _mm256_setzero_ps(), _CMP_EQ_OQ)) {} + __forceinline vboolf(TrueTy) : v(_mm256_castsi256_ps(_mm256_set1_epi32(0xFFFFFFFF))) {} //////////////////////////////////////////////////////////////////////////////// /// Array Access diff --git a/thirdparty/embree/common/simd/vdouble4_avx.h b/thirdparty/embree/common/simd/vdouble4_avx.h index 55326de7dd..208bb7ac99 100644 --- 
a/thirdparty/embree/common/simd/vdouble4_avx.h +++ b/thirdparty/embree/common/simd/vdouble4_avx.h @@ -189,13 +189,20 @@ namespace embree __forceinline vboold4 operator >=(const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd_mask(a, b, _MM_CMPINT_GE); } __forceinline vboold4 operator > (const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd_mask(a, b, _MM_CMPINT_GT); } __forceinline vboold4 operator <=(const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd_mask(a, b, _MM_CMPINT_LE); } -#else +#elif !defined(__aarch64__) __forceinline vboold4 operator ==(const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd(a, b, _CMP_EQ_OQ); } __forceinline vboold4 operator !=(const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd(a, b, _CMP_NEQ_UQ); } __forceinline vboold4 operator < (const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd(a, b, _CMP_LT_OS); } __forceinline vboold4 operator >=(const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd(a, b, _CMP_NLT_US); } __forceinline vboold4 operator > (const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd(a, b, _CMP_NLE_US); } __forceinline vboold4 operator <=(const vdouble4& a, const vdouble4& b) { return _mm256_cmp_pd(a, b, _CMP_LE_OS); } +#else + __forceinline vboold4 operator ==(const vdouble4& a, const vdouble4& b) { return _mm256_cmpeq_pd(a, b); } + __forceinline vboold4 operator !=(const vdouble4& a, const vdouble4& b) { return _mm256_cmpneq_pd(a, b); } + __forceinline vboold4 operator < (const vdouble4& a, const vdouble4& b) { return _mm256_cmplt_pd(a, b); } + __forceinline vboold4 operator >=(const vdouble4& a, const vdouble4& b) { return _mm256_cmpnlt_pd(a, b); } + __forceinline vboold4 operator > (const vdouble4& a, const vdouble4& b) { return _mm256_cmpnle_pd(a, b); } + __forceinline vboold4 operator <=(const vdouble4& a, const vdouble4& b) { return _mm256_cmple_pd(a, b); } #endif __forceinline vboold4 operator ==(const vdouble4& a, double b) { return a == vdouble4(b); } diff --git a/thirdparty/embree/common/simd/vfloat16_avx512.h b/thirdparty/embree/common/simd/vfloat16_avx512.h index 9f1e2459c4..75c471cc0c 100644 --- a/thirdparty/embree/common/simd/vfloat16_avx512.h +++ b/thirdparty/embree/common/simd/vfloat16_avx512.h @@ -177,9 +177,10 @@ namespace embree __forceinline vfloat16 abs (const vfloat16& a) { return _mm512_castsi512_ps(_mm512_and_epi32(_mm512_castps_si512(a),_mm512_set1_epi32(0x7FFFFFFF))); } __forceinline vfloat16 signmsk(const vfloat16& a) { return _mm512_castsi512_ps(_mm512_and_epi32(_mm512_castps_si512(a),_mm512_set1_epi32(0x80000000))); } - __forceinline vfloat16 rcp(const vfloat16& a) { + __forceinline vfloat16 rcp(const vfloat16& a) + { const vfloat16 r = _mm512_rcp14_ps(a); - return _mm512_mul_ps(r, _mm512_fnmadd_ps(r, a, vfloat16(2.0f))); + return _mm512_fmadd_ps(r, _mm512_fnmadd_ps(a, r, vfloat16(1.0)), r); // computes r + r * (1 - a*r) } __forceinline vfloat16 sqr (const vfloat16& a) { return _mm512_mul_ps(a,a); } diff --git a/thirdparty/embree/common/simd/vfloat4_sse2.h b/thirdparty/embree/common/simd/vfloat4_sse2.h index 5215bf9730..6d7e11fe72 100644 --- a/thirdparty/embree/common/simd/vfloat4_sse2.h +++ b/thirdparty/embree/common/simd/vfloat4_sse2.h @@ -42,6 +42,11 @@ namespace embree __forceinline vfloat(float a, float b, float c, float d) : v(_mm_set_ps(d, c, b, a)) {} __forceinline explicit vfloat(const vint4& a) : v(_mm_cvtepi32_ps(a)) {} +#if defined(__aarch64__) + __forceinline explicit vfloat(const vuint4& x) { + v = vcvtq_f32_u32(vreinterpretq_u32_s32(x.v)); 
+ } +#else __forceinline explicit vfloat(const vuint4& x) { const __m128i a = _mm_and_si128(x,_mm_set1_epi32(0x7FFFFFFF)); const __m128i b = _mm_and_si128(_mm_srai_epi32(x,31),_mm_set1_epi32(0x4F000000)); //0x4F000000 = 2^31 @@ -49,7 +54,7 @@ namespace embree const __m128 bf = _mm_castsi128_ps(b); v = _mm_add_ps(af,bf); } - +#endif //////////////////////////////////////////////////////////////////////////////// /// Constants //////////////////////////////////////////////////////////////////////////////// @@ -107,7 +112,11 @@ namespace embree #endif } -#if defined(__SSE4_1__) +#if defined(__aarch64__) + static __forceinline vfloat4 load(const char* ptr) { + return __m128(_mm_load4epi8_f32(((__m128i*)ptr))); + } +#elif defined(__SSE4_1__) static __forceinline vfloat4 load(const char* ptr) { return _mm_cvtepi32_ps(_mm_cvtepi8_epi32(_mm_loadu_si128((__m128i*)ptr))); } @@ -117,7 +126,11 @@ namespace embree } #endif -#if defined(__SSE4_1__) +#if defined(__aarch64__) + static __forceinline vfloat4 load(const unsigned char* ptr) { + return __m128(_mm_load4epu8_f32(((__m128i*)ptr))); + } +#elif defined(__SSE4_1__) static __forceinline vfloat4 load(const unsigned char* ptr) { return _mm_cvtepi32_ps(_mm_cvtepu8_epi32(_mm_loadu_si128((__m128i*)ptr))); } @@ -128,7 +141,11 @@ namespace embree } #endif -#if defined(__SSE4_1__) +#if defined(__aarch64__) + static __forceinline vfloat4 load(const short* ptr) { + return __m128(_mm_load4epi16_f32(((__m128i*)ptr))); + } +#elif defined(__SSE4_1__) static __forceinline vfloat4 load(const short* ptr) { return _mm_cvtepi32_ps(_mm_cvtepi16_epi32(_mm_loadu_si128((__m128i*)ptr))); } @@ -145,15 +162,19 @@ namespace embree static __forceinline void store_nt(void* ptr, const vfloat4& v) { #if defined (__SSE4_1__) +#if defined(__aarch64__) _mm_stream_ps((float*)ptr,v); #else + _mm_stream_ps((float*)ptr,v); +#endif +#else _mm_store_ps((float*)ptr,v); #endif } template<int scale = 4> static __forceinline vfloat4 gather(const float* ptr, const vint4& index) { -#if defined(__AVX2__) +#if defined(__AVX2__) && !defined(__aarch64__) return _mm_i32gather_ps(ptr, index, scale); #else return vfloat4( @@ -169,7 +190,7 @@ namespace embree vfloat4 r = zero; #if defined(__AVX512VL__) return _mm_mmask_i32gather_ps(r, mask, index, ptr, scale); -#elif defined(__AVX2__) +#elif defined(__AVX2__) && !defined(__aarch64__) return _mm_mask_i32gather_ps(r, ptr, index, mask, scale); #else if (likely(mask[0])) r[0] = *(float*)(((char*)ptr)+scale*index[0]); @@ -223,8 +244,8 @@ namespace embree friend __forceinline vfloat4 select(const vboolf4& m, const vfloat4& t, const vfloat4& f) { #if defined(__AVX512VL__) return _mm_mask_blend_ps(m, f, t); -#elif defined(__SSE4_1__) - return _mm_blendv_ps(f, t, m); +#elif defined(__SSE4_1__) || (defined(__aarch64__)) + return _mm_blendv_ps(f, t, m); #else return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f)); #endif @@ -256,18 +277,34 @@ namespace embree __forceinline vfloat4 toFloat(const vint4& a) { return vfloat4(a); } __forceinline vfloat4 operator +(const vfloat4& a) { return a; } +#if defined(__aarch64__) + __forceinline vfloat4 operator -(const vfloat4& a) { + return vnegq_f32(a); + } +#else __forceinline vfloat4 operator -(const vfloat4& a) { return _mm_xor_ps(a, _mm_castsi128_ps(_mm_set1_epi32(0x80000000))); } +#endif +#if defined(__aarch64__) + __forceinline vfloat4 abs(const vfloat4& a) { return _mm_abs_ps(a); } +#else __forceinline vfloat4 abs(const vfloat4& a) { return _mm_and_ps(a, _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff))); } +#endif + #if 
defined(__AVX512VL__) __forceinline vfloat4 sign(const vfloat4& a) { return _mm_mask_blend_ps(_mm_cmp_ps_mask(a, vfloat4(zero), _CMP_LT_OQ), vfloat4(one), -vfloat4(one)); } #else __forceinline vfloat4 sign(const vfloat4& a) { return blendv_ps(vfloat4(one), -vfloat4(one), _mm_cmplt_ps(a, vfloat4(zero))); } #endif + __forceinline vfloat4 signmsk(const vfloat4& a) { return _mm_and_ps(a,_mm_castsi128_ps(_mm_set1_epi32(0x80000000))); } - + __forceinline vfloat4 rcp(const vfloat4& a) { +#if defined(__aarch64__) + return vfloat4(vdivq_f32(vdupq_n_f32(1.0f),a.v)); +#else + #if defined(__AVX512VL__) const vfloat4 r = _mm_rcp14_ps(a); #else @@ -275,30 +312,39 @@ namespace embree #endif #if defined(__AVX2__) - return _mm_mul_ps(r,_mm_fnmadd_ps(r, a, vfloat4(2.0f))); + return _mm_fmadd_ps(r, _mm_fnmadd_ps(a, r, vfloat4(1.0f)), r); // computes r + r * (1 - a * r) #else - return _mm_mul_ps(r,_mm_sub_ps(vfloat4(2.0f), _mm_mul_ps(r, a))); + return _mm_add_ps(r,_mm_mul_ps(r, _mm_sub_ps(vfloat4(1.0f), _mm_mul_ps(a, r)))); // computes r + r * (1 - a * r) #endif + +#endif //defined(__aarch64__) } __forceinline vfloat4 sqr (const vfloat4& a) { return _mm_mul_ps(a,a); } __forceinline vfloat4 sqrt(const vfloat4& a) { return _mm_sqrt_ps(a); } __forceinline vfloat4 rsqrt(const vfloat4& a) { +#if defined(__aarch64__) + vfloat4 r = _mm_rsqrt_ps(a); + r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a, r), r)); + r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a, r), r)); + r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a, r), r)); + return r; +#else + #if defined(__AVX512VL__) vfloat4 r = _mm_rsqrt14_ps(a); #else vfloat4 r = _mm_rsqrt_ps(a); #endif -#if defined(__ARM_NEON) - r = _mm_fmadd_ps(_mm_set1_ps(1.5f), r, _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r))); - r = _mm_fmadd_ps(_mm_set1_ps(1.5f), r, _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r))); -#elif defined(__AVX2__) +#if defined(__AVX2__) r = _mm_fmadd_ps(_mm_set1_ps(1.5f), r, _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r))); #else r = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(1.5f), r), _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r))); #endif + +#endif return r; } @@ -344,7 +390,8 @@ namespace embree __forceinline vfloat4 max(const vfloat4& a, float b) { return _mm_max_ps(a,vfloat4(b)); } __forceinline vfloat4 max(float a, const vfloat4& b) { return _mm_max_ps(vfloat4(a),b); } -#if defined(__SSE4_1__) +#if defined(__SSE4_1__) || defined(__aarch64__) + __forceinline vfloat4 mini(const vfloat4& a, const vfloat4& b) { const vint4 ai = _mm_castps_si128(a); const vint4 bi = _mm_castps_si128(b); @@ -393,9 +440,10 @@ namespace embree __forceinline vfloat4 nmsub(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fnmsub_ps(a,b,c); } #else __forceinline vfloat4 madd (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return a*b+c; } - __forceinline vfloat4 msub (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return a*b-c; } __forceinline vfloat4 nmadd(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return -a*b+c;} __forceinline vfloat4 nmsub(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return -a*b-c; } + __forceinline vfloat4 msub (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return a*b-c; } + #endif //////////////////////////////////////////////////////////////////////////////// @@ -429,8 +477,13 @@ namespace embree __forceinline vboolf4 operator ==(const vfloat4& a, const vfloat4& b) { return _mm_cmpeq_ps (a, b); 
} __forceinline vboolf4 operator !=(const vfloat4& a, const vfloat4& b) { return _mm_cmpneq_ps(a, b); } __forceinline vboolf4 operator < (const vfloat4& a, const vfloat4& b) { return _mm_cmplt_ps (a, b); } +#if defined(__aarch64__) + __forceinline vboolf4 operator >=(const vfloat4& a, const vfloat4& b) { return _mm_cmpge_ps (a, b); } + __forceinline vboolf4 operator > (const vfloat4& a, const vfloat4& b) { return _mm_cmpgt_ps (a, b); } +#else __forceinline vboolf4 operator >=(const vfloat4& a, const vfloat4& b) { return _mm_cmpnlt_ps(a, b); } __forceinline vboolf4 operator > (const vfloat4& a, const vfloat4& b) { return _mm_cmpnle_ps(a, b); } +#endif __forceinline vboolf4 operator <=(const vfloat4& a, const vfloat4& b) { return _mm_cmple_ps (a, b); } #endif @@ -484,7 +537,7 @@ namespace embree return select(vboolf4(mask), t, f); #endif } - + __forceinline vfloat4 lerp(const vfloat4& a, const vfloat4& b, const vfloat4& t) { return madd(t,b-a,a); } @@ -506,10 +559,10 @@ namespace embree //////////////////////////////////////////////////////////////////////////////// #if defined(__aarch64__) - __forceinline vfloat4 floor(const vfloat4& a) { return vrndmq_f32(a.v); } - __forceinline vfloat4 ceil (const vfloat4& a) { return vrndpq_f32(a.v); } - __forceinline vfloat4 trunc(const vfloat4& a) { return vrndq_f32(a.v); } - __forceinline vfloat4 round(const vfloat4& a) { return vrndnq_f32(a.v); } + __forceinline vfloat4 floor(const vfloat4& a) { return vrndmq_f32(a.v); } // towards -inf + __forceinline vfloat4 ceil (const vfloat4& a) { return vrndpq_f32(a.v); } // toward +inf + __forceinline vfloat4 trunc(const vfloat4& a) { return vrndq_f32(a.v); } // towards 0 + __forceinline vfloat4 round(const vfloat4& a) { return vrndnq_f32(a.v); } // to nearest, ties to even. NOTE(LTE): arm clang uses vrndnq, old gcc uses vrndqn? 
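A quick standalone sketch (not part of the patch, and assuming an AArch64 toolchain with <arm_neon.h>) showing that the four vrnd*q_f32 intrinsics selected above reproduce the libm rounding modes, including the ties-to-even behaviour of vrndnq_f32 that the SSE4.1 _MM_FROUND_TO_NEAREST_INT path below also provides; all names here are illustrative only:

// Standalone sanity check, not Embree code.
#include <arm_neon.h>
#include <cstdio>

int main()
{
  const float in[4] = { 1.5f, 2.5f, -1.5f, -2.5f };
  const float32x4_t v = vld1q_f32(in);

  float fl[4], ce[4], tr[4], rn[4];
  vst1q_f32(fl, vrndmq_f32(v)); // towards -inf (floor)
  vst1q_f32(ce, vrndpq_f32(v)); // towards +inf (ceil)
  vst1q_f32(tr, vrndq_f32(v));  // towards zero (trunc)
  vst1q_f32(rn, vrndnq_f32(v)); // to nearest, ties to even: 1.5 -> 2, 2.5 -> 2

  for (int i = 0; i < 4; i++)
    std::printf("%5.1f -> floor %5.1f  ceil %5.1f  trunc %5.1f  round %5.1f\n",
                in[i], fl[i], ce[i], tr[i], rn[i]);
  return 0;
}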
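Stepping back to the rcp()/rsqrt() changes earlier in this file: the Newton-Raphson step is rewritten from r*(2 - a*r) to the algebraically identical r + r*(1 - a*r), so the tiny residual 1 - a*r comes out of a single FMA and the correction is folded into r with one final rounding. A scalar sketch of the same update (plain C++, not Embree code), showing the usual quadratic convergence of the iteration:

// Scalar illustration of the refinement step used by rcp() above.
#include <cmath>
#include <cstdio>

static double refine(double a, double r) {
  return std::fma(r, std::fma(-a, r, 1.0), r);   // r + r*(1 - a*r)
}

int main() {
  const double a = 7.0;
  double r = 0.14;                               // crude initial estimate of 1/7
  for (int it = 0; it < 3; it++) {
    std::printf("iter %d: r = %.17g  (error %.3g)\n", it, r, std::fabs(r - 1.0/a));
    r = refine(a, r);                            // error shrinks roughly quadratically
  }
  return 0;
}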
#elif defined (__SSE4_1__) __forceinline vfloat4 floor(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEG_INF ); } __forceinline vfloat4 ceil (const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_POS_INF ); } @@ -524,7 +577,9 @@ namespace embree __forceinline vfloat4 frac(const vfloat4& a) { return a-floor(a); } __forceinline vint4 floori(const vfloat4& a) { -#if defined(__SSE4_1__) +#if defined(__aarch64__) + return vcvtq_s32_f32(floor(a)); +#elif defined(__SSE4_1__) return vint4(floor(a)); #else return vint4(a-vfloat4(0.5f)); @@ -538,6 +593,16 @@ namespace embree __forceinline vfloat4 unpacklo(const vfloat4& a, const vfloat4& b) { return _mm_unpacklo_ps(a, b); } __forceinline vfloat4 unpackhi(const vfloat4& a, const vfloat4& b) { return _mm_unpackhi_ps(a, b); } +#if defined(__aarch64__) + template<int i0, int i1, int i2, int i3> + __forceinline vfloat4 shuffle(const vfloat4& v) { + return vreinterpretq_f32_u8(vqtbl1q_u8( (uint8x16_t)v.v, _MN_SHUFFLE(i0, i1, i2, i3))); + } + template<int i0, int i1, int i2, int i3> + __forceinline vfloat4 shuffle(const vfloat4& a, const vfloat4& b) { + return vreinterpretq_f32_u8(vqtbl2q_u8( (uint8x16x2_t){(uint8x16_t)a.v, (uint8x16_t)b.v}, _MF_SHUFFLE(i0, i1, i2, i3))); + } +#else template<int i0, int i1, int i2, int i3> __forceinline vfloat4 shuffle(const vfloat4& v) { return _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v), _MM_SHUFFLE(i3, i2, i1, i0))); @@ -547,8 +612,9 @@ namespace embree __forceinline vfloat4 shuffle(const vfloat4& a, const vfloat4& b) { return _mm_shuffle_ps(a, b, _MM_SHUFFLE(i3, i2, i1, i0)); } +#endif -#if defined(__SSE3__) +#if defined(__SSE3__) && !defined(__aarch64__) template<> __forceinline vfloat4 shuffle<0, 0, 2, 2>(const vfloat4& v) { return _mm_moveldup_ps(v); } template<> __forceinline vfloat4 shuffle<1, 1, 3, 3>(const vfloat4& v) { return _mm_movehdup_ps(v); } template<> __forceinline vfloat4 shuffle<0, 1, 0, 1>(const vfloat4& v) { return _mm_castpd_ps(_mm_movedup_pd(_mm_castps_pd(v))); } @@ -559,10 +625,14 @@ namespace embree return shuffle<i,i,i,i>(v); } +#if defined(__aarch64__) + template<int i> __forceinline float extract(const vfloat4& a) { return a[i]; } +#else template<int i> __forceinline float extract (const vfloat4& a) { return _mm_cvtss_f32(shuffle<i>(a)); } template<> __forceinline float extract<0>(const vfloat4& a) { return _mm_cvtss_f32(a); } +#endif -#if defined (__SSE4_1__) +#if defined (__SSE4_1__) && !defined(__aarch64__) template<int dst, int src, int clr> __forceinline vfloat4 insert(const vfloat4& a, const vfloat4& b) { return _mm_insert_ps(a, b, (dst << 4) | (src << 6) | clr); } template<int dst, int src> __forceinline vfloat4 insert(const vfloat4& a, const vfloat4& b) { return insert<dst, src, 0>(a, b); } template<int dst> __forceinline vfloat4 insert(const vfloat4& a, const float b) { return insert<dst, 0>(a, _mm_set_ss(b)); } @@ -664,14 +734,25 @@ namespace embree //////////////////////////////////////////////////////////////////////////////// /// Reductions //////////////////////////////////////////////////////////////////////////////// - +#if defined(__aarch64__) + __forceinline vfloat4 vreduce_min(const vfloat4& v) { float h = vminvq_f32(v); return vdupq_n_f32(h); } + __forceinline vfloat4 vreduce_max(const vfloat4& v) { float h = vmaxvq_f32(v); return vdupq_n_f32(h); } + __forceinline vfloat4 vreduce_add(const vfloat4& v) { float h = vaddvq_f32(v); return vdupq_n_f32(h); } +#else __forceinline vfloat4 vreduce_min(const vfloat4& v) { vfloat4 h = 
min(shuffle<1,0,3,2>(v),v); return min(shuffle<2,3,0,1>(h),h); } __forceinline vfloat4 vreduce_max(const vfloat4& v) { vfloat4 h = max(shuffle<1,0,3,2>(v),v); return max(shuffle<2,3,0,1>(h),h); } __forceinline vfloat4 vreduce_add(const vfloat4& v) { vfloat4 h = shuffle<1,0,3,2>(v) + v ; return shuffle<2,3,0,1>(h) + h ; } +#endif +#if defined(__aarch64__) + __forceinline float reduce_min(const vfloat4& v) { return vminvq_f32(v); } + __forceinline float reduce_max(const vfloat4& v) { return vmaxvq_f32(v); } + __forceinline float reduce_add(const vfloat4& v) { return vaddvq_f32(v); } +#else __forceinline float reduce_min(const vfloat4& v) { return _mm_cvtss_f32(vreduce_min(v)); } __forceinline float reduce_max(const vfloat4& v) { return _mm_cvtss_f32(vreduce_max(v)); } __forceinline float reduce_add(const vfloat4& v) { return _mm_cvtss_f32(vreduce_add(v)); } +#endif __forceinline size_t select_min(const vboolf4& valid, const vfloat4& v) { @@ -687,7 +768,7 @@ namespace embree } //////////////////////////////////////////////////////////////////////////////// - /// Euclidian Space Operators + /// Euclidean Space Operators //////////////////////////////////////////////////////////////////////////////// __forceinline float dot(const vfloat4& a, const vfloat4& b) { diff --git a/thirdparty/embree/common/simd/vfloat8_avx.h b/thirdparty/embree/common/simd/vfloat8_avx.h index 13446454e8..b09d5e641d 100644 --- a/thirdparty/embree/common/simd/vfloat8_avx.h +++ b/thirdparty/embree/common/simd/vfloat8_avx.h @@ -107,11 +107,11 @@ namespace embree static __forceinline void store (const vboolf8& mask, void* ptr, const vfloat8& v) { _mm256_mask_store_ps ((float*)ptr,mask,v); } static __forceinline void storeu(const vboolf8& mask, void* ptr, const vfloat8& v) { _mm256_mask_storeu_ps((float*)ptr,mask,v); } #else - static __forceinline vfloat8 load (const vboolf8& mask, const void* ptr) { return _mm256_maskload_ps((float*)ptr,(__m256i)mask); } - static __forceinline vfloat8 loadu(const vboolf8& mask, const void* ptr) { return _mm256_maskload_ps((float*)ptr,(__m256i)mask); } + static __forceinline vfloat8 load (const vboolf8& mask, const void* ptr) { return _mm256_maskload_ps((float*)ptr,_mm256_castps_si256(mask.v)); } + static __forceinline vfloat8 loadu(const vboolf8& mask, const void* ptr) { return _mm256_maskload_ps((float*)ptr,_mm256_castps_si256(mask.v)); } - static __forceinline void store (const vboolf8& mask, void* ptr, const vfloat8& v) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask,v); } - static __forceinline void storeu(const vboolf8& mask, void* ptr, const vfloat8& v) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask,v); } + static __forceinline void store (const vboolf8& mask, void* ptr, const vfloat8& v) { _mm256_maskstore_ps((float*)ptr,_mm256_castps_si256(mask.v),v); } + static __forceinline void storeu(const vboolf8& mask, void* ptr, const vfloat8& v) { _mm256_maskstore_ps((float*)ptr,_mm256_castps_si256(mask.v),v); } #endif #if defined(__AVX2__) @@ -126,7 +126,7 @@ namespace embree template<int scale = 4> static __forceinline vfloat8 gather(const float* ptr, const vint8& index) { -#if defined(__AVX2__) +#if defined(__AVX2__) && !defined(__aarch64__) return _mm256_i32gather_ps(ptr, index ,scale); #else return vfloat8( @@ -146,7 +146,7 @@ namespace embree vfloat8 r = zero; #if defined(__AVX512VL__) return _mm256_mmask_i32gather_ps(r, mask, index, ptr, scale); -#elif defined(__AVX2__) +#elif defined(__AVX2__) && !defined(__aarch64__) return _mm256_mask_i32gather_ps(r, ptr, index, mask, scale); 
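The #else branch that follows is the scalar fallback, and the added && !defined(__aarch64__) guards make AArch64 take it as well, presumably because that target has no hardware gather and an emulated _mm256_i32gather_ps would reduce to per-lane loads anyway. A self-contained sketch of the same pattern (free functions, names are illustrative, not Embree code): scale is a byte multiplier applied to each 32-bit index, exactly as in the intrinsic's contract, and masked-off lanes keep their initial value (zero in the code above):

#include <cstddef>
#include <cstdint>

// Unmasked gather: fetch N floats addressed as base + scale*index[i] bytes.
template<int scale = 4, int N = 4>
inline void gather_scalar(const float* base, const int32_t* index, float* out)
{
  for (int i = 0; i < N; i++)
    out[i] = *reinterpret_cast<const float*>(
        reinterpret_cast<const char*>(base) + static_cast<std::ptrdiff_t>(scale) * index[i]);
}

// Masked gather: lanes whose mask entry is false keep their initial value,
// like the src operand of _mm_mask_i32gather_ps / _mm256_mask_i32gather_ps.
template<int scale = 4, int N = 4>
inline void mask_gather_scalar(const float* base, const int32_t* index,
                               const bool* mask, float* inout /* preloaded, e.g. zero */)
{
  for (int i = 0; i < N; i++)
    if (mask[i])
      inout[i] = *reinterpret_cast<const float*>(
          reinterpret_cast<const char*>(base) + static_cast<std::ptrdiff_t>(scale) * index[i]);
}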
#else if (likely(mask[0])) r[0] = *(float*)(((char*)ptr)+scale*index[0]); @@ -215,20 +215,52 @@ namespace embree __forceinline vfloat8 toFloat(const vint8& a) { return vfloat8(a); } __forceinline vfloat8 operator +(const vfloat8& a) { return a; } +#if !defined(__aarch64__) __forceinline vfloat8 operator -(const vfloat8& a) { const __m256 mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x80000000)); return _mm256_xor_ps(a, mask); } +#else + __forceinline vfloat8 operator -(const vfloat8& a) { + __m256 res; + res.lo = vnegq_f32(a.v.lo); + res.hi = vnegq_f32(a.v.hi); + return res; +} +#endif + +#if !defined(__aarch64__) __forceinline vfloat8 abs(const vfloat8& a) { const __m256 mask = _mm256_castsi256_ps(_mm256_set1_epi32(0x7fffffff)); return _mm256_and_ps(a, mask); } +#else +__forceinline vfloat8 abs(const vfloat8& a) { + __m256 res; + res.lo = vabsq_f32(a.v.lo); + res.hi = vabsq_f32(a.v.hi); + return res; +} +#endif + +#if !defined(__aarch64__) __forceinline vfloat8 sign (const vfloat8& a) { return _mm256_blendv_ps(vfloat8(one), -vfloat8(one), _mm256_cmp_ps(a, vfloat8(zero), _CMP_NGE_UQ)); } +#else + __forceinline vfloat8 sign (const vfloat8& a) { return _mm256_blendv_ps(vfloat8(one), -vfloat8(one), _mm256_cmplt_ps(a, vfloat8(zero))); } +#endif __forceinline vfloat8 signmsk(const vfloat8& a) { return _mm256_and_ps(a,_mm256_castsi256_ps(_mm256_set1_epi32(0x80000000))); } static __forceinline vfloat8 rcp(const vfloat8& a) { +#if defined(__aarch64__) + vfloat8 ret; + const float32x4_t one = vdupq_n_f32(1.0f); + ret.v.lo = vdivq_f32(one, a.v.lo); + ret.v.hi = vdivq_f32(one, a.v.hi); + return ret; +#endif + #if defined(__AVX512VL__) const vfloat8 r = _mm256_rcp14_ps(a); #else @@ -236,9 +268,12 @@ namespace embree #endif #if defined(__AVX2__) - return _mm256_mul_ps(r, _mm256_fnmadd_ps(r, a, vfloat8(2.0f))); + // First, compute 1 - a * r (which will be very close to 0) + const vfloat8 h_n = _mm256_fnmadd_ps(a, r, vfloat8(1.0f)); + // Then compute r + r * h_n + return _mm256_fmadd_ps(r, h_n, r); #else - return _mm256_mul_ps(r, _mm256_sub_ps(vfloat8(2.0f), _mm256_mul_ps(r, a))); + return _mm256_add_ps(r,_mm256_mul_ps(r, _mm256_sub_ps(vfloat8(1.0f), _mm256_mul_ps(a, r)))); // computes r + r * (1 - a * r) #endif } __forceinline vfloat8 sqr (const vfloat8& a) { return _mm256_mul_ps(a,a); } @@ -384,7 +419,7 @@ namespace embree static __forceinline vfloat8 select(const vboolf8& m, const vfloat8& t, const vfloat8& f) { return _mm256_mask_blend_ps(m, f, t); } -#else +#elif !defined(__aarch64__) static __forceinline vboolf8 operator ==(const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_EQ_OQ); } static __forceinline vboolf8 operator !=(const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_NEQ_UQ); } static __forceinline vboolf8 operator < (const vfloat8& a, const vfloat8& b) { return _mm256_cmp_ps(a, b, _CMP_LT_OS); } @@ -395,6 +430,18 @@ namespace embree static __forceinline vfloat8 select(const vboolf8& m, const vfloat8& t, const vfloat8& f) { return _mm256_blendv_ps(f, t, m); } +#else + static __forceinline vboolf8 operator ==(const vfloat8& a, const vfloat8& b) { return _mm256_cmpeq_ps(a, b); } + static __forceinline vboolf8 operator !=(const vfloat8& a, const vfloat8& b) { return _mm256_cmpneq_ps(a, b); } + static __forceinline vboolf8 operator < (const vfloat8& a, const vfloat8& b) { return _mm256_cmplt_ps(a, b); } + static __forceinline vboolf8 operator >=(const vfloat8& a, const vfloat8& b) { return _mm256_cmpge_ps(a, b); } + static __forceinline vboolf8 operator > 
(const vfloat8& a, const vfloat8& b) { return _mm256_cmpgt_ps(a, b); } + static __forceinline vboolf8 operator <=(const vfloat8& a, const vfloat8& b) { return _mm256_cmple_ps(a, b); } + + static __forceinline vfloat8 select(const vboolf8& m, const vfloat8& t, const vfloat8& f) { + return _mm256_blendv_ps(f, t, m); + } + #endif template<int mask> @@ -463,10 +510,17 @@ namespace embree /// Rounding Functions //////////////////////////////////////////////////////////////////////////////// +#if !defined(__aarch64__) __forceinline vfloat8 floor(const vfloat8& a) { return _mm256_round_ps(a, _MM_FROUND_TO_NEG_INF ); } __forceinline vfloat8 ceil (const vfloat8& a) { return _mm256_round_ps(a, _MM_FROUND_TO_POS_INF ); } __forceinline vfloat8 trunc(const vfloat8& a) { return _mm256_round_ps(a, _MM_FROUND_TO_ZERO ); } __forceinline vfloat8 round(const vfloat8& a) { return _mm256_round_ps(a, _MM_FROUND_TO_NEAREST_INT); } +#else + __forceinline vfloat8 floor(const vfloat8& a) { return _mm256_floor_ps(a); } + __forceinline vfloat8 ceil (const vfloat8& a) { return _mm256_ceil_ps(a); } +#endif + + __forceinline vfloat8 frac (const vfloat8& a) { return a-floor(a); } //////////////////////////////////////////////////////////////////////////////// @@ -501,9 +555,11 @@ namespace embree return _mm256_shuffle_ps(a, b, _MM_SHUFFLE(i3, i2, i1, i0)); } +#if !defined(__aarch64__) template<> __forceinline vfloat8 shuffle<0, 0, 2, 2>(const vfloat8& v) { return _mm256_moveldup_ps(v); } template<> __forceinline vfloat8 shuffle<1, 1, 3, 3>(const vfloat8& v) { return _mm256_movehdup_ps(v); } template<> __forceinline vfloat8 shuffle<0, 1, 0, 1>(const vfloat8& v) { return _mm256_castpd_ps(_mm256_movedup_pd(_mm256_castps_pd(v))); } +#endif __forceinline vfloat8 broadcast(const float* ptr) { return _mm256_broadcast_ss(ptr); } template<size_t i> __forceinline vfloat8 insert4(const vfloat8& a, const vfloat4& b) { return _mm256_insertf128_ps(a, b, i); } @@ -512,7 +568,7 @@ namespace embree __forceinline float toScalar(const vfloat8& v) { return _mm_cvtss_f32(_mm256_castps256_ps128(v)); } -#if defined (__AVX2__) +#if defined (__AVX2__) && !defined(__aarch64__) static __forceinline vfloat8 permute(const vfloat8& a, const __m256i& index) { return _mm256_permutevar8x32_ps(a, index); } @@ -609,7 +665,7 @@ namespace embree //////////////////////////////////////////////////////////////////////////////// /// Reductions //////////////////////////////////////////////////////////////////////////////// - +#if !defined(__aarch64__) __forceinline vfloat8 vreduce_min2(const vfloat8& v) { return min(v,shuffle<1,0,3,2>(v)); } __forceinline vfloat8 vreduce_min4(const vfloat8& v) { vfloat8 v1 = vreduce_min2(v); return min(v1,shuffle<2,3,0,1>(v1)); } __forceinline vfloat8 vreduce_min (const vfloat8& v) { vfloat8 v1 = vreduce_min4(v); return min(v1,shuffle4<1,0>(v1)); } @@ -625,7 +681,14 @@ namespace embree __forceinline float reduce_min(const vfloat8& v) { return toScalar(vreduce_min(v)); } __forceinline float reduce_max(const vfloat8& v) { return toScalar(vreduce_max(v)); } __forceinline float reduce_add(const vfloat8& v) { return toScalar(vreduce_add(v)); } +#else + __forceinline float reduce_min(const vfloat8& v) { return vminvq_f32(_mm_min_ps(v.v.lo,v.v.hi)); } + __forceinline float reduce_max(const vfloat8& v) { return vmaxvq_f32(_mm_max_ps(v.v.lo,v.v.hi)); } + __forceinline vfloat8 vreduce_min(const vfloat8& v) { return vfloat8(reduce_min(v)); } + __forceinline vfloat8 vreduce_max(const vfloat8& v) { return vfloat8(reduce_max(v)); } + 
__forceinline float reduce_add(const vfloat8& v) { return vaddvq_f32(_mm_add_ps(v.v.lo,v.v.hi)); } +#endif __forceinline size_t select_min(const vboolf8& valid, const vfloat8& v) { const vfloat8 a = select(valid,v,vfloat8(pos_inf)); @@ -642,7 +705,7 @@ namespace embree //////////////////////////////////////////////////////////////////////////////// - /// Euclidian Space Operators (pairs of Vec3fa's) + /// Euclidean Space Operators (pairs of Vec3fa's) //////////////////////////////////////////////////////////////////////////////// //__forceinline vfloat8 dot(const vfloat8& a, const vfloat8& b) { diff --git a/thirdparty/embree/common/simd/vint4_sse2.h b/thirdparty/embree/common/simd/vint4_sse2.h index 9814d5c71c..eea03a771e 100644 --- a/thirdparty/embree/common/simd/vint4_sse2.h +++ b/thirdparty/embree/common/simd/vint4_sse2.h @@ -106,7 +106,14 @@ namespace embree #endif -#if defined(__SSE4_1__) +#if defined(__aarch64__) + static __forceinline vint4 load(const unsigned char* ptr) { + return _mm_load4epu8_epi32(((__m128i*)ptr)); + } + static __forceinline vint4 loadu(const unsigned char* ptr) { + return _mm_load4epu8_epi32(((__m128i*)ptr)); + } +#elif defined(__SSE4_1__) static __forceinline vint4 load(const unsigned char* ptr) { return _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr)); } @@ -127,7 +134,9 @@ namespace embree #endif static __forceinline vint4 load(const unsigned short* ptr) { -#if defined (__SSE4_1__) +#if defined(__aarch64__) + return __m128i(vmovl_u16(vld1_u16(ptr))); +#elif defined (__SSE4_1__) return _mm_cvtepu16_epi32(_mm_loadu_si128((__m128i*)ptr)); #else return vint4(ptr[0],ptr[1],ptr[2],ptr[3]); @@ -135,7 +144,12 @@ namespace embree } static __forceinline void store(unsigned char* ptr, const vint4& v) { -#if defined(__SSE4_1__) +#if defined(__aarch64__) + int32x4_t x = v; + uint16x4_t y = vqmovn_u32(uint32x4_t(x)); + uint8x8_t z = vqmovn_u16(vcombine_u16(y, y)); + vst1_lane_u32((uint32_t *)ptr,uint32x2_t(z), 0); +#elif defined(__SSE4_1__) __m128i x = v; x = _mm_packus_epi32(x, x); x = _mm_packus_epi16(x, x); @@ -147,20 +161,26 @@ namespace embree } static __forceinline void store(unsigned short* ptr, const vint4& v) { +#if defined(__aarch64__) + uint32x4_t x = uint32x4_t(v.v); + uint16x4_t y = vqmovn_u32(x); + vst1_u16(ptr, y); +#else for (size_t i=0;i<4;i++) ptr[i] = (unsigned short)v[i]; +#endif } static __forceinline vint4 load_nt(void* ptr) { -#if defined(__SSE4_1__) - return _mm_stream_load_si128((__m128i*)ptr); +#if defined(__aarch64__) || defined(__SSE4_1__) + return _mm_stream_load_si128((__m128i*)ptr); #else return _mm_load_si128((__m128i*)ptr); #endif } static __forceinline void store_nt(void* ptr, const vint4& v) { -#if defined(__SSE4_1__) +#if !defined(__aarch64__) && defined(__SSE4_1__) _mm_stream_ps((float*)ptr, _mm_castsi128_ps(v)); #else _mm_store_si128((__m128i*)ptr,v); @@ -169,7 +189,7 @@ namespace embree template<int scale = 4> static __forceinline vint4 gather(const int* ptr, const vint4& index) { -#if defined(__AVX2__) +#if defined(__AVX2__) && !defined(__aarch64__) return _mm_i32gather_epi32(ptr, index, scale); #else return vint4( @@ -185,7 +205,7 @@ namespace embree vint4 r = zero; #if defined(__AVX512VL__) return _mm_mmask_i32gather_epi32(r, mask, index, ptr, scale); -#elif defined(__AVX2__) +#elif defined(__AVX2__) && !defined(__aarch64__) return _mm_mask_i32gather_epi32(r, ptr, index, mask, scale); #else if (likely(mask[0])) r[0] = *(int*)(((char*)ptr)+scale*index[0]); @@ -222,7 +242,7 @@ namespace embree #endif } -#if defined(__x86_64__) +#if 
defined(__x86_64__) || defined(__aarch64__) static __forceinline vint4 broadcast64(long long a) { return _mm_set1_epi64x(a); } #endif @@ -236,6 +256,8 @@ namespace embree friend __forceinline vint4 select(const vboolf4& m, const vint4& t, const vint4& f) { #if defined(__AVX512VL__) return _mm_mask_blend_epi32(m, (__m128i)f, (__m128i)t); +#elif defined(__aarch64__) + return _mm_castps_si128(_mm_blendv_ps((__m128)f.v,(__m128) t.v, (__m128)m.v)); #elif defined(__SSE4_1__) return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), m)); #else @@ -256,7 +278,9 @@ namespace embree __forceinline vint4 operator +(const vint4& a) { return a; } __forceinline vint4 operator -(const vint4& a) { return _mm_sub_epi32(_mm_setzero_si128(), a); } -#if defined(__SSSE3__) +#if defined(__aarch64__) + __forceinline vint4 abs(const vint4& a) { return vabsq_s32(a.v); } +#elif defined(__SSSE3__) __forceinline vint4 abs(const vint4& a) { return _mm_abs_epi32(a); } #endif @@ -272,7 +296,7 @@ namespace embree __forceinline vint4 operator -(const vint4& a, int b) { return a - vint4(b); } __forceinline vint4 operator -(int a, const vint4& b) { return vint4(a) - b; } -#if defined(__SSE4_1__) +#if (defined(__aarch64__)) || defined(__SSE4_1__) __forceinline vint4 operator *(const vint4& a, const vint4& b) { return _mm_mullo_epi32(a, b); } #else __forceinline vint4 operator *(const vint4& a, const vint4& b) { return vint4(a[0]*b[0],a[1]*b[1],a[2]*b[2],a[3]*b[3]); } @@ -292,8 +316,8 @@ namespace embree __forceinline vint4 operator ^(const vint4& a, int b) { return a ^ vint4(b); } __forceinline vint4 operator ^(int a, const vint4& b) { return vint4(a) ^ b; } - __forceinline vint4 operator <<(const vint4& a, int n) { return _mm_slli_epi32(a, n); } - __forceinline vint4 operator >>(const vint4& a, int n) { return _mm_srai_epi32(a, n); } + __forceinline vint4 operator <<(const vint4& a, const int n) { return _mm_slli_epi32(a, n); } + __forceinline vint4 operator >>(const vint4& a, const int n) { return _mm_srai_epi32(a, n); } __forceinline vint4 sll (const vint4& a, int b) { return _mm_slli_epi32(a, b); } __forceinline vint4 sra (const vint4& a, int b) { return _mm_srai_epi32(a, b); } @@ -309,7 +333,7 @@ namespace embree __forceinline vint4& operator -=(vint4& a, const vint4& b) { return a = a - b; } __forceinline vint4& operator -=(vint4& a, int b) { return a = a - b; } -#if defined(__SSE4_1__) +#if (defined(__aarch64__)) || defined(__SSE4_1__) __forceinline vint4& operator *=(vint4& a, const vint4& b) { return a = a * b; } __forceinline vint4& operator *=(vint4& a, int b) { return a = a * b; } #endif @@ -393,7 +417,7 @@ namespace embree #endif } -#if defined(__SSE4_1__) +#if defined(__aarch64__) || defined(__SSE4_1__) __forceinline vint4 min(const vint4& a, const vint4& b) { return _mm_min_epi32(a, b); } __forceinline vint4 max(const vint4& a, const vint4& b) { return _mm_max_epi32(a, b); } @@ -417,6 +441,16 @@ namespace embree __forceinline vint4 unpacklo(const vint4& a, const vint4& b) { return _mm_castps_si128(_mm_unpacklo_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); } __forceinline vint4 unpackhi(const vint4& a, const vint4& b) { return _mm_castps_si128(_mm_unpackhi_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); } +#if defined(__aarch64__) + template<int i0, int i1, int i2, int i3> + __forceinline vint4 shuffle(const vint4& v) { + return vreinterpretq_s32_u8(vqtbl1q_u8( (uint8x16_t)v.v, _MN_SHUFFLE(i0, i1, i2, i3))); + } + template<int i0, int i1, int i2, int i3> + __forceinline vint4 shuffle(const 
vint4& a, const vint4& b) { + return vreinterpretq_s32_u8(vqtbl2q_u8( (uint8x16x2_t){(uint8x16_t)a.v, (uint8x16_t)b.v}, _MF_SHUFFLE(i0, i1, i2, i3))); + } +#else template<int i0, int i1, int i2, int i3> __forceinline vint4 shuffle(const vint4& v) { return _mm_shuffle_epi32(v, _MM_SHUFFLE(i3, i2, i1, i0)); @@ -426,7 +460,7 @@ namespace embree __forceinline vint4 shuffle(const vint4& a, const vint4& b) { return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), _MM_SHUFFLE(i3, i2, i1, i0))); } - +#endif #if defined(__SSE3__) template<> __forceinline vint4 shuffle<0, 0, 2, 2>(const vint4& v) { return _mm_castps_si128(_mm_moveldup_ps(_mm_castsi128_ps(v))); } template<> __forceinline vint4 shuffle<1, 1, 3, 3>(const vint4& v) { return _mm_castps_si128(_mm_movehdup_ps(_mm_castsi128_ps(v))); } @@ -438,7 +472,7 @@ namespace embree return shuffle<i,i,i,i>(v); } -#if defined(__SSE4_1__) +#if defined(__SSE4_1__) && !defined(__aarch64__) template<int src> __forceinline int extract(const vint4& b) { return _mm_extract_epi32(b, src); } template<int dst> __forceinline vint4 insert(const vint4& a, const int b) { return _mm_insert_epi32(a, b, dst); } #else @@ -446,18 +480,27 @@ namespace embree template<int dst> __forceinline vint4 insert(const vint4& a, int b) { vint4 c = a; c[dst&3] = b; return c; } #endif - template<> __forceinline int extract<0>(const vint4& b) { return _mm_cvtsi128_si32(b); } - + __forceinline int toScalar(const vint4& v) { return _mm_cvtsi128_si32(v); } - - __forceinline size_t toSizeT(const vint4& v) { + +#if defined(__aarch64__) + __forceinline size_t toSizeT(const vint4& v) { + uint64x2_t x = uint64x2_t(v.v); + return x[0]; + } +#else +__forceinline size_t toSizeT(const vint4& v) { #if defined(__WIN32__) && !defined(__X86_64__) // win32 workaround return toScalar(v); +#elif defined(__ARM_NEON) + // FIXME(LTE): Do we need a swap(i.e. use lane 1)? 
+ return vgetq_lane_u64(*(reinterpret_cast<const uint64x2_t *>(&v)), 0); #else return _mm_cvtsi128_si64(v); #endif } +#endif #if defined(__AVX512VL__) @@ -475,7 +518,17 @@ namespace embree /// Reductions //////////////////////////////////////////////////////////////////////////////// -#if defined(__SSE4_1__) +#if defined(__aarch64__) || defined(__SSE4_1__) + +#if defined(__aarch64__) + __forceinline vint4 vreduce_min(const vint4& v) { int h = vminvq_s32(v); return vdupq_n_s32(h); } + __forceinline vint4 vreduce_max(const vint4& v) { int h = vmaxvq_s32(v); return vdupq_n_s32(h); } + __forceinline vint4 vreduce_add(const vint4& v) { int h = vaddvq_s32(v); return vdupq_n_s32(h); } + + __forceinline int reduce_min(const vint4& v) { return vminvq_s32(v); } + __forceinline int reduce_max(const vint4& v) { return vmaxvq_s32(v); } + __forceinline int reduce_add(const vint4& v) { return vaddvq_s32(v); } +#else __forceinline vint4 vreduce_min(const vint4& v) { vint4 h = min(shuffle<1,0,3,2>(v),v); return min(shuffle<2,3,0,1>(h),h); } __forceinline vint4 vreduce_max(const vint4& v) { vint4 h = max(shuffle<1,0,3,2>(v),v); return max(shuffle<2,3,0,1>(h),h); } __forceinline vint4 vreduce_add(const vint4& v) { vint4 h = shuffle<1,0,3,2>(v) + v ; return shuffle<2,3,0,1>(h) + h ; } @@ -483,6 +536,7 @@ namespace embree __forceinline int reduce_min(const vint4& v) { return toScalar(vreduce_min(v)); } __forceinline int reduce_max(const vint4& v) { return toScalar(vreduce_max(v)); } __forceinline int reduce_add(const vint4& v) { return toScalar(vreduce_add(v)); } +#endif __forceinline size_t select_min(const vint4& v) { return bsf(movemask(v == vreduce_min(v))); } __forceinline size_t select_max(const vint4& v) { return bsf(movemask(v == vreduce_max(v))); } @@ -502,7 +556,7 @@ namespace embree /// Sorting networks //////////////////////////////////////////////////////////////////////////////// -#if defined(__SSE4_1__) +#if (defined(__aarch64__)) || defined(__SSE4_1__) __forceinline vint4 usort_ascending(const vint4& v) { diff --git a/thirdparty/embree/common/simd/vint8_avx.h b/thirdparty/embree/common/simd/vint8_avx.h index f43e9a8c22..48f5a9b203 100644 --- a/thirdparty/embree/common/simd/vint8_avx.h +++ b/thirdparty/embree/common/simd/vint8_avx.h @@ -79,8 +79,8 @@ namespace embree static __forceinline void store (void* ptr, const vint8& f) { _mm256_store_ps((float*)ptr,_mm256_castsi256_ps(f)); } static __forceinline void storeu(void* ptr, const vint8& f) { _mm256_storeu_ps((float*)ptr,_mm256_castsi256_ps(f)); } - static __forceinline void store (const vboolf8& mask, void* ptr, const vint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask,_mm256_castsi256_ps(f)); } - static __forceinline void storeu(const vboolf8& mask, void* ptr, const vint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask,_mm256_castsi256_ps(f)); } + static __forceinline void store (const vboolf8& mask, void* ptr, const vint8& f) { _mm256_maskstore_ps((float*)ptr,_mm256_castps_si256(mask.v),_mm256_castsi256_ps(f)); } + static __forceinline void storeu(const vboolf8& mask, void* ptr, const vint8& f) { _mm256_maskstore_ps((float*)ptr,_mm256_castps_si256(mask.v),_mm256_castsi256_ps(f)); } static __forceinline void store_nt(void* ptr, const vint8& v) { _mm256_stream_ps((float*)ptr,_mm256_castsi256_ps(v)); diff --git a/thirdparty/embree/common/simd/vint8_avx2.h b/thirdparty/embree/common/simd/vint8_avx2.h index e04737ffbe..d48efac3f4 100644 --- a/thirdparty/embree/common/simd/vint8_avx2.h +++ b/thirdparty/embree/common/simd/vint8_avx2.h @@ 
-393,6 +393,7 @@ namespace embree __forceinline int toScalar(const vint8& v) { return _mm_cvtsi128_si32(_mm256_castsi256_si128(v)); } +#if !defined(__aarch64__) __forceinline vint8 permute(const vint8& v, const __m256i& index) { return _mm256_permutevar8x32_epi32(v, index); } @@ -410,6 +411,9 @@ namespace embree #endif } +#endif + + //////////////////////////////////////////////////////////////////////////////// /// Reductions //////////////////////////////////////////////////////////////////////////////// diff --git a/thirdparty/embree/common/simd/vuint4_sse2.h b/thirdparty/embree/common/simd/vuint4_sse2.h index 0601b9ab80..f7817da6be 100644 --- a/thirdparty/embree/common/simd/vuint4_sse2.h +++ b/thirdparty/embree/common/simd/vuint4_sse2.h @@ -95,7 +95,14 @@ namespace embree static __forceinline void storeu(const vboolf4& mask, void* ptr, const vuint4& i) { storeu(ptr,select(mask,i,loadu(ptr))); } #endif -#if defined(__SSE4_1__) +#if defined(__aarch64__) + static __forceinline vuint4 load(const unsigned char* ptr) { + return _mm_load4epu8_epi32(((__m128i*)ptr)); + } + static __forceinline vuint4 loadu(const unsigned char* ptr) { + return _mm_load4epu8_epi32(((__m128i*)ptr)); + } +#elif defined(__SSE4_1__) static __forceinline vuint4 load(const unsigned char* ptr) { return _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr)); } @@ -107,7 +114,9 @@ namespace embree #endif static __forceinline vuint4 load(const unsigned short* ptr) { -#if defined (__SSE4_1__) +#if defined(__aarch64__) + return _mm_load4epu16_epi32(((__m128i*)ptr)); +#elif defined (__SSE4_1__) return _mm_cvtepu16_epi32(_mm_loadu_si128((__m128i*)ptr)); #else return vuint4(ptr[0],ptr[1],ptr[2],ptr[3]); @@ -115,7 +124,7 @@ namespace embree } static __forceinline vuint4 load_nt(void* ptr) { -#if defined(__SSE4_1__) +#if (defined(__aarch64__)) || defined(__SSE4_1__) return _mm_stream_load_si128((__m128i*)ptr); #else return _mm_load_si128((__m128i*)ptr); @@ -123,8 +132,8 @@ namespace embree } static __forceinline void store_nt(void* ptr, const vuint4& v) { -#if defined(__SSE4_1__) - _mm_stream_ps((float*)ptr,_mm_castsi128_ps(v)); +#if !defined(__aarch64__) && defined(__SSE4_1__) + _mm_stream_ps((float*)ptr, _mm_castsi128_ps(v)); #else _mm_store_si128((__m128i*)ptr,v); #endif @@ -132,7 +141,7 @@ namespace embree template<int scale = 4> static __forceinline vuint4 gather(const unsigned int* ptr, const vint4& index) { -#if defined(__AVX2__) +#if defined(__AVX2__) && !defined(__aarch64__) return _mm_i32gather_epi32((const int*)ptr, index, scale); #else return vuint4( @@ -148,7 +157,7 @@ namespace embree vuint4 r = zero; #if defined(__AVX512VL__) return _mm_mmask_i32gather_epi32(r, mask, index, ptr, scale); -#elif defined(__AVX2__) +#elif defined(__AVX2__) && !defined(__aarch64__) return _mm_mask_i32gather_epi32(r, (const int*)ptr, index, mask, scale); #else if (likely(mask[0])) r[0] = *(unsigned int*)(((char*)ptr)+scale*index[0]); @@ -344,6 +353,16 @@ namespace embree __forceinline vuint4 unpacklo(const vuint4& a, const vuint4& b) { return _mm_castps_si128(_mm_unpacklo_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); } __forceinline vuint4 unpackhi(const vuint4& a, const vuint4& b) { return _mm_castps_si128(_mm_unpackhi_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); } +#if defined(__aarch64__) + template<int i0, int i1, int i2, int i3> + __forceinline vuint4 shuffle(const vuint4& v) { + return vreinterpretq_s32_u8(vqtbl1q_u8( (uint8x16_t)v.v, _MN_SHUFFLE(i0, i1, i2, i3))); + } + template<int i0, int i1, int i2, int i3> + __forceinline 
vuint4 shuffle(const vuint4& a, const vuint4& b) { + return vreinterpretq_s32_u8(vqtbl2q_u8( (uint8x16x2_t){(uint8x16_t)a.v, (uint8x16_t)b.v}, _MF_SHUFFLE(i0, i1, i2, i3))); + } +#else template<int i0, int i1, int i2, int i3> __forceinline vuint4 shuffle(const vuint4& v) { return _mm_shuffle_epi32(v, _MM_SHUFFLE(i3, i2, i1, i0)); @@ -353,7 +372,7 @@ namespace embree __forceinline vuint4 shuffle(const vuint4& a, const vuint4& b) { return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), _MM_SHUFFLE(i3, i2, i1, i0))); } - +#endif #if defined(__SSE3__) template<> __forceinline vuint4 shuffle<0, 0, 2, 2>(const vuint4& v) { return _mm_castps_si128(_mm_moveldup_ps(_mm_castsi128_ps(v))); } template<> __forceinline vuint4 shuffle<1, 1, 3, 3>(const vuint4& v) { return _mm_castps_si128(_mm_movehdup_ps(_mm_castsi128_ps(v))); } @@ -365,7 +384,7 @@ namespace embree return shuffle<i,i,i,i>(v); } -#if defined(__SSE4_1__) +#if defined(__SSE4_1__) && !defined(__aarch64__) template<int src> __forceinline unsigned int extract(const vuint4& b) { return _mm_extract_epi32(b, src); } template<int dst> __forceinline vuint4 insert(const vuint4& a, const unsigned b) { return _mm_insert_epi32(a, b, dst); } #else @@ -373,7 +392,6 @@ namespace embree template<int dst> __forceinline vuint4 insert(const vuint4& a, const unsigned b) { vuint4 c = a; c[dst&3] = b; return c; } #endif - template<> __forceinline unsigned int extract<0>(const vuint4& b) { return _mm_cvtsi128_si32(b); } __forceinline unsigned int toScalar(const vuint4& v) { return _mm_cvtsi128_si32(v); } diff --git a/thirdparty/embree/common/simd/vuint8_avx.h b/thirdparty/embree/common/simd/vuint8_avx.h index 589cd9d731..cb8b5158c1 100644 --- a/thirdparty/embree/common/simd/vuint8_avx.h +++ b/thirdparty/embree/common/simd/vuint8_avx.h @@ -77,8 +77,8 @@ namespace embree static __forceinline void store (void* ptr, const vuint8& f) { _mm256_store_ps((float*)ptr,_mm256_castsi256_ps(f)); } static __forceinline void storeu(void* ptr, const vuint8& f) { _mm256_storeu_ps((float*)ptr,_mm256_castsi256_ps(f)); } - static __forceinline void store (const vboolf8& mask, void* ptr, const vuint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask,_mm256_castsi256_ps(f)); } - static __forceinline void storeu(const vboolf8& mask, void* ptr, const vuint8& f) { _mm256_maskstore_ps((float*)ptr,(__m256i)mask,_mm256_castsi256_ps(f)); } + static __forceinline void store (const vboolf8& mask, void* ptr, const vuint8& f) { _mm256_maskstore_ps((float*)ptr,_mm256_castps_si256(mask.v),_mm256_castsi256_ps(f)); } + static __forceinline void storeu(const vboolf8& mask, void* ptr, const vuint8& f) { _mm256_maskstore_ps((float*)ptr,_mm256_castps_si256(mask.v),_mm256_castsi256_ps(f)); } static __forceinline void store_nt(void* ptr, const vuint8& v) { _mm256_stream_ps((float*)ptr,_mm256_castsi256_ps(v)); diff --git a/thirdparty/embree/common/simd/vuint8_avx2.h b/thirdparty/embree/common/simd/vuint8_avx2.h index 17b994522f..959143724b 100644 --- a/thirdparty/embree/common/simd/vuint8_avx2.h +++ b/thirdparty/embree/common/simd/vuint8_avx2.h @@ -385,6 +385,7 @@ namespace embree __forceinline int toScalar(const vuint8& v) { return _mm_cvtsi128_si32(_mm256_castsi256_si128(v)); } +#if !defined(__aarch64__) __forceinline vuint8 permute(const vuint8& v, const __m256i& index) { return _mm256_permutevar8x32_epi32(v, index); } @@ -401,6 +402,7 @@ namespace embree return _mm256_alignr_epi8(a, b, 4*i); #endif } +#endif // !defined(__aarch64__) 
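The AArch64 shuffle overloads above (added for vfloat4, vint4 and vuint4 alike) route through vqtbl1q_u8/vqtbl2q_u8 with _MN_SHUFFLE/_MF_SHUFFLE index builders that live in the ARM emulation header and are not shown in this diff; presumably each selected 32-bit lane expands to four consecutive byte indices in the table-lookup vector, roughly as in this illustrative sketch (AArch64 only, helper name is mine):

#include <arm_neon.h>
#include <cstdint>

template<int i0, int i1, int i2, int i3>
inline float32x4_t shuffle_f32(float32x4_t v)
{
  // Lane k of the result takes bytes 4*ik .. 4*ik+3 of the input.
  const uint8_t tbl[16] = {
    uint8_t(4*i0+0), uint8_t(4*i0+1), uint8_t(4*i0+2), uint8_t(4*i0+3),
    uint8_t(4*i1+0), uint8_t(4*i1+1), uint8_t(4*i1+2), uint8_t(4*i1+3),
    uint8_t(4*i2+0), uint8_t(4*i2+1), uint8_t(4*i2+2), uint8_t(4*i2+3),
    uint8_t(4*i3+0), uint8_t(4*i3+1), uint8_t(4*i3+2), uint8_t(4*i3+3) };
  const uint8x16_t idx = vld1q_u8(tbl);
  return vreinterpretq_f32_u8(vqtbl1q_u8(vreinterpretq_u8_f32(v), idx));
}

// Example: shuffle_f32<2,3,0,1>(v) swaps the two 64-bit halves, matching
// shuffle<2,3,0,1>(const vfloat4&) on the x86 path.

Because a byte-table lookup handles arbitrary permutations, the x86-specific fast paths (_mm_moveldup_ps, _mm_insert_ps, _mm256_permutevar8x32_epi32 and friends) can simply be compiled out on aarch64, which is what the added !defined(__aarch64__) guards in these files do.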
//////////////////////////////////////////////////////////////////////////////// /// Reductions diff --git a/thirdparty/embree/common/simd/wasm/emulation.h b/thirdparty/embree/common/simd/wasm/emulation.h new file mode 100644 index 0000000000..778ab4ae6a --- /dev/null +++ b/thirdparty/embree/common/simd/wasm/emulation.h @@ -0,0 +1,13 @@ +// Copyright 2009-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +// According to https://emscripten.org/docs/porting/simd.html, _MM_SET_EXCEPTION_MASK and +// _mm_setcsr are unavailable in WebAssembly. + +#define _MM_SET_EXCEPTION_MASK(x) + +__forceinline void _mm_setcsr(unsigned int) +{ +} diff --git a/thirdparty/embree/common/sys/array.h b/thirdparty/embree/common/sys/array.h index dd9190c52a..e96939b63d 100644 --- a/thirdparty/embree/common/sys/array.h +++ b/thirdparty/embree/common/sys/array.h @@ -59,8 +59,8 @@ namespace embree /********************** Iterators ****************************/ - __forceinline T* begin() const { return items; }; - __forceinline T* end () const { return items+M; }; + __forceinline T* begin() const { return (T*)items; }; + __forceinline T* end () const { return (T*)items+M; }; /********************** Capacity ****************************/ @@ -101,8 +101,8 @@ namespace embree __forceinline T& at(size_t i) { assert(i < M); return items[i]; } __forceinline const T& at(size_t i) const { assert(i < M); return items[i]; } - __forceinline T& front() const { assert(M > 0); return items[0]; }; - __forceinline T& back () const { assert(M > 0); return items[M-1]; }; + __forceinline T& front() { assert(M > 0); return items[0]; }; + __forceinline T& back () { assert(M > 0); return items[M-1]; }; __forceinline T* data() { return items; }; __forceinline const T* data() const { return items; }; @@ -139,7 +139,7 @@ namespace embree __forceinline Ty& operator[](const unsigned i) { assert(i<N); return data[i]; } __forceinline const Ty& operator[](const unsigned i) const { assert(i<N); return data[i]; } -#if defined(__64BIT__) +#if defined(__64BIT__) || defined(__EMSCRIPTEN__) __forceinline Ty& operator[](const size_t i) { assert(i<N); return data[i]; } __forceinline const Ty& operator[](const size_t i) const { assert(i<N); return data[i]; } #endif @@ -196,7 +196,7 @@ namespace embree __forceinline Ty& operator[](const int i) { assert(i>=0 && i<max_total_elements); resize(i+1); return data[i]; } __forceinline Ty& operator[](const unsigned i) { assert(i<max_total_elements); resize(i+1); return data[i]; } -#if defined(__64BIT__) +#if defined(__64BIT__) || defined(__EMSCRIPTEN__) __forceinline Ty& operator[](const size_t i) { assert(i<max_total_elements); resize(i+1); return data[i]; } #endif diff --git a/thirdparty/embree/common/sys/barrier.h b/thirdparty/embree/common/sys/barrier.h index 37fc036291..c56513a2ed 100644 --- a/thirdparty/embree/common/sys/barrier.h +++ b/thirdparty/embree/common/sys/barrier.h @@ -24,7 +24,7 @@ namespace embree BarrierSys& operator= (const BarrierSys& other) DELETED; // do not implement public: - /*! intializes the barrier with some number of threads */ + /*! initializes the barrier with some number of threads */ void init(size_t count); /*! lets calling thread wait in barrier */ @@ -94,7 +94,7 @@ namespace embree LinearBarrierActive& operator= (const LinearBarrierActive& other) DELETED; // do not implement public: - /*! intializes the barrier with some number of threads */ + /*! initializes the barrier with some number of threads */ void init(size_t threadCount); /*! 
thread with threadIndex waits in the barrier */ diff --git a/thirdparty/embree/common/sys/intrinsics.h b/thirdparty/embree/common/sys/intrinsics.h index ed8dd7d40a..2c2f6eccda 100644 --- a/thirdparty/embree/common/sys/intrinsics.h +++ b/thirdparty/embree/common/sys/intrinsics.h @@ -13,6 +13,9 @@ #include "../simd/arm/emulation.h" #else #include <immintrin.h> +#if defined(__EMSCRIPTEN__) +#include "../simd/wasm/emulation.h" +#endif #endif #if defined(__BMI__) && defined(__GNUC__) && !defined(__INTEL_COMPILER) @@ -24,24 +27,26 @@ #endif #endif -#if defined(__LZCNT__) +#if defined(__aarch64__) #if !defined(_lzcnt_u32) - #define _lzcnt_u32 __lzcnt32 + #define _lzcnt_u32 __builtin_clz #endif - #if !defined(_lzcnt_u64) - #define _lzcnt_u64 __lzcnt64 +#else + #if defined(__LZCNT__) + #if !defined(_lzcnt_u32) + #define _lzcnt_u32 __lzcnt32 + #endif + #if !defined(_lzcnt_u64) + #define _lzcnt_u64 __lzcnt64 + #endif #endif #endif #if defined(__WIN32__) -// -- GODOT start -- -#if !defined(NOMINMAX) -// -- GODOT end -- -#define NOMINMAX -// -- GODOT start -- -#endif -#include "windows.h" -// -- GODOT end -- +# if !defined(NOMINMAX) +# define NOMINMAX +# endif +# include <windows.h> #endif /* normally defined in pmmintrin.h, but we always need this */ @@ -69,7 +74,7 @@ namespace embree } __forceinline int bsf(int v) { -#if defined(__AVX2__) +#if defined(__AVX2__) && !defined(__aarch64__) return _tzcnt_u32(v); #else unsigned long r = 0; _BitScanForward(&r,v); return r; @@ -77,7 +82,7 @@ namespace embree } __forceinline unsigned bsf(unsigned v) { -#if defined(__AVX2__) +#if defined(__AVX2__) && !defined(__aarch64__) return _tzcnt_u32(v); #else unsigned long r = 0; _BitScanForward(&r,v); return r; @@ -118,7 +123,7 @@ namespace embree #endif __forceinline int bsr(int v) { -#if defined(__AVX2__) +#if defined(__AVX2__) && !defined(__aarch64__) return 31 - _lzcnt_u32(v); #else unsigned long r = 0; _BitScanReverse(&r,v); return r; @@ -126,7 +131,7 @@ namespace embree } __forceinline unsigned bsr(unsigned v) { -#if defined(__AVX2__) +#if defined(__AVX2__) && !defined(__aarch64__) return 31 - _lzcnt_u32(v); #else unsigned long r = 0; _BitScanReverse(&r,v); return r; @@ -145,7 +150,7 @@ namespace embree __forceinline int lzcnt(const int x) { -#if defined(__AVX2__) +#if defined(__AVX2__) && !defined(__aarch64__) return _lzcnt_u32(x); #else if (unlikely(x == 0)) return 32; @@ -214,15 +219,26 @@ namespace embree #elif defined(__X86_ASM__) __forceinline void __cpuid(int out[4], int op) { - asm volatile ("cpuid" : "=a"(out[0]), "=b"(out[1]), "=c"(out[2]), "=d"(out[3]) : "a"(op)); +#if defined(__ARM_NEON) + if (op == 0) { // Get CPU name + out[0] = 0x41524d20; + out[1] = 0x41524d20; + out[2] = 0x41524d20; + out[3] = 0x41524d20; + } +#else + asm volatile ("cpuid" : "=a"(out[0]), "=b"(out[1]), "=c"(out[2]), "=d"(out[3]) : "a"(op)); +#endif } - + +#if !defined(__ARM_NEON) __forceinline void __cpuid_count(int out[4], int op1, int op2) { asm volatile ("cpuid" : "=a"(out[0]), "=b"(out[1]), "=c"(out[2]), "=d"(out[3]) : "a"(op1), "c"(op2)); } - #endif - + +#endif + __forceinline uint64_t read_tsc() { #if defined(__X86_ASM__) uint32_t high,low; @@ -235,30 +251,38 @@ namespace embree } __forceinline int bsf(int v) { -#if defined(__AVX2__) +#if defined(__ARM_NEON) + return __builtin_ctz(v); +#else +#if defined(__AVX2__) return _tzcnt_u32(v); #elif defined(__X86_ASM__) int r = 0; asm ("bsf %1,%0" : "=r"(r) : "r"(v)); return r; #else return __builtin_ctz(v); #endif +#endif } #if defined(__64BIT__) __forceinline unsigned 
bsf(unsigned v) { -#if defined(__AVX2__) +#if defined(__ARM_NEON) + return __builtin_ctz(v); +#else +#if defined(__AVX2__) return _tzcnt_u32(v); #elif defined(__X86_ASM__) unsigned r = 0; asm ("bsf %1,%0" : "=r"(r) : "r"(v)); return r; #else return __builtin_ctz(v); #endif +#endif } #endif __forceinline size_t bsf(size_t v) { -#if defined(__AVX2__) +#if defined(__AVX2__) && !defined(__aarch64__) #if defined(__X86_64__) return _tzcnt_u64(v); #else @@ -295,7 +319,7 @@ namespace embree } __forceinline int bsr(int v) { -#if defined(__AVX2__) +#if defined(__AVX2__) && !defined(__aarch64__) return 31 - _lzcnt_u32(v); #elif defined(__X86_ASM__) int r = 0; asm ("bsr %1,%0" : "=r"(r) : "r"(v)); return r; @@ -304,7 +328,7 @@ namespace embree #endif } -#if defined(__64BIT__) +#if defined(__64BIT__) || defined(__EMSCRIPTEN__) __forceinline unsigned bsr(unsigned v) { #if defined(__AVX2__) return 31 - _lzcnt_u32(v); @@ -317,7 +341,7 @@ namespace embree #endif __forceinline size_t bsr(size_t v) { -#if defined(__AVX2__) +#if defined(__AVX2__) && !defined(__aarch64__) #if defined(__X86_64__) return 63 - _lzcnt_u64(v); #else @@ -332,7 +356,7 @@ namespace embree __forceinline int lzcnt(const int x) { -#if defined(__AVX2__) +#if defined(__AVX2__) && !defined(__aarch64__) return _lzcnt_u32(x); #else if (unlikely(x == 0)) return 32; @@ -341,18 +365,18 @@ namespace embree } __forceinline size_t blsr(size_t v) { -#if defined(__AVX2__) -#if defined(__INTEL_COMPILER) +#if defined(__AVX2__) && !defined(__aarch64__) + #if defined(__INTEL_COMPILER) return _blsr_u64(v); + #else + #if defined(__X86_64__) + return __blsr_u64(v); + #else + return __blsr_u32(v); + #endif + #endif #else -#if defined(__X86_64__) - return __blsr_u64(v); -#else - return __blsr_u32(v); -#endif -#endif -#else - return v & (v-1); + return v & (v-1); #endif } @@ -368,7 +392,7 @@ namespace embree #if defined(__X86_ASM__) int r = 0; asm ("bts %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r; #else - return (v | (v << i)); + return (v | (1 << i)); #endif } @@ -376,7 +400,7 @@ namespace embree #if defined(__X86_ASM__) int r = 0; asm ("btr %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r; #else - return (v & ~(v << i)); + return (v & ~(1 << i)); #endif } @@ -392,7 +416,7 @@ namespace embree #if defined(__X86_ASM__) size_t r = 0; asm ("bts %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r; #else - return (v | (v << i)); + return (v | (1 << i)); #endif } @@ -400,7 +424,7 @@ namespace embree #if defined(__X86_ASM__) size_t r = 0; asm ("btr %1,%0" : "=r"(r) : "r"(i), "0"(v) : "flags"); return r; #else - return (v & ~(v << i)); + return (v & ~(1 << i)); #endif } @@ -435,8 +459,8 @@ namespace embree #endif #endif -#if defined(__SSE4_2__) - +#if defined(__SSE4_2__) || defined(__ARM_NEON) + __forceinline int popcnt(int in) { return _mm_popcnt_u32(in); } @@ -483,14 +507,14 @@ namespace embree #endif } - __forceinline void prefetchL1EX(const void* ptr) { - prefetchEX(ptr); + __forceinline void prefetchL1EX(const void* ptr) { + prefetchEX(ptr); } - - __forceinline void prefetchL2EX(const void* ptr) { - prefetchEX(ptr); + + __forceinline void prefetchL2EX(const void* ptr) { + prefetchEX(ptr); } -#if defined(__AVX2__) +#if defined(__AVX2__) && !defined(__aarch64__) __forceinline unsigned int pext(unsigned int a, unsigned int b) { return _pext_u32(a, b); } __forceinline unsigned int pdep(unsigned int a, unsigned int b) { return _pdep_u32(a, b); } #if defined(__X86_64__) diff --git a/thirdparty/embree/common/sys/mutex.cpp 
b/thirdparty/embree/common/sys/mutex.cpp index 789feaf2d8..8212deaa49 100644 --- a/thirdparty/embree/common/sys/mutex.cpp +++ b/thirdparty/embree/common/sys/mutex.cpp @@ -36,6 +36,7 @@ namespace embree MAYBE_UNUSED bool ok = pthread_mutex_destroy((pthread_mutex_t*)mutex) == 0; assert(ok); delete (pthread_mutex_t*)mutex; + mutex = nullptr; } void MutexSys::lock() diff --git a/thirdparty/embree/common/sys/mutex.h b/thirdparty/embree/common/sys/mutex.h index 4cb3626d92..26af6c582c 100644 --- a/thirdparty/embree/common/sys/mutex.h +++ b/thirdparty/embree/common/sys/mutex.h @@ -7,6 +7,7 @@ #include "intrinsics.h" #include "atomic.h" +#define CPU_CACHELINE_SIZE 64 namespace embree { /*! system mutex */ @@ -83,6 +84,11 @@ namespace embree atomic<bool> flag; }; + class PaddedSpinLock : public SpinLock + { + private: + char padding[CPU_CACHELINE_SIZE - sizeof(SpinLock)]; + }; /*! safe mutex lock and unlock helper */ template<typename Mutex> class Lock { public: diff --git a/thirdparty/embree/common/sys/platform.h b/thirdparty/embree/common/sys/platform.h index 3e386c4944..728bf6ed7d 100644 --- a/thirdparty/embree/common/sys/platform.h +++ b/thirdparty/embree/common/sys/platform.h @@ -92,16 +92,19 @@ //////////////////////////////////////////////////////////////////////////////// #ifdef __WIN32__ -#define dll_export __declspec(dllexport) -#define dll_import __declspec(dllimport) +# if defined(EMBREE_STATIC_LIB) +# define dll_export +# define dll_import +# else +# define dll_export __declspec(dllexport) +# define dll_import __declspec(dllimport) +# endif #else -#define dll_export __attribute__ ((visibility ("default"))) -#define dll_import +# define dll_export __attribute__ ((visibility ("default"))) +# define dll_import #endif -// -- GODOT start -- #if defined(__WIN32__) && !defined(__MINGW32__) -// -- GODOT end -- #if !defined(__noinline) #define __noinline __declspec(noinline) #endif @@ -151,9 +154,7 @@ #define DELETED = delete #endif -// -- GODOT start -- #if !defined(likely) -// -- GODOT end -- #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) #define likely(expr) (expr) #define unlikely(expr) (expr) @@ -161,9 +162,7 @@ #define likely(expr) __builtin_expect((bool)(expr),true ) #define unlikely(expr) __builtin_expect((bool)(expr),false) #endif -// -- GODOT start -- #endif -// -- GODOT end -- //////////////////////////////////////////////////////////////////////////////// /// Error handling and debugging @@ -252,6 +251,7 @@ __forceinline std::string toString(long long value) { #pragma warning(disable:4800) // forcing value to bool 'true' or 'false' (performance warning) //#pragma warning(disable:4267) // '=' : conversion from 'size_t' to 'unsigned long', possible loss of data #pragma warning(disable:4244) // 'argument' : conversion from 'ssize_t' to 'unsigned int', possible loss of data +#pragma warning(disable:4267) // conversion from 'size_t' to 'const int', possible loss of data //#pragma warning(disable:4355) // 'this' : used in base member initializer list //#pragma warning(disable:391 ) // '<=' : signed / unsigned mismatch //#pragma warning(disable:4018) // '<' : signed / unsigned mismatch diff --git a/thirdparty/embree/common/sys/sysinfo.cpp b/thirdparty/embree/common/sys/sysinfo.cpp index f1a59e511e..c98f61fa53 100644 --- a/thirdparty/embree/common/sys/sysinfo.cpp +++ b/thirdparty/embree/common/sys/sysinfo.cpp @@ -21,7 +21,11 @@ namespace embree std::string getPlatformName() { -#if defined(__LINUX__) && !defined(__64BIT__) +#if defined(__ANDROID__) && !defined(__64BIT__) + return 
"Android (32bit)"; +#elif defined(__ANDROID__) && defined(__64BIT__) + return "Android (64bit)"; +#elif defined(__LINUX__) && !defined(__64BIT__) return "Linux (32bit)"; #elif defined(__LINUX__) && defined(__64BIT__) return "Linux (64bit)"; @@ -248,9 +252,7 @@ namespace embree #if defined(__X86_ASM__) __noinline int64_t get_xcr0() { -// -- GODOT start -- -#if defined (__WIN32__) && !defined (__MINGW32__) -// -- GODOT end -- +#if defined (__WIN32__) && !defined (__MINGW32__) && defined(_XCR_XFEATURE_ENABLED_MASK) int64_t xcr0 = 0; // int64_t is workaround for compiler bug under VS2013, Win32 xcr0 = _xgetbv(0); return xcr0; @@ -337,9 +339,24 @@ namespace embree if (cpuid_leaf_7[ECX] & CPU_FEATURE_BIT_AVX512VBMI) cpu_features |= CPU_FEATURE_AVX512VBMI; return cpu_features; -#elif defined(__ARM_NEON) - /* emulated features with sse2neon */ - return CPU_FEATURE_SSE|CPU_FEATURE_SSE2|CPU_FEATURE_XMM_ENABLED; + +#elif defined(__ARM_NEON) || defined(__EMSCRIPTEN__) + + int cpu_features = CPU_FEATURE_NEON|CPU_FEATURE_SSE|CPU_FEATURE_SSE2; + cpu_features |= CPU_FEATURE_SSE3|CPU_FEATURE_SSSE3|CPU_FEATURE_SSE42; + cpu_features |= CPU_FEATURE_XMM_ENABLED; + cpu_features |= CPU_FEATURE_YMM_ENABLED; + cpu_features |= CPU_FEATURE_SSE41 | CPU_FEATURE_RDRAND | CPU_FEATURE_F16C; + cpu_features |= CPU_FEATURE_POPCNT; + cpu_features |= CPU_FEATURE_AVX; + cpu_features |= CPU_FEATURE_AVX2; + cpu_features |= CPU_FEATURE_FMA3; + cpu_features |= CPU_FEATURE_LZCNT; + cpu_features |= CPU_FEATURE_BMI1; + cpu_features |= CPU_FEATURE_BMI2; + cpu_features |= CPU_FEATURE_NEON_2X; + return cpu_features; + #else /* Unknown CPU. */ return 0; @@ -376,6 +393,8 @@ namespace embree if (features & CPU_FEATURE_AVX512VL) str += "AVX512VL "; if (features & CPU_FEATURE_AVX512IFMA) str += "AVX512IFMA "; if (features & CPU_FEATURE_AVX512VBMI) str += "AVX512VBMI "; + if (features & CPU_FEATURE_NEON) str += "NEON "; + if (features & CPU_FEATURE_NEON_2X) str += "2xNEON "; return str; } @@ -390,6 +409,9 @@ namespace embree if (isa == AVX) return "AVX"; if (isa == AVX2) return "AVX2"; if (isa == AVX512) return "AVX512"; + + if (isa == NEON) return "NEON"; + if (isa == NEON_2X) return "2xNEON"; return "UNKNOWN"; } @@ -410,6 +432,9 @@ namespace embree if (hasISA(features,AVXI)) v += "AVXI "; if (hasISA(features,AVX2)) v += "AVX2 "; if (hasISA(features,AVX512)) v += "AVX512 "; + + if (hasISA(features,NEON)) v += "NEON "; + if (hasISA(features,NEON_2X)) v += "2xNEON "; return v; } } @@ -613,6 +638,10 @@ namespace embree #include <sys/time.h> #include <pthread.h> +#if defined(__EMSCRIPTEN__) +#include <emscripten.h> +#endif + namespace embree { unsigned int getNumberOfLogicalThreads() @@ -620,12 +649,25 @@ namespace embree static int nThreads = -1; if (nThreads != -1) return nThreads; -// -- GODOT start -- -// #if defined(__MACOSX__) #if defined(__MACOSX__) || defined(__ANDROID__) -// -- GODOT end -- nThreads = sysconf(_SC_NPROCESSORS_ONLN); // does not work in Linux LXC container assert(nThreads); +#elif defined(__EMSCRIPTEN__) + // WebAssembly supports pthreads, but not pthread_getaffinity_np. Get the number of logical + // threads from the browser or Node.js using JavaScript. + nThreads = MAIN_THREAD_EM_ASM_INT({ + const isBrowser = typeof window !== 'undefined'; + const isNode = typeof process !== 'undefined' && process.versions != null && + process.versions.node != null; + if (isBrowser) { + // Return 1 if the browser does not expose hardwareConcurrency. 
+ return window.navigator.hardwareConcurrency || 1; + } else if (isNode) { + return require('os').cpus().length; + } else { + return 1; + } + }); #else cpu_set_t set; if (pthread_getaffinity_np(pthread_self(), sizeof(set), &set) == 0) diff --git a/thirdparty/embree/common/sys/sysinfo.h b/thirdparty/embree/common/sys/sysinfo.h index 72351d12e4..cefd39a0f6 100644 --- a/thirdparty/embree/common/sys/sysinfo.h +++ b/thirdparty/embree/common/sys/sysinfo.h @@ -55,7 +55,12 @@ # define isa sse # define ISA SSE # define ISA_STR "SSE" -#else +#elif defined(__ARM_NEON) +// NOTE(LTE): Use sse2 for `isa` for the compatibility at the moment. +#define isa sse2 +#define ISA NEON +#define ISA_STR "NEON" +#else #error Unknown ISA #endif @@ -133,7 +138,9 @@ namespace embree static const int CPU_FEATURE_XMM_ENABLED = 1 << 25; static const int CPU_FEATURE_YMM_ENABLED = 1 << 26; static const int CPU_FEATURE_ZMM_ENABLED = 1 << 27; - + static const int CPU_FEATURE_NEON = 1 << 28; + static const int CPU_FEATURE_NEON_2X = 1 << 29; + /*! get CPU features */ int getCPUFeatures(); @@ -154,6 +161,8 @@ namespace embree static const int AVXI = AVX | CPU_FEATURE_F16C | CPU_FEATURE_RDRAND; static const int AVX2 = AVXI | CPU_FEATURE_AVX2 | CPU_FEATURE_FMA3 | CPU_FEATURE_BMI1 | CPU_FEATURE_BMI2 | CPU_FEATURE_LZCNT; static const int AVX512 = AVX2 | CPU_FEATURE_AVX512F | CPU_FEATURE_AVX512DQ | CPU_FEATURE_AVX512CD | CPU_FEATURE_AVX512BW | CPU_FEATURE_AVX512VL | CPU_FEATURE_ZMM_ENABLED; + static const int NEON = CPU_FEATURE_NEON | CPU_FEATURE_SSE | CPU_FEATURE_SSE2; + static const int NEON_2X = CPU_FEATURE_NEON_2X | AVX2; /*! converts ISA bitvector into a string */ std::string stringOfISA(int features); diff --git a/thirdparty/embree/common/sys/thread.cpp b/thirdparty/embree/common/sys/thread.cpp index f4014be89b..530c3c7810 100644 --- a/thirdparty/embree/common/sys/thread.cpp +++ b/thirdparty/embree/common/sys/thread.cpp @@ -10,6 +10,9 @@ #include "../simd/arm/emulation.h" #else #include <xmmintrin.h> +#if defined(__EMSCRIPTEN__) +#include "../simd/wasm/emulation.h" +#endif #endif #if defined(PTHREADS_WIN32) @@ -158,9 +161,7 @@ namespace embree /// Linux Platform //////////////////////////////////////////////////////////////////////////////// -// -- GODOT start -- #if defined(__LINUX__) && !defined(__ANDROID__) -// -- GODOT end -- #include <fstream> #include <sstream> @@ -219,6 +220,8 @@ namespace embree /* find correct thread to affinitize to */ cpu_set_t set; + CPU_ZERO(&set); + if (pthread_getaffinity_np(pthread_self(), sizeof(set), &set) == 0) { for (int i=0, j=0; i<CPU_SETSIZE; i++) @@ -241,7 +244,8 @@ namespace embree { cpu_set_t cset; CPU_ZERO(&cset); - size_t threadID = mapThreadID(affinity); + //size_t threadID = mapThreadID(affinity); // this is not working properly in LXC containers when some processors are disabled + size_t threadID = affinity; CPU_SET(threadID, &cset); pthread_setaffinity_np(pthread_self(), sizeof(cset), &cset); @@ -249,7 +253,6 @@ namespace embree } #endif -// -- GODOT start -- //////////////////////////////////////////////////////////////////////////////// /// Android Platform //////////////////////////////////////////////////////////////////////////////// @@ -269,7 +272,6 @@ namespace embree } } #endif -// -- GODOT end -- //////////////////////////////////////////////////////////////////////////////// /// FreeBSD Platform @@ -294,6 +296,21 @@ namespace embree #endif //////////////////////////////////////////////////////////////////////////////// +/// WebAssembly Platform 
diff --git a/thirdparty/embree/common/sys/thread.cpp b/thirdparty/embree/common/sys/thread.cpp
index f4014be89b..530c3c7810 100644
--- a/thirdparty/embree/common/sys/thread.cpp
+++ b/thirdparty/embree/common/sys/thread.cpp
@@ -10,6 +10,9 @@
 #include "../simd/arm/emulation.h"
 #else
 #include <xmmintrin.h>
+#if defined(__EMSCRIPTEN__)
+#include "../simd/wasm/emulation.h"
+#endif
 #endif
 
 #if defined(PTHREADS_WIN32)
@@ -158,9 +161,7 @@ namespace embree
 /// Linux Platform
 ////////////////////////////////////////////////////////////////////////////////
 
-// -- GODOT start --
 #if defined(__LINUX__) && !defined(__ANDROID__)
-// -- GODOT end --
 
 #include <fstream>
 #include <sstream>
@@ -219,6 +220,8 @@ namespace embree
 
     /* find correct thread to affinitize to */
     cpu_set_t set;
+    CPU_ZERO(&set);
+
     if (pthread_getaffinity_np(pthread_self(), sizeof(set), &set) == 0)
     {
       for (int i=0, j=0; i<CPU_SETSIZE; i++)
@@ -241,7 +244,8 @@
   {
     cpu_set_t cset;
     CPU_ZERO(&cset);
-    size_t threadID = mapThreadID(affinity);
+    //size_t threadID = mapThreadID(affinity); // this is not working properly in LXC containers when some processors are disabled
+    size_t threadID = affinity;
     CPU_SET(threadID, &cset);
 
     pthread_setaffinity_np(pthread_self(), sizeof(cset), &cset);
@@ -249,7 +253,6 @@
   }
 #endif
 
-// -- GODOT start --
 ////////////////////////////////////////////////////////////////////////////////
 /// Android Platform
 ////////////////////////////////////////////////////////////////////////////////
@@ -269,7 +272,6 @@
     }
   }
 #endif
-// -- GODOT end --
 ////////////////////////////////////////////////////////////////////////////////
 /// FreeBSD Platform
 ////////////////////////////////////////////////////////////////////////////////
@@ -294,6 +296,21 @@
 #endif
 
 ////////////////////////////////////////////////////////////////////////////////
+/// WebAssembly Platform
+////////////////////////////////////////////////////////////////////////////////
+
+#if defined(__EMSCRIPTEN__)
+namespace embree
+{
+  /*! set affinity of the calling thread */
+  void setAffinity(ssize_t affinity)
+  {
+    // Setting thread affinity is not supported in WASM.
+  }
+}
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
 /// MacOSX Platform
 ////////////////////////////////////////////////////////////////////////////////
@@ -379,9 +396,7 @@ namespace embree
     pthread_attr_destroy(&attr);
 
     /* set affinity */
-// -- GODOT start --
 #if defined(__LINUX__) && !defined(__ANDROID__)
-// -- GODOT end --
     if (threadID >= 0) {
       cpu_set_t cset;
       CPU_ZERO(&cset);
@@ -396,7 +411,6 @@ namespace embree
       CPU_SET(threadID, &cset);
       pthread_setaffinity_np(*tid, sizeof(cset), &cset);
     }
-// -- GODOT start --
 #elif defined(__ANDROID__)
     if (threadID >= 0) {
       cpu_set_t cset;
@@ -405,7 +419,6 @@ namespace embree
       sched_setaffinity(pthread_gettid_np(*tid), sizeof(cset), &cset);
     }
 #endif
-// -- GODOT end --
 
     return thread_t(tid);
   }
@@ -424,14 +437,12 @@ namespace embree
   /*! destroy a hardware thread by its handle */
   void destroyThread(thread_t tid)
   {
-// -- GODOT start --
 #if defined(__ANDROID__)
-    FATAL("Can't destroy threads on Android.");
+    FATAL("Can't destroy threads on Android."); // pthread_cancel not implemented.
 #else
     pthread_cancel(*(pthread_t*)tid);
     delete (pthread_t*)tid;
 #endif
-// -- GODOT end --
   }
 
   /*! creates thread local storage */
diff --git a/thirdparty/embree/common/sys/vector.h b/thirdparty/embree/common/sys/vector.h
index f832626789..d05e1deb18 100644
--- a/thirdparty/embree/common/sys/vector.h
+++ b/thirdparty/embree/common/sys/vector.h
@@ -127,14 +127,15 @@ namespace embree
     {
       assert(!empty());
       size_active--;
-      alloc.destroy(&items[size_active]);
+      items[size_active].~T();
     }
 
     __forceinline void clear()
     {
       /* destroy elements */
-      for (size_t i=0; i<size_active; i++)
-        alloc.destroy(&items[i]);
+      for (size_t i=0; i<size_active; i++){
+        items[i].~T();
+      }
 
       /* free memory */
       alloc.deallocate(items,size_alloced);
@@ -178,8 +179,9 @@ namespace embree
       /* destroy elements */
       if (new_active < size_active)
       {
-        for (size_t i=new_active; i<size_active; i++)
-          alloc.destroy(&items[i]);
+        for (size_t i=new_active; i<size_active; i++){
+          items[i].~T();
+        }
         size_active = new_active;
       }
 
@@ -195,7 +197,7 @@ namespace embree
       items = alloc.allocate(new_alloced);
       for (size_t i=0; i<size_active; i++) {
         ::new (&items[i]) T(std::move(old_items[i]));
-        alloc.destroy(&old_items[i]);
+        old_items[i].~T();
       }
 
       for (size_t i=size_active; i<new_active; i++) {
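
The vector.h hunks above replace alloc.destroy(...) with direct destructor calls; std::allocator<T>::destroy was deprecated in C++17 and removed in C++20, which is likely the motivation. A self-contained sketch of the same placement-new / explicit-destructor pattern, for illustration only (not embree code):

#include <new>
#include <string>

int main()
{
  using T = std::string;
  alignas(T) unsigned char storage[sizeof(T)];

  T* p = ::new (static_cast<void*>(storage)) T("hello");  // construct in raw storage (placement new)
  p->~T();                                                 // destroy explicitly, as vector.h now does
  return 0;
}
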
diff --git a/thirdparty/embree/common/tasking/taskschedulerinternal.h b/thirdparty/embree/common/tasking/taskschedulerinternal.h
index 8fa6bb12fa..6cc2495195 100644
--- a/thirdparty/embree/common/tasking/taskschedulerinternal.h
+++ b/thirdparty/embree/common/tasking/taskschedulerinternal.h
@@ -143,7 +143,7 @@ namespace embree
         /* allocate new task on right side of stack */
         size_t oldStackPtr = stackPtr;
         TaskFunction* func = new (alloc(sizeof(ClosureTaskFunction<Closure>))) ClosureTaskFunction<Closure>(closure);
-        new (&tasks[right]) Task(func,thread.task,oldStackPtr,size);
+        new (&(tasks[right.load()])) Task(func,thread.task,oldStackPtr,size);
         right++;
 
         /* also move left pointer */
diff --git a/thirdparty/embree/common/tasking/taskschedulertbb.h b/thirdparty/embree/common/tasking/taskschedulertbb.h
index 35bd49849f..042ba7bc4c 100644
--- a/thirdparty/embree/common/tasking/taskschedulertbb.h
+++ b/thirdparty/embree/common/tasking/taskschedulertbb.h
@@ -11,14 +11,8 @@
 #include "../sys/condition.h"
 #include "../sys/ref.h"
 
-#if defined(__WIN32__)
-// -- GODOT start --
-#if !defined(NOMINMAX)
-// -- GODOT end --
+#if defined(__WIN32__) && !defined(NOMINMAX)
 # define NOMINMAX
-// -- GODOT start --
-#endif
-// -- GODOT end --
 #endif
 
 // We need to define these to avoid implicit linkage against |