| author | Rémi Verschelde <rverschelde@gmail.com> | 2023-02-10 18:44:23 +0100 |
|---|---|---|
| committer | Rémi Verschelde <rverschelde@gmail.com> | 2023-02-10 18:44:23 +0100 |
| commit | ac683301302ef02eeb5045e9a7e80587ae9760dd (patch) | |
| tree | cbdaa505744c1eb2a5a2fb2265d29fa92f0f3cfd /thirdparty/libwebp/src/dsp/dec_sse2.c | |
| parent | e52a2e3864741bba58ba07e89220830201b54266 (diff) | |
| parent | d8e8517d1174e8593338329a0535da178444817a (diff) | |
Merge pull request #72045 from DeeJayLSP/update_libwebp
libwebp: Sync with upstream 1.3.0
Diffstat (limited to 'thirdparty/libwebp/src/dsp/dec_sse2.c')
-rw-r--r-- | thirdparty/libwebp/src/dsp/dec_sse2.c | 93 |
1 file changed, 47 insertions, 46 deletions
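For context on the hunks below: libwebp 1.3.0 replaces the four-byte memory helpers WebPMemToUint32()/WebPUint32ToMem() with WebPMemToInt32()/WebPInt32ToMem(), whose scalar type matches the `int` taken by `_mm_cvtsi32_si128()` and returned by `_mm_cvtsi128_si32()`, so the call sites no longer need implicit unsigned/signed conversions. As a rough sketch only (these are not the actual libwebp definitions; the `_sketch` names are illustrative), such helpers are typically thin memcpy-based unaligned accessors:

```c
#include <stdint.h>
#include <string.h>

/* Unaligned 4-byte load; the int32_t result can be passed directly to
 * _mm_cvtsi32_si128(), which takes an int. */
static int32_t MemToInt32_sketch(const uint8_t* const ptr) {
  int32_t val;
  memcpy(&val, ptr, sizeof(val));
  return val;
}

/* Unaligned 4-byte store of the int returned by _mm_cvtsi128_si32(). */
static void Int32ToMem_sketch(uint8_t* const ptr, const int32_t val) {
  memcpy(ptr, &val, sizeof(val));
}
```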
```diff
diff --git a/thirdparty/libwebp/src/dsp/dec_sse2.c b/thirdparty/libwebp/src/dsp/dec_sse2.c
index 873aa59e8a..01e6bcb636 100644
--- a/thirdparty/libwebp/src/dsp/dec_sse2.c
+++ b/thirdparty/libwebp/src/dsp/dec_sse2.c
@@ -158,10 +158,10 @@ static void Transform_SSE2(const int16_t* in, uint8_t* dst, int do_two) {
       dst3 = _mm_loadl_epi64((__m128i*)(dst + 3 * BPS));
     } else {
       // Load four bytes/pixels per line.
-      dst0 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 0 * BPS));
-      dst1 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 1 * BPS));
-      dst2 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 2 * BPS));
-      dst3 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 3 * BPS));
+      dst0 = _mm_cvtsi32_si128(WebPMemToInt32(dst + 0 * BPS));
+      dst1 = _mm_cvtsi32_si128(WebPMemToInt32(dst + 1 * BPS));
+      dst2 = _mm_cvtsi32_si128(WebPMemToInt32(dst + 2 * BPS));
+      dst3 = _mm_cvtsi32_si128(WebPMemToInt32(dst + 3 * BPS));
     }
     // Convert to 16b.
     dst0 = _mm_unpacklo_epi8(dst0, zero);
@@ -187,10 +187,10 @@ static void Transform_SSE2(const int16_t* in, uint8_t* dst, int do_two) {
       _mm_storel_epi64((__m128i*)(dst + 3 * BPS), dst3);
     } else {
       // Store four bytes/pixels per line.
-      WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(dst0));
-      WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(dst1));
-      WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(dst2));
-      WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(dst3));
+      WebPInt32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(dst0));
+      WebPInt32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(dst1));
+      WebPInt32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(dst2));
+      WebPInt32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(dst3));
     }
   }
 }
@@ -213,10 +213,10 @@ static void TransformAC3(const int16_t* in, uint8_t* dst) {
   const __m128i m3 = _mm_subs_epi16(B, d4);
   const __m128i zero = _mm_setzero_si128();
   // Load the source pixels.
-  __m128i dst0 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 0 * BPS));
-  __m128i dst1 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 1 * BPS));
-  __m128i dst2 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 2 * BPS));
-  __m128i dst3 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 3 * BPS));
+  __m128i dst0 = _mm_cvtsi32_si128(WebPMemToInt32(dst + 0 * BPS));
+  __m128i dst1 = _mm_cvtsi32_si128(WebPMemToInt32(dst + 1 * BPS));
+  __m128i dst2 = _mm_cvtsi32_si128(WebPMemToInt32(dst + 2 * BPS));
+  __m128i dst3 = _mm_cvtsi32_si128(WebPMemToInt32(dst + 3 * BPS));
   // Convert to 16b.
   dst0 = _mm_unpacklo_epi8(dst0, zero);
   dst1 = _mm_unpacklo_epi8(dst1, zero);
@@ -233,10 +233,10 @@ static void TransformAC3(const int16_t* in, uint8_t* dst) {
   dst2 = _mm_packus_epi16(dst2, dst2);
   dst3 = _mm_packus_epi16(dst3, dst3);
   // Store the results.
-  WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(dst0));
-  WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(dst1));
-  WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(dst2));
-  WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(dst3));
+  WebPInt32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(dst0));
+  WebPInt32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(dst1));
+  WebPInt32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(dst2));
+  WebPInt32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(dst3));
 }
 #undef MUL
 #endif  // USE_TRANSFORM_AC3
@@ -477,11 +477,11 @@ static WEBP_INLINE void Load8x4_SSE2(const uint8_t* const b, int stride,
   // A0 = 63 62 61 60 23 22 21 20 43 42 41 40 03 02 01 00
   // A1 = 73 72 71 70 33 32 31 30 53 52 51 50 13 12 11 10
   const __m128i A0 = _mm_set_epi32(
-      WebPMemToUint32(&b[6 * stride]), WebPMemToUint32(&b[2 * stride]),
-      WebPMemToUint32(&b[4 * stride]), WebPMemToUint32(&b[0 * stride]));
+      WebPMemToInt32(&b[6 * stride]), WebPMemToInt32(&b[2 * stride]),
+      WebPMemToInt32(&b[4 * stride]), WebPMemToInt32(&b[0 * stride]));
   const __m128i A1 = _mm_set_epi32(
-      WebPMemToUint32(&b[7 * stride]), WebPMemToUint32(&b[3 * stride]),
-      WebPMemToUint32(&b[5 * stride]), WebPMemToUint32(&b[1 * stride]));
+      WebPMemToInt32(&b[7 * stride]), WebPMemToInt32(&b[3 * stride]),
+      WebPMemToInt32(&b[5 * stride]), WebPMemToInt32(&b[1 * stride]));
 
   // B0 = 53 43 52 42 51 41 50 40 13 03 12 02 11 01 10 00
   // B1 = 73 63 72 62 71 61 70 60 33 23 32 22 31 21 30 20
@@ -540,7 +540,7 @@ static WEBP_INLINE void Store4x4_SSE2(__m128i* const x, uint8_t* dst,
                                       int stride) {
   int i;
   for (i = 0; i < 4; ++i, dst += stride) {
-    WebPUint32ToMem(dst, _mm_cvtsi128_si32(*x));
+    WebPInt32ToMem(dst, _mm_cvtsi128_si32(*x));
     *x = _mm_srli_si128(*x, 4);
   }
 }
@@ -908,10 +908,10 @@ static void VE4_SSE2(uint8_t* dst) {   // vertical
   const __m128i lsb = _mm_and_si128(_mm_xor_si128(ABCDEFGH, CDEFGH00), one);
   const __m128i b = _mm_subs_epu8(a, lsb);
   const __m128i avg = _mm_avg_epu8(b, BCDEFGH0);
-  const uint32_t vals = _mm_cvtsi128_si32(avg);
+  const int vals = _mm_cvtsi128_si32(avg);
   int i;
   for (i = 0; i < 4; ++i) {
-    WebPUint32ToMem(dst + i * BPS, vals);
+    WebPInt32ToMem(dst + i * BPS, vals);
   }
 }
 
@@ -925,10 +925,10 @@ static void LD4_SSE2(uint8_t* dst) {   // Down-Left
   const __m128i lsb = _mm_and_si128(_mm_xor_si128(ABCDEFGH, CDEFGHH0), one);
   const __m128i avg2 = _mm_subs_epu8(avg1, lsb);
   const __m128i abcdefg = _mm_avg_epu8(avg2, BCDEFGH0);
-  WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32( abcdefg ));
-  WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 1)));
-  WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 2)));
-  WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 3)));
+  WebPInt32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32( abcdefg ));
+  WebPInt32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 1)));
+  WebPInt32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 2)));
+  WebPInt32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 3)));
 }
 
 static void VR4_SSE2(uint8_t* dst) {   // Vertical-Right
@@ -946,10 +946,10 @@ static void VR4_SSE2(uint8_t* dst) {   // Vertical-Right
   const __m128i lsb = _mm_and_si128(_mm_xor_si128(IXABCD, ABCD0), one);
   const __m128i avg2 = _mm_subs_epu8(avg1, lsb);
   const __m128i efgh = _mm_avg_epu8(avg2, XABCD);
-  WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32( abcd ));
-  WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32( efgh ));
-  WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_slli_si128(abcd, 1)));
-  WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_slli_si128(efgh, 1)));
+  WebPInt32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32( abcd ));
+  WebPInt32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32( efgh ));
+  WebPInt32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_slli_si128(abcd, 1)));
+  WebPInt32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_slli_si128(efgh, 1)));
 
   // these two are hard to implement in SSE2, so we keep the C-version:
   DST(0, 2) = AVG3(J, I, X);
@@ -970,11 +970,12 @@ static void VL4_SSE2(uint8_t* dst) {   // Vertical-Left
   const __m128i abbc = _mm_or_si128(ab, bc);
   const __m128i lsb2 = _mm_and_si128(abbc, lsb1);
   const __m128i avg4 = _mm_subs_epu8(avg3, lsb2);
-  const uint32_t extra_out = _mm_cvtsi128_si32(_mm_srli_si128(avg4, 4));
-  WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32( avg1 ));
-  WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32( avg4 ));
-  WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(avg1, 1)));
-  WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(avg4, 1)));
+  const uint32_t extra_out =
+      (uint32_t)_mm_cvtsi128_si32(_mm_srli_si128(avg4, 4));
+  WebPInt32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32( avg1 ));
+  WebPInt32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32( avg4 ));
+  WebPInt32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(avg1, 1)));
+  WebPInt32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(avg4, 1)));
 
   // these two are hard to get and irregular
   DST(3, 2) = (extra_out >> 0) & 0xff;
@@ -990,7 +991,7 @@ static void RD4_SSE2(uint8_t* dst) {   // Down-right
   const uint32_t K = dst[-1 + 2 * BPS];
   const uint32_t L = dst[-1 + 3 * BPS];
   const __m128i LKJI_____ =
-      _mm_cvtsi32_si128(L | (K << 8) | (J << 16) | (I << 24));
+      _mm_cvtsi32_si128((int)(L | (K << 8) | (J << 16) | (I << 24)));
   const __m128i LKJIXABCD = _mm_or_si128(LKJI_____, ____XABCD);
   const __m128i KJIXABCD_ = _mm_srli_si128(LKJIXABCD, 1);
   const __m128i JIXABCD__ = _mm_srli_si128(LKJIXABCD, 2);
@@ -998,10 +999,10 @@ static void RD4_SSE2(uint8_t* dst) {   // Down-right
   const __m128i lsb = _mm_and_si128(_mm_xor_si128(JIXABCD__, LKJIXABCD), one);
   const __m128i avg2 = _mm_subs_epu8(avg1, lsb);
   const __m128i abcdefg = _mm_avg_epu8(avg2, KJIXABCD_);
-  WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32( abcdefg ));
-  WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 1)));
-  WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 2)));
-  WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 3)));
+  WebPInt32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32( abcdefg ));
+  WebPInt32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 1)));
+  WebPInt32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 2)));
+  WebPInt32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 3)));
 }
 
 #undef DST
@@ -1015,13 +1016,13 @@ static WEBP_INLINE void TrueMotion_SSE2(uint8_t* dst, int size) {
   const __m128i zero = _mm_setzero_si128();
   int y;
   if (size == 4) {
-    const __m128i top_values = _mm_cvtsi32_si128(WebPMemToUint32(top));
+    const __m128i top_values = _mm_cvtsi32_si128(WebPMemToInt32(top));
     const __m128i top_base = _mm_unpacklo_epi8(top_values, zero);
     for (y = 0; y < 4; ++y, dst += BPS) {
       const int val = dst[-1] - top[-1];
       const __m128i base = _mm_set1_epi16(val);
       const __m128i out = _mm_packus_epi16(_mm_add_epi16(base, top_base), zero);
-      WebPUint32ToMem(dst, _mm_cvtsi128_si32(out));
+      WebPInt32ToMem(dst, _mm_cvtsi128_si32(out));
     }
   } else if (size == 8) {
     const __m128i top_values = _mm_loadl_epi64((const __m128i*)top);
@@ -1062,7 +1063,7 @@ static void VE16_SSE2(uint8_t* dst) {
 static void HE16_SSE2(uint8_t* dst) {   // horizontal
   int j;
   for (j = 16; j > 0; --j) {
-    const __m128i values = _mm_set1_epi8(dst[-1]);
+    const __m128i values = _mm_set1_epi8((char)dst[-1]);
     _mm_storeu_si128((__m128i*)dst, values);
     dst += BPS;
   }
@@ -1070,7 +1071,7 @@ static void HE16_SSE2(uint8_t* dst) {   // horizontal
 
 static WEBP_INLINE void Put16_SSE2(uint8_t v, uint8_t* dst) {
   int j;
-  const __m128i values = _mm_set1_epi8(v);
+  const __m128i values = _mm_set1_epi8((char)v);
   for (j = 0; j < 16; ++j) {
     _mm_storeu_si128((__m128i*)(dst + j * BPS), values);
   }
@@ -1130,7 +1131,7 @@ static void VE8uv_SSE2(uint8_t* dst) {   // vertical
 // helper for chroma-DC predictions
 static WEBP_INLINE void Put8x8uv_SSE2(uint8_t v, uint8_t* dst) {
   int j;
-  const __m128i values = _mm_set1_epi8(v);
+  const __m128i values = _mm_set1_epi8((char)v);
   for (j = 0; j < 8; ++j) {
     _mm_storel_epi64((__m128i*)(dst + j * BPS), values);
   }
```
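The same pattern in a self-contained form: a minimal sketch (not libwebp code; the function name, the bias operation, and the BPS value are assumptions here) of the four-pixels-per-row load/modify/store round trip that these hunks switch to, routing the four bytes through a plain `int` so that `_mm_cvtsi32_si128()` and `_mm_cvtsi128_si32()` are called without casts:

```c
#include <emmintrin.h>  /* SSE2 intrinsics */
#include <stdint.h>
#include <string.h>

#define BPS 32  /* assumed bytes-per-scanline, as in libwebp's dsp code */

/* Adds a constant bias to a 4x4 block of 8-bit pixels, saturating to [0,255]. */
static void AddBias4x4_sketch(uint8_t* dst, int bias) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i b16 = _mm_set1_epi16((short)bias);
  int y;
  for (y = 0; y < 4; ++y, dst += BPS) {
    int32_t v;
    __m128i row;
    memcpy(&v, dst, sizeof(v));          /* unaligned 4-byte load */
    row = _mm_cvtsi32_si128(v);          /* int in: no signedness cast needed */
    row = _mm_unpacklo_epi8(row, zero);  /* widen the 4 pixels to 16 bits */
    row = _mm_add_epi16(row, b16);       /* add the bias per pixel */
    row = _mm_packus_epi16(row, row);    /* saturate back to 8 bits */
    v = _mm_cvtsi128_si32(row);          /* int out: no signedness cast needed */
    memcpy(dst, &v, sizeof(v));          /* unaligned 4-byte store */
  }
}
```

The `(char)` casts added at the `_mm_set1_epi8()` call sites follow the same idea: that intrinsic takes a `char`, so casting the `uint8_t` value explicitly keeps the narrowing conversion visible and warning-clean.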