author     K. S. Ernest (iFire) Lee <ernest.lee@chibifire.com>    2021-04-06 22:05:56 -0700
committer  Rémi Verschelde <rverschelde@gmail.com>                2021-04-13 00:12:12 +0200
commit     d840165a324b5c218ca3a4882f030986855c8383 (patch)
tree       7c5f4b091ecc647f2b6ed48135945f84c6dc2de9 /thirdparty/etcpak
parent     b895071895cffcbcda7f4156d7175ba5b8068852 (diff)
Add `etcpak` library for faster ETC/ETC2/S3TC imports.
- `etc` module was renamed to `etcpak` and modified to use the new library.
- PKM importer is removed in the process, it's obsolete.
- Old library `etc2comp` is removed.
- S3TC compression no longer done via `squish` (but decompression still is).
- Slight modifications to etcpak sources for MinGW compatibility, to fix LLVM `-Wc++11-narrowing` errors, and to allow using vendored or system libpng.

Co-authored-by: Rémi Verschelde <rverschelde@gmail.com>
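For context: the new library exposes per-format block compressors, declared in `ProcessRGB.hpp` and `ProcessDxtc.hpp` below and driven by `BlockData::Process()` as `Compress*( src, dst, blocks, width )`. A minimal, hypothetical usage sketch of that call shape (signatures inferred from the call sites in `BlockData.cpp`; this is not code from the commit):

    // Hypothetical driver -- mirrors how BlockData::Process() invokes the compressors.
    #include <stdint.h>
    #include "ProcessRGB.hpp"   // CompressEtc1Rgb, CompressEtc2Rgb, CompressEtc2Rgba, ...
    #include "ProcessDxtc.hpp"  // CompressDxt1, CompressDxt5

    void CompressOneTile( const uint32_t* rgba )  // rgba: 16 RGBA8 pixels, one 4x4 tile
    {
        uint64_t etc1;                        // ETC1 and DXT1 emit one 64-bit word per 4x4 block
        CompressEtc1Rgb( rgba, &etc1, 1, 4 ); // 1 block, source scanline width of 4 pixels

        uint64_t dxt1;
        CompressDxt1( rgba, &dxt1, 1, 4 );
    }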
Diffstat (limited to 'thirdparty/etcpak')
-rw-r--r--  thirdparty/etcpak/AUTHORS.txt  3
-rw-r--r--  thirdparty/etcpak/Bitmap.cpp  216
-rw-r--r--  thirdparty/etcpak/Bitmap.hpp  50
-rw-r--r--  thirdparty/etcpak/BitmapDownsampled.cpp  86
-rw-r--r--  thirdparty/etcpak/BitmapDownsampled.hpp  13
-rw-r--r--  thirdparty/etcpak/BlockData.cpp  1296
-rw-r--r--  thirdparty/etcpak/BlockData.hpp  56
-rw-r--r--  thirdparty/etcpak/ColorSpace.cpp  114
-rw-r--r--  thirdparty/etcpak/ColorSpace.hpp  36
-rw-r--r--  thirdparty/etcpak/DataProvider.cpp  77
-rw-r--r--  thirdparty/etcpak/DataProvider.hpp  41
-rw-r--r--  thirdparty/etcpak/Debug.cpp  31
-rw-r--r--  thirdparty/etcpak/Debug.hpp  27
-rw-r--r--  thirdparty/etcpak/Dither.cpp  120
-rw-r--r--  thirdparty/etcpak/Dither.hpp  21
-rw-r--r--  thirdparty/etcpak/Error.cpp  48
-rw-r--r--  thirdparty/etcpak/Error.hpp  9
-rw-r--r--  thirdparty/etcpak/ForceInline.hpp  20
-rw-r--r--  thirdparty/etcpak/LICENSE.txt  26
-rw-r--r--  thirdparty/etcpak/Math.hpp  92
-rw-r--r--  thirdparty/etcpak/MipMap.hpp  11
-rw-r--r--  thirdparty/etcpak/ProcessCommon.hpp  50
-rw-r--r--  thirdparty/etcpak/ProcessDxtc.cpp  956
-rw-r--r--  thirdparty/etcpak/ProcessDxtc.hpp  11
-rw-r--r--  thirdparty/etcpak/ProcessRGB.cpp  3100
-rw-r--r--  thirdparty/etcpak/ProcessRGB.hpp  13
-rw-r--r--  thirdparty/etcpak/Semaphore.hpp  46
-rw-r--r--  thirdparty/etcpak/System.cpp  68
-rw-r--r--  thirdparty/etcpak/System.hpp  15
-rw-r--r--  thirdparty/etcpak/Tables.cpp  221
-rw-r--r--  thirdparty/etcpak/Tables.hpp  49
-rw-r--r--  thirdparty/etcpak/TaskDispatch.cpp  115
-rw-r--r--  thirdparty/etcpak/TaskDispatch.hpp  34
-rw-r--r--  thirdparty/etcpak/Timing.cpp  8
-rw-r--r--  thirdparty/etcpak/Timing.hpp  8
-rw-r--r--  thirdparty/etcpak/Vector.hpp  222
-rw-r--r--  thirdparty/etcpak/lz4/lz4.c  1516
-rw-r--r--  thirdparty/etcpak/lz4/lz4.h  360
-rw-r--r--  thirdparty/etcpak/mmap.cpp  38
-rw-r--r--  thirdparty/etcpak/mmap.hpp  19
-rw-r--r--  thirdparty/etcpak/patches/libpng-unbundle.patch  13
-rw-r--r--  thirdparty/etcpak/patches/llvm-c++11-narrowing-errors.patch  64
-rw-r--r--  thirdparty/etcpak/patches/windows-mingw-fixes.patch  63
43 files changed, 9382 insertions, 0 deletions
diff --git a/thirdparty/etcpak/AUTHORS.txt b/thirdparty/etcpak/AUTHORS.txt
new file mode 100644
index 0000000000..e7bae62c85
--- /dev/null
+++ b/thirdparty/etcpak/AUTHORS.txt
@@ -0,0 +1,3 @@
+Bartosz Taudul <wolf@nereid.pl>
+Daniel Jungmann <el.3d.source@gmail.com>
+Florian Penzkofer <fp@nullptr.de>
diff --git a/thirdparty/etcpak/Bitmap.cpp b/thirdparty/etcpak/Bitmap.cpp
new file mode 100644
index 0000000000..ef318318ac
--- /dev/null
+++ b/thirdparty/etcpak/Bitmap.cpp
@@ -0,0 +1,216 @@
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include <png.h>
+#include "lz4/lz4.h"
+
+#include "Bitmap.hpp"
+#include "Debug.hpp"
+
+Bitmap::Bitmap( const char* fn, unsigned int lines, bool bgr )
+ : m_block( nullptr )
+ , m_lines( lines )
+ , m_alpha( true )
+ , m_sema( 0 )
+{
+ FILE* f = fopen( fn, "rb" );
+ assert( f );
+
+ char buf[4];
+ fread( buf, 1, 4, f );
+ if( memcmp( buf, "raw4", 4 ) == 0 )
+ {
+ uint8_t a;
+ fread( &a, 1, 1, f );
+ m_alpha = a == 1;
+ uint32_t d;
+ fread( &d, 1, 4, f );
+ m_size.x = d;
+ fread( &d, 1, 4, f );
+ m_size.y = d;
+ DBGPRINT( "Raw bitmap " << fn << " " << m_size.x << "x" << m_size.y );
+
+ assert( m_size.x % 4 == 0 );
+ assert( m_size.y % 4 == 0 );
+
+ int32_t csize;
+ fread( &csize, 1, 4, f );
+ char* cbuf = new char[csize];
+ fread( cbuf, 1, csize, f );
+ fclose( f );
+
+ m_block = m_data = new uint32_t[m_size.x*m_size.y];
+ m_linesLeft = m_size.y / 4;
+
+ LZ4_decompress_fast( cbuf, (char*)m_data, m_size.x*m_size.y*4 );
+ delete[] cbuf;
+
+ for( int i=0; i<m_size.y/4; i++ )
+ {
+ m_sema.unlock();
+ }
+ }
+ else
+ {
+ fseek( f, 0, SEEK_SET );
+
+ unsigned int sig_read = 0;
+ int bit_depth, color_type, interlace_type;
+
+ png_structp png_ptr = png_create_read_struct( PNG_LIBPNG_VER_STRING, NULL, NULL, NULL );
+ png_infop info_ptr = png_create_info_struct( png_ptr );
+ setjmp( png_jmpbuf( png_ptr ) );
+
+ png_init_io( png_ptr, f );
+ png_set_sig_bytes( png_ptr, sig_read );
+
+ png_uint_32 w, h;
+
+ png_read_info( png_ptr, info_ptr );
+ png_get_IHDR( png_ptr, info_ptr, &w, &h, &bit_depth, &color_type, &interlace_type, NULL, NULL );
+
+ m_size = v2i( w, h );
+
+ png_set_strip_16( png_ptr );
+ if( color_type == PNG_COLOR_TYPE_PALETTE )
+ {
+ png_set_palette_to_rgb( png_ptr );
+ }
+ else if( color_type == PNG_COLOR_TYPE_GRAY && bit_depth < 8 )
+ {
+ png_set_expand_gray_1_2_4_to_8( png_ptr );
+ }
+ if( png_get_valid( png_ptr, info_ptr, PNG_INFO_tRNS ) )
+ {
+ png_set_tRNS_to_alpha( png_ptr );
+ }
+ if( color_type == PNG_COLOR_TYPE_GRAY_ALPHA )
+ {
+ png_set_gray_to_rgb(png_ptr);
+ }
+ if( bgr )
+ {
+ png_set_bgr(png_ptr);
+ }
+
+ switch( color_type )
+ {
+ case PNG_COLOR_TYPE_PALETTE:
+ if( !png_get_valid( png_ptr, info_ptr, PNG_INFO_tRNS ) )
+ {
+ png_set_filler( png_ptr, 0xff, PNG_FILLER_AFTER );
+ m_alpha = false;
+ }
+ break;
+ case PNG_COLOR_TYPE_GRAY_ALPHA:
+ png_set_gray_to_rgb( png_ptr );
+ break;
+ case PNG_COLOR_TYPE_RGB:
+ png_set_filler( png_ptr, 0xff, PNG_FILLER_AFTER );
+ m_alpha = false;
+ break;
+ default:
+ break;
+ }
+
+ DBGPRINT( "Bitmap " << fn << " " << w << "x" << h );
+
+ assert( w % 4 == 0 );
+ assert( h % 4 == 0 );
+
+ m_block = m_data = new uint32_t[w*h];
+ m_linesLeft = h / 4;
+
+ m_load = std::async( std::launch::async, [this, f, png_ptr, info_ptr]() mutable
+ {
+ auto ptr = m_data;
+ unsigned int lines = 0;
+ for( int i=0; i<m_size.y / 4; i++ )
+ {
+ for( int j=0; j<4; j++ )
+ {
+ png_read_rows( png_ptr, (png_bytepp)&ptr, NULL, 1 );
+ ptr += m_size.x;
+ }
+ lines++;
+ if( lines >= m_lines )
+ {
+ lines = 0;
+ m_sema.unlock();
+ }
+ }
+
+ if( lines != 0 )
+ {
+ m_sema.unlock();
+ }
+
+ png_read_end( png_ptr, info_ptr );
+ png_destroy_read_struct( &png_ptr, &info_ptr, NULL );
+ fclose( f );
+ } );
+ }
+}
+
+Bitmap::Bitmap( const v2i& size )
+ : m_data( new uint32_t[size.x*size.y] )
+ , m_block( nullptr )
+ , m_lines( 1 )
+ , m_linesLeft( size.y / 4 )
+ , m_size( size )
+ , m_sema( 0 )
+{
+}
+
+Bitmap::Bitmap( const Bitmap& src, unsigned int lines )
+ : m_lines( lines )
+ , m_alpha( src.Alpha() )
+ , m_sema( 0 )
+{
+}
+
+Bitmap::~Bitmap()
+{
+ delete[] m_data;
+}
+
+void Bitmap::Write( const char* fn )
+{
+ FILE* f = fopen( fn, "wb" );
+ assert( f );
+
+ png_structp png_ptr = png_create_write_struct( PNG_LIBPNG_VER_STRING, NULL, NULL, NULL );
+ png_infop info_ptr = png_create_info_struct( png_ptr );
+ setjmp( png_jmpbuf( png_ptr ) );
+ png_init_io( png_ptr, f );
+
+ png_set_IHDR( png_ptr, info_ptr, m_size.x, m_size.y, 8, PNG_COLOR_TYPE_RGB_ALPHA, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE );
+
+ png_write_info( png_ptr, info_ptr );
+
+ uint32_t* ptr = m_data;
+ for( int i=0; i<m_size.y; i++ )
+ {
+ png_write_rows( png_ptr, (png_bytepp)(&ptr), 1 );
+ ptr += m_size.x;
+ }
+
+ png_write_end( png_ptr, info_ptr );
+ png_destroy_write_struct( &png_ptr, &info_ptr );
+
+ fclose( f );
+}
+
+const uint32_t* Bitmap::NextBlock( unsigned int& lines, bool& done )
+{
+ std::lock_guard<std::mutex> lock( m_lock );
+ lines = std::min( m_lines, m_linesLeft );
+ auto ret = m_block;
+ m_sema.lock();
+ m_block += m_size.x * 4 * lines;
+ m_linesLeft -= lines;
+ done = m_linesLeft == 0;
+ return ret;
+}
diff --git a/thirdparty/etcpak/Bitmap.hpp b/thirdparty/etcpak/Bitmap.hpp
new file mode 100644
index 0000000000..fae8c936ed
--- /dev/null
+++ b/thirdparty/etcpak/Bitmap.hpp
@@ -0,0 +1,50 @@
+#ifndef __DARKRL__BITMAP_HPP__
+#define __DARKRL__BITMAP_HPP__
+
+#include <future>
+#include <memory>
+#include <mutex>
+#include <stdint.h>
+
+#include "Semaphore.hpp"
+#include "Vector.hpp"
+
+enum class Channels
+{
+ RGB,
+ Alpha
+};
+
+class Bitmap
+{
+public:
+ Bitmap( const char* fn, unsigned int lines, bool bgr );
+ Bitmap( const v2i& size );
+ virtual ~Bitmap();
+
+ void Write( const char* fn );
+
+ uint32_t* Data() { if( m_load.valid() ) m_load.wait(); return m_data; }
+ const uint32_t* Data() const { if( m_load.valid() ) m_load.wait(); return m_data; }
+ const v2i& Size() const { return m_size; }
+ bool Alpha() const { return m_alpha; }
+
+ const uint32_t* NextBlock( unsigned int& lines, bool& done );
+
+protected:
+ Bitmap( const Bitmap& src, unsigned int lines );
+
+ uint32_t* m_data;
+ uint32_t* m_block;
+ unsigned int m_lines;
+ unsigned int m_linesLeft;
+ v2i m_size;
+ bool m_alpha;
+ Semaphore m_sema;
+ std::mutex m_lock;
+ std::future<void> m_load;
+};
+
+typedef std::shared_ptr<Bitmap> BitmapPtr;
+
+#endif
diff --git a/thirdparty/etcpak/BitmapDownsampled.cpp b/thirdparty/etcpak/BitmapDownsampled.cpp
new file mode 100644
index 0000000000..0eb0d81185
--- /dev/null
+++ b/thirdparty/etcpak/BitmapDownsampled.cpp
@@ -0,0 +1,86 @@
+#include <string.h>
+#include <utility>
+
+#include "BitmapDownsampled.hpp"
+#include "Debug.hpp"
+
+BitmapDownsampled::BitmapDownsampled( const Bitmap& bmp, unsigned int lines )
+ : Bitmap( bmp, lines )
+{
+ m_size.x = std::max( 1, bmp.Size().x / 2 );
+ m_size.y = std::max( 1, bmp.Size().y / 2 );
+
+ int w = std::max( m_size.x, 4 );
+ int h = std::max( m_size.y, 4 );
+
+ DBGPRINT( "Subbitmap " << m_size.x << "x" << m_size.y );
+
+ m_block = m_data = new uint32_t[w*h];
+
+ if( m_size.x < w || m_size.y < h )
+ {
+ memset( m_data, 0, w*h*sizeof( uint32_t ) );
+ m_linesLeft = h / 4;
+ unsigned int lines = 0;
+ for( int i=0; i<h/4; i++ )
+ {
+ for( int j=0; j<4; j++ )
+ {
+ lines++;
+ if( lines > m_lines )
+ {
+ lines = 0;
+ m_sema.unlock();
+ }
+ }
+ }
+ if( lines != 0 )
+ {
+ m_sema.unlock();
+ }
+ }
+ else
+ {
+ m_linesLeft = h / 4;
+ m_load = std::async( std::launch::async, [this, &bmp, w, h]() mutable
+ {
+ auto ptr = m_data;
+ auto src1 = bmp.Data();
+ auto src2 = src1 + bmp.Size().x;
+ unsigned int lines = 0;
+ for( int i=0; i<h/4; i++ )
+ {
+ for( int j=0; j<4; j++ )
+ {
+ for( int k=0; k<m_size.x; k++ )
+ {
+ int r = ( ( *src1 & 0x000000FF ) + ( *(src1+1) & 0x000000FF ) + ( *src2 & 0x000000FF ) + ( *(src2+1) & 0x000000FF ) ) / 4;
+ int g = ( ( ( *src1 & 0x0000FF00 ) + ( *(src1+1) & 0x0000FF00 ) + ( *src2 & 0x0000FF00 ) + ( *(src2+1) & 0x0000FF00 ) ) / 4 ) & 0x0000FF00;
+ int b = ( ( ( *src1 & 0x00FF0000 ) + ( *(src1+1) & 0x00FF0000 ) + ( *src2 & 0x00FF0000 ) + ( *(src2+1) & 0x00FF0000 ) ) / 4 ) & 0x00FF0000;
+ int a = ( ( ( ( ( *src1 & 0xFF000000 ) >> 8 ) + ( ( *(src1+1) & 0xFF000000 ) >> 8 ) + ( ( *src2 & 0xFF000000 ) >> 8 ) + ( ( *(src2+1) & 0xFF000000 ) >> 8 ) ) / 4 ) & 0x00FF0000 ) << 8;
+ *ptr++ = r | g | b | a;
+ src1 += 2;
+ src2 += 2;
+ }
+ src1 += m_size.x * 2;
+ src2 += m_size.x * 2;
+ }
+ lines++;
+ if( lines >= m_lines )
+ {
+ lines = 0;
+ m_sema.unlock();
+ }
+ }
+
+ if( lines != 0 )
+ {
+ m_sema.unlock();
+ }
+ } );
+ }
+}
+
+BitmapDownsampled::~BitmapDownsampled()
+{
+}
diff --git a/thirdparty/etcpak/BitmapDownsampled.hpp b/thirdparty/etcpak/BitmapDownsampled.hpp
new file mode 100644
index 0000000000..b7313808df
--- /dev/null
+++ b/thirdparty/etcpak/BitmapDownsampled.hpp
@@ -0,0 +1,13 @@
+#ifndef __DARKRL__BITMAPDOWNSAMPLED_HPP__
+#define __DARKRL__BITMAPDOWNSAMPLED_HPP__
+
+#include "Bitmap.hpp"
+
+class BitmapDownsampled : public Bitmap
+{
+public:
+ BitmapDownsampled( const Bitmap& bmp, unsigned int lines );
+ ~BitmapDownsampled();
+};
+
+#endif
diff --git a/thirdparty/etcpak/BlockData.cpp b/thirdparty/etcpak/BlockData.cpp
new file mode 100644
index 0000000000..4906e69492
--- /dev/null
+++ b/thirdparty/etcpak/BlockData.cpp
@@ -0,0 +1,1296 @@
+#include <assert.h>
+#include <string.h>
+
+#include "BlockData.hpp"
+#include "ColorSpace.hpp"
+#include "Debug.hpp"
+#include "MipMap.hpp"
+#include "mmap.hpp"
+#include "ProcessRGB.hpp"
+#include "ProcessDxtc.hpp"
+#include "Tables.hpp"
+#include "TaskDispatch.hpp"
+
+#ifdef __ARM_NEON
+# include <arm_neon.h>
+#endif
+
+#if defined __SSE4_1__ || defined __AVX2__ || defined _MSC_VER
+# ifdef _MSC_VER
+# include <intrin.h>
+# include <Windows.h>
+# define _bswap(x) _byteswap_ulong(x)
+# define _bswap64(x) _byteswap_uint64(x)
+# else
+# include <x86intrin.h>
+# endif
+#endif
+
+#ifndef _bswap
+# define _bswap(x) __builtin_bswap32(x)
+# define _bswap64(x) __builtin_bswap64(x)
+#endif
+
+static uint8_t table59T58H[8] = { 3,6,11,16,23,32,41,64 };
+
+BlockData::BlockData( const char* fn )
+ : m_file( fopen( fn, "rb" ) )
+{
+ assert( m_file );
+ fseek( m_file, 0, SEEK_END );
+ m_maplen = ftell( m_file );
+ fseek( m_file, 0, SEEK_SET );
+ m_data = (uint8_t*)mmap( nullptr, m_maplen, PROT_READ, MAP_SHARED, fileno( m_file ), 0 );
+
+ auto data32 = (uint32_t*)m_data;
+ if( *data32 == 0x03525650 )
+ {
+ // PVR
+ switch( *(data32+2) )
+ {
+ case 6:
+ m_type = Etc1;
+ break;
+ case 7:
+ m_type = Dxt1;
+ break;
+ case 11:
+ m_type = Dxt5;
+ break;
+ case 22:
+ m_type = Etc2_RGB;
+ break;
+ case 23:
+ m_type = Etc2_RGBA;
+ break;
+ default:
+ assert( false );
+ break;
+ }
+
+ m_size.y = *(data32+6);
+ m_size.x = *(data32+7);
+ m_dataOffset = 52 + *(data32+12);
+ }
+ else if( *data32 == 0x58544BAB )
+ {
+ // KTX
+ switch( *(data32+7) )
+ {
+ case 0x9274:
+ m_type = Etc2_RGB;
+ break;
+ case 0x9278:
+ m_type = Etc2_RGBA;
+ break;
+ default:
+ assert( false );
+ break;
+ }
+
+ m_size.x = *(data32+9);
+ m_size.y = *(data32+10);
+ m_dataOffset = sizeof( uint32_t ) * 17 + *(data32+15);
+ }
+ else
+ {
+ assert( false );
+ }
+}
+
+static uint8_t* OpenForWriting( const char* fn, size_t len, const v2i& size, FILE** f, int levels, BlockData::Type type )
+{
+ *f = fopen( fn, "wb+" );
+ assert( *f );
+ fseek( *f, len - 1, SEEK_SET );
+ const char zero = 0;
+ fwrite( &zero, 1, 1, *f );
+ fseek( *f, 0, SEEK_SET );
+
+ auto ret = (uint8_t*)mmap( nullptr, len, PROT_WRITE, MAP_SHARED, fileno( *f ), 0 );
+ auto dst = (uint32_t*)ret;
+
+ *dst++ = 0x03525650; // version
+ *dst++ = 0; // flags
+ switch( type ) // pixelformat[0]
+ {
+ case BlockData::Etc1:
+ *dst++ = 6;
+ break;
+ case BlockData::Etc2_RGB:
+ *dst++ = 22;
+ break;
+ case BlockData::Etc2_RGBA:
+ *dst++ = 23;
+ break;
+ case BlockData::Dxt1:
+ *dst++ = 7;
+ break;
+ case BlockData::Dxt5:
+ *dst++ = 11;
+ break;
+ default:
+ assert( false );
+ break;
+ }
+ *dst++ = 0; // pixelformat[1]
+ *dst++ = 0; // colourspace
+ *dst++ = 0; // channel type
+ *dst++ = size.y; // height
+ *dst++ = size.x; // width
+ *dst++ = 1; // depth
+ *dst++ = 1; // num surfs
+ *dst++ = 1; // num faces
+ *dst++ = levels; // mipmap count
+ *dst++ = 0; // metadata size
+
+ return ret;
+}
+
+static int AdjustSizeForMipmaps( const v2i& size, int levels )
+{
+ int len = 0;
+ v2i current = size;
+ for( int i=1; i<levels; i++ )
+ {
+ assert( current.x != 1 || current.y != 1 );
+ current.x = std::max( 1, current.x / 2 );
+ current.y = std::max( 1, current.y / 2 );
+ len += std::max( 4, current.x ) * std::max( 4, current.y ) / 2;
+ }
+ assert( current.x == 1 && current.y == 1 );
+ return len;
+}
+
+BlockData::BlockData( const char* fn, const v2i& size, bool mipmap, Type type )
+ : m_size( size )
+ , m_dataOffset( 52 )
+ , m_maplen( m_size.x*m_size.y/2 )
+ , m_type( type )
+{
+ assert( m_size.x%4 == 0 && m_size.y%4 == 0 );
+
+ uint32_t cnt = m_size.x * m_size.y / 16;
+ DBGPRINT( cnt << " blocks" );
+
+ int levels = 1;
+
+ if( mipmap )
+ {
+ levels = NumberOfMipLevels( size );
+ DBGPRINT( "Number of mipmaps: " << levels );
+ m_maplen += AdjustSizeForMipmaps( size, levels );
+ }
+
+ if( type == Etc2_RGBA || type == Dxt5 ) m_maplen *= 2;
+
+ m_maplen += m_dataOffset;
+ m_data = OpenForWriting( fn, m_maplen, m_size, &m_file, levels, type );
+}
+
+BlockData::BlockData( const v2i& size, bool mipmap, Type type )
+ : m_size( size )
+ , m_dataOffset( 52 )
+ , m_file( nullptr )
+ , m_maplen( m_size.x*m_size.y/2 )
+ , m_type( type )
+{
+ assert( m_size.x%4 == 0 && m_size.y%4 == 0 );
+ if( mipmap )
+ {
+ const int levels = NumberOfMipLevels( size );
+ m_maplen += AdjustSizeForMipmaps( size, levels );
+ }
+
+ if( type == Etc2_RGBA || type == Dxt5 ) m_maplen *= 2;
+
+ m_maplen += m_dataOffset;
+ m_data = new uint8_t[m_maplen];
+}
+
+BlockData::~BlockData()
+{
+ if( m_file )
+ {
+ munmap( m_data, m_maplen );
+ fclose( m_file );
+ }
+ else
+ {
+ delete[] m_data;
+ }
+}
+
+void BlockData::Process( const uint32_t* src, uint32_t blocks, size_t offset, size_t width, Channels type, bool dither )
+{
+ auto dst = ((uint64_t*)( m_data + m_dataOffset )) + offset;
+
+ if( type == Channels::Alpha )
+ {
+ if( m_type != Etc1 )
+ {
+ CompressEtc2Alpha( src, dst, blocks, width );
+ }
+ else
+ {
+ CompressEtc1Alpha( src, dst, blocks, width );
+ }
+ }
+ else
+ {
+ switch( m_type )
+ {
+ case Etc1:
+ if( dither )
+ {
+ CompressEtc1RgbDither( src, dst, blocks, width );
+ }
+ else
+ {
+ CompressEtc1Rgb( src, dst, blocks, width );
+ }
+ break;
+ case Etc2_RGB:
+ CompressEtc2Rgb( src, dst, blocks, width );
+ break;
+ case Dxt1:
+ if( dither )
+ {
+ CompressDxt1Dither( src, dst, blocks, width );
+ }
+ else
+ {
+ CompressDxt1( src, dst, blocks, width );
+ }
+ break;
+ default:
+ assert( false );
+ break;
+ }
+ }
+}
+
+void BlockData::ProcessRGBA( const uint32_t* src, uint32_t blocks, size_t offset, size_t width )
+{
+ auto dst = ((uint64_t*)( m_data + m_dataOffset )) + offset * 2;
+
+ switch( m_type )
+ {
+ case Etc2_RGBA:
+ CompressEtc2Rgba( src, dst, blocks, width );
+ break;
+ case Dxt5:
+ CompressDxt5( src, dst, blocks, width );
+ break;
+ default:
+ assert( false );
+ break;
+ }
+}
+
+namespace
+{
+
+static etcpak_force_inline int32_t expand6(uint32_t value)
+{
+ return (value << 2) | (value >> 4);
+}
+
+static etcpak_force_inline int32_t expand7(uint32_t value)
+{
+ return (value << 1) | (value >> 6);
+}
+
+static etcpak_force_inline void DecodeT( uint64_t block, uint32_t* dst, uint32_t w )
+{
+ const auto r0 = ( block >> 24 ) & 0x1B;
+ const auto rh0 = ( r0 >> 3 ) & 0x3;
+ const auto rl0 = r0 & 0x3;
+ const auto g0 = ( block >> 20 ) & 0xF;
+ const auto b0 = ( block >> 16 ) & 0xF;
+
+ const auto r1 = ( block >> 12 ) & 0xF;
+ const auto g1 = ( block >> 8 ) & 0xF;
+ const auto b1 = ( block >> 4 ) & 0xF;
+
+ const auto cr0 = ( ( rh0 << 6 ) | ( rl0 << 4 ) | ( rh0 << 2 ) | rl0);
+ const auto cg0 = ( g0 << 4 ) | g0;
+ const auto cb0 = ( b0 << 4 ) | b0;
+
+ const auto cr1 = ( r1 << 4 ) | r1;
+ const auto cg1 = ( g1 << 4 ) | g1;
+ const auto cb1 = ( b1 << 4 ) | b1;
+
+ const auto codeword_hi = ( block >> 2 ) & 0x3;
+ const auto codeword_lo = block & 0x1;
+ const auto codeword = ( codeword_hi << 1 ) | codeword_lo;
+
+ const auto c2r = clampu8( cr1 + table59T58H[codeword] );
+ const auto c2g = clampu8( cg1 + table59T58H[codeword] );
+ const auto c2b = clampu8( cb1 + table59T58H[codeword] );
+
+ const auto c3r = clampu8( cr1 - table59T58H[codeword] );
+ const auto c3g = clampu8( cg1 - table59T58H[codeword] );
+ const auto c3b = clampu8( cb1 - table59T58H[codeword] );
+
+ const uint32_t col_tab[4] = {
+ uint32_t(cr0 | ( cg0 << 8 ) | ( cb0 << 16 ) | 0xFF000000),
+ uint32_t(c2r | ( c2g << 8 ) | ( c2b << 16 ) | 0xFF000000),
+ uint32_t(cr1 | ( cg1 << 8 ) | ( cb1 << 16 ) | 0xFF000000),
+ uint32_t(c3r | ( c3g << 8 ) | ( c3b << 16 ) | 0xFF000000)
+ };
+
+ const uint32_t indexes = ( block >> 32 ) & 0xFFFFFFFF;
+ for( uint8_t j = 0; j < 4; j++ )
+ {
+ for( uint8_t i = 0; i < 4; i++ )
+ {
+ //2bit indices distributed on two lane 16bit numbers
+ const uint8_t index = ( ( ( indexes >> ( j + i * 4 + 16 ) ) & 0x1 ) << 1) | ( ( indexes >> ( j + i * 4 ) ) & 0x1);
+ dst[j * w + i] = col_tab[index];
+ }
+ }
+}
+
+static etcpak_force_inline void DecodeTAlpha( uint64_t block, uint64_t alpha, uint32_t* dst, uint32_t w )
+{
+ const auto r0 = ( block >> 24 ) & 0x1B;
+ const auto rh0 = ( r0 >> 3 ) & 0x3;
+ const auto rl0 = r0 & 0x3;
+ const auto g0 = ( block >> 20 ) & 0xF;
+ const auto b0 = ( block >> 16 ) & 0xF;
+
+ const auto r1 = ( block >> 12 ) & 0xF;
+ const auto g1 = ( block >> 8 ) & 0xF;
+ const auto b1 = ( block >> 4 ) & 0xF;
+
+ const auto cr0 = ( ( rh0 << 6 ) | ( rl0 << 4 ) | ( rh0 << 2 ) | rl0);
+ const auto cg0 = ( g0 << 4 ) | g0;
+ const auto cb0 = ( b0 << 4 ) | b0;
+
+ const auto cr1 = ( r1 << 4 ) | r1;
+ const auto cg1 = ( g1 << 4 ) | g1;
+ const auto cb1 = ( b1 << 4 ) | b1;
+
+ const auto codeword_hi = ( block >> 2 ) & 0x3;
+ const auto codeword_lo = block & 0x1;
+ const auto codeword = (codeword_hi << 1) | codeword_lo;
+
+ const int32_t base = alpha >> 56;
+ const int32_t mul = ( alpha >> 52 ) & 0xF;
+ const auto tbl = g_alpha[( alpha >> 48 ) & 0xF];
+
+ const auto c2r = clampu8( cr1 + table59T58H[codeword] );
+ const auto c2g = clampu8( cg1 + table59T58H[codeword] );
+ const auto c2b = clampu8( cb1 + table59T58H[codeword] );
+
+ const auto c3r = clampu8( cr1 - table59T58H[codeword] );
+ const auto c3g = clampu8( cg1 - table59T58H[codeword] );
+ const auto c3b = clampu8( cb1 - table59T58H[codeword] );
+
+ const uint32_t col_tab[4] = {
+ uint32_t(cr0 | ( cg0 << 8 ) | ( cb0 << 16 )),
+ uint32_t(c2r | ( c2g << 8 ) | ( c2b << 16 )),
+ uint32_t(cr1 | ( cg1 << 8 ) | ( cb1 << 16 )),
+ uint32_t(c3r | ( c3g << 8 ) | ( c3b << 16 ))
+ };
+
+ const uint32_t indexes = ( block >> 32 ) & 0xFFFFFFFF;
+ for( uint8_t j = 0; j < 4; j++ )
+ {
+ for( uint8_t i = 0; i < 4; i++ )
+ {
+ //2bit indices distributed on two lane 16bit numbers
+ const uint8_t index = ( ( ( indexes >> ( j + i * 4 + 16 ) ) & 0x1 ) << 1 ) | ( ( indexes >> ( j + i * 4 ) ) & 0x1 );
+ const auto amod = tbl[( alpha >> ( 45 - j * 3 - i * 12 ) ) & 0x7];
+ const uint32_t a = clampu8( base + amod * mul );
+ dst[j * w + i] = col_tab[index] | ( a << 24 );
+ }
+ }
+}
+
+static etcpak_force_inline void DecodeH( uint64_t block, uint32_t* dst, uint32_t w )
+{
+ const uint32_t indexes = ( block >> 32 ) & 0xFFFFFFFF;
+
+ const auto r0444 = ( block >> 27 ) & 0xF;
+ const auto g0444 = ( ( block >> 20 ) & 0x1 ) | ( ( ( block >> 24 ) & 0x7 ) << 1 );
+ const auto b0444 = ( ( block >> 15 ) & 0x7 ) | ( ( ( block >> 19 ) & 0x1 ) << 3 );
+
+ const auto r1444 = ( block >> 11 ) & 0xF;
+ const auto g1444 = ( block >> 7 ) & 0xF;
+ const auto b1444 = ( block >> 3 ) & 0xF;
+
+ const auto r0 = ( r0444 << 4 ) | r0444;
+ const auto g0 = ( g0444 << 4 ) | g0444;
+ const auto b0 = ( b0444 << 4 ) | b0444;
+
+ const auto r1 = ( r1444 << 4 ) | r1444;
+ const auto g1 = ( g1444 << 4 ) | g1444;
+ const auto b1 = ( b1444 << 4 ) | b1444;
+
+ const auto codeword_hi = ( ( block & 0x1 ) << 1 ) | ( ( block & 0x4 ) );
+ const auto c0 = ( r0444 << 8 ) | ( g0444 << 4 ) | ( b0444 << 0 );
+ const auto c1 = ( block >> 3 ) & ( ( 1 << 12 ) - 1 );
+ const auto codeword_lo = ( c0 >= c1 ) ? 1 : 0;
+ const auto codeword = codeword_hi | codeword_lo;
+
+ const uint32_t col_tab[] = {
+ uint32_t(clampu8( r0 + table59T58H[codeword] ) | ( clampu8( g0 + table59T58H[codeword] ) << 8 ) | ( clampu8( b0 + table59T58H[codeword] ) << 16 )),
+ uint32_t(clampu8( r0 - table59T58H[codeword] ) | ( clampu8( g0 - table59T58H[codeword] ) << 8 ) | ( clampu8( b0 - table59T58H[codeword] ) << 16 )),
+ uint32_t(clampu8( r1 + table59T58H[codeword] ) | ( clampu8( g1 + table59T58H[codeword] ) << 8 ) | ( clampu8( b1 + table59T58H[codeword] ) << 16 )),
+ uint32_t(clampu8( r1 - table59T58H[codeword] ) | ( clampu8( g1 - table59T58H[codeword] ) << 8 ) | ( clampu8( b1 - table59T58H[codeword] ) << 16 ))
+ };
+
+ for( uint8_t j = 0; j < 4; j++ )
+ {
+ for( uint8_t i = 0; i < 4; i++ )
+ {
+ const uint8_t index = ( ( ( indexes >> ( j + i * 4 + 16 ) ) & 0x1 ) << 1 ) | ( ( indexes >> ( j + i * 4 ) ) & 0x1 );
+ dst[j * w + i] = col_tab[index] | 0xFF000000;
+ }
+ }
+}
+
+static etcpak_force_inline void DecodeHAlpha( uint64_t block, uint64_t alpha, uint32_t* dst, uint32_t w )
+{
+ const uint32_t indexes = ( block >> 32 ) & 0xFFFFFFFF;
+
+ const auto r0444 = ( block >> 27 ) & 0xF;
+ const auto g0444 = ( ( block >> 20 ) & 0x1 ) | ( ( ( block >> 24 ) & 0x7 ) << 1 );
+ const auto b0444 = ( ( block >> 15 ) & 0x7 ) | ( ( ( block >> 19 ) & 0x1 ) << 3 );
+
+ const auto r1444 = ( block >> 11 ) & 0xF;
+ const auto g1444 = ( block >> 7 ) & 0xF;
+ const auto b1444 = ( block >> 3 ) & 0xF;
+
+ const auto r0 = ( r0444 << 4 ) | r0444;
+ const auto g0 = ( g0444 << 4 ) | g0444;
+ const auto b0 = ( b0444 << 4 ) | b0444;
+
+ const auto r1 = ( r1444 << 4 ) | r1444;
+ const auto g1 = ( g1444 << 4 ) | g1444;
+ const auto b1 = ( b1444 << 4 ) | b1444;
+
+ const auto codeword_hi = ( ( block & 0x1 ) << 1 ) | ( ( block & 0x4 ) );
+ const auto c0 = ( r0444 << 8 ) | ( g0444 << 4 ) | ( b0444 << 0 );
+ const auto c1 = ( block >> 3 ) & ( ( 1 << 12 ) - 1 );
+ const auto codeword_lo = ( c0 >= c1 ) ? 1 : 0;
+ const auto codeword = codeword_hi | codeword_lo;
+
+ const int32_t base = alpha >> 56;
+ const int32_t mul = ( alpha >> 52 ) & 0xF;
+ const auto tbl = g_alpha[(alpha >> 48) & 0xF];
+
+ const uint32_t col_tab[] = {
+ uint32_t(clampu8( r0 + table59T58H[codeword] ) | ( clampu8( g0 + table59T58H[codeword] ) << 8 ) | ( clampu8( b0 + table59T58H[codeword] ) << 16 )),
+ uint32_t(clampu8( r0 - table59T58H[codeword] ) | ( clampu8( g0 - table59T58H[codeword] ) << 8 ) | ( clampu8( b0 - table59T58H[codeword] ) << 16 )),
+ uint32_t(clampu8( r1 + table59T58H[codeword] ) | ( clampu8( g1 + table59T58H[codeword] ) << 8 ) | ( clampu8( b1 + table59T58H[codeword] ) << 16 )),
+ uint32_t(clampu8( r1 - table59T58H[codeword] ) | ( clampu8( g1 - table59T58H[codeword] ) << 8 ) | ( clampu8( b1 - table59T58H[codeword] ) << 16 ))
+ };
+
+ for( uint8_t j = 0; j < 4; j++ )
+ {
+ for( uint8_t i = 0; i < 4; i++ )
+ {
+ const uint8_t index = ( ( ( indexes >> ( j + i * 4 + 16 ) ) & 0x1 ) << 1 ) | ( ( indexes >> ( j + i * 4 ) ) & 0x1 );
+ const auto amod = tbl[( alpha >> ( 45 - j * 3 - i * 12) ) & 0x7];
+ const uint32_t a = clampu8( base + amod * mul );
+ dst[j * w + i] = col_tab[index] | ( a << 24 );
+ }
+ }
+}
+
+static etcpak_force_inline void DecodePlanar( uint64_t block, uint32_t* dst, uint32_t w )
+{
+ const auto bv = expand6((block >> ( 0 + 32)) & 0x3F);
+ const auto gv = expand7((block >> ( 6 + 32)) & 0x7F);
+ const auto rv = expand6((block >> (13 + 32)) & 0x3F);
+
+ const auto bh = expand6((block >> (19 + 32)) & 0x3F);
+ const auto gh = expand7((block >> (25 + 32)) & 0x7F);
+
+ const auto rh0 = (block >> (32 - 32)) & 0x01;
+ const auto rh1 = ((block >> (34 - 32)) & 0x1F) << 1;
+ const auto rh = expand6(rh0 | rh1);
+
+ const auto bo0 = (block >> (39 - 32)) & 0x07;
+ const auto bo1 = ((block >> (43 - 32)) & 0x3) << 3;
+ const auto bo2 = ((block >> (48 - 32)) & 0x1) << 5;
+ const auto bo = expand6(bo0 | bo1 | bo2);
+ const auto go0 = (block >> (49 - 32)) & 0x3F;
+ const auto go1 = ((block >> (56 - 32)) & 0x01) << 6;
+ const auto go = expand7(go0 | go1);
+ const auto ro = expand6((block >> (57 - 32)) & 0x3F);
+
+#ifdef __ARM_NEON
+ uint64_t init = uint64_t(uint16_t(rh-ro)) | ( uint64_t(uint16_t(gh-go)) << 16 ) | ( uint64_t(uint16_t(bh-bo)) << 32 );
+ int16x8_t chco = vreinterpretq_s16_u64( vdupq_n_u64( init ) );
+ init = uint64_t(uint16_t( (rv-ro) - 4 * (rh-ro) )) | ( uint64_t(uint16_t( (gv-go) - 4 * (gh-go) )) << 16 ) | ( uint64_t(uint16_t( (bv-bo) - 4 * (bh-bo) )) << 32 );
+ int16x8_t cvco = vreinterpretq_s16_u64( vdupq_n_u64( init ) );
+ init = uint64_t(4*ro+2) | ( uint64_t(4*go+2) << 16 ) | ( uint64_t(4*bo+2) << 32 ) | ( uint64_t(0xFFF) << 48 );
+ int16x8_t col = vreinterpretq_s16_u64( vdupq_n_u64( init ) );
+
+ for( int j=0; j<4; j++ )
+ {
+ for( int i=0; i<4; i++ )
+ {
+ uint8x8_t c = vqshrun_n_s16( col, 2 );
+ vst1_lane_u32( dst+j*w+i, vreinterpret_u32_u8( c ), 0 );
+ col = vaddq_s16( col, chco );
+ }
+ col = vaddq_s16( col, cvco );
+ }
+#elif defined __AVX2__
+ const auto R0 = 4*ro+2;
+ const auto G0 = 4*go+2;
+ const auto B0 = 4*bo+2;
+ const auto RHO = rh-ro;
+ const auto GHO = gh-go;
+ const auto BHO = bh-bo;
+
+ __m256i cvco = _mm256_setr_epi16( rv - ro, gv - go, bv - bo, 0, rv - ro, gv - go, bv - bo, 0, rv - ro, gv - go, bv - bo, 0, rv - ro, gv - go, bv - bo, 0 );
+ __m256i col = _mm256_setr_epi16( R0, G0, B0, 0xFFF, R0+RHO, G0+GHO, B0+BHO, 0xFFF, R0+2*RHO, G0+2*GHO, B0+2*BHO, 0xFFF, R0+3*RHO, G0+3*GHO, B0+3*BHO, 0xFFF );
+
+ for( int j=0; j<4; j++ )
+ {
+ __m256i c = _mm256_srai_epi16( col, 2 );
+ __m128i s = _mm_packus_epi16( _mm256_castsi256_si128( c ), _mm256_extracti128_si256( c, 1 ) );
+ _mm_storeu_si128( (__m128i*)(dst+j*w), s );
+ col = _mm256_add_epi16( col, cvco );
+ }
+#elif defined __SSE4_1__
+ __m128i chco = _mm_setr_epi16( rh - ro, gh - go, bh - bo, 0, 0, 0, 0, 0 );
+ __m128i cvco = _mm_setr_epi16( (rv - ro) - 4 * (rh - ro), (gv - go) - 4 * (gh - go), (bv - bo) - 4 * (bh - bo), 0, 0, 0, 0, 0 );
+ __m128i col = _mm_setr_epi16( 4*ro+2, 4*go+2, 4*bo+2, 0xFFF, 0, 0, 0, 0 );
+
+ for( int j=0; j<4; j++ )
+ {
+ for( int i=0; i<4; i++ )
+ {
+ __m128i c = _mm_srai_epi16( col, 2 );
+ __m128i s = _mm_packus_epi16( c, c );
+ dst[j*w+i] = _mm_cvtsi128_si32( s );
+ col = _mm_add_epi16( col, chco );
+ }
+ col = _mm_add_epi16( col, cvco );
+ }
+#else
+ for( int j=0; j<4; j++ )
+ {
+ for( int i=0; i<4; i++ )
+ {
+ const uint32_t r = (i * (rh - ro) + j * (rv - ro) + 4 * ro + 2) >> 2;
+ const uint32_t g = (i * (gh - go) + j * (gv - go) + 4 * go + 2) >> 2;
+ const uint32_t b = (i * (bh - bo) + j * (bv - bo) + 4 * bo + 2) >> 2;
+ if( ( ( r | g | b ) & ~0xFF ) == 0 )
+ {
+ dst[j*w+i] = r | ( g << 8 ) | ( b << 16 ) | 0xFF000000;
+ }
+ else
+ {
+ const auto rc = clampu8( r );
+ const auto gc = clampu8( g );
+ const auto bc = clampu8( b );
+ dst[j*w+i] = rc | ( gc << 8 ) | ( bc << 16 ) | 0xFF000000;
+ }
+ }
+ }
+#endif
+}
+
+static etcpak_force_inline void DecodePlanarAlpha( uint64_t block, uint64_t alpha, uint32_t* dst, uint32_t w )
+{
+ const auto bv = expand6((block >> ( 0 + 32)) & 0x3F);
+ const auto gv = expand7((block >> ( 6 + 32)) & 0x7F);
+ const auto rv = expand6((block >> (13 + 32)) & 0x3F);
+
+ const auto bh = expand6((block >> (19 + 32)) & 0x3F);
+ const auto gh = expand7((block >> (25 + 32)) & 0x7F);
+
+ const auto rh0 = (block >> (32 - 32)) & 0x01;
+ const auto rh1 = ((block >> (34 - 32)) & 0x1F) << 1;
+ const auto rh = expand6(rh0 | rh1);
+
+ const auto bo0 = (block >> (39 - 32)) & 0x07;
+ const auto bo1 = ((block >> (43 - 32)) & 0x3) << 3;
+ const auto bo2 = ((block >> (48 - 32)) & 0x1) << 5;
+ const auto bo = expand6(bo0 | bo1 | bo2);
+ const auto go0 = (block >> (49 - 32)) & 0x3F;
+ const auto go1 = ((block >> (56 - 32)) & 0x01) << 6;
+ const auto go = expand7(go0 | go1);
+ const auto ro = expand6((block >> (57 - 32)) & 0x3F);
+
+ const int32_t base = alpha >> 56;
+ const int32_t mul = ( alpha >> 52 ) & 0xF;
+ const auto tbl = g_alpha[( alpha >> 48 ) & 0xF];
+
+#ifdef __ARM_NEON
+ uint64_t init = uint64_t(uint16_t(rh-ro)) | ( uint64_t(uint16_t(gh-go)) << 16 ) | ( uint64_t(uint16_t(bh-bo)) << 32 );
+ int16x8_t chco = vreinterpretq_s16_u64( vdupq_n_u64( init ) );
+ init = uint64_t(uint16_t( (rv-ro) - 4 * (rh-ro) )) | ( uint64_t(uint16_t( (gv-go) - 4 * (gh-go) )) << 16 ) | ( uint64_t(uint16_t( (bv-bo) - 4 * (bh-bo) )) << 32 );
+ int16x8_t cvco = vreinterpretq_s16_u64( vdupq_n_u64( init ) );
+ init = uint64_t(4*ro+2) | ( uint64_t(4*go+2) << 16 ) | ( uint64_t(4*bo+2) << 32 );
+ int16x8_t col = vreinterpretq_s16_u64( vdupq_n_u64( init ) );
+
+ for( int j=0; j<4; j++ )
+ {
+ for( int i=0; i<4; i++ )
+ {
+ const auto amod = tbl[(alpha >> ( 45 - j*3 - i*12 )) & 0x7];
+ const uint32_t a = clampu8( base + amod * mul );
+ uint8x8_t c = vqshrun_n_s16( col, 2 );
+ dst[j*w+i] = vget_lane_u32( vreinterpret_u32_u8( c ), 0 ) | ( a << 24 );
+ col = vaddq_s16( col, chco );
+ }
+ col = vaddq_s16( col, cvco );
+ }
+#elif defined __SSE4_1__
+ __m128i chco = _mm_setr_epi16( rh - ro, gh - go, bh - bo, 0, 0, 0, 0, 0 );
+ __m128i cvco = _mm_setr_epi16( (rv - ro) - 4 * (rh - ro), (gv - go) - 4 * (gh - go), (bv - bo) - 4 * (bh - bo), 0, 0, 0, 0, 0 );
+ __m128i col = _mm_setr_epi16( 4*ro+2, 4*go+2, 4*bo+2, 0, 0, 0, 0, 0 );
+
+ for( int j=0; j<4; j++ )
+ {
+ for( int i=0; i<4; i++ )
+ {
+ const auto amod = tbl[(alpha >> ( 45 - j*3 - i*12 )) & 0x7];
+ const uint32_t a = clampu8( base + amod * mul );
+ __m128i c = _mm_srai_epi16( col, 2 );
+ __m128i s = _mm_packus_epi16( c, c );
+ dst[j*w+i] = _mm_cvtsi128_si32( s ) | ( a << 24 );
+ col = _mm_add_epi16( col, chco );
+ }
+ col = _mm_add_epi16( col, cvco );
+ }
+#else
+ for (auto j = 0; j < 4; j++)
+ {
+ for (auto i = 0; i < 4; i++)
+ {
+ const uint32_t r = (i * (rh - ro) + j * (rv - ro) + 4 * ro + 2) >> 2;
+ const uint32_t g = (i * (gh - go) + j * (gv - go) + 4 * go + 2) >> 2;
+ const uint32_t b = (i * (bh - bo) + j * (bv - bo) + 4 * bo + 2) >> 2;
+ const auto amod = tbl[(alpha >> ( 45 - j*3 - i*12 )) & 0x7];
+ const uint32_t a = clampu8( base + amod * mul );
+ if( ( ( r | g | b ) & ~0xFF ) == 0 )
+ {
+ dst[j*w+i] = r | ( g << 8 ) | ( b << 16 ) | ( a << 24 );
+ }
+ else
+ {
+ const auto rc = clampu8( r );
+ const auto gc = clampu8( g );
+ const auto bc = clampu8( b );
+ dst[j*w+i] = rc | ( gc << 8 ) | ( bc << 16 ) | ( a << 24 );
+ }
+ }
+ }
+#endif
+}
+
+}
+
+BitmapPtr BlockData::Decode()
+{
+ switch( m_type )
+ {
+ case Etc1:
+ case Etc2_RGB:
+ return DecodeRGB();
+ case Etc2_RGBA:
+ return DecodeRGBA();
+ case Dxt1:
+ return DecodeDxt1();
+ case Dxt5:
+ return DecodeDxt5();
+ default:
+ assert( false );
+ return nullptr;
+ }
+}
+
+static etcpak_force_inline uint64_t ConvertByteOrder( uint64_t d )
+{
+ uint32_t word[2];
+ memcpy( word, &d, 8 );
+ word[0] = _bswap( word[0] );
+ word[1] = _bswap( word[1] );
+ memcpy( &d, word, 8 );
+ return d;
+}
+
+static etcpak_force_inline void DecodeRGBPart( uint64_t d, uint32_t* dst, uint32_t w )
+{
+ d = ConvertByteOrder( d );
+
+ uint32_t br[2], bg[2], bb[2];
+
+ if( d & 0x2 )
+ {
+ int32_t dr, dg, db;
+
+ uint32_t r0 = ( d & 0xF8000000 ) >> 27;
+ uint32_t g0 = ( d & 0x00F80000 ) >> 19;
+ uint32_t b0 = ( d & 0x0000F800 ) >> 11;
+
+ dr = ( int32_t(d) << 5 ) >> 29;
+ dg = ( int32_t(d) << 13 ) >> 29;
+ db = ( int32_t(d) << 21 ) >> 29;
+
+ int32_t r1 = int32_t(r0) + dr;
+ int32_t g1 = int32_t(g0) + dg;
+ int32_t b1 = int32_t(b0) + db;
+
+ // T mode
+ if ( (r1 < 0) || (r1 > 31) )
+ {
+ DecodeT( d, dst, w );
+ return;
+ }
+
+ // H mode
+ if ((g1 < 0) || (g1 > 31))
+ {
+ DecodeH( d, dst, w );
+ return;
+ }
+
+ // P mode
+ if( (b1 < 0) || (b1 > 31) )
+ {
+ DecodePlanar( d, dst, w );
+ return;
+ }
+
+ br[0] = ( r0 << 3 ) | ( r0 >> 2 );
+ br[1] = ( r1 << 3 ) | ( r1 >> 2 );
+ bg[0] = ( g0 << 3 ) | ( g0 >> 2 );
+ bg[1] = ( g1 << 3 ) | ( g1 >> 2 );
+ bb[0] = ( b0 << 3 ) | ( b0 >> 2 );
+ bb[1] = ( b1 << 3 ) | ( b1 >> 2 );
+ }
+ else
+ {
+ br[0] = ( ( d & 0xF0000000 ) >> 24 ) | ( ( d & 0xF0000000 ) >> 28 );
+ br[1] = ( ( d & 0x0F000000 ) >> 20 ) | ( ( d & 0x0F000000 ) >> 24 );
+ bg[0] = ( ( d & 0x00F00000 ) >> 16 ) | ( ( d & 0x00F00000 ) >> 20 );
+ bg[1] = ( ( d & 0x000F0000 ) >> 12 ) | ( ( d & 0x000F0000 ) >> 16 );
+ bb[0] = ( ( d & 0x0000F000 ) >> 8 ) | ( ( d & 0x0000F000 ) >> 12 );
+ bb[1] = ( ( d & 0x00000F00 ) >> 4 ) | ( ( d & 0x00000F00 ) >> 8 );
+ }
+
+ unsigned int tcw[2];
+ tcw[0] = ( d & 0xE0 ) >> 5;
+ tcw[1] = ( d & 0x1C ) >> 2;
+
+ uint32_t b1 = ( d >> 32 ) & 0xFFFF;
+ uint32_t b2 = ( d >> 48 );
+
+ b1 = ( b1 | ( b1 << 8 ) ) & 0x00FF00FF;
+ b1 = ( b1 | ( b1 << 4 ) ) & 0x0F0F0F0F;
+ b1 = ( b1 | ( b1 << 2 ) ) & 0x33333333;
+ b1 = ( b1 | ( b1 << 1 ) ) & 0x55555555;
+
+ b2 = ( b2 | ( b2 << 8 ) ) & 0x00FF00FF;
+ b2 = ( b2 | ( b2 << 4 ) ) & 0x0F0F0F0F;
+ b2 = ( b2 | ( b2 << 2 ) ) & 0x33333333;
+ b2 = ( b2 | ( b2 << 1 ) ) & 0x55555555;
+
+ uint32_t idx = b1 | ( b2 << 1 );
+
+ if( d & 0x1 )
+ {
+ for( int i=0; i<4; i++ )
+ {
+ for( int j=0; j<4; j++ )
+ {
+ const auto mod = g_table[tcw[j/2]][idx & 0x3];
+ const auto r = br[j/2] + mod;
+ const auto g = bg[j/2] + mod;
+ const auto b = bb[j/2] + mod;
+ if( ( ( r | g | b ) & ~0xFF ) == 0 )
+ {
+ dst[j*w+i] = r | ( g << 8 ) | ( b << 16 ) | 0xFF000000;
+ }
+ else
+ {
+ const auto rc = clampu8( r );
+ const auto gc = clampu8( g );
+ const auto bc = clampu8( b );
+ dst[j*w+i] = rc | ( gc << 8 ) | ( bc << 16 ) | 0xFF000000;
+ }
+ idx >>= 2;
+ }
+ }
+ }
+ else
+ {
+ for( int i=0; i<4; i++ )
+ {
+ const auto tbl = g_table[tcw[i/2]];
+ const auto cr = br[i/2];
+ const auto cg = bg[i/2];
+ const auto cb = bb[i/2];
+
+ for( int j=0; j<4; j++ )
+ {
+ const auto mod = tbl[idx & 0x3];
+ const auto r = cr + mod;
+ const auto g = cg + mod;
+ const auto b = cb + mod;
+ if( ( ( r | g | b ) & ~0xFF ) == 0 )
+ {
+ dst[j*w+i] = r | ( g << 8 ) | ( b << 16 ) | 0xFF000000;
+ }
+ else
+ {
+ const auto rc = clampu8( r );
+ const auto gc = clampu8( g );
+ const auto bc = clampu8( b );
+ dst[j*w+i] = rc | ( gc << 8 ) | ( bc << 16 ) | 0xFF000000;
+ }
+ idx >>= 2;
+ }
+ }
+ }
+}
+
+static etcpak_force_inline void DecodeRGBAPart( uint64_t d, uint64_t alpha, uint32_t* dst, uint32_t w )
+{
+ d = ConvertByteOrder( d );
+ alpha = _bswap64( alpha );
+
+ uint32_t br[2], bg[2], bb[2];
+
+ if( d & 0x2 )
+ {
+ int32_t dr, dg, db;
+
+ uint32_t r0 = ( d & 0xF8000000 ) >> 27;
+ uint32_t g0 = ( d & 0x00F80000 ) >> 19;
+ uint32_t b0 = ( d & 0x0000F800 ) >> 11;
+
+ dr = ( int32_t(d) << 5 ) >> 29;
+ dg = ( int32_t(d) << 13 ) >> 29;
+ db = ( int32_t(d) << 21 ) >> 29;
+
+ int32_t r1 = int32_t(r0) + dr;
+ int32_t g1 = int32_t(g0) + dg;
+ int32_t b1 = int32_t(b0) + db;
+
+ // T mode
+ if ( (r1 < 0) || (r1 > 31) )
+ {
+ DecodeTAlpha( d, alpha, dst, w );
+ return;
+ }
+
+ // H mode
+ if ( (g1 < 0) || (g1 > 31) )
+ {
+ DecodeHAlpha( d, alpha, dst, w );
+ return;
+ }
+
+ // P mode
+ if ( (b1 < 0) || (b1 > 31) )
+ {
+ DecodePlanarAlpha( d, alpha, dst, w );
+ return;
+ }
+
+ br[0] = ( r0 << 3 ) | ( r0 >> 2 );
+ br[1] = ( r1 << 3 ) | ( r1 >> 2 );
+ bg[0] = ( g0 << 3 ) | ( g0 >> 2 );
+ bg[1] = ( g1 << 3 ) | ( g1 >> 2 );
+ bb[0] = ( b0 << 3 ) | ( b0 >> 2 );
+ bb[1] = ( b1 << 3 ) | ( b1 >> 2 );
+ }
+ else
+ {
+ br[0] = ( ( d & 0xF0000000 ) >> 24 ) | ( ( d & 0xF0000000 ) >> 28 );
+ br[1] = ( ( d & 0x0F000000 ) >> 20 ) | ( ( d & 0x0F000000 ) >> 24 );
+ bg[0] = ( ( d & 0x00F00000 ) >> 16 ) | ( ( d & 0x00F00000 ) >> 20 );
+ bg[1] = ( ( d & 0x000F0000 ) >> 12 ) | ( ( d & 0x000F0000 ) >> 16 );
+ bb[0] = ( ( d & 0x0000F000 ) >> 8 ) | ( ( d & 0x0000F000 ) >> 12 );
+ bb[1] = ( ( d & 0x00000F00 ) >> 4 ) | ( ( d & 0x00000F00 ) >> 8 );
+ }
+
+ unsigned int tcw[2];
+ tcw[0] = ( d & 0xE0 ) >> 5;
+ tcw[1] = ( d & 0x1C ) >> 2;
+
+ uint32_t b1 = ( d >> 32 ) & 0xFFFF;
+ uint32_t b2 = ( d >> 48 );
+
+ b1 = ( b1 | ( b1 << 8 ) ) & 0x00FF00FF;
+ b1 = ( b1 | ( b1 << 4 ) ) & 0x0F0F0F0F;
+ b1 = ( b1 | ( b1 << 2 ) ) & 0x33333333;
+ b1 = ( b1 | ( b1 << 1 ) ) & 0x55555555;
+
+ b2 = ( b2 | ( b2 << 8 ) ) & 0x00FF00FF;
+ b2 = ( b2 | ( b2 << 4 ) ) & 0x0F0F0F0F;
+ b2 = ( b2 | ( b2 << 2 ) ) & 0x33333333;
+ b2 = ( b2 | ( b2 << 1 ) ) & 0x55555555;
+
+ uint32_t idx = b1 | ( b2 << 1 );
+
+ const int32_t base = alpha >> 56;
+ const int32_t mul = ( alpha >> 52 ) & 0xF;
+ const auto atbl = g_alpha[( alpha >> 48 ) & 0xF];
+
+ if( d & 0x1 )
+ {
+ for( int i=0; i<4; i++ )
+ {
+ for( int j=0; j<4; j++ )
+ {
+ const auto mod = g_table[tcw[j/2]][idx & 0x3];
+ const auto r = br[j/2] + mod;
+ const auto g = bg[j/2] + mod;
+ const auto b = bb[j/2] + mod;
+ const auto amod = atbl[(alpha >> ( 45 - j*3 - i*12 )) & 0x7];
+ const uint32_t a = clampu8( base + amod * mul );
+ if( ( ( r | g | b ) & ~0xFF ) == 0 )
+ {
+ dst[j*w+i] = r | ( g << 8 ) | ( b << 16 ) | ( a << 24 );
+ }
+ else
+ {
+ const auto rc = clampu8( r );
+ const auto gc = clampu8( g );
+ const auto bc = clampu8( b );
+ dst[j*w+i] = rc | ( gc << 8 ) | ( bc << 16 ) | ( a << 24 );
+ }
+ idx >>= 2;
+ }
+ }
+ }
+ else
+ {
+ for( int i=0; i<4; i++ )
+ {
+ const auto tbl = g_table[tcw[i/2]];
+ const auto cr = br[i/2];
+ const auto cg = bg[i/2];
+ const auto cb = bb[i/2];
+
+ for( int j=0; j<4; j++ )
+ {
+ const auto mod = tbl[idx & 0x3];
+ const auto r = cr + mod;
+ const auto g = cg + mod;
+ const auto b = cb + mod;
+ const auto amod = atbl[(alpha >> ( 45 - j*3 - i*12 )) & 0x7];
+ const uint32_t a = clampu8( base + amod * mul );
+ if( ( ( r | g | b ) & ~0xFF ) == 0 )
+ {
+ dst[j*w+i] = r | ( g << 8 ) | ( b << 16 ) | ( a << 24 );
+ }
+ else
+ {
+ const auto rc = clampu8( r );
+ const auto gc = clampu8( g );
+ const auto bc = clampu8( b );
+ dst[j*w+i] = rc | ( gc << 8 ) | ( bc << 16 ) | ( a << 24 );
+ }
+ idx >>= 2;
+ }
+ }
+ }
+}
+
+BitmapPtr BlockData::DecodeRGB()
+{
+ auto ret = std::make_shared<Bitmap>( m_size );
+
+ const uint64_t* src = (const uint64_t*)( m_data + m_dataOffset );
+ uint32_t* dst = ret->Data();
+
+ for( int y=0; y<m_size.y/4; y++ )
+ {
+ for( int x=0; x<m_size.x/4; x++ )
+ {
+ uint64_t d = *src++;
+ DecodeRGBPart( d, dst, m_size.x );
+ dst += 4;
+ }
+ dst += m_size.x*3;
+ }
+
+ return ret;
+}
+
+BitmapPtr BlockData::DecodeRGBA()
+{
+ auto ret = std::make_shared<Bitmap>( m_size );
+
+ const uint64_t* src = (const uint64_t*)( m_data + m_dataOffset );
+ uint32_t* dst = ret->Data();
+
+ for( int y=0; y<m_size.y/4; y++ )
+ {
+ for( int x=0; x<m_size.x/4; x++ )
+ {
+ uint64_t a = *src++;
+ uint64_t d = *src++;
+ DecodeRGBAPart( d, a, dst, m_size.x );
+ dst += 4;
+ }
+ dst += m_size.x*3;
+ }
+
+ return ret;
+}
+
+static etcpak_force_inline void DecodeDxt1Part( uint64_t d, uint32_t* dst, uint32_t w )
+{
+ uint8_t* in = (uint8_t*)&d;
+ uint16_t c0, c1;
+ uint32_t idx;
+ memcpy( &c0, in, 2 );
+ memcpy( &c1, in+2, 2 );
+ memcpy( &idx, in+4, 4 );
+
+ uint8_t r0 = ( ( c0 & 0xF800 ) >> 8 ) | ( ( c0 & 0xF800 ) >> 13 );
+ uint8_t g0 = ( ( c0 & 0x07E0 ) >> 3 ) | ( ( c0 & 0x07E0 ) >> 9 );
+ uint8_t b0 = ( ( c0 & 0x001F ) << 3 ) | ( ( c0 & 0x001F ) >> 2 );
+
+ uint8_t r1 = ( ( c1 & 0xF800 ) >> 8 ) | ( ( c1 & 0xF800 ) >> 13 );
+ uint8_t g1 = ( ( c1 & 0x07E0 ) >> 3 ) | ( ( c1 & 0x07E0 ) >> 9 );
+ uint8_t b1 = ( ( c1 & 0x001F ) << 3 ) | ( ( c1 & 0x001F ) >> 2 );
+
+ uint32_t dict[4];
+
+ dict[0] = 0xFF000000 | ( b0 << 16 ) | ( g0 << 8 ) | r0;
+ dict[1] = 0xFF000000 | ( b1 << 16 ) | ( g1 << 8 ) | r1;
+
+ uint32_t r, g, b;
+ if( c0 > c1 )
+ {
+ r = (2*r0+r1)/3;
+ g = (2*g0+g1)/3;
+ b = (2*b0+b1)/3;
+ dict[2] = 0xFF000000 | ( b << 16 ) | ( g << 8 ) | r;
+ r = (2*r1+r0)/3;
+ g = (2*g1+g0)/3;
+ b = (2*b1+b0)/3;
+ dict[3] = 0xFF000000 | ( b << 16 ) | ( g << 8 ) | r;
+ }
+ else
+ {
+ r = (int(r0)+r1)/2;
+ g = (int(g0)+g1)/2;
+ b = (int(b0)+b1)/2;
+ dict[2] = 0xFF000000 | ( b << 16 ) | ( g << 8 ) | r;
+ dict[3] = 0xFF000000;
+ }
+
+ memcpy( dst+0, dict + (idx & 0x3), 4 );
+ idx >>= 2;
+ memcpy( dst+1, dict + (idx & 0x3), 4 );
+ idx >>= 2;
+ memcpy( dst+2, dict + (idx & 0x3), 4 );
+ idx >>= 2;
+ memcpy( dst+3, dict + (idx & 0x3), 4 );
+ idx >>= 2;
+ dst += w;
+
+ memcpy( dst+0, dict + (idx & 0x3), 4 );
+ idx >>= 2;
+ memcpy( dst+1, dict + (idx & 0x3), 4 );
+ idx >>= 2;
+ memcpy( dst+2, dict + (idx & 0x3), 4 );
+ idx >>= 2;
+ memcpy( dst+3, dict + (idx & 0x3), 4 );
+ idx >>= 2;
+ dst += w;
+
+ memcpy( dst+0, dict + (idx & 0x3), 4 );
+ idx >>= 2;
+ memcpy( dst+1, dict + (idx & 0x3), 4 );
+ idx >>= 2;
+ memcpy( dst+2, dict + (idx & 0x3), 4 );
+ idx >>= 2;
+ memcpy( dst+3, dict + (idx & 0x3), 4 );
+ idx >>= 2;
+ dst += w;
+
+ memcpy( dst+0, dict + (idx & 0x3), 4 );
+ idx >>= 2;
+ memcpy( dst+1, dict + (idx & 0x3), 4 );
+ idx >>= 2;
+ memcpy( dst+2, dict + (idx & 0x3), 4 );
+ idx >>= 2;
+ memcpy( dst+3, dict + (idx & 0x3), 4 );
+}
+
+static etcpak_force_inline void DecodeDxt5Part( uint64_t a, uint64_t d, uint32_t* dst, uint32_t w )
+{
+ uint8_t* ain = (uint8_t*)&a;
+ uint8_t a0, a1;
+ uint64_t aidx = 0;
+ memcpy( &a0, ain, 1 );
+ memcpy( &a1, ain+1, 1 );
+ memcpy( &aidx, ain+2, 6 );
+
+ uint8_t* in = (uint8_t*)&d;
+ uint16_t c0, c1;
+ uint32_t idx;
+ memcpy( &c0, in, 2 );
+ memcpy( &c1, in+2, 2 );
+ memcpy( &idx, in+4, 4 );
+
+ uint32_t adict[8];
+ adict[0] = a0 << 24;
+ adict[1] = a1 << 24;
+ if( a0 > a1 )
+ {
+ adict[2] = ( (6*a0+1*a1)/7 ) << 24;
+ adict[3] = ( (5*a0+2*a1)/7 ) << 24;
+ adict[4] = ( (4*a0+3*a1)/7 ) << 24;
+ adict[5] = ( (3*a0+4*a1)/7 ) << 24;
+ adict[6] = ( (2*a0+5*a1)/7 ) << 24;
+ adict[7] = ( (1*a0+6*a1)/7 ) << 24;
+ }
+ else
+ {
+ adict[2] = ( (4*a0+1*a1)/5 ) << 24;
+ adict[3] = ( (3*a0+2*a1)/5 ) << 24;
+ adict[4] = ( (2*a0+3*a1)/5 ) << 24;
+ adict[5] = ( (1*a0+4*a1)/5 ) << 24;
+ adict[6] = 0;
+ adict[7] = 0xFF000000;
+ }
+
+ uint8_t r0 = ( ( c0 & 0xF800 ) >> 8 ) | ( ( c0 & 0xF800 ) >> 13 );
+ uint8_t g0 = ( ( c0 & 0x07E0 ) >> 3 ) | ( ( c0 & 0x07E0 ) >> 9 );
+ uint8_t b0 = ( ( c0 & 0x001F ) << 3 ) | ( ( c0 & 0x001F ) >> 2 );
+
+ uint8_t r1 = ( ( c1 & 0xF800 ) >> 8 ) | ( ( c1 & 0xF800 ) >> 13 );
+ uint8_t g1 = ( ( c1 & 0x07E0 ) >> 3 ) | ( ( c1 & 0x07E0 ) >> 9 );
+ uint8_t b1 = ( ( c1 & 0x001F ) << 3 ) | ( ( c1 & 0x001F ) >> 2 );
+
+ uint32_t dict[4];
+
+ dict[0] = ( b0 << 16 ) | ( g0 << 8 ) | r0;
+ dict[1] = ( b1 << 16 ) | ( g1 << 8 ) | r1;
+
+ uint32_t r, g, b;
+ if( c0 > c1 )
+ {
+ r = (2*r0+r1)/3;
+ g = (2*g0+g1)/3;
+ b = (2*b0+b1)/3;
+ dict[2] = ( b << 16 ) | ( g << 8 ) | r;
+ r = (2*r1+r0)/3;
+ g = (2*g1+g0)/3;
+ b = (2*b1+b0)/3;
+ dict[3] = ( b << 16 ) | ( g << 8 ) | r;
+ }
+ else
+ {
+ r = (int(r0)+r1)/2;
+ g = (int(g0)+g1)/2;
+ b = (int(b0)+b1)/2;
+ dict[2] = ( b << 16 ) | ( g << 8 ) | r;
+ dict[3] = 0;
+ }
+
+ dst[0] = dict[idx & 0x3] | adict[aidx & 0x7];
+ idx >>= 2;
+ aidx >>= 3;
+ dst[1] = dict[idx & 0x3] | adict[aidx & 0x7];
+ idx >>= 2;
+ aidx >>= 3;
+ dst[2] = dict[idx & 0x3] | adict[aidx & 0x7];
+ idx >>= 2;
+ aidx >>= 3;
+ dst[3] = dict[idx & 0x3] | adict[aidx & 0x7];
+ idx >>= 2;
+ aidx >>= 3;
+ dst += w;
+
+ dst[0] = dict[idx & 0x3] | adict[aidx & 0x7];
+ idx >>= 2;
+ aidx >>= 3;
+ dst[1] = dict[idx & 0x3] | adict[aidx & 0x7];
+ idx >>= 2;
+ aidx >>= 3;
+ dst[2] = dict[idx & 0x3] | adict[aidx & 0x7];
+ idx >>= 2;
+ aidx >>= 3;
+ dst[3] = dict[idx & 0x3] | adict[aidx & 0x7];
+ idx >>= 2;
+ aidx >>= 3;
+ dst += w;
+
+ dst[0] = dict[idx & 0x3] | adict[aidx & 0x7];
+ idx >>= 2;
+ aidx >>= 3;
+ dst[1] = dict[idx & 0x3] | adict[aidx & 0x7];
+ idx >>= 2;
+ aidx >>= 3;
+ dst[2] = dict[idx & 0x3] | adict[aidx & 0x7];
+ idx >>= 2;
+ aidx >>= 3;
+ dst[3] = dict[idx & 0x3] | adict[aidx & 0x7];
+ idx >>= 2;
+ aidx >>= 3;
+ dst += w;
+
+ dst[0] = dict[idx & 0x3] | adict[aidx & 0x7];
+ idx >>= 2;
+ aidx >>= 3;
+ dst[1] = dict[idx & 0x3] | adict[aidx & 0x7];
+ idx >>= 2;
+ aidx >>= 3;
+ dst[2] = dict[idx & 0x3] | adict[aidx & 0x7];
+ idx >>= 2;
+ aidx >>= 3;
+ dst[3] = dict[idx & 0x3] | adict[aidx & 0x7];
+}
+
+BitmapPtr BlockData::DecodeDxt1()
+{
+ auto ret = std::make_shared<Bitmap>( m_size );
+
+ const uint64_t* src = (const uint64_t*)( m_data + m_dataOffset );
+ uint32_t* dst = ret->Data();
+
+ for( int y=0; y<m_size.y/4; y++ )
+ {
+ for( int x=0; x<m_size.x/4; x++ )
+ {
+ uint64_t d = *src++;
+ DecodeDxt1Part( d, dst, m_size.x );
+ dst += 4;
+ }
+ dst += m_size.x*3;
+ }
+
+ return ret;
+}
+
+BitmapPtr BlockData::DecodeDxt5()
+{
+ auto ret = std::make_shared<Bitmap>( m_size );
+
+ const uint64_t* src = (const uint64_t*)( m_data + m_dataOffset );
+ uint32_t* dst = ret->Data();
+
+ for( int y=0; y<m_size.y/4; y++ )
+ {
+ for( int x=0; x<m_size.x/4; x++ )
+ {
+ uint64_t a = *src++;
+ uint64_t d = *src++;
+ DecodeDxt5Part( a, d, dst, m_size.x );
+ dst += 4;
+ }
+ dst += m_size.x*3;
+ }
+
+ return ret;
+}
diff --git a/thirdparty/etcpak/BlockData.hpp b/thirdparty/etcpak/BlockData.hpp
new file mode 100644
index 0000000000..209e35b4e6
--- /dev/null
+++ b/thirdparty/etcpak/BlockData.hpp
@@ -0,0 +1,56 @@
+#ifndef __BLOCKDATA_HPP__
+#define __BLOCKDATA_HPP__
+
+#include <condition_variable>
+#include <future>
+#include <memory>
+#include <mutex>
+#include <stdint.h>
+#include <stdio.h>
+#include <vector>
+
+#include "Bitmap.hpp"
+#include "ForceInline.hpp"
+#include "Vector.hpp"
+
+class BlockData
+{
+public:
+ enum Type
+ {
+ Etc1,
+ Etc2_RGB,
+ Etc2_RGBA,
+ Dxt1,
+ Dxt5
+ };
+
+ BlockData( const char* fn );
+ BlockData( const char* fn, const v2i& size, bool mipmap, Type type );
+ BlockData( const v2i& size, bool mipmap, Type type );
+ ~BlockData();
+
+ BitmapPtr Decode();
+
+ void Process( const uint32_t* src, uint32_t blocks, size_t offset, size_t width, Channels type, bool dither );
+ void ProcessRGBA( const uint32_t* src, uint32_t blocks, size_t offset, size_t width );
+
+ const v2i& Size() const { return m_size; }
+
+private:
+ etcpak_no_inline BitmapPtr DecodeRGB();
+ etcpak_no_inline BitmapPtr DecodeRGBA();
+ etcpak_no_inline BitmapPtr DecodeDxt1();
+ etcpak_no_inline BitmapPtr DecodeDxt5();
+
+ uint8_t* m_data;
+ v2i m_size;
+ size_t m_dataOffset;
+ FILE* m_file;
+ size_t m_maplen;
+ Type m_type;
+};
+
+typedef std::shared_ptr<BlockData> BlockDataPtr;
+
+#endif
diff --git a/thirdparty/etcpak/ColorSpace.cpp b/thirdparty/etcpak/ColorSpace.cpp
new file mode 100644
index 0000000000..0411541066
--- /dev/null
+++ b/thirdparty/etcpak/ColorSpace.cpp
@@ -0,0 +1,114 @@
+#include <math.h>
+#include <stdint.h>
+
+#include "Math.hpp"
+#include "ColorSpace.hpp"
+
+namespace Color
+{
+
+ static const XYZ white( v3b( 255, 255, 255 ) );
+ static const v3f rwhite( 1.f / white.x, 1.f / white.y, 1.f / white.z );
+
+
+ XYZ::XYZ( float _x, float _y, float _z )
+ : x( _x )
+ , y( _y )
+ , z( _z )
+ {
+ }
+
+ XYZ::XYZ( const v3b& rgb )
+ {
+ const float r = rgb.x / 255.f;
+ const float g = rgb.y / 255.f;
+ const float b = rgb.z / 255.f;
+
+ const float rl = sRGB2linear( r );
+ const float gl = sRGB2linear( g );
+ const float bl = sRGB2linear( b );
+
+ x = 0.4124f * rl + 0.3576f * gl + 0.1805f * bl;
+ y = 0.2126f * rl + 0.7152f * gl + 0.0722f * bl;
+ z = 0.0193f * rl + 0.1192f * gl + 0.9505f * bl;
+ }
+
+ static float revlab( float t )
+ {
+ const float p1 = 6.f/29.f;
+ const float p2 = 4.f/29.f;
+
+ if( t > p1 )
+ {
+ return t*t*t;
+ }
+ else
+ {
+ return 3 * sq( p1 ) * ( t - p2 );
+ }
+ }
+
+ XYZ::XYZ( const Lab& lab )
+ {
+ y = white.y * revlab( 1.f/116.f * ( lab.L + 16 ) );
+ x = white.x * revlab( 1.f/116.f * ( lab.L + 16 ) + 1.f/500.f * lab.a );
+ z = white.z * revlab( 1.f/116.f * ( lab.L + 16 ) - 1.f/200.f * lab.b );
+ }
+
+ v3i XYZ::RGB() const
+ {
+ const float rl = 3.2406f * x - 1.5372f * y - 0.4986f * z;
+ const float gl = -0.9689f * x + 1.8758f * y + 0.0415f * z;
+ const float bl = 0.0557f * x - 0.2040f * y + 1.0570f * z;
+
+ const float r = linear2sRGB( rl );
+ const float g = linear2sRGB( gl );
+ const float b = linear2sRGB( bl );
+
+ return v3i( clampu8( int32_t( r * 255 ) ), clampu8( int32_t( g * 255 ) ), clampu8( int32_t( b * 255 ) ) );
+ }
+
+
+ Lab::Lab()
+ : L( 0 )
+ , a( 0 )
+ , b( 0 )
+ {
+ }
+
+ Lab::Lab( float L, float a, float b )
+ : L( L )
+ , a( a )
+ , b( b )
+ {
+ }
+
+ static float labfunc( float t )
+ {
+ const float p1 = (6.f/29.f)*(6.f/29.f)*(6.f/29.f);
+ const float p2 = (1.f/3.f)*(29.f/6.f)*(29.f/6.f);
+ const float p3 = (4.f/29.f);
+
+ if( t > p1 )
+ {
+ return pow( t, 1.f/3.f );
+ }
+ else
+ {
+ return p2 * t + p3;
+ }
+ }
+
+ Lab::Lab( const XYZ& xyz )
+ {
+ L = 116 * labfunc( xyz.y * rwhite.y ) - 16;
+ a = 500 * ( labfunc( xyz.x * rwhite.x ) - labfunc( xyz.y * rwhite.y ) );
+ b = 200 * ( labfunc( xyz.y * rwhite.y ) - labfunc( xyz.z * rwhite.z ) );
+ }
+
+ Lab::Lab( const v3b& rgb )
+ {
+ new(this) Lab( XYZ( rgb ) );
+ }
+
+}
diff --git a/thirdparty/etcpak/ColorSpace.hpp b/thirdparty/etcpak/ColorSpace.hpp
new file mode 100644
index 0000000000..c9d0a9cf3f
--- /dev/null
+++ b/thirdparty/etcpak/ColorSpace.hpp
@@ -0,0 +1,36 @@
+#ifndef __DARKRL__COLORSPACE_HPP__
+#define __DARKRL__COLORSPACE_HPP__
+
+#include "Vector.hpp"
+
+namespace Color
+{
+
+ class Lab;
+
+ class XYZ
+ {
+ public:
+ XYZ( float x, float y, float z );
+ XYZ( const v3b& rgb );
+ XYZ( const Lab& lab );
+
+ v3i RGB() const;
+
+ float x, y, z;
+ };
+
+ class Lab
+ {
+ public:
+ Lab();
+ Lab( float L, float a, float b );
+ Lab( const XYZ& xyz );
+ Lab( const v3b& rgb );
+
+ float L, a, b;
+ };
+
+}
+
+#endif
diff --git a/thirdparty/etcpak/DataProvider.cpp b/thirdparty/etcpak/DataProvider.cpp
new file mode 100644
index 0000000000..6bd4b105ed
--- /dev/null
+++ b/thirdparty/etcpak/DataProvider.cpp
@@ -0,0 +1,77 @@
+#include <assert.h>
+#include <utility>
+
+#include "BitmapDownsampled.hpp"
+#include "DataProvider.hpp"
+#include "MipMap.hpp"
+
+DataProvider::DataProvider( const char* fn, bool mipmap, bool bgr )
+ : m_offset( 0 )
+ , m_mipmap( mipmap )
+ , m_done( false )
+ , m_lines( 32 )
+{
+ m_bmp.emplace_back( new Bitmap( fn, m_lines, bgr ) );
+ m_current = m_bmp[0].get();
+}
+
+DataProvider::~DataProvider()
+{
+}
+
+unsigned int DataProvider::NumberOfParts() const
+{
+ unsigned int parts = ( ( m_bmp[0]->Size().y / 4 ) + m_lines - 1 ) / m_lines;
+
+ if( m_mipmap )
+ {
+ v2i current = m_bmp[0]->Size();
+ int levels = NumberOfMipLevels( current );
+ unsigned int lines = m_lines;
+ for( int i=1; i<levels; i++ )
+ {
+ assert( current.x != 1 || current.y != 1 );
+ current.x = std::max( 1, current.x / 2 );
+ current.y = std::max( 1, current.y / 2 );
+ lines *= 2;
+ parts += ( ( std::max( 4, current.y ) / 4 ) + lines - 1 ) / lines;
+ }
+ assert( current.x == 1 && current.y == 1 );
+ }
+
+ return parts;
+}
+
+DataPart DataProvider::NextPart()
+{
+ assert( !m_done );
+
+ unsigned int lines = m_lines;
+ bool done;
+
+ const auto ptr = m_current->NextBlock( lines, done );
+ DataPart ret = {
+ ptr,
+ std::max<unsigned int>( 4, m_current->Size().x ),
+ lines,
+ m_offset
+ };
+
+ m_offset += m_current->Size().x / 4 * lines;
+
+ if( done )
+ {
+ if( m_mipmap && ( m_current->Size().x != 1 || m_current->Size().y != 1 ) )
+ {
+ m_lines *= 2;
+ m_bmp.emplace_back( new BitmapDownsampled( *m_current, m_lines ) );
+ m_current = m_bmp[m_bmp.size()-1].get();
+ }
+ else
+ {
+ m_done = true;
+ }
+ }
+
+ return ret;
+}
diff --git a/thirdparty/etcpak/DataProvider.hpp b/thirdparty/etcpak/DataProvider.hpp
new file mode 100644
index 0000000000..e773801ed6
--- /dev/null
+++ b/thirdparty/etcpak/DataProvider.hpp
@@ -0,0 +1,41 @@
+#ifndef __DATAPROVIDER_HPP__
+#define __DATAPROVIDER_HPP__
+
+#include <memory>
+#include <stdint.h>
+#include <vector>
+
+#include "Bitmap.hpp"
+
+struct DataPart
+{
+ const uint32_t* src;
+ unsigned int width;
+ unsigned int lines;
+ unsigned int offset;
+};
+
+class DataProvider
+{
+public:
+ DataProvider( const char* fn, bool mipmap, bool bgr );
+ ~DataProvider();
+
+ unsigned int NumberOfParts() const;
+
+ DataPart NextPart();
+
+ bool Alpha() const { return m_bmp[0]->Alpha(); }
+ const v2i& Size() const { return m_bmp[0]->Size(); }
+ const Bitmap& ImageData() const { return *m_bmp[0]; }
+
+private:
+ std::vector<std::unique_ptr<Bitmap>> m_bmp;
+ Bitmap* m_current;
+ unsigned int m_offset;
+ unsigned int m_lines;
+ bool m_mipmap;
+ bool m_done;
+};
+
+#endif
diff --git a/thirdparty/etcpak/Debug.cpp b/thirdparty/etcpak/Debug.cpp
new file mode 100644
index 0000000000..72dc4e0526
--- /dev/null
+++ b/thirdparty/etcpak/Debug.cpp
@@ -0,0 +1,31 @@
+#include <algorithm>
+#include <vector>
+#include "Debug.hpp"
+
+static std::vector<DebugLog::Callback*> s_callbacks;
+
+void DebugLog::Message( const char* msg )
+{
+ for( auto it = s_callbacks.begin(); it != s_callbacks.end(); ++it )
+ {
+ (*it)->OnDebugMessage( msg );
+ }
+}
+
+void DebugLog::AddCallback( Callback* c )
+{
+ const auto it = std::find( s_callbacks.begin(), s_callbacks.end(), c );
+ if( it == s_callbacks.end() )
+ {
+ s_callbacks.push_back( c );
+ }
+}
+
+void DebugLog::RemoveCallback( Callback* c )
+{
+ const auto it = std::find( s_callbacks.begin(), s_callbacks.end(), c );
+ if( it != s_callbacks.end() )
+ {
+ s_callbacks.erase( it );
+ }
+}
diff --git a/thirdparty/etcpak/Debug.hpp b/thirdparty/etcpak/Debug.hpp
new file mode 100644
index 0000000000..524eaa7baf
--- /dev/null
+++ b/thirdparty/etcpak/Debug.hpp
@@ -0,0 +1,27 @@
+#ifndef __DARKRL__DEBUG_HPP__
+#define __DARKRL__DEBUG_HPP__
+
+#ifdef DEBUG
+# include <sstream>
+# define DBGPRINT(msg) { std::stringstream __buf; __buf << msg; DebugLog::Message( __buf.str().c_str() ); }
+#else
+# define DBGPRINT(msg) ((void)0)
+#endif
+
+class DebugLog
+{
+public:
+ struct Callback
+ {
+ virtual void OnDebugMessage( const char* msg ) = 0;
+ };
+
+ static void Message( const char* msg );
+ static void AddCallback( Callback* c );
+ static void RemoveCallback( Callback* c );
+
+private:
+ DebugLog() {}
+};
+
+#endif
diff --git a/thirdparty/etcpak/Dither.cpp b/thirdparty/etcpak/Dither.cpp
new file mode 100644
index 0000000000..355686f26b
--- /dev/null
+++ b/thirdparty/etcpak/Dither.cpp
@@ -0,0 +1,120 @@
+#include <algorithm>
+#include <string.h>
+
+#include "Dither.hpp"
+#include "Math.hpp"
+#ifdef __SSE4_1__
+# ifdef _MSC_VER
+# include <intrin.h>
+# include <Windows.h>
+# else
+# include <x86intrin.h>
+# endif
+#endif
+
+#ifdef __AVX2__
+void DitherAvx2( uint8_t* data, __m128i px0, __m128i px1, __m128i px2, __m128i px3 )
+{
+ static constexpr uint8_t a31[] = { 0, 0, 0, 1, 2, 0, 4, 0, 0, 2, 0, 0, 4, 0, 3, 0 };
+ static constexpr uint8_t a63[] = { 0, 0, 0, 0, 1, 0, 2, 0, 0, 1, 0, 0, 2, 0, 1, 0 };
+ static constexpr uint8_t s31[] = { 5, 0, 4, 0, 0, 2, 0, 1, 3, 0, 4, 0, 0, 0, 0, 2 };
+ static constexpr uint8_t s63[] = { 2, 0, 2, 0, 0, 1, 0, 0, 1, 0, 2, 0, 0, 0, 0, 1 };
+
+ const __m256i BayerAdd0 = _mm256_setr_epi8(
+ a31[0], a63[0], a31[0], 0, a31[1], a63[1], a31[1], 0, a31[2], a63[2], a31[2], 0, a31[3], a63[3], a31[3], 0,
+ a31[4], a63[4], a31[4], 0, a31[5], a63[5], a31[5], 0, a31[6], a63[6], a31[6], 0, a31[7], a63[7], a31[7], 0
+ );
+ const __m256i BayerAdd1 = _mm256_setr_epi8(
+ a31[8], a63[8], a31[8], 0, a31[9], a63[9], a31[9], 0, a31[10], a63[10], a31[10], 0, a31[11], a63[11], a31[11], 0,
+ a31[12], a63[12], a31[12], 0, a31[13], a63[13], a31[13], 0, a31[14], a63[14], a31[14], 0, a31[15], a63[15], a31[15], 0
+ );
+ const __m256i BayerSub0 = _mm256_setr_epi8(
+ s31[0], s63[0], s31[0], 0, s31[1], s63[1], s31[1], 0, s31[2], s63[2], s31[2], 0, s31[3], s63[3], s31[3], 0,
+ s31[4], s63[4], s31[4], 0, s31[5], s63[5], s31[5], 0, s31[6], s63[6], s31[6], 0, s31[7], s63[7], s31[7], 0
+ );
+ const __m256i BayerSub1 = _mm256_setr_epi8(
+ s31[8], s63[8], s31[8], 0, s31[9], s63[9], s31[9], 0, s31[10], s63[10], s31[10], 0, s31[11], s63[11], s31[11], 0,
+ s31[12], s63[12], s31[12], 0, s31[13], s63[13], s31[13], 0, s31[14], s63[14], s31[14], 0, s31[15], s63[15], s31[15], 0
+ );
+
+ __m256i l0 = _mm256_inserti128_si256( _mm256_castsi128_si256( px0 ), px1, 1 );
+ __m256i l1 = _mm256_inserti128_si256( _mm256_castsi128_si256( px2 ), px3, 1 );
+
+ __m256i a0 = _mm256_adds_epu8( l0, BayerAdd0 );
+ __m256i a1 = _mm256_adds_epu8( l1, BayerAdd1 );
+ __m256i s0 = _mm256_subs_epu8( a0, BayerSub0 );
+ __m256i s1 = _mm256_subs_epu8( a1, BayerSub1 );
+
+ _mm256_storeu_si256( (__m256i*)(data ), s0 );
+ _mm256_storeu_si256( (__m256i*)(data+32), s1 );
+
+}
+#endif
+
+void Dither( uint8_t* data )
+{
+#ifdef __AVX2__
+ static constexpr uint8_t a31[] = { 0, 0, 0, 1, 2, 0, 4, 0, 0, 2, 0, 0, 4, 0, 3, 0 };
+ static constexpr uint8_t a63[] = { 0, 0, 0, 0, 1, 0, 2, 0, 0, 1, 0, 0, 2, 0, 1, 0 };
+ static constexpr uint8_t s31[] = { 5, 0, 4, 0, 0, 2, 0, 1, 3, 0, 4, 0, 0, 0, 0, 2 };
+ static constexpr uint8_t s63[] = { 2, 0, 2, 0, 0, 1, 0, 0, 1, 0, 2, 0, 0, 0, 0, 1 };
+
+ const __m256i BayerAdd0 = _mm256_setr_epi8(
+ a31[0], a63[0], a31[0], 0, a31[1], a63[1], a31[1], 0, a31[2], a63[2], a31[2], 0, a31[3], a63[3], a31[3], 0,
+ a31[4], a63[4], a31[4], 0, a31[5], a63[5], a31[5], 0, a31[6], a63[6], a31[6], 0, a31[7], a63[7], a31[7], 0
+ );
+ const __m256i BayerAdd1 = _mm256_setr_epi8(
+ a31[8], a63[8], a31[8], 0, a31[9], a63[9], a31[9], 0, a31[10], a63[10], a31[10], 0, a31[11], a63[11], a31[11], 0,
+ a31[12], a63[12], a31[12], 0, a31[13], a63[13], a31[13], 0, a31[14], a63[14], a31[14], 0, a31[15], a63[15], a31[15], 0
+ );
+ const __m256i BayerSub0 = _mm256_setr_epi8(
+ s31[0], s63[0], s31[0], 0, s31[1], s63[1], s31[1], 0, s31[2], s63[2], s31[2], 0, s31[3], s63[3], s31[3], 0,
+ s31[4], s63[4], s31[4], 0, s31[5], s63[5], s31[5], 0, s31[6], s63[6], s31[6], 0, s31[7], s63[7], s31[7], 0
+ );
+ const __m256i BayerSub1 = _mm256_setr_epi8(
+ s31[8], s63[8], s31[8], 0, s31[9], s63[9], s31[9], 0, s31[10], s63[10], s31[10], 0, s31[11], s63[11], s31[11], 0,
+ s31[12], s63[12], s31[12], 0, s31[13], s63[13], s31[13], 0, s31[14], s63[14], s31[14], 0, s31[15], s63[15], s31[15], 0
+ );
+
+ __m256i px0 = _mm256_loadu_si256( (__m256i*)(data ) );
+ __m256i px1 = _mm256_loadu_si256( (__m256i*)(data+32) );
+
+ __m256i a0 = _mm256_adds_epu8( px0, BayerAdd0 );
+ __m256i a1 = _mm256_adds_epu8( px1, BayerAdd1 );
+ __m256i s0 = _mm256_subs_epu8( a0, BayerSub0 );
+ __m256i s1 = _mm256_subs_epu8( a1, BayerSub1 );
+
+ _mm256_storeu_si256( (__m256i*)(data ), s0 );
+ _mm256_storeu_si256( (__m256i*)(data+32), s1 );
+#else
+ static constexpr int8_t Bayer31[16] = {
+ ( 0-8)*2/3, ( 8-8)*2/3, ( 2-8)*2/3, (10-8)*2/3,
+ (12-8)*2/3, ( 4-8)*2/3, (14-8)*2/3, ( 6-8)*2/3,
+ ( 3-8)*2/3, (11-8)*2/3, ( 1-8)*2/3, ( 9-8)*2/3,
+ (15-8)*2/3, ( 7-8)*2/3, (13-8)*2/3, ( 5-8)*2/3
+ };
+ static constexpr int8_t Bayer63[16] = {
+ ( 0-8)*2/6, ( 8-8)*2/6, ( 2-8)*2/6, (10-8)*2/6,
+ (12-8)*2/6, ( 4-8)*2/6, (14-8)*2/6, ( 6-8)*2/6,
+ ( 3-8)*2/6, (11-8)*2/6, ( 1-8)*2/6, ( 9-8)*2/6,
+ (15-8)*2/6, ( 7-8)*2/6, (13-8)*2/6, ( 5-8)*2/6
+ };
+
+ for( int i=0; i<16; i++ )
+ {
+ uint32_t col;
+ memcpy( &col, data, 4 );
+ uint8_t r = col & 0xFF;
+ uint8_t g = ( col >> 8 ) & 0xFF;
+ uint8_t b = ( col >> 16 ) & 0xFF;
+
+ r = clampu8( r + Bayer31[i] );
+ g = clampu8( g + Bayer63[i] );
+ b = clampu8( b + Bayer31[i] );
+
+ col = r | ( g << 8 ) | ( b << 16 );
+ memcpy( data, &col, 4 );
+ data += 4;
+ }
+#endif
+}
diff --git a/thirdparty/etcpak/Dither.hpp b/thirdparty/etcpak/Dither.hpp
new file mode 100644
index 0000000000..e43ce5676d
--- /dev/null
+++ b/thirdparty/etcpak/Dither.hpp
@@ -0,0 +1,21 @@
+#ifndef __DITHER_HPP__
+#define __DITHER_HPP__
+
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef __AVX2__
+# ifdef _MSC_VER
+# include <intrin.h>
+# else
+# include <x86intrin.h>
+# endif
+#endif
+
+void Dither( uint8_t* data );
+
+#ifdef __AVX2__
+void DitherAvx2( uint8_t* data, __m128i px0, __m128i px1, __m128i px2, __m128i px3 );
+#endif
+
+#endif
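
Dither() works on a single 4x4 block of RGBA pixels (64 bytes) and applies a 4x4 ordered dither whose amplitudes are scaled for the 5-bit red/blue and 6-bit green channels of RGB565, as suggested by the Bayer31/Bayer63 tables above. A minimal sketch of preparing a block for it, assuming a row-major RGBA image; DitherBlockAt is a hypothetical helper, not part of this commit:

    #include <stdint.h>
    #include <string.h>
    #include "Dither.hpp"

    // Sketch: gather one 4x4 block from a row-major RGBA image and dither it
    // in place, mirroring the block gathering done in CompressDxt1Dither.
    void DitherBlockAt( const uint32_t* image, size_t width, size_t x, size_t y, uint8_t out[64] )
    {
        for( size_t row=0; row<4; row++ )
        {
            memcpy( out + row * 16, image + ( y + row ) * width + x, 16 );  // 4 pixels, 4 bytes each
        }
        Dither( out );  // ordered dither tuned for RGB565 quantization
    }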
diff --git a/thirdparty/etcpak/Error.cpp b/thirdparty/etcpak/Error.cpp
new file mode 100644
index 0000000000..014ecdab66
--- /dev/null
+++ b/thirdparty/etcpak/Error.cpp
@@ -0,0 +1,48 @@
+#include <stdint.h>
+
+#include "Error.hpp"
+#include "Math.hpp"
+
+float CalcMSE3( const Bitmap& bmp, const Bitmap& out )
+{
+ float err = 0;
+
+ const uint32_t* p1 = bmp.Data();
+ const uint32_t* p2 = out.Data();
+ size_t cnt = bmp.Size().x * bmp.Size().y;
+
+ for( size_t i=0; i<cnt; i++ )
+ {
+ uint32_t c1 = *p1++;
+ uint32_t c2 = *p2++;
+
+ err += sq( ( c1 & 0x000000FF ) - ( c2 & 0x000000FF ) );
+ err += sq( ( ( c1 & 0x0000FF00 ) >> 8 ) - ( ( c2 & 0x0000FF00 ) >> 8 ) );
+ err += sq( ( ( c1 & 0x00FF0000 ) >> 16 ) - ( ( c2 & 0x00FF0000 ) >> 16 ) );
+ }
+
+ err /= cnt * 3;
+
+ return err;
+}
+
+float CalcMSE1( const Bitmap& bmp, const Bitmap& out )
+{
+ float err = 0;
+
+ const uint32_t* p1 = bmp.Data();
+ const uint32_t* p2 = out.Data();
+ size_t cnt = bmp.Size().x * bmp.Size().y;
+
+ for( size_t i=0; i<cnt; i++ )
+ {
+ uint32_t c1 = *p1++;
+ uint32_t c2 = *p2++;
+
+ err += sq( ( c1 >> 24 ) - ( c2 & 0xFF ) );
+ }
+
+ err /= cnt;
+
+ return err;
+}
diff --git a/thirdparty/etcpak/Error.hpp b/thirdparty/etcpak/Error.hpp
new file mode 100644
index 0000000000..9817754b74
--- /dev/null
+++ b/thirdparty/etcpak/Error.hpp
@@ -0,0 +1,9 @@
+#ifndef __ERROR_HPP__
+#define __ERROR_HPP__
+
+#include "Bitmap.hpp"
+
+float CalcMSE3( const Bitmap& bmp, const Bitmap& out );
+float CalcMSE1( const Bitmap& bmp, const Bitmap& out );
+
+#endif
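
CalcMSE3 averages the squared error over the three color channels; CalcMSE1 compares the source's alpha (top byte) against the low byte of the decoded image. A common way to report either value is PSNR; a minimal sketch assuming 8-bit channels, with MseToPsnr as a hypothetical helper not part of this commit:

    #include <cmath>

    // Sketch: report the MSE from CalcMSE3/CalcMSE1 as PSNR in dB.
    inline float MseToPsnr( float mse )
    {
        if( mse <= 0.f ) return INFINITY;  // identical images
        return 10.f * std::log10( 255.f * 255.f / mse );
    }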
diff --git a/thirdparty/etcpak/ForceInline.hpp b/thirdparty/etcpak/ForceInline.hpp
new file mode 100644
index 0000000000..b6f012841b
--- /dev/null
+++ b/thirdparty/etcpak/ForceInline.hpp
@@ -0,0 +1,20 @@
+#ifndef __FORCEINLINE_HPP__
+#define __FORCEINLINE_HPP__
+
+#if defined(__GNUC__)
+# define etcpak_force_inline __attribute__((always_inline)) inline
+#elif defined(_MSC_VER)
+# define etcpak_force_inline __forceinline
+#else
+# define etcpak_force_inline inline
+#endif
+
+#if defined(__GNUC__)
+# define etcpak_no_inline __attribute__((noinline))
+#elif defined(_MSC_VER)
+# define etcpak_no_inline __declspec(noinline)
+#else
+# define etcpak_no_inline
+#endif
+
+#endif
diff --git a/thirdparty/etcpak/LICENSE.txt b/thirdparty/etcpak/LICENSE.txt
new file mode 100644
index 0000000000..59e85d6ea5
--- /dev/null
+++ b/thirdparty/etcpak/LICENSE.txt
@@ -0,0 +1,26 @@
+etcpak, an extremely fast ETC compression utility (https://github.com/wolfpld/etcpak)
+
+Copyright (c) 2013-2021, Bartosz Taudul <wolf@nereid.pl>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the <organization> nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/thirdparty/etcpak/Math.hpp b/thirdparty/etcpak/Math.hpp
new file mode 100644
index 0000000000..994e1ac4ea
--- /dev/null
+++ b/thirdparty/etcpak/Math.hpp
@@ -0,0 +1,92 @@
+#ifndef __DARKRL__MATH_HPP__
+#define __DARKRL__MATH_HPP__
+
+#include <algorithm>
+#include <cmath>
+#include <stdint.h>
+
+#include "ForceInline.hpp"
+
+template<typename T>
+static etcpak_force_inline T AlignPOT( T val )
+{
+ if( val == 0 ) return 1;
+ val--;
+ for( unsigned int i=1; i<sizeof( T ) * 8; i <<= 1 )
+ {
+ val |= val >> i;
+ }
+ return val + 1;
+}
+
+static etcpak_force_inline int CountSetBits( uint32_t val )
+{
+ val -= ( val >> 1 ) & 0x55555555;
+ val = ( ( val >> 2 ) & 0x33333333 ) + ( val & 0x33333333 );
+ val = ( ( val >> 4 ) + val ) & 0x0f0f0f0f;
+ val += val >> 8;
+ val += val >> 16;
+ return val & 0x0000003f;
+}
+
+static etcpak_force_inline int CountLeadingZeros( uint32_t val )
+{
+ val |= val >> 1;
+ val |= val >> 2;
+ val |= val >> 4;
+ val |= val >> 8;
+ val |= val >> 16;
+ return 32 - CountSetBits( val );
+}
+
+static etcpak_force_inline float sRGB2linear( float v )
+{
+ const float a = 0.055f;
+ if( v <= 0.04045f )
+ {
+ return v / 12.92f;
+ }
+ else
+ {
+ return pow( ( v + a ) / ( 1 + a ), 2.4f );
+ }
+}
+
+static etcpak_force_inline float linear2sRGB( float v )
+{
+ const float a = 0.055f;
+ if( v <= 0.0031308f )
+ {
+ return 12.92f * v;
+ }
+ else
+ {
+ return ( 1 + a ) * pow( v, 1/2.4f ) - a;
+ }
+}
+
+template<class T>
+static etcpak_force_inline T SmoothStep( T x )
+{
+ return x*x*(3-2*x);
+}
+
+static etcpak_force_inline uint8_t clampu8( int32_t val )
+{
+ if( ( val & ~0xFF ) == 0 ) return val;
+ return ( ( ~val ) >> 31 ) & 0xFF;
+}
+
+template<class T>
+static etcpak_force_inline T sq( T val )
+{
+ return val * val;
+}
+
+static etcpak_force_inline int mul8bit( int a, int b )
+{
+ int t = a*b + 128;
+ return ( t + ( t >> 8 ) ) >> 8;
+}
+
+#endif
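
The two fixed-point helpers at the end are the ones the encoders lean on: mul8bit( a, b ) approximates ( a * b ) / 255 with rounding, and clampu8 saturates an int32_t back into 0..255. A minimal sketch of a typical use; BlendOver is a hypothetical helper, not part of this commit:

    #include <stdint.h>
    #include "Math.hpp"

    // Sketch: alpha blend one channel with the helpers above.
    inline uint8_t BlendOver( uint8_t src, uint8_t dst, uint8_t alpha )
    {
        // dst' = src*alpha/255 + dst*(255-alpha)/255
        return clampu8( mul8bit( src, alpha ) + mul8bit( dst, 255 - alpha ) );
    }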
diff --git a/thirdparty/etcpak/MipMap.hpp b/thirdparty/etcpak/MipMap.hpp
new file mode 100644
index 0000000000..d3b4bc9e7c
--- /dev/null
+++ b/thirdparty/etcpak/MipMap.hpp
@@ -0,0 +1,11 @@
+#ifndef __MIPMAP_HPP__
+#define __MIPMAP_HPP__
+
+#include "Vector.hpp"
+
+inline int NumberOfMipLevels( const v2i& size )
+{
+ return (int)floor( log2( std::max( size.x, size.y ) ) ) + 1;
+}
+
+#endif
diff --git a/thirdparty/etcpak/ProcessCommon.hpp b/thirdparty/etcpak/ProcessCommon.hpp
new file mode 100644
index 0000000000..657d68888f
--- /dev/null
+++ b/thirdparty/etcpak/ProcessCommon.hpp
@@ -0,0 +1,50 @@
+#ifndef __PROCESSCOMMON_HPP__
+#define __PROCESSCOMMON_HPP__
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+
+template<class T>
+static size_t GetLeastError( const T* err, size_t num )
+{
+ size_t idx = 0;
+ for( size_t i=1; i<num; i++ )
+ {
+ if( err[i] < err[idx] )
+ {
+ idx = i;
+ }
+ }
+ return idx;
+}
+
+static uint64_t FixByteOrder( uint64_t d )
+{
+ return ( ( d & 0x00000000FFFFFFFF ) ) |
+ ( ( d & 0xFF00000000000000 ) >> 24 ) |
+ ( ( d & 0x000000FF00000000 ) << 24 ) |
+ ( ( d & 0x00FF000000000000 ) >> 8 ) |
+ ( ( d & 0x0000FF0000000000 ) << 8 );
+}
+
+template<class T, class S>
+static uint64_t EncodeSelectors( uint64_t d, const T terr[2][8], const S tsel[16][8], const uint32_t* id )
+{
+ size_t tidx[2];
+ tidx[0] = GetLeastError( terr[0], 8 );
+ tidx[1] = GetLeastError( terr[1], 8 );
+
+ d |= tidx[0] << 26;
+ d |= tidx[1] << 29;
+ for( int i=0; i<16; i++ )
+ {
+ uint64_t t = tsel[i][tidx[id[i]%2]];
+ d |= ( t & 0x1 ) << ( i + 32 );
+ d |= ( t & 0x2 ) << ( i + 47 );
+ }
+
+ return d;
+}
+
+#endif
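
FixByteOrder keeps the low 32 bits of an encoded block word as-is and byte-swaps the high 32 bits. A minimal worked example as a sketch (FixByteOrderExample is hypothetical, not part of this commit):

    #include <assert.h>
    #include "ProcessCommon.hpp"

    // Sketch: low half unchanged, high half byte-reversed.
    inline void FixByteOrderExample()
    {
        assert( FixByteOrder( 0x1122334455667788ull ) == 0x4433221155667788ull );
    }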
diff --git a/thirdparty/etcpak/ProcessDxtc.cpp b/thirdparty/etcpak/ProcessDxtc.cpp
new file mode 100644
index 0000000000..508d55fd75
--- /dev/null
+++ b/thirdparty/etcpak/ProcessDxtc.cpp
@@ -0,0 +1,956 @@
+#include "Dither.hpp"
+#include "ForceInline.hpp"
+#include "ProcessDxtc.hpp"
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+
+#ifdef __ARM_NEON
+# include <arm_neon.h>
+#endif
+
+#if defined __AVX__ && !defined __SSE4_1__
+# define __SSE4_1__
+#endif
+
+#if defined __SSE4_1__ || defined __AVX2__
+# ifdef _MSC_VER
+# include <intrin.h>
+# else
+# include <x86intrin.h>
+# ifndef _mm256_cvtsi256_si32
+# define _mm256_cvtsi256_si32( v ) ( _mm_cvtsi128_si32( _mm256_castsi256_si128( v ) ) )
+# endif
+# endif
+#endif
+
+
+static etcpak_force_inline uint16_t to565( uint8_t r, uint8_t g, uint8_t b )
+{
+ return ( ( r & 0xF8 ) << 8 ) | ( ( g & 0xFC ) << 3 ) | ( b >> 3 );
+}
+
+static etcpak_force_inline uint16_t to565( uint32_t c )
+{
+ return
+ ( ( c & 0xF80000 ) >> 19 ) |
+ ( ( c & 0x00FC00 ) >> 5 ) |
+ ( ( c & 0x0000F8 ) << 8 );
+}
+
+static const uint8_t DxtcIndexTable[256] = {
+ 85, 87, 86, 84, 93, 95, 94, 92, 89, 91, 90, 88, 81, 83, 82, 80,
+ 117, 119, 118, 116, 125, 127, 126, 124, 121, 123, 122, 120, 113, 115, 114, 112,
+ 101, 103, 102, 100, 109, 111, 110, 108, 105, 107, 106, 104, 97, 99, 98, 96,
+ 69, 71, 70, 68, 77, 79, 78, 76, 73, 75, 74, 72, 65, 67, 66, 64,
+ 213, 215, 214, 212, 221, 223, 222, 220, 217, 219, 218, 216, 209, 211, 210, 208,
+ 245, 247, 246, 244, 253, 255, 254, 252, 249, 251, 250, 248, 241, 243, 242, 240,
+ 229, 231, 230, 228, 237, 239, 238, 236, 233, 235, 234, 232, 225, 227, 226, 224,
+ 197, 199, 198, 196, 205, 207, 206, 204, 201, 203, 202, 200, 193, 195, 194, 192,
+ 149, 151, 150, 148, 157, 159, 158, 156, 153, 155, 154, 152, 145, 147, 146, 144,
+ 181, 183, 182, 180, 189, 191, 190, 188, 185, 187, 186, 184, 177, 179, 178, 176,
+ 165, 167, 166, 164, 173, 175, 174, 172, 169, 171, 170, 168, 161, 163, 162, 160,
+ 133, 135, 134, 132, 141, 143, 142, 140, 137, 139, 138, 136, 129, 131, 130, 128,
+ 21, 23, 22, 20, 29, 31, 30, 28, 25, 27, 26, 24, 17, 19, 18, 16,
+ 53, 55, 54, 52, 61, 63, 62, 60, 57, 59, 58, 56, 49, 51, 50, 48,
+ 37, 39, 38, 36, 45, 47, 46, 44, 41, 43, 42, 40, 33, 35, 34, 32,
+ 5, 7, 6, 4, 13, 15, 14, 12, 9, 11, 10, 8, 1, 3, 2, 0
+};
+
+static const uint8_t AlphaIndexTable_SSE[64] = {
+ 9, 15, 14, 13, 12, 11, 10, 8, 57, 63, 62, 61, 60, 59, 58, 56,
+ 49, 55, 54, 53, 52, 51, 50, 48, 41, 47, 46, 45, 44, 43, 42, 40,
+ 33, 39, 38, 37, 36, 35, 34, 32, 25, 31, 30, 29, 28, 27, 26, 24,
+ 17, 23, 22, 21, 20, 19, 18, 16, 1, 7, 6, 5, 4, 3, 2, 0,
+};
+
+static const uint16_t DivTable[255*3+1] = {
+ 0xffff, 0xffff, 0xffff, 0xffff, 0xcccc, 0xaaaa, 0x9249, 0x8000, 0x71c7, 0x6666, 0x5d17, 0x5555, 0x4ec4, 0x4924, 0x4444, 0x4000,
+ 0x3c3c, 0x38e3, 0x35e5, 0x3333, 0x30c3, 0x2e8b, 0x2c85, 0x2aaa, 0x28f5, 0x2762, 0x25ed, 0x2492, 0x234f, 0x2222, 0x2108, 0x2000,
+ 0x1f07, 0x1e1e, 0x1d41, 0x1c71, 0x1bac, 0x1af2, 0x1a41, 0x1999, 0x18f9, 0x1861, 0x17d0, 0x1745, 0x16c1, 0x1642, 0x15c9, 0x1555,
+ 0x14e5, 0x147a, 0x1414, 0x13b1, 0x1352, 0x12f6, 0x129e, 0x1249, 0x11f7, 0x11a7, 0x115b, 0x1111, 0x10c9, 0x1084, 0x1041, 0x1000,
+ 0x0fc0, 0x0f83, 0x0f48, 0x0f0f, 0x0ed7, 0x0ea0, 0x0e6c, 0x0e38, 0x0e07, 0x0dd6, 0x0da7, 0x0d79, 0x0d4c, 0x0d20, 0x0cf6, 0x0ccc,
+ 0x0ca4, 0x0c7c, 0x0c56, 0x0c30, 0x0c0c, 0x0be8, 0x0bc5, 0x0ba2, 0x0b81, 0x0b60, 0x0b40, 0x0b21, 0x0b02, 0x0ae4, 0x0ac7, 0x0aaa,
+ 0x0a8e, 0x0a72, 0x0a57, 0x0a3d, 0x0a23, 0x0a0a, 0x09f1, 0x09d8, 0x09c0, 0x09a9, 0x0991, 0x097b, 0x0964, 0x094f, 0x0939, 0x0924,
+ 0x090f, 0x08fb, 0x08e7, 0x08d3, 0x08c0, 0x08ad, 0x089a, 0x0888, 0x0876, 0x0864, 0x0853, 0x0842, 0x0831, 0x0820, 0x0810, 0x0800,
+ 0x07f0, 0x07e0, 0x07d1, 0x07c1, 0x07b3, 0x07a4, 0x0795, 0x0787, 0x0779, 0x076b, 0x075d, 0x0750, 0x0743, 0x0736, 0x0729, 0x071c,
+ 0x070f, 0x0703, 0x06f7, 0x06eb, 0x06df, 0x06d3, 0x06c8, 0x06bc, 0x06b1, 0x06a6, 0x069b, 0x0690, 0x0685, 0x067b, 0x0670, 0x0666,
+ 0x065c, 0x0652, 0x0648, 0x063e, 0x0634, 0x062b, 0x0621, 0x0618, 0x060f, 0x0606, 0x05fd, 0x05f4, 0x05eb, 0x05e2, 0x05d9, 0x05d1,
+ 0x05c9, 0x05c0, 0x05b8, 0x05b0, 0x05a8, 0x05a0, 0x0598, 0x0590, 0x0588, 0x0581, 0x0579, 0x0572, 0x056b, 0x0563, 0x055c, 0x0555,
+ 0x054e, 0x0547, 0x0540, 0x0539, 0x0532, 0x052b, 0x0525, 0x051e, 0x0518, 0x0511, 0x050b, 0x0505, 0x04fe, 0x04f8, 0x04f2, 0x04ec,
+ 0x04e6, 0x04e0, 0x04da, 0x04d4, 0x04ce, 0x04c8, 0x04c3, 0x04bd, 0x04b8, 0x04b2, 0x04ad, 0x04a7, 0x04a2, 0x049c, 0x0497, 0x0492,
+ 0x048d, 0x0487, 0x0482, 0x047d, 0x0478, 0x0473, 0x046e, 0x0469, 0x0465, 0x0460, 0x045b, 0x0456, 0x0452, 0x044d, 0x0448, 0x0444,
+ 0x043f, 0x043b, 0x0436, 0x0432, 0x042d, 0x0429, 0x0425, 0x0421, 0x041c, 0x0418, 0x0414, 0x0410, 0x040c, 0x0408, 0x0404, 0x0400,
+ 0x03fc, 0x03f8, 0x03f4, 0x03f0, 0x03ec, 0x03e8, 0x03e4, 0x03e0, 0x03dd, 0x03d9, 0x03d5, 0x03d2, 0x03ce, 0x03ca, 0x03c7, 0x03c3,
+ 0x03c0, 0x03bc, 0x03b9, 0x03b5, 0x03b2, 0x03ae, 0x03ab, 0x03a8, 0x03a4, 0x03a1, 0x039e, 0x039b, 0x0397, 0x0394, 0x0391, 0x038e,
+ 0x038b, 0x0387, 0x0384, 0x0381, 0x037e, 0x037b, 0x0378, 0x0375, 0x0372, 0x036f, 0x036c, 0x0369, 0x0366, 0x0364, 0x0361, 0x035e,
+ 0x035b, 0x0358, 0x0355, 0x0353, 0x0350, 0x034d, 0x034a, 0x0348, 0x0345, 0x0342, 0x0340, 0x033d, 0x033a, 0x0338, 0x0335, 0x0333,
+ 0x0330, 0x032e, 0x032b, 0x0329, 0x0326, 0x0324, 0x0321, 0x031f, 0x031c, 0x031a, 0x0317, 0x0315, 0x0313, 0x0310, 0x030e, 0x030c,
+ 0x0309, 0x0307, 0x0305, 0x0303, 0x0300, 0x02fe, 0x02fc, 0x02fa, 0x02f7, 0x02f5, 0x02f3, 0x02f1, 0x02ef, 0x02ec, 0x02ea, 0x02e8,
+ 0x02e6, 0x02e4, 0x02e2, 0x02e0, 0x02de, 0x02dc, 0x02da, 0x02d8, 0x02d6, 0x02d4, 0x02d2, 0x02d0, 0x02ce, 0x02cc, 0x02ca, 0x02c8,
+ 0x02c6, 0x02c4, 0x02c2, 0x02c0, 0x02be, 0x02bc, 0x02bb, 0x02b9, 0x02b7, 0x02b5, 0x02b3, 0x02b1, 0x02b0, 0x02ae, 0x02ac, 0x02aa,
+ 0x02a8, 0x02a7, 0x02a5, 0x02a3, 0x02a1, 0x02a0, 0x029e, 0x029c, 0x029b, 0x0299, 0x0297, 0x0295, 0x0294, 0x0292, 0x0291, 0x028f,
+ 0x028d, 0x028c, 0x028a, 0x0288, 0x0287, 0x0285, 0x0284, 0x0282, 0x0280, 0x027f, 0x027d, 0x027c, 0x027a, 0x0279, 0x0277, 0x0276,
+ 0x0274, 0x0273, 0x0271, 0x0270, 0x026e, 0x026d, 0x026b, 0x026a, 0x0268, 0x0267, 0x0265, 0x0264, 0x0263, 0x0261, 0x0260, 0x025e,
+ 0x025d, 0x025c, 0x025a, 0x0259, 0x0257, 0x0256, 0x0255, 0x0253, 0x0252, 0x0251, 0x024f, 0x024e, 0x024d, 0x024b, 0x024a, 0x0249,
+ 0x0247, 0x0246, 0x0245, 0x0243, 0x0242, 0x0241, 0x0240, 0x023e, 0x023d, 0x023c, 0x023b, 0x0239, 0x0238, 0x0237, 0x0236, 0x0234,
+ 0x0233, 0x0232, 0x0231, 0x0230, 0x022e, 0x022d, 0x022c, 0x022b, 0x022a, 0x0229, 0x0227, 0x0226, 0x0225, 0x0224, 0x0223, 0x0222,
+ 0x0220, 0x021f, 0x021e, 0x021d, 0x021c, 0x021b, 0x021a, 0x0219, 0x0218, 0x0216, 0x0215, 0x0214, 0x0213, 0x0212, 0x0211, 0x0210,
+ 0x020f, 0x020e, 0x020d, 0x020c, 0x020b, 0x020a, 0x0209, 0x0208, 0x0207, 0x0206, 0x0205, 0x0204, 0x0203, 0x0202, 0x0201, 0x0200,
+ 0x01ff, 0x01fe, 0x01fd, 0x01fc, 0x01fb, 0x01fa, 0x01f9, 0x01f8, 0x01f7, 0x01f6, 0x01f5, 0x01f4, 0x01f3, 0x01f2, 0x01f1, 0x01f0,
+ 0x01ef, 0x01ee, 0x01ed, 0x01ec, 0x01eb, 0x01ea, 0x01e9, 0x01e9, 0x01e8, 0x01e7, 0x01e6, 0x01e5, 0x01e4, 0x01e3, 0x01e2, 0x01e1,
+ 0x01e0, 0x01e0, 0x01df, 0x01de, 0x01dd, 0x01dc, 0x01db, 0x01da, 0x01da, 0x01d9, 0x01d8, 0x01d7, 0x01d6, 0x01d5, 0x01d4, 0x01d4,
+ 0x01d3, 0x01d2, 0x01d1, 0x01d0, 0x01cf, 0x01cf, 0x01ce, 0x01cd, 0x01cc, 0x01cb, 0x01cb, 0x01ca, 0x01c9, 0x01c8, 0x01c7, 0x01c7,
+ 0x01c6, 0x01c5, 0x01c4, 0x01c3, 0x01c3, 0x01c2, 0x01c1, 0x01c0, 0x01c0, 0x01bf, 0x01be, 0x01bd, 0x01bd, 0x01bc, 0x01bb, 0x01ba,
+ 0x01ba, 0x01b9, 0x01b8, 0x01b7, 0x01b7, 0x01b6, 0x01b5, 0x01b4, 0x01b4, 0x01b3, 0x01b2, 0x01b2, 0x01b1, 0x01b0, 0x01af, 0x01af,
+ 0x01ae, 0x01ad, 0x01ad, 0x01ac, 0x01ab, 0x01aa, 0x01aa, 0x01a9, 0x01a8, 0x01a8, 0x01a7, 0x01a6, 0x01a6, 0x01a5, 0x01a4, 0x01a4,
+ 0x01a3, 0x01a2, 0x01a2, 0x01a1, 0x01a0, 0x01a0, 0x019f, 0x019e, 0x019e, 0x019d, 0x019c, 0x019c, 0x019b, 0x019a, 0x019a, 0x0199,
+ 0x0198, 0x0198, 0x0197, 0x0197, 0x0196, 0x0195, 0x0195, 0x0194, 0x0193, 0x0193, 0x0192, 0x0192, 0x0191, 0x0190, 0x0190, 0x018f,
+ 0x018f, 0x018e, 0x018d, 0x018d, 0x018c, 0x018b, 0x018b, 0x018a, 0x018a, 0x0189, 0x0189, 0x0188, 0x0187, 0x0187, 0x0186, 0x0186,
+ 0x0185, 0x0184, 0x0184, 0x0183, 0x0183, 0x0182, 0x0182, 0x0181, 0x0180, 0x0180, 0x017f, 0x017f, 0x017e, 0x017e, 0x017d, 0x017d,
+ 0x017c, 0x017b, 0x017b, 0x017a, 0x017a, 0x0179, 0x0179, 0x0178, 0x0178, 0x0177, 0x0177, 0x0176, 0x0175, 0x0175, 0x0174, 0x0174,
+ 0x0173, 0x0173, 0x0172, 0x0172, 0x0171, 0x0171, 0x0170, 0x0170, 0x016f, 0x016f, 0x016e, 0x016e, 0x016d, 0x016d, 0x016c, 0x016c,
+ 0x016b, 0x016b, 0x016a, 0x016a, 0x0169, 0x0169, 0x0168, 0x0168, 0x0167, 0x0167, 0x0166, 0x0166, 0x0165, 0x0165, 0x0164, 0x0164,
+ 0x0163, 0x0163, 0x0162, 0x0162, 0x0161, 0x0161, 0x0160, 0x0160, 0x015f, 0x015f, 0x015e, 0x015e, 0x015d, 0x015d, 0x015d, 0x015c,
+ 0x015c, 0x015b, 0x015b, 0x015a, 0x015a, 0x0159, 0x0159, 0x0158, 0x0158, 0x0158, 0x0157, 0x0157, 0x0156, 0x0156
+};
+static const uint16_t DivTableNEON[255*3+1] = {
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x1c71, 0x1af2, 0x1999, 0x1861, 0x1745, 0x1642, 0x1555, 0x147a, 0x13b1, 0x12f6, 0x1249, 0x11a7, 0x1111, 0x1084, 0x1000,
+ 0x0f83, 0x0f0f, 0x0ea0, 0x0e38, 0x0dd6, 0x0d79, 0x0d20, 0x0ccc, 0x0c7c, 0x0c30, 0x0be8, 0x0ba2, 0x0b60, 0x0b21, 0x0ae4, 0x0aaa,
+ 0x0a72, 0x0a3d, 0x0a0a, 0x09d8, 0x09a9, 0x097b, 0x094f, 0x0924, 0x08fb, 0x08d3, 0x08ad, 0x0888, 0x0864, 0x0842, 0x0820, 0x0800,
+ 0x07e0, 0x07c1, 0x07a4, 0x0787, 0x076b, 0x0750, 0x0736, 0x071c, 0x0703, 0x06eb, 0x06d3, 0x06bc, 0x06a6, 0x0690, 0x067b, 0x0666,
+ 0x0652, 0x063e, 0x062b, 0x0618, 0x0606, 0x05f4, 0x05e2, 0x05d1, 0x05c0, 0x05b0, 0x05a0, 0x0590, 0x0581, 0x0572, 0x0563, 0x0555,
+ 0x0547, 0x0539, 0x052b, 0x051e, 0x0511, 0x0505, 0x04f8, 0x04ec, 0x04e0, 0x04d4, 0x04c8, 0x04bd, 0x04b2, 0x04a7, 0x049c, 0x0492,
+ 0x0487, 0x047d, 0x0473, 0x0469, 0x0460, 0x0456, 0x044d, 0x0444, 0x043b, 0x0432, 0x0429, 0x0421, 0x0418, 0x0410, 0x0408, 0x0400,
+ 0x03f8, 0x03f0, 0x03e8, 0x03e0, 0x03d9, 0x03d2, 0x03ca, 0x03c3, 0x03bc, 0x03b5, 0x03ae, 0x03a8, 0x03a1, 0x039b, 0x0394, 0x038e,
+ 0x0387, 0x0381, 0x037b, 0x0375, 0x036f, 0x0369, 0x0364, 0x035e, 0x0358, 0x0353, 0x034d, 0x0348, 0x0342, 0x033d, 0x0338, 0x0333,
+ 0x032e, 0x0329, 0x0324, 0x031f, 0x031a, 0x0315, 0x0310, 0x030c, 0x0307, 0x0303, 0x02fe, 0x02fa, 0x02f5, 0x02f1, 0x02ec, 0x02e8,
+ 0x02e4, 0x02e0, 0x02dc, 0x02d8, 0x02d4, 0x02d0, 0x02cc, 0x02c8, 0x02c4, 0x02c0, 0x02bc, 0x02b9, 0x02b5, 0x02b1, 0x02ae, 0x02aa,
+ 0x02a7, 0x02a3, 0x02a0, 0x029c, 0x0299, 0x0295, 0x0292, 0x028f, 0x028c, 0x0288, 0x0285, 0x0282, 0x027f, 0x027c, 0x0279, 0x0276,
+ 0x0273, 0x0270, 0x026d, 0x026a, 0x0267, 0x0264, 0x0261, 0x025e, 0x025c, 0x0259, 0x0256, 0x0253, 0x0251, 0x024e, 0x024b, 0x0249,
+ 0x0246, 0x0243, 0x0241, 0x023e, 0x023c, 0x0239, 0x0237, 0x0234, 0x0232, 0x0230, 0x022d, 0x022b, 0x0229, 0x0226, 0x0224, 0x0222,
+ 0x021f, 0x021d, 0x021b, 0x0219, 0x0216, 0x0214, 0x0212, 0x0210, 0x020e, 0x020c, 0x020a, 0x0208, 0x0206, 0x0204, 0x0202, 0x0200,
+ 0x01fe, 0x01fc, 0x01fa, 0x01f8, 0x01f6, 0x01f4, 0x01f2, 0x01f0, 0x01ee, 0x01ec, 0x01ea, 0x01e9, 0x01e7, 0x01e5, 0x01e3, 0x01e1,
+ 0x01e0, 0x01de, 0x01dc, 0x01da, 0x01d9, 0x01d7, 0x01d5, 0x01d4, 0x01d2, 0x01d0, 0x01cf, 0x01cd, 0x01cb, 0x01ca, 0x01c8, 0x01c7,
+ 0x01c5, 0x01c3, 0x01c2, 0x01c0, 0x01bf, 0x01bd, 0x01bc, 0x01ba, 0x01b9, 0x01b7, 0x01b6, 0x01b4, 0x01b3, 0x01b2, 0x01b0, 0x01af,
+ 0x01ad, 0x01ac, 0x01aa, 0x01a9, 0x01a8, 0x01a6, 0x01a5, 0x01a4, 0x01a2, 0x01a1, 0x01a0, 0x019e, 0x019d, 0x019c, 0x019a, 0x0199,
+ 0x0198, 0x0197, 0x0195, 0x0194, 0x0193, 0x0192, 0x0190, 0x018f, 0x018e, 0x018d, 0x018b, 0x018a, 0x0189, 0x0188, 0x0187, 0x0186,
+ 0x0184, 0x0183, 0x0182, 0x0181, 0x0180, 0x017f, 0x017e, 0x017d, 0x017b, 0x017a, 0x0179, 0x0178, 0x0177, 0x0176, 0x0175, 0x0174,
+ 0x0173, 0x0172, 0x0171, 0x0170, 0x016f, 0x016e, 0x016d, 0x016c, 0x016b, 0x016a, 0x0169, 0x0168, 0x0167, 0x0166, 0x0165, 0x0164,
+ 0x0163, 0x0162, 0x0161, 0x0160, 0x015f, 0x015e, 0x015d, 0x015c, 0x015b, 0x015a, 0x0159, 0x0158, 0x0158, 0x0157, 0x0156, 0x0155,
+ 0x0154, 0x0153, 0x0152, 0x0151, 0x0150, 0x0150, 0x014f, 0x014e, 0x014d, 0x014c, 0x014b, 0x014a, 0x014a, 0x0149, 0x0148, 0x0147,
+ 0x0146, 0x0146, 0x0145, 0x0144, 0x0143, 0x0142, 0x0142, 0x0141, 0x0140, 0x013f, 0x013e, 0x013e, 0x013d, 0x013c, 0x013b, 0x013b,
+ 0x013a, 0x0139, 0x0138, 0x0138, 0x0137, 0x0136, 0x0135, 0x0135, 0x0134, 0x0133, 0x0132, 0x0132, 0x0131, 0x0130, 0x0130, 0x012f,
+ 0x012e, 0x012e, 0x012d, 0x012c, 0x012b, 0x012b, 0x012a, 0x0129, 0x0129, 0x0128, 0x0127, 0x0127, 0x0126, 0x0125, 0x0125, 0x0124,
+ 0x0123, 0x0123, 0x0122, 0x0121, 0x0121, 0x0120, 0x0120, 0x011f, 0x011e, 0x011e, 0x011d, 0x011c, 0x011c, 0x011b, 0x011b, 0x011a,
+ 0x0119, 0x0119, 0x0118, 0x0118, 0x0117, 0x0116, 0x0116, 0x0115, 0x0115, 0x0114, 0x0113, 0x0113, 0x0112, 0x0112, 0x0111, 0x0111,
+ 0x0110, 0x010f, 0x010f, 0x010e, 0x010e, 0x010d, 0x010d, 0x010c, 0x010c, 0x010b, 0x010a, 0x010a, 0x0109, 0x0109, 0x0108, 0x0108,
+ 0x0107, 0x0107, 0x0106, 0x0106, 0x0105, 0x0105, 0x0104, 0x0104, 0x0103, 0x0103, 0x0102, 0x0102, 0x0101, 0x0101, 0x0100, 0x0100,
+ 0x00ff, 0x00ff, 0x00fe, 0x00fe, 0x00fd, 0x00fd, 0x00fc, 0x00fc, 0x00fb, 0x00fb, 0x00fa, 0x00fa, 0x00f9, 0x00f9, 0x00f8, 0x00f8,
+ 0x00f7, 0x00f7, 0x00f6, 0x00f6, 0x00f5, 0x00f5, 0x00f4, 0x00f4, 0x00f4, 0x00f3, 0x00f3, 0x00f2, 0x00f2, 0x00f1, 0x00f1, 0x00f0,
+ 0x00f0, 0x00f0, 0x00ef, 0x00ef, 0x00ee, 0x00ee, 0x00ed, 0x00ed, 0x00ed, 0x00ec, 0x00ec, 0x00eb, 0x00eb, 0x00ea, 0x00ea, 0x00ea,
+ 0x00e9, 0x00e9, 0x00e8, 0x00e8, 0x00e7, 0x00e7, 0x00e7, 0x00e6, 0x00e6, 0x00e5, 0x00e5, 0x00e5, 0x00e4, 0x00e4, 0x00e3, 0x00e3,
+ 0x00e3, 0x00e2, 0x00e2, 0x00e1, 0x00e1, 0x00e1, 0x00e0, 0x00e0, 0x00e0, 0x00df, 0x00df, 0x00de, 0x00de, 0x00de, 0x00dd, 0x00dd,
+ 0x00dd, 0x00dc, 0x00dc, 0x00db, 0x00db, 0x00db, 0x00da, 0x00da, 0x00da, 0x00d9, 0x00d9, 0x00d9, 0x00d8, 0x00d8, 0x00d7, 0x00d7,
+ 0x00d7, 0x00d6, 0x00d6, 0x00d6, 0x00d5, 0x00d5, 0x00d5, 0x00d4, 0x00d4, 0x00d4, 0x00d3, 0x00d3, 0x00d3, 0x00d2, 0x00d2, 0x00d2,
+ 0x00d1, 0x00d1, 0x00d1, 0x00d0, 0x00d0, 0x00d0, 0x00cf, 0x00cf, 0x00cf, 0x00ce, 0x00ce, 0x00ce, 0x00cd, 0x00cd, 0x00cd, 0x00cc,
+ 0x00cc, 0x00cc, 0x00cb, 0x00cb, 0x00cb, 0x00ca, 0x00ca, 0x00ca, 0x00c9, 0x00c9, 0x00c9, 0x00c9, 0x00c8, 0x00c8, 0x00c8, 0x00c7,
+ 0x00c7, 0x00c7, 0x00c6, 0x00c6, 0x00c6, 0x00c5, 0x00c5, 0x00c5, 0x00c5, 0x00c4, 0x00c4, 0x00c4, 0x00c3, 0x00c3, 0x00c3, 0x00c3,
+ 0x00c2, 0x00c2, 0x00c2, 0x00c1, 0x00c1, 0x00c1, 0x00c1, 0x00c0, 0x00c0, 0x00c0, 0x00bf, 0x00bf, 0x00bf, 0x00bf, 0x00be, 0x00be,
+ 0x00be, 0x00bd, 0x00bd, 0x00bd, 0x00bd, 0x00bc, 0x00bc, 0x00bc, 0x00bc, 0x00bb, 0x00bb, 0x00bb, 0x00ba, 0x00ba, 0x00ba, 0x00ba,
+ 0x00b9, 0x00b9, 0x00b9, 0x00b9, 0x00b8, 0x00b8, 0x00b8, 0x00b8, 0x00b7, 0x00b7, 0x00b7, 0x00b7, 0x00b6, 0x00b6, 0x00b6, 0x00b6,
+ 0x00b5, 0x00b5, 0x00b5, 0x00b5, 0x00b4, 0x00b4, 0x00b4, 0x00b4, 0x00b3, 0x00b3, 0x00b3, 0x00b3, 0x00b2, 0x00b2, 0x00b2, 0x00b2,
+ 0x00b1, 0x00b1, 0x00b1, 0x00b1, 0x00b0, 0x00b0, 0x00b0, 0x00b0, 0x00af, 0x00af, 0x00af, 0x00af, 0x00ae, 0x00ae, 0x00ae, 0x00ae,
+ 0x00ae, 0x00ad, 0x00ad, 0x00ad, 0x00ad, 0x00ac, 0x00ac, 0x00ac, 0x00ac, 0x00ac, 0x00ab, 0x00ab, 0x00ab, 0x00ab,
+};
+
+static const uint16_t DivTableAlpha[256] = {
+ 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xe38e, 0xcccc, 0xba2e, 0xaaaa, 0x9d89, 0x9249, 0x8888, 0x8000,
+ 0x7878, 0x71c7, 0x6bca, 0x6666, 0x6186, 0x5d17, 0x590b, 0x5555, 0x51eb, 0x4ec4, 0x4bda, 0x4924, 0x469e, 0x4444, 0x4210, 0x4000,
+ 0x3e0f, 0x3c3c, 0x3a83, 0x38e3, 0x3759, 0x35e5, 0x3483, 0x3333, 0x31f3, 0x30c3, 0x2fa0, 0x2e8b, 0x2d82, 0x2c85, 0x2b93, 0x2aaa,
+ 0x29cb, 0x28f5, 0x2828, 0x2762, 0x26a4, 0x25ed, 0x253c, 0x2492, 0x23ee, 0x234f, 0x22b6, 0x2222, 0x2192, 0x2108, 0x2082, 0x2000,
+ 0x1f81, 0x1f07, 0x1e91, 0x1e1e, 0x1dae, 0x1d41, 0x1cd8, 0x1c71, 0x1c0e, 0x1bac, 0x1b4e, 0x1af2, 0x1a98, 0x1a41, 0x19ec, 0x1999,
+ 0x1948, 0x18f9, 0x18ac, 0x1861, 0x1818, 0x17d0, 0x178a, 0x1745, 0x1702, 0x16c1, 0x1681, 0x1642, 0x1605, 0x15c9, 0x158e, 0x1555,
+ 0x151d, 0x14e5, 0x14af, 0x147a, 0x1446, 0x1414, 0x13e2, 0x13b1, 0x1381, 0x1352, 0x1323, 0x12f6, 0x12c9, 0x129e, 0x1273, 0x1249,
+ 0x121f, 0x11f7, 0x11cf, 0x11a7, 0x1181, 0x115b, 0x1135, 0x1111, 0x10ec, 0x10c9, 0x10a6, 0x1084, 0x1062, 0x1041, 0x1020, 0x1000,
+ 0x0fe0, 0x0fc0, 0x0fa2, 0x0f83, 0x0f66, 0x0f48, 0x0f2b, 0x0f0f, 0x0ef2, 0x0ed7, 0x0ebb, 0x0ea0, 0x0e86, 0x0e6c, 0x0e52, 0x0e38,
+ 0x0e1f, 0x0e07, 0x0dee, 0x0dd6, 0x0dbe, 0x0da7, 0x0d90, 0x0d79, 0x0d62, 0x0d4c, 0x0d36, 0x0d20, 0x0d0b, 0x0cf6, 0x0ce1, 0x0ccc,
+ 0x0cb8, 0x0ca4, 0x0c90, 0x0c7c, 0x0c69, 0x0c56, 0x0c43, 0x0c30, 0x0c1e, 0x0c0c, 0x0bfa, 0x0be8, 0x0bd6, 0x0bc5, 0x0bb3, 0x0ba2,
+ 0x0b92, 0x0b81, 0x0b70, 0x0b60, 0x0b50, 0x0b40, 0x0b30, 0x0b21, 0x0b11, 0x0b02, 0x0af3, 0x0ae4, 0x0ad6, 0x0ac7, 0x0ab8, 0x0aaa,
+ 0x0a9c, 0x0a8e, 0x0a80, 0x0a72, 0x0a65, 0x0a57, 0x0a4a, 0x0a3d, 0x0a30, 0x0a23, 0x0a16, 0x0a0a, 0x09fd, 0x09f1, 0x09e4, 0x09d8,
+ 0x09cc, 0x09c0, 0x09b4, 0x09a9, 0x099d, 0x0991, 0x0986, 0x097b, 0x0970, 0x0964, 0x095a, 0x094f, 0x0944, 0x0939, 0x092f, 0x0924,
+ 0x091a, 0x090f, 0x0905, 0x08fb, 0x08f1, 0x08e7, 0x08dd, 0x08d3, 0x08ca, 0x08c0, 0x08b7, 0x08ad, 0x08a4, 0x089a, 0x0891, 0x0888,
+ 0x087f, 0x0876, 0x086d, 0x0864, 0x085b, 0x0853, 0x084a, 0x0842, 0x0839, 0x0831, 0x0828, 0x0820, 0x0818, 0x0810, 0x0808, 0x0800,
+};
+
+static etcpak_force_inline uint64_t ProcessRGB( const uint8_t* src )
+{
+#ifdef __SSE4_1__
+ __m128i px0 = _mm_loadu_si128(((__m128i*)src) + 0);
+ __m128i px1 = _mm_loadu_si128(((__m128i*)src) + 1);
+ __m128i px2 = _mm_loadu_si128(((__m128i*)src) + 2);
+ __m128i px3 = _mm_loadu_si128(((__m128i*)src) + 3);
+
+ __m128i smask = _mm_set1_epi32( 0xF8FCF8 );
+ __m128i sd0 = _mm_and_si128( px0, smask );
+ __m128i sd1 = _mm_and_si128( px1, smask );
+ __m128i sd2 = _mm_and_si128( px2, smask );
+ __m128i sd3 = _mm_and_si128( px3, smask );
+
+ __m128i sc = _mm_shuffle_epi32(sd0, _MM_SHUFFLE(0, 0, 0, 0));
+
+ __m128i sc0 = _mm_cmpeq_epi8(sd0, sc);
+ __m128i sc1 = _mm_cmpeq_epi8(sd1, sc);
+ __m128i sc2 = _mm_cmpeq_epi8(sd2, sc);
+ __m128i sc3 = _mm_cmpeq_epi8(sd3, sc);
+
+ __m128i sm0 = _mm_and_si128(sc0, sc1);
+ __m128i sm1 = _mm_and_si128(sc2, sc3);
+ __m128i sm = _mm_and_si128(sm0, sm1);
+
+ if( _mm_testc_si128(sm, _mm_set1_epi32(-1)) )
+ {
+ uint32_t c;
+ memcpy( &c, src, 4 );
+ return uint64_t( to565( c ) ) << 16;
+ }
+
+ __m128i min0 = _mm_min_epu8( px0, px1 );
+ __m128i min1 = _mm_min_epu8( px2, px3 );
+ __m128i min2 = _mm_min_epu8( min0, min1 );
+
+ __m128i max0 = _mm_max_epu8( px0, px1 );
+ __m128i max1 = _mm_max_epu8( px2, px3 );
+ __m128i max2 = _mm_max_epu8( max0, max1 );
+
+ __m128i min3 = _mm_shuffle_epi32( min2, _MM_SHUFFLE( 2, 3, 0, 1 ) );
+ __m128i max3 = _mm_shuffle_epi32( max2, _MM_SHUFFLE( 2, 3, 0, 1 ) );
+ __m128i min4 = _mm_min_epu8( min2, min3 );
+ __m128i max4 = _mm_max_epu8( max2, max3 );
+
+ __m128i min5 = _mm_shuffle_epi32( min4, _MM_SHUFFLE( 0, 0, 2, 2 ) );
+ __m128i max5 = _mm_shuffle_epi32( max4, _MM_SHUFFLE( 0, 0, 2, 2 ) );
+ __m128i rmin = _mm_min_epu8( min4, min5 );
+ __m128i rmax = _mm_max_epu8( max4, max5 );
+
+ __m128i range1 = _mm_subs_epu8( rmax, rmin );
+ __m128i range2 = _mm_sad_epu8( rmax, rmin );
+
+ uint32_t vrange = _mm_cvtsi128_si32( range2 ) >> 1;
+ __m128i range = _mm_set1_epi16( DivTable[vrange] );
+
+ __m128i inset1 = _mm_srli_epi16( range1, 4 );
+ __m128i inset = _mm_and_si128( inset1, _mm_set1_epi8( 0xF ) );
+ __m128i min = _mm_adds_epu8( rmin, inset );
+ __m128i max = _mm_subs_epu8( rmax, inset );
+
+ __m128i c0 = _mm_subs_epu8( px0, rmin );
+ __m128i c1 = _mm_subs_epu8( px1, rmin );
+ __m128i c2 = _mm_subs_epu8( px2, rmin );
+ __m128i c3 = _mm_subs_epu8( px3, rmin );
+
+ __m128i is0 = _mm_maddubs_epi16( c0, _mm_set1_epi8( 1 ) );
+ __m128i is1 = _mm_maddubs_epi16( c1, _mm_set1_epi8( 1 ) );
+ __m128i is2 = _mm_maddubs_epi16( c2, _mm_set1_epi8( 1 ) );
+ __m128i is3 = _mm_maddubs_epi16( c3, _mm_set1_epi8( 1 ) );
+
+ __m128i s0 = _mm_hadd_epi16( is0, is1 );
+ __m128i s1 = _mm_hadd_epi16( is2, is3 );
+
+ __m128i m0 = _mm_mulhi_epu16( s0, range );
+ __m128i m1 = _mm_mulhi_epu16( s1, range );
+
+ __m128i p0 = _mm_packus_epi16( m0, m1 );
+
+ __m128i p1 = _mm_or_si128( _mm_srai_epi32( p0, 6 ), _mm_srai_epi32( p0, 12 ) );
+ __m128i p2 = _mm_or_si128( _mm_srai_epi32( p0, 18 ), p0 );
+ __m128i p3 = _mm_or_si128( p1, p2 );
+ __m128i p =_mm_shuffle_epi8( p3, _mm_set1_epi32( 0x0C080400 ) );
+
+ uint32_t vmin = _mm_cvtsi128_si32( min );
+ uint32_t vmax = _mm_cvtsi128_si32( max );
+ uint32_t vp = _mm_cvtsi128_si32( p );
+
+ return uint64_t( ( uint64_t( to565( vmin ) ) << 16 ) | to565( vmax ) | ( uint64_t( vp ) << 32 ) );
+#elif defined __ARM_NEON
+# ifdef __aarch64__
+ uint8x16x4_t px = vld4q_u8( src );
+
+ uint8x16_t lr = px.val[0];
+ uint8x16_t lg = px.val[1];
+ uint8x16_t lb = px.val[2];
+
+ uint8_t rmaxr = vmaxvq_u8( lr );
+ uint8_t rmaxg = vmaxvq_u8( lg );
+ uint8_t rmaxb = vmaxvq_u8( lb );
+
+ uint8_t rminr = vminvq_u8( lr );
+ uint8_t rming = vminvq_u8( lg );
+ uint8_t rminb = vminvq_u8( lb );
+
+ int rr = rmaxr - rminr;
+ int rg = rmaxg - rming;
+ int rb = rmaxb - rminb;
+
+ int vrange1 = rr + rg + rb;
+ uint16_t vrange2 = DivTableNEON[vrange1];
+
+ uint8_t insetr = rr >> 4;
+ uint8_t insetg = rg >> 4;
+ uint8_t insetb = rb >> 4;
+
+ uint8_t minr = rminr + insetr;
+ uint8_t ming = rming + insetg;
+ uint8_t minb = rminb + insetb;
+
+ uint8_t maxr = rmaxr - insetr;
+ uint8_t maxg = rmaxg - insetg;
+ uint8_t maxb = rmaxb - insetb;
+
+ uint8x16_t cr = vsubq_u8( lr, vdupq_n_u8( rminr ) );
+ uint8x16_t cg = vsubq_u8( lg, vdupq_n_u8( rming ) );
+ uint8x16_t cb = vsubq_u8( lb, vdupq_n_u8( rminb ) );
+
+ uint16x8_t is0l = vaddl_u8( vget_low_u8( cr ), vget_low_u8( cg ) );
+ uint16x8_t is0h = vaddl_u8( vget_high_u8( cr ), vget_high_u8( cg ) );
+ uint16x8_t is1l = vaddw_u8( is0l, vget_low_u8( cb ) );
+ uint16x8_t is1h = vaddw_u8( is0h, vget_high_u8( cb ) );
+
+ int16x8_t range = vdupq_n_s16( vrange2 );
+ uint16x8_t m0 = vreinterpretq_u16_s16( vqdmulhq_s16( vreinterpretq_s16_u16( is1l ), range ) );
+ uint16x8_t m1 = vreinterpretq_u16_s16( vqdmulhq_s16( vreinterpretq_s16_u16( is1h ), range ) );
+
+ uint8x8_t p00 = vmovn_u16( m0 );
+ uint8x8_t p01 = vmovn_u16( m1 );
+ uint8x16_t p0 = vcombine_u8( p00, p01 );
+
+ uint32x4_t p1 = vaddq_u32( vshrq_n_u32( vreinterpretq_u32_u8( p0 ), 6 ), vshrq_n_u32( vreinterpretq_u32_u8( p0 ), 12 ) );
+ uint32x4_t p2 = vaddq_u32( vshrq_n_u32( vreinterpretq_u32_u8( p0 ), 18 ), vreinterpretq_u32_u8( p0 ) );
+ uint32x4_t p3 = vaddq_u32( p1, p2 );
+
+ uint16x4x2_t p4 = vuzp_u16( vget_low_u16( vreinterpretq_u16_u32( p3 ) ), vget_high_u16( vreinterpretq_u16_u32( p3 ) ) );
+ uint8x8x2_t p = vuzp_u8( vreinterpret_u8_u16( p4.val[0] ), vreinterpret_u8_u16( p4.val[0] ) );
+
+ uint32_t vp;
+ vst1_lane_u32( &vp, vreinterpret_u32_u8( p.val[0] ), 0 );
+
+ return uint64_t( ( uint64_t( to565( minr, ming, minb ) ) << 16 ) | to565( maxr, maxg, maxb ) | ( uint64_t( vp ) << 32 ) );
+# else
+ uint32x4_t px0 = vld1q_u32( (uint32_t*)src );
+ uint32x4_t px1 = vld1q_u32( (uint32_t*)src + 4 );
+ uint32x4_t px2 = vld1q_u32( (uint32_t*)src + 8 );
+ uint32x4_t px3 = vld1q_u32( (uint32_t*)src + 12 );
+
+ uint32x4_t smask = vdupq_n_u32( 0xF8FCF8 );
+ uint32x4_t sd0 = vandq_u32( smask, px0 );
+ uint32x4_t sd1 = vandq_u32( smask, px1 );
+ uint32x4_t sd2 = vandq_u32( smask, px2 );
+ uint32x4_t sd3 = vandq_u32( smask, px3 );
+
+ uint32x4_t sc = vdupq_n_u32( sd0[0] );
+
+ uint32x4_t sc0 = vceqq_u32( sd0, sc );
+ uint32x4_t sc1 = vceqq_u32( sd1, sc );
+ uint32x4_t sc2 = vceqq_u32( sd2, sc );
+ uint32x4_t sc3 = vceqq_u32( sd3, sc );
+
+ uint32x4_t sm0 = vandq_u32( sc0, sc1 );
+ uint32x4_t sm1 = vandq_u32( sc2, sc3 );
+ int64x2_t sm = vreinterpretq_s64_u32( vandq_u32( sm0, sm1 ) );
+
+ if( sm[0] == -1 && sm[1] == -1 )
+ {
+ return uint64_t( to565( src[0], src[1], src[2] ) ) << 16;
+ }
+
+ uint32x4_t mask = vdupq_n_u32( 0xFFFFFF );
+ uint8x16_t l0 = vreinterpretq_u8_u32( vandq_u32( mask, px0 ) );
+ uint8x16_t l1 = vreinterpretq_u8_u32( vandq_u32( mask, px1 ) );
+ uint8x16_t l2 = vreinterpretq_u8_u32( vandq_u32( mask, px2 ) );
+ uint8x16_t l3 = vreinterpretq_u8_u32( vandq_u32( mask, px3 ) );
+
+ uint8x16_t min0 = vminq_u8( l0, l1 );
+ uint8x16_t min1 = vminq_u8( l2, l3 );
+ uint8x16_t min2 = vminq_u8( min0, min1 );
+
+ uint8x16_t max0 = vmaxq_u8( l0, l1 );
+ uint8x16_t max1 = vmaxq_u8( l2, l3 );
+ uint8x16_t max2 = vmaxq_u8( max0, max1 );
+
+ uint8x16_t min3 = vreinterpretq_u8_u32( vrev64q_u32( vreinterpretq_u32_u8( min2 ) ) );
+ uint8x16_t max3 = vreinterpretq_u8_u32( vrev64q_u32( vreinterpretq_u32_u8( max2 ) ) );
+
+ uint8x16_t min4 = vminq_u8( min2, min3 );
+ uint8x16_t max4 = vmaxq_u8( max2, max3 );
+
+ uint8x16_t min5 = vcombine_u8( vget_high_u8( min4 ), vget_low_u8( min4 ) );
+ uint8x16_t max5 = vcombine_u8( vget_high_u8( max4 ), vget_low_u8( max4 ) );
+
+ uint8x16_t rmin = vminq_u8( min4, min5 );
+ uint8x16_t rmax = vmaxq_u8( max4, max5 );
+
+ uint8x16_t range1 = vsubq_u8( rmax, rmin );
+ uint8x8_t range2 = vget_low_u8( range1 );
+ uint8x8x2_t range3 = vzip_u8( range2, vdup_n_u8( 0 ) );
+ uint16x4_t range4 = vreinterpret_u16_u8( range3.val[0] );
+
+ uint16_t vrange1;
+ uint16x4_t range5 = vpadd_u16( range4, range4 );
+ uint16x4_t range6 = vpadd_u16( range5, range5 );
+ vst1_lane_u16( &vrange1, range6, 0 );
+
+ uint32_t vrange2 = ( 2 << 16 ) / uint32_t( vrange1 + 1 );
+ uint16x8_t range = vdupq_n_u16( vrange2 );
+
+ uint8x16_t inset = vshrq_n_u8( range1, 4 );
+ uint8x16_t min = vaddq_u8( rmin, inset );
+ uint8x16_t max = vsubq_u8( rmax, inset );
+
+ uint8x16_t c0 = vsubq_u8( l0, rmin );
+ uint8x16_t c1 = vsubq_u8( l1, rmin );
+ uint8x16_t c2 = vsubq_u8( l2, rmin );
+ uint8x16_t c3 = vsubq_u8( l3, rmin );
+
+ uint16x8_t is0 = vpaddlq_u8( c0 );
+ uint16x8_t is1 = vpaddlq_u8( c1 );
+ uint16x8_t is2 = vpaddlq_u8( c2 );
+ uint16x8_t is3 = vpaddlq_u8( c3 );
+
+ uint16x4_t is4 = vpadd_u16( vget_low_u16( is0 ), vget_high_u16( is0 ) );
+ uint16x4_t is5 = vpadd_u16( vget_low_u16( is1 ), vget_high_u16( is1 ) );
+ uint16x4_t is6 = vpadd_u16( vget_low_u16( is2 ), vget_high_u16( is2 ) );
+ uint16x4_t is7 = vpadd_u16( vget_low_u16( is3 ), vget_high_u16( is3 ) );
+
+ uint16x8_t s0 = vcombine_u16( is4, is5 );
+ uint16x8_t s1 = vcombine_u16( is6, is7 );
+
+ uint16x8_t m0 = vreinterpretq_u16_s16( vqdmulhq_s16( vreinterpretq_s16_u16( s0 ), vreinterpretq_s16_u16( range ) ) );
+ uint16x8_t m1 = vreinterpretq_u16_s16( vqdmulhq_s16( vreinterpretq_s16_u16( s1 ), vreinterpretq_s16_u16( range ) ) );
+
+ uint8x8_t p00 = vmovn_u16( m0 );
+ uint8x8_t p01 = vmovn_u16( m1 );
+ uint8x16_t p0 = vcombine_u8( p00, p01 );
+
+ uint32x4_t p1 = vaddq_u32( vshrq_n_u32( vreinterpretq_u32_u8( p0 ), 6 ), vshrq_n_u32( vreinterpretq_u32_u8( p0 ), 12 ) );
+ uint32x4_t p2 = vaddq_u32( vshrq_n_u32( vreinterpretq_u32_u8( p0 ), 18 ), vreinterpretq_u32_u8( p0 ) );
+ uint32x4_t p3 = vaddq_u32( p1, p2 );
+
+ uint16x4x2_t p4 = vuzp_u16( vget_low_u16( vreinterpretq_u16_u32( p3 ) ), vget_high_u16( vreinterpretq_u16_u32( p3 ) ) );
+ uint8x8x2_t p = vuzp_u8( vreinterpret_u8_u16( p4.val[0] ), vreinterpret_u8_u16( p4.val[0] ) );
+
+ uint32_t vmin, vmax, vp;
+ vst1q_lane_u32( &vmin, vreinterpretq_u32_u8( min ), 0 );
+ vst1q_lane_u32( &vmax, vreinterpretq_u32_u8( max ), 0 );
+ vst1_lane_u32( &vp, vreinterpret_u32_u8( p.val[0] ), 0 );
+
+ return uint64_t( ( uint64_t( to565( vmin ) ) << 16 ) | to565( vmax ) | ( uint64_t( vp ) << 32 ) );
+# endif
+#else
+ uint32_t ref;
+ memcpy( &ref, src, 4 );
+ uint32_t refMask = ref & 0xF8FCF8;
+ auto stmp = src + 4;
+ for( int i=1; i<16; i++ )
+ {
+ uint32_t px;
+ memcpy( &px, stmp, 4 );
+ if( ( px & 0xF8FCF8 ) != refMask ) break;
+ stmp += 4;
+ }
+ if( stmp == src + 64 )
+ {
+ return uint64_t( to565( ref ) ) << 16;
+ }
+
+ uint8_t min[3] = { src[0], src[1], src[2] };
+ uint8_t max[3] = { src[0], src[1], src[2] };
+ auto tmp = src + 4;
+ for( int i=1; i<16; i++ )
+ {
+ for( int j=0; j<3; j++ )
+ {
+ if( tmp[j] < min[j] ) min[j] = tmp[j];
+ else if( tmp[j] > max[j] ) max[j] = tmp[j];
+ }
+ tmp += 4;
+ }
+
+ const uint32_t range = DivTable[max[0] - min[0] + max[1] - min[1] + max[2] - min[2]];
+ const uint32_t rmin = min[0] + min[1] + min[2];
+ for( int i=0; i<3; i++ )
+ {
+ const uint8_t inset = ( max[i] - min[i] ) >> 4;
+ min[i] += inset;
+ max[i] -= inset;
+ }
+
+ uint32_t data = 0;
+ for( int i=0; i<16; i++ )
+ {
+ const uint32_t c = src[0] + src[1] + src[2] - rmin;
+ const uint8_t idx = ( c * range ) >> 16;
+ data |= idx << (i*2);
+ src += 4;
+ }
+
+ return uint64_t( ( uint64_t( to565( min[0], min[1], min[2] ) ) << 16 ) | to565( max[0], max[1], max[2] ) | ( uint64_t( data ) << 32 ) );
+#endif
+}
+
+#ifdef __AVX2__
+static etcpak_force_inline void ProcessRGB_AVX( const uint8_t* src, char*& dst )
+{
+ __m256i px0 = _mm256_loadu_si256(((__m256i*)src) + 0);
+ __m256i px1 = _mm256_loadu_si256(((__m256i*)src) + 1);
+ __m256i px2 = _mm256_loadu_si256(((__m256i*)src) + 2);
+ __m256i px3 = _mm256_loadu_si256(((__m256i*)src) + 3);
+
+ __m256i smask = _mm256_set1_epi32( 0xF8FCF8 );
+ __m256i sd0 = _mm256_and_si256( px0, smask );
+ __m256i sd1 = _mm256_and_si256( px1, smask );
+ __m256i sd2 = _mm256_and_si256( px2, smask );
+ __m256i sd3 = _mm256_and_si256( px3, smask );
+
+ __m256i sc = _mm256_shuffle_epi32(sd0, _MM_SHUFFLE(0, 0, 0, 0));
+
+ __m256i sc0 = _mm256_cmpeq_epi8(sd0, sc);
+ __m256i sc1 = _mm256_cmpeq_epi8(sd1, sc);
+ __m256i sc2 = _mm256_cmpeq_epi8(sd2, sc);
+ __m256i sc3 = _mm256_cmpeq_epi8(sd3, sc);
+
+ __m256i sm0 = _mm256_and_si256(sc0, sc1);
+ __m256i sm1 = _mm256_and_si256(sc2, sc3);
+ __m256i sm = _mm256_and_si256(sm0, sm1);
+
+ const int64_t solid0 = 1 - _mm_testc_si128( _mm256_castsi256_si128( sm ), _mm_set1_epi32( -1 ) );
+ const int64_t solid1 = 1 - _mm_testc_si128( _mm256_extracti128_si256( sm, 1 ), _mm_set1_epi32( -1 ) );
+
+ if( solid0 + solid1 == 0 )
+ {
+ const auto c0 = uint64_t( to565( src[0], src[1], src[2] ) );
+ const auto c1 = uint64_t( to565( src[16], src[17], src[18] ) );
+ memcpy( dst, &c0, 8 );
+ memcpy( dst+8, &c1, 8 );
+ dst += 16;
+ return;
+ }
+
+ __m256i min0 = _mm256_min_epu8( px0, px1 );
+ __m256i min1 = _mm256_min_epu8( px2, px3 );
+ __m256i min2 = _mm256_min_epu8( min0, min1 );
+
+ __m256i max0 = _mm256_max_epu8( px0, px1 );
+ __m256i max1 = _mm256_max_epu8( px2, px3 );
+ __m256i max2 = _mm256_max_epu8( max0, max1 );
+
+ __m256i min3 = _mm256_shuffle_epi32( min2, _MM_SHUFFLE( 2, 3, 0, 1 ) );
+ __m256i max3 = _mm256_shuffle_epi32( max2, _MM_SHUFFLE( 2, 3, 0, 1 ) );
+ __m256i min4 = _mm256_min_epu8( min2, min3 );
+ __m256i max4 = _mm256_max_epu8( max2, max3 );
+
+ __m256i min5 = _mm256_shuffle_epi32( min4, _MM_SHUFFLE( 0, 0, 2, 2 ) );
+ __m256i max5 = _mm256_shuffle_epi32( max4, _MM_SHUFFLE( 0, 0, 2, 2 ) );
+ __m256i rmin = _mm256_min_epu8( min4, min5 );
+ __m256i rmax = _mm256_max_epu8( max4, max5 );
+
+ __m256i range1 = _mm256_subs_epu8( rmax, rmin );
+ __m256i range2 = _mm256_sad_epu8( rmax, rmin );
+
+ uint16_t vrange0 = DivTable[_mm256_cvtsi256_si32( range2 ) >> 1];
+ uint16_t vrange1 = DivTable[_mm256_extract_epi16( range2, 8 ) >> 1];
+ __m256i range00 = _mm256_set1_epi16( vrange0 );
+ __m256i range = _mm256_inserti128_si256( range00, _mm_set1_epi16( vrange1 ), 1 );
+
+ __m256i inset1 = _mm256_srli_epi16( range1, 4 );
+ __m256i inset = _mm256_and_si256( inset1, _mm256_set1_epi8( 0xF ) );
+ __m256i min = _mm256_adds_epu8( rmin, inset );
+ __m256i max = _mm256_subs_epu8( rmax, inset );
+
+ __m256i c0 = _mm256_subs_epu8( px0, rmin );
+ __m256i c1 = _mm256_subs_epu8( px1, rmin );
+ __m256i c2 = _mm256_subs_epu8( px2, rmin );
+ __m256i c3 = _mm256_subs_epu8( px3, rmin );
+
+ __m256i is0 = _mm256_maddubs_epi16( c0, _mm256_set1_epi8( 1 ) );
+ __m256i is1 = _mm256_maddubs_epi16( c1, _mm256_set1_epi8( 1 ) );
+ __m256i is2 = _mm256_maddubs_epi16( c2, _mm256_set1_epi8( 1 ) );
+ __m256i is3 = _mm256_maddubs_epi16( c3, _mm256_set1_epi8( 1 ) );
+
+ __m256i s0 = _mm256_hadd_epi16( is0, is1 );
+ __m256i s1 = _mm256_hadd_epi16( is2, is3 );
+
+ __m256i m0 = _mm256_mulhi_epu16( s0, range );
+ __m256i m1 = _mm256_mulhi_epu16( s1, range );
+
+ __m256i p0 = _mm256_packus_epi16( m0, m1 );
+
+ __m256i p1 = _mm256_or_si256( _mm256_srai_epi32( p0, 6 ), _mm256_srai_epi32( p0, 12 ) );
+ __m256i p2 = _mm256_or_si256( _mm256_srai_epi32( p0, 18 ), p0 );
+ __m256i p3 = _mm256_or_si256( p1, p2 );
+ __m256i p =_mm256_shuffle_epi8( p3, _mm256_set1_epi32( 0x0C080400 ) );
+
+ __m256i mm0 = _mm256_unpacklo_epi8( _mm256_setzero_si256(), min );
+ __m256i mm1 = _mm256_unpacklo_epi8( _mm256_setzero_si256(), max );
+ __m256i mm2 = _mm256_unpacklo_epi64( mm1, mm0 );
+ __m256i mmr = _mm256_slli_epi64( _mm256_srli_epi64( mm2, 11 ), 11 );
+ __m256i mmg = _mm256_slli_epi64( _mm256_srli_epi64( mm2, 26 ), 5 );
+ __m256i mmb = _mm256_srli_epi64( _mm256_slli_epi64( mm2, 16 ), 59 );
+ __m256i mm3 = _mm256_or_si256( mmr, mmg );
+ __m256i mm4 = _mm256_or_si256( mm3, mmb );
+ __m256i mm5 = _mm256_shuffle_epi8( mm4, _mm256_set1_epi32( 0x09080100 ) );
+
+ __m256i d0 = _mm256_unpacklo_epi32( mm5, p );
+ __m256i d1 = _mm256_permute4x64_epi64( d0, _MM_SHUFFLE( 3, 2, 2, 0 ) );
+ __m128i d2 = _mm256_castsi256_si128( d1 );
+
+ __m128i mask = _mm_set_epi64x( 0xFFFF0000 | -solid1, 0xFFFF0000 | -solid0 );
+ __m128i d3 = _mm_and_si128( d2, mask );
+ _mm_storeu_si128( (__m128i*)dst, d3 );
+
+ for( int j=4; j<8; j++ ) dst[j] = (char)DxtcIndexTable[(uint8_t)dst[j]];
+ for( int j=12; j<16; j++ ) dst[j] = (char)DxtcIndexTable[(uint8_t)dst[j]];
+
+ dst += 16;
+}
+#endif
+
+static const uint8_t AlphaIndexTable[8] = { 1, 7, 6, 5, 4, 3, 2, 0 };
+
+static etcpak_force_inline uint64_t ProcessAlpha( const uint8_t* src )
+{
+ uint8_t solid8 = *src;
+ uint16_t solid16 = uint16_t( solid8 ) | ( uint16_t( solid8 ) << 8 );
+ uint32_t solid32 = uint32_t( solid16 ) | ( uint32_t( solid16 ) << 16 );
+ uint64_t solid64 = uint64_t( solid32 ) | ( uint64_t( solid32 ) << 32 );
+ if( memcmp( src, &solid64, 8 ) == 0 && memcmp( src+8, &solid64, 8 ) == 0 )
+ {
+ return solid8;
+ }
+
+ uint8_t min = src[0];
+ uint8_t max = min;
+ for( int i=1; i<16; i++ )
+ {
+ const auto v = src[i];
+ if( v > max ) max = v;
+ else if( v < min ) min = v;
+ }
+
+ uint32_t range = ( 8 << 13 ) / ( 1 + max - min );
+ uint64_t data = 0;
+ for( int i=0; i<16; i++ )
+ {
+ uint8_t a = src[i] - min;
+ uint64_t idx = AlphaIndexTable[( a * range ) >> 13];
+ data |= idx << (i*3);
+ }
+
+ return max | ( min << 8 ) | ( data << 16 );
+}
+
+#ifdef __SSE4_1__
+static etcpak_force_inline uint64_t ProcessRGB_SSE( __m128i px0, __m128i px1, __m128i px2, __m128i px3 )
+{
+ __m128i smask = _mm_set1_epi32( 0xF8FCF8 );
+ __m128i sd0 = _mm_and_si128( px0, smask );
+ __m128i sd1 = _mm_and_si128( px1, smask );
+ __m128i sd2 = _mm_and_si128( px2, smask );
+ __m128i sd3 = _mm_and_si128( px3, smask );
+
+ __m128i sc = _mm_shuffle_epi32(sd0, _MM_SHUFFLE(0, 0, 0, 0));
+
+ __m128i sc0 = _mm_cmpeq_epi8(sd0, sc);
+ __m128i sc1 = _mm_cmpeq_epi8(sd1, sc);
+ __m128i sc2 = _mm_cmpeq_epi8(sd2, sc);
+ __m128i sc3 = _mm_cmpeq_epi8(sd3, sc);
+
+ __m128i sm0 = _mm_and_si128(sc0, sc1);
+ __m128i sm1 = _mm_and_si128(sc2, sc3);
+ __m128i sm = _mm_and_si128(sm0, sm1);
+
+ if( _mm_testc_si128(sm, _mm_set1_epi32(-1)) )
+ {
+ return uint64_t( to565( _mm_cvtsi128_si32( px0 ) ) ) << 16;
+ }
+
+ px0 = _mm_and_si128( px0, _mm_set1_epi32( 0xFFFFFF ) );
+ px1 = _mm_and_si128( px1, _mm_set1_epi32( 0xFFFFFF ) );
+ px2 = _mm_and_si128( px2, _mm_set1_epi32( 0xFFFFFF ) );
+ px3 = _mm_and_si128( px3, _mm_set1_epi32( 0xFFFFFF ) );
+
+ __m128i min0 = _mm_min_epu8( px0, px1 );
+ __m128i min1 = _mm_min_epu8( px2, px3 );
+ __m128i min2 = _mm_min_epu8( min0, min1 );
+
+ __m128i max0 = _mm_max_epu8( px0, px1 );
+ __m128i max1 = _mm_max_epu8( px2, px3 );
+ __m128i max2 = _mm_max_epu8( max0, max1 );
+
+ __m128i min3 = _mm_shuffle_epi32( min2, _MM_SHUFFLE( 2, 3, 0, 1 ) );
+ __m128i max3 = _mm_shuffle_epi32( max2, _MM_SHUFFLE( 2, 3, 0, 1 ) );
+ __m128i min4 = _mm_min_epu8( min2, min3 );
+ __m128i max4 = _mm_max_epu8( max2, max3 );
+
+ __m128i min5 = _mm_shuffle_epi32( min4, _MM_SHUFFLE( 0, 0, 2, 2 ) );
+ __m128i max5 = _mm_shuffle_epi32( max4, _MM_SHUFFLE( 0, 0, 2, 2 ) );
+ __m128i rmin = _mm_min_epu8( min4, min5 );
+ __m128i rmax = _mm_max_epu8( max4, max5 );
+
+ __m128i range1 = _mm_subs_epu8( rmax, rmin );
+ __m128i range2 = _mm_sad_epu8( rmax, rmin );
+
+ uint32_t vrange = _mm_cvtsi128_si32( range2 ) >> 1;
+ __m128i range = _mm_set1_epi16( DivTable[vrange] );
+
+ __m128i inset1 = _mm_srli_epi16( range1, 4 );
+ __m128i inset = _mm_and_si128( inset1, _mm_set1_epi8( 0xF ) );
+ __m128i min = _mm_adds_epu8( rmin, inset );
+ __m128i max = _mm_subs_epu8( rmax, inset );
+
+ __m128i c0 = _mm_subs_epu8( px0, rmin );
+ __m128i c1 = _mm_subs_epu8( px1, rmin );
+ __m128i c2 = _mm_subs_epu8( px2, rmin );
+ __m128i c3 = _mm_subs_epu8( px3, rmin );
+
+ __m128i is0 = _mm_maddubs_epi16( c0, _mm_set1_epi8( 1 ) );
+ __m128i is1 = _mm_maddubs_epi16( c1, _mm_set1_epi8( 1 ) );
+ __m128i is2 = _mm_maddubs_epi16( c2, _mm_set1_epi8( 1 ) );
+ __m128i is3 = _mm_maddubs_epi16( c3, _mm_set1_epi8( 1 ) );
+
+ __m128i s0 = _mm_hadd_epi16( is0, is1 );
+ __m128i s1 = _mm_hadd_epi16( is2, is3 );
+
+ __m128i m0 = _mm_mulhi_epu16( s0, range );
+ __m128i m1 = _mm_mulhi_epu16( s1, range );
+
+ __m128i p0 = _mm_packus_epi16( m0, m1 );
+
+ __m128i p1 = _mm_or_si128( _mm_srai_epi32( p0, 6 ), _mm_srai_epi32( p0, 12 ) );
+ __m128i p2 = _mm_or_si128( _mm_srai_epi32( p0, 18 ), p0 );
+ __m128i p3 = _mm_or_si128( p1, p2 );
+ __m128i p =_mm_shuffle_epi8( p3, _mm_set1_epi32( 0x0C080400 ) );
+
+ uint32_t vmin = _mm_cvtsi128_si32( min );
+ uint32_t vmax = _mm_cvtsi128_si32( max );
+ uint32_t vp = _mm_cvtsi128_si32( p );
+
+ return uint64_t( ( uint64_t( to565( vmin ) ) << 16 ) | to565( vmax ) | ( uint64_t( vp ) << 32 ) );
+}
+
+static etcpak_force_inline uint64_t ProcessAlpha_SSE( __m128i px0, __m128i px1, __m128i px2, __m128i px3 )
+{
+ __m128i mask = _mm_setr_epi32( 0x0f0b0703, -1, -1, -1 );
+
+ __m128i m0 = _mm_shuffle_epi8( px0, mask );
+ __m128i m1 = _mm_shuffle_epi8( px1, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 3, 3, 0, 3 ) ) );
+ __m128i m2 = _mm_shuffle_epi8( px2, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 3, 0, 3, 3 ) ) );
+ __m128i m3 = _mm_shuffle_epi8( px3, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 0, 3, 3, 3 ) ) );
+ __m128i m4 = _mm_or_si128( m0, m1 );
+ __m128i m5 = _mm_or_si128( m2, m3 );
+ __m128i a = _mm_or_si128( m4, m5 );
+
+ __m128i solidCmp = _mm_shuffle_epi8( a, _mm_setzero_si128() );
+ __m128i cmpRes = _mm_cmpeq_epi8( a, solidCmp );
+ if( _mm_testc_si128( cmpRes, _mm_set1_epi32( -1 ) ) )
+ {
+ return _mm_cvtsi128_si32( a ) & 0xFF;
+ }
+
+ __m128i a1 = _mm_shuffle_epi32( a, _MM_SHUFFLE( 2, 3, 0, 1 ) );
+ __m128i max1 = _mm_max_epu8( a, a1 );
+ __m128i min1 = _mm_min_epu8( a, a1 );
+ __m128i amax2 = _mm_shuffle_epi32( max1, _MM_SHUFFLE( 0, 0, 2, 2 ) );
+ __m128i amin2 = _mm_shuffle_epi32( min1, _MM_SHUFFLE( 0, 0, 2, 2 ) );
+ __m128i max2 = _mm_max_epu8( max1, amax2 );
+ __m128i min2 = _mm_min_epu8( min1, amin2 );
+ __m128i amax3 = _mm_alignr_epi8( max2, max2, 2 );
+ __m128i amin3 = _mm_alignr_epi8( min2, min2, 2 );
+ __m128i max3 = _mm_max_epu8( max2, amax3 );
+ __m128i min3 = _mm_min_epu8( min2, amin3 );
+ __m128i amax4 = _mm_alignr_epi8( max3, max3, 1 );
+ __m128i amin4 = _mm_alignr_epi8( min3, min3, 1 );
+ __m128i max = _mm_max_epu8( max3, amax4 );
+ __m128i min = _mm_min_epu8( min3, amin4 );
+ __m128i minmax = _mm_unpacklo_epi8( max, min );
+
+ __m128i r = _mm_sub_epi8( max, min );
+ int range = _mm_cvtsi128_si32( r ) & 0xFF;
+ __m128i rv = _mm_set1_epi16( DivTableAlpha[range] );
+
+ __m128i v = _mm_sub_epi8( a, min );
+
+ __m128i lo16 = _mm_unpacklo_epi8( v, _mm_setzero_si128() );
+ __m128i hi16 = _mm_unpackhi_epi8( v, _mm_setzero_si128() );
+
+ __m128i lomul = _mm_mulhi_epu16( lo16, rv );
+ __m128i himul = _mm_mulhi_epu16( hi16, rv );
+
+ __m128i p0 = _mm_packus_epi16( lomul, himul );
+ __m128i p1 = _mm_or_si128( _mm_and_si128( p0, _mm_set1_epi16( 0x3F ) ), _mm_srai_epi16( _mm_and_si128( p0, _mm_set1_epi16( 0x3F00 ) ), 5 ) );
+ __m128i p2 = _mm_packus_epi16( p1, p1 );
+
+ uint64_t pi = _mm_cvtsi128_si64( p2 );
+ uint64_t data = 0;
+ for( int i=0; i<8; i++ )
+ {
+ uint64_t idx = AlphaIndexTable_SSE[(pi>>(i*8)) & 0x3F];
+ data |= idx << (i*6);
+ }
+ return (uint64_t)(uint16_t)_mm_cvtsi128_si32( minmax ) | ( data << 16 );
+}
+#endif
+
+void CompressDxt1( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
+{
+#ifdef __AVX2__
+ if( width%8 == 0 )
+ {
+ blocks /= 2;
+ uint32_t buf[8*4];
+ int i = 0;
+ char* dst8 = (char*)dst;
+
+ do
+ {
+ auto tmp = (char*)buf;
+ memcpy( tmp, src + width * 0, 8*4 );
+ memcpy( tmp + 8*4, src + width * 1, 8*4 );
+ memcpy( tmp + 16*4, src + width * 2, 8*4 );
+ memcpy( tmp + 24*4, src + width * 3, 8*4 );
+ src += 8;
+ if( ++i == width/8 )
+ {
+ src += width * 3;
+ i = 0;
+ }
+
+ ProcessRGB_AVX( (uint8_t*)buf, dst8 );
+ }
+ while( --blocks );
+ }
+ else
+#endif
+ {
+ uint32_t buf[4*4];
+ int i = 0;
+
+ auto ptr = dst;
+ do
+ {
+ auto tmp = (char*)buf;
+ memcpy( tmp, src + width * 0, 4*4 );
+ memcpy( tmp + 4*4, src + width * 1, 4*4 );
+ memcpy( tmp + 8*4, src + width * 2, 4*4 );
+ memcpy( tmp + 12*4, src + width * 3, 4*4 );
+ src += 4;
+ if( ++i == width/4 )
+ {
+ src += width * 3;
+ i = 0;
+ }
+
+ const auto c = ProcessRGB( (uint8_t*)buf );
+ uint8_t fix[8];
+ memcpy( fix, &c, 8 );
+ for( int j=4; j<8; j++ ) fix[j] = DxtcIndexTable[fix[j]];
+ memcpy( ptr, fix, sizeof( uint64_t ) );
+ ptr++;
+ }
+ while( --blocks );
+ }
+}
+
+void CompressDxt1Dither( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
+{
+ uint32_t buf[4*4];
+ int i = 0;
+
+ auto ptr = dst;
+ do
+ {
+ auto tmp = (char*)buf;
+ memcpy( tmp, src + width * 0, 4*4 );
+ memcpy( tmp + 4*4, src + width * 1, 4*4 );
+ memcpy( tmp + 8*4, src + width * 2, 4*4 );
+ memcpy( tmp + 12*4, src + width * 3, 4*4 );
+ src += 4;
+ if( ++i == width/4 )
+ {
+ src += width * 3;
+ i = 0;
+ }
+
+ Dither( (uint8_t*)buf );
+
+ const auto c = ProcessRGB( (uint8_t*)buf );
+ uint8_t fix[8];
+ memcpy( fix, &c, 8 );
+ for( int j=4; j<8; j++ ) fix[j] = DxtcIndexTable[fix[j]];
+ memcpy( ptr, fix, sizeof( uint64_t ) );
+ ptr++;
+ }
+ while( --blocks );
+}
+
+void CompressDxt5( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
+{
+ int i = 0;
+ auto ptr = dst;
+ do
+ {
+#ifdef __SSE4_1__
+ __m128i px0 = _mm_loadu_si128( (__m128i*)( src + width * 0 ) );
+ __m128i px1 = _mm_loadu_si128( (__m128i*)( src + width * 1 ) );
+ __m128i px2 = _mm_loadu_si128( (__m128i*)( src + width * 2 ) );
+ __m128i px3 = _mm_loadu_si128( (__m128i*)( src + width * 3 ) );
+
+ src += 4;
+ if( ++i == width/4 )
+ {
+ src += width * 3;
+ i = 0;
+ }
+
+ *ptr++ = ProcessAlpha_SSE( px0, px1, px2, px3 );
+
+ const auto c = ProcessRGB_SSE( px0, px1, px2, px3 );
+ uint8_t fix[8];
+ memcpy( fix, &c, 8 );
+ for( int j=4; j<8; j++ ) fix[j] = DxtcIndexTable[fix[j]];
+ memcpy( ptr, fix, sizeof( uint64_t ) );
+ ptr++;
+#else
+ uint32_t rgba[4*4];
+ uint8_t alpha[4*4];
+
+ auto tmp = (char*)rgba;
+ memcpy( tmp, src + width * 0, 4*4 );
+ memcpy( tmp + 4*4, src + width * 1, 4*4 );
+ memcpy( tmp + 8*4, src + width * 2, 4*4 );
+ memcpy( tmp + 12*4, src + width * 3, 4*4 );
+ src += 4;
+ if( ++i == width/4 )
+ {
+ src += width * 3;
+ i = 0;
+ }
+
+ for( int i=0; i<16; i++ )
+ {
+ alpha[i] = rgba[i] >> 24;
+ rgba[i] &= 0xFFFFFF;
+ }
+ *ptr++ = ProcessAlpha( alpha );
+
+ const auto c = ProcessRGB( (uint8_t*)rgba );
+ uint8_t fix[8];
+ memcpy( fix, &c, 8 );
+ for( int j=4; j<8; j++ ) fix[j] = DxtcIndexTable[fix[j]];
+ memcpy( ptr, fix, sizeof( uint64_t ) );
+ ptr++;
+#endif
+ }
+ while( --blocks );
+}
diff --git a/thirdparty/etcpak/ProcessDxtc.hpp b/thirdparty/etcpak/ProcessDxtc.hpp
new file mode 100644
index 0000000000..8e0b12e4bd
--- /dev/null
+++ b/thirdparty/etcpak/ProcessDxtc.hpp
@@ -0,0 +1,11 @@
+#ifndef __PROCESSDXT1_HPP__
+#define __PROCESSDXT1_HPP__
+
+#include <stddef.h>
+#include <stdint.h>
+
+void CompressDxt1( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width );
+void CompressDxt1Dither( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width );
+void CompressDxt5( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width );
+
+#endif
diff --git a/thirdparty/etcpak/ProcessRGB.cpp b/thirdparty/etcpak/ProcessRGB.cpp
new file mode 100644
index 0000000000..7f4524d105
--- /dev/null
+++ b/thirdparty/etcpak/ProcessRGB.cpp
@@ -0,0 +1,3100 @@
+#include <array>
+#include <string.h>
+#include <limits>
+
+#ifdef __ARM_NEON
+# include <arm_neon.h>
+#endif
+
+#include "Dither.hpp"
+#include "ForceInline.hpp"
+#include "Math.hpp"
+#include "ProcessCommon.hpp"
+#include "ProcessRGB.hpp"
+#include "Tables.hpp"
+#include "Vector.hpp"
+#if defined __SSE4_1__ || defined __AVX2__ || defined _MSC_VER
+# ifdef _MSC_VER
+# include <intrin.h>
+# include <Windows.h>
+# define _bswap(x) _byteswap_ulong(x)
+# define _bswap64(x) _byteswap_uint64(x)
+# else
+# include <x86intrin.h>
+# endif
+#endif
+
+#ifndef _bswap
+# define _bswap(x) __builtin_bswap32(x)
+# define _bswap64(x) __builtin_bswap64(x)
+#endif
+
+namespace
+{
+
+#if defined _MSC_VER && !defined __clang__
+static etcpak_force_inline unsigned long _bit_scan_forward( unsigned long mask )
+{
+ unsigned long ret;
+ _BitScanForward( &ret, mask );
+ return ret;
+}
+#endif
+
+typedef std::array<uint16_t, 4> v4i;
+
+#ifdef __AVX2__
+static etcpak_force_inline __m256i Sum4_AVX2( const uint8_t* data) noexcept
+{
+ __m128i d0 = _mm_loadu_si128(((__m128i*)data) + 0);
+ __m128i d1 = _mm_loadu_si128(((__m128i*)data) + 1);
+ __m128i d2 = _mm_loadu_si128(((__m128i*)data) + 2);
+ __m128i d3 = _mm_loadu_si128(((__m128i*)data) + 3);
+
+ __m128i dm0 = _mm_and_si128(d0, _mm_set1_epi32(0x00FFFFFF));
+ __m128i dm1 = _mm_and_si128(d1, _mm_set1_epi32(0x00FFFFFF));
+ __m128i dm2 = _mm_and_si128(d2, _mm_set1_epi32(0x00FFFFFF));
+ __m128i dm3 = _mm_and_si128(d3, _mm_set1_epi32(0x00FFFFFF));
+
+ __m256i t0 = _mm256_cvtepu8_epi16(dm0);
+ __m256i t1 = _mm256_cvtepu8_epi16(dm1);
+ __m256i t2 = _mm256_cvtepu8_epi16(dm2);
+ __m256i t3 = _mm256_cvtepu8_epi16(dm3);
+
+ __m256i sum0 = _mm256_add_epi16(t0, t1);
+ __m256i sum1 = _mm256_add_epi16(t2, t3);
+
+ __m256i s0 = _mm256_permute2x128_si256(sum0, sum1, (0) | (3 << 4)); // 0, 0, 3, 3
+ __m256i s1 = _mm256_permute2x128_si256(sum0, sum1, (1) | (2 << 4)); // 1, 1, 2, 2
+
+ __m256i s2 = _mm256_permute4x64_epi64(s0, _MM_SHUFFLE(1, 3, 0, 2));
+ __m256i s3 = _mm256_permute4x64_epi64(s0, _MM_SHUFFLE(0, 2, 1, 3));
+ __m256i s4 = _mm256_permute4x64_epi64(s1, _MM_SHUFFLE(3, 1, 0, 2));
+ __m256i s5 = _mm256_permute4x64_epi64(s1, _MM_SHUFFLE(2, 0, 1, 3));
+
+ __m256i sum5 = _mm256_add_epi16(s2, s3); // 3, 0, 3, 0
+ __m256i sum6 = _mm256_add_epi16(s4, s5); // 2, 1, 1, 2
+ return _mm256_add_epi16(sum5, sum6); // 3+2, 0+1, 3+1, 3+2
+}
+
+static etcpak_force_inline __m256i Average_AVX2( const __m256i data) noexcept
+{
+ __m256i a = _mm256_add_epi16(data, _mm256_set1_epi16(4));
+
+ return _mm256_srli_epi16(a, 3);
+}
+
+static etcpak_force_inline __m128i CalcErrorBlock_AVX2( const __m256i data, const v4i a[8]) noexcept
+{
+ //
+ __m256i a0 = _mm256_load_si256((__m256i*)a[0].data());
+ __m256i a1 = _mm256_load_si256((__m256i*)a[4].data());
+
+ // err = 8 * ( sq( average[0] ) + sq( average[1] ) + sq( average[2] ) );
+ __m256i a4 = _mm256_madd_epi16(a0, a0);
+ __m256i a5 = _mm256_madd_epi16(a1, a1);
+
+ __m256i a6 = _mm256_hadd_epi32(a4, a5);
+ __m256i a7 = _mm256_slli_epi32(a6, 3);
+
+ __m256i a8 = _mm256_add_epi32(a7, _mm256_set1_epi32(0x3FFFFFFF)); // Big value to prevent negative values, but small enough to prevent overflow
+
+ // average is not swapped
+ // err -= block[0] * 2 * average[0];
+ // err -= block[1] * 2 * average[1];
+ // err -= block[2] * 2 * average[2];
+ __m256i a2 = _mm256_slli_epi16(a0, 1);
+ __m256i a3 = _mm256_slli_epi16(a1, 1);
+ __m256i b0 = _mm256_madd_epi16(a2, data);
+ __m256i b1 = _mm256_madd_epi16(a3, data);
+
+ __m256i b2 = _mm256_hadd_epi32(b0, b1);
+ __m256i b3 = _mm256_sub_epi32(a8, b2);
+ __m256i b4 = _mm256_hadd_epi32(b3, b3);
+
+ __m256i b5 = _mm256_permutevar8x32_epi32(b4, _mm256_set_epi32(0, 0, 0, 0, 5, 1, 4, 0));
+
+ return _mm256_castsi256_si128(b5);
+}
+
+static etcpak_force_inline void ProcessAverages_AVX2(const __m256i d, v4i a[8] ) noexcept
+{
+ __m256i t = _mm256_add_epi16(_mm256_mullo_epi16(d, _mm256_set1_epi16(31)), _mm256_set1_epi16(128));
+
+ __m256i c = _mm256_srli_epi16(_mm256_add_epi16(t, _mm256_srli_epi16(t, 8)), 8);
+
+ __m256i c1 = _mm256_shuffle_epi32(c, _MM_SHUFFLE(3, 2, 3, 2));
+ __m256i diff = _mm256_sub_epi16(c, c1);
+ diff = _mm256_max_epi16(diff, _mm256_set1_epi16(-4));
+ diff = _mm256_min_epi16(diff, _mm256_set1_epi16(3));
+
+ __m256i co = _mm256_add_epi16(c1, diff);
+
+ c = _mm256_blend_epi16(co, c, 0xF0);
+
+ __m256i a0 = _mm256_or_si256(_mm256_slli_epi16(c, 3), _mm256_srli_epi16(c, 2));
+
+ _mm256_store_si256((__m256i*)a[4].data(), a0);
+
+ __m256i t0 = _mm256_add_epi16(_mm256_mullo_epi16(d, _mm256_set1_epi16(15)), _mm256_set1_epi16(128));
+ __m256i t1 = _mm256_srli_epi16(_mm256_add_epi16(t0, _mm256_srli_epi16(t0, 8)), 8);
+
+ __m256i t2 = _mm256_or_si256(t1, _mm256_slli_epi16(t1, 4));
+
+ _mm256_store_si256((__m256i*)a[0].data(), t2);
+}
+
+static etcpak_force_inline uint64_t EncodeAverages_AVX2( const v4i a[8], size_t idx ) noexcept
+{
+ uint64_t d = ( idx << 24 );
+ size_t base = idx << 1;
+
+ __m128i a0 = _mm_load_si128((const __m128i*)a[base].data());
+
+ __m128i r0, r1;
+
+ if( ( idx & 0x2 ) == 0 )
+ {
+ r0 = _mm_srli_epi16(a0, 4);
+
+ __m128i a1 = _mm_unpackhi_epi64(r0, r0);
+ r1 = _mm_slli_epi16(a1, 4);
+ }
+ else
+ {
+ __m128i a1 = _mm_and_si128(a0, _mm_set1_epi16(-8));
+
+ r0 = _mm_unpackhi_epi64(a1, a1);
+ __m128i a2 = _mm_sub_epi16(a1, r0);
+ __m128i a3 = _mm_srai_epi16(a2, 3);
+ r1 = _mm_and_si128(a3, _mm_set1_epi16(0x07));
+ }
+
+ __m128i r2 = _mm_or_si128(r0, r1);
+ // do missing swap for average values
+ __m128i r3 = _mm_shufflelo_epi16(r2, _MM_SHUFFLE(3, 0, 1, 2));
+ __m128i r4 = _mm_packus_epi16(r3, _mm_setzero_si128());
+ d |= _mm_cvtsi128_si32(r4);
+
+ return d;
+}
+
+static etcpak_force_inline uint64_t CheckSolid_AVX2( const uint8_t* src ) noexcept
+{
+ __m256i d0 = _mm256_loadu_si256(((__m256i*)src) + 0);
+ __m256i d1 = _mm256_loadu_si256(((__m256i*)src) + 1);
+
+ __m256i c = _mm256_broadcastd_epi32(_mm256_castsi256_si128(d0));
+
+ __m256i c0 = _mm256_cmpeq_epi8(d0, c);
+ __m256i c1 = _mm256_cmpeq_epi8(d1, c);
+
+ __m256i m = _mm256_and_si256(c0, c1);
+
+ if (!_mm256_testc_si256(m, _mm256_set1_epi32(-1)))
+ {
+ return 0;
+ }
+
+ return 0x02000000 |
+ ( (unsigned int)( src[0] & 0xF8 ) << 16 ) |
+ ( (unsigned int)( src[1] & 0xF8 ) << 8 ) |
+ ( (unsigned int)( src[2] & 0xF8 ) );
+}
+
+static etcpak_force_inline __m128i PrepareAverages_AVX2( v4i a[8], const uint8_t* src) noexcept
+{
+ __m256i sum4 = Sum4_AVX2( src );
+
+ ProcessAverages_AVX2(Average_AVX2( sum4 ), a );
+
+ return CalcErrorBlock_AVX2( sum4, a);
+}
+
+static etcpak_force_inline __m128i PrepareAverages_AVX2( v4i a[8], const __m256i sum4) noexcept
+{
+ ProcessAverages_AVX2(Average_AVX2( sum4 ), a );
+
+ return CalcErrorBlock_AVX2( sum4, a);
+}
+
+static etcpak_force_inline void FindBestFit_4x2_AVX2( uint32_t terr[2][8], uint32_t tsel[8], v4i a[8], const uint32_t offset, const uint8_t* data) noexcept
+{
+ __m256i sel0 = _mm256_setzero_si256();
+ __m256i sel1 = _mm256_setzero_si256();
+
+ for (unsigned int j = 0; j < 2; ++j)
+ {
+ unsigned int bid = offset + 1 - j;
+
+ __m256i squareErrorSum = _mm256_setzero_si256();
+
+ __m128i a0 = _mm_loadl_epi64((const __m128i*)a[bid].data());
+ __m256i a1 = _mm256_broadcastq_epi64(a0);
+
+ // Processing one full row each iteration
+ for (size_t i = 0; i < 8; i += 4)
+ {
+ __m128i rgb = _mm_loadu_si128((const __m128i*)(data + i * 4));
+
+ __m256i rgb16 = _mm256_cvtepu8_epi16(rgb);
+ __m256i d = _mm256_sub_epi16(a1, rgb16);
+
+ // The scaling values are divided by two and rounded, to allow the differences to be in the range of signed int16
+ // This produces slightly different results, but is significantly faster
+ __m256i pixel0 = _mm256_madd_epi16(d, _mm256_set_epi16(0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14));
+ __m256i pixel1 = _mm256_packs_epi32(pixel0, pixel0);
+ __m256i pixel2 = _mm256_hadd_epi16(pixel1, pixel1);
+ __m128i pixel3 = _mm256_castsi256_si128(pixel2);
+
+ __m128i pix0 = _mm_broadcastw_epi16(pixel3);
+ __m128i pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
+ __m256i pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);
+
+ // Processing first two pixels of the row
+ {
+ __m256i pix = _mm256_abs_epi16(pixel);
+
+ // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
+ // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
+ __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
+ __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));
+
+ __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
+ __m256i minError = _mm256_min_epi16(error0, error1);
+
+ // Exploiting the symmetry of the selector table and using the sign bit
+ // This produces slightly different results, but is significantly faster
+ __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);
+
+ // Interleaving values so madd instruction can be used
+ __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
+ __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));
+
+ __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
+ // Squaring the minimum error to produce correct values when adding
+ __m256i squareError = _mm256_madd_epi16(minError2, minError2);
+
+ squareErrorSum = _mm256_add_epi32(squareErrorSum, squareError);
+
+ // Packing selector bits
+ __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i + j * 8));
+ __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i + j * 8));
+
+ sel0 = _mm256_or_si256(sel0, minIndexLo2);
+ sel1 = _mm256_or_si256(sel1, minIndexHi2);
+ }
+
+ pixel3 = _mm256_extracti128_si256(pixel2, 1);
+ pix0 = _mm_broadcastw_epi16(pixel3);
+ pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
+ pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);
+
+ // Processing second two pixels of the row
+ {
+ __m256i pix = _mm256_abs_epi16(pixel);
+
+ // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
+ // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
+ __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
+ __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));
+
+ __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
+ __m256i minError = _mm256_min_epi16(error0, error1);
+
+ // Exploiting the symmetry of the selector table and using the sign bit
+ __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);
+
+ // Interleaving values so madd instruction can be used
+ __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
+ __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));
+
+ __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
+ // Squaring the minimum error to produce correct values when adding
+ __m256i squareError = _mm256_madd_epi16(minError2, minError2);
+
+ squareErrorSum = _mm256_add_epi32(squareErrorSum, squareError);
+
+ // Packing selector bits
+ __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i + j * 8));
+ __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i + j * 8));
+ __m256i minIndexLo3 = _mm256_slli_epi16(minIndexLo2, 2);
+ __m256i minIndexHi3 = _mm256_slli_epi16(minIndexHi2, 2);
+
+ sel0 = _mm256_or_si256(sel0, minIndexLo3);
+ sel1 = _mm256_or_si256(sel1, minIndexHi3);
+ }
+ }
+
+ data += 8 * 4;
+
+ _mm256_store_si256((__m256i*)terr[1 - j], squareErrorSum);
+ }
+
+ // Interleave selector bits
+ __m256i minIndexLo0 = _mm256_unpacklo_epi16(sel0, sel1);
+ __m256i minIndexHi0 = _mm256_unpackhi_epi16(sel0, sel1);
+
+ __m256i minIndexLo1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (0) | (2 << 4));
+ __m256i minIndexHi1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (1) | (3 << 4));
+
+ __m256i minIndexHi2 = _mm256_slli_epi32(minIndexHi1, 1);
+
+ __m256i sel = _mm256_or_si256(minIndexLo1, minIndexHi2);
+
+ _mm256_store_si256((__m256i*)tsel, sel);
+}
+
+static etcpak_force_inline void FindBestFit_2x4_AVX2( uint32_t terr[2][8], uint32_t tsel[8], v4i a[8], const uint32_t offset, const uint8_t* data) noexcept
+{
+ __m256i sel0 = _mm256_setzero_si256();
+ __m256i sel1 = _mm256_setzero_si256();
+
+ __m256i squareErrorSum0 = _mm256_setzero_si256();
+ __m256i squareErrorSum1 = _mm256_setzero_si256();
+
+ __m128i a0 = _mm_loadl_epi64((const __m128i*)a[offset + 1].data());
+ __m128i a1 = _mm_loadl_epi64((const __m128i*)a[offset + 0].data());
+
+ __m128i a2 = _mm_broadcastq_epi64(a0);
+ __m128i a3 = _mm_broadcastq_epi64(a1);
+ __m256i a4 = _mm256_insertf128_si256(_mm256_castsi128_si256(a2), a3, 1);
+
+ // Processing one full row each iteration
+ for (size_t i = 0; i < 16; i += 4)
+ {
+ __m128i rgb = _mm_loadu_si128((const __m128i*)(data + i * 4));
+
+ __m256i rgb16 = _mm256_cvtepu8_epi16(rgb);
+ __m256i d = _mm256_sub_epi16(a4, rgb16);
+
+ // The scaling values are divided by two and rounded, to allow the differences to be in the range of signed int16
+ // This produces slightly different results, but is significantly faster
+ __m256i pixel0 = _mm256_madd_epi16(d, _mm256_set_epi16(0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14));
+ __m256i pixel1 = _mm256_packs_epi32(pixel0, pixel0);
+ __m256i pixel2 = _mm256_hadd_epi16(pixel1, pixel1);
+ __m128i pixel3 = _mm256_castsi256_si128(pixel2);
+
+ __m128i pix0 = _mm_broadcastw_epi16(pixel3);
+ __m128i pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
+ __m256i pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);
+
+ // Processing first two pixels of the row
+ {
+ __m256i pix = _mm256_abs_epi16(pixel);
+
+ // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
+ // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
+ __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
+ __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));
+
+ __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
+ __m256i minError = _mm256_min_epi16(error0, error1);
+
+ // Exploiting the symmetry of the selector table and using the sign bit
+ __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);
+
+ // Interleaving values so madd instruction can be used
+ __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
+ __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));
+
+ __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
+ // Squaring the minimum error to produce correct values when adding
+ __m256i squareError = _mm256_madd_epi16(minError2, minError2);
+
+ squareErrorSum0 = _mm256_add_epi32(squareErrorSum0, squareError);
+
+ // Packing selector bits
+ __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i));
+ __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i));
+
+ sel0 = _mm256_or_si256(sel0, minIndexLo2);
+ sel1 = _mm256_or_si256(sel1, minIndexHi2);
+ }
+
+ pixel3 = _mm256_extracti128_si256(pixel2, 1);
+ pix0 = _mm_broadcastw_epi16(pixel3);
+ pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
+ pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);
+
+ // Processing second two pixels of the row
+ {
+ __m256i pix = _mm256_abs_epi16(pixel);
+
+ // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
+ // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
+ __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
+ __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));
+
+ __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
+ __m256i minError = _mm256_min_epi16(error0, error1);
+
+ // Exploiting the symmetry of the selector table and using the sign bit
+ __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);
+
+ // Interleaving values so madd instruction can be used
+ __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
+ __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));
+
+ __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
+ // Squaring the minimum error to produce correct values when adding
+ __m256i squareError = _mm256_madd_epi16(minError2, minError2);
+
+ squareErrorSum1 = _mm256_add_epi32(squareErrorSum1, squareError);
+
+ // Packing selector bits
+ __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i));
+ __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i));
+ __m256i minIndexLo3 = _mm256_slli_epi16(minIndexLo2, 2);
+ __m256i minIndexHi3 = _mm256_slli_epi16(minIndexHi2, 2);
+
+ sel0 = _mm256_or_si256(sel0, minIndexLo3);
+ sel1 = _mm256_or_si256(sel1, minIndexHi3);
+ }
+ }
+
+ _mm256_store_si256((__m256i*)terr[1], squareErrorSum0);
+ _mm256_store_si256((__m256i*)terr[0], squareErrorSum1);
+
+ // Interleave selector bits
+ __m256i minIndexLo0 = _mm256_unpacklo_epi16(sel0, sel1);
+ __m256i minIndexHi0 = _mm256_unpackhi_epi16(sel0, sel1);
+
+ __m256i minIndexLo1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (0) | (2 << 4));
+ __m256i minIndexHi1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (1) | (3 << 4));
+
+ __m256i minIndexHi2 = _mm256_slli_epi32(minIndexHi1, 1);
+
+ __m256i sel = _mm256_or_si256(minIndexLo1, minIndexHi2);
+
+ _mm256_store_si256((__m256i*)tsel, sel);
+}
+
+static etcpak_force_inline uint64_t EncodeSelectors_AVX2( uint64_t d, const uint32_t terr[2][8], const uint32_t tsel[8], const bool rotate) noexcept
+{
+ size_t tidx[2];
+
+ // Get index of minimum error (terr[0] and terr[1])
+ __m256i err0 = _mm256_load_si256((const __m256i*)terr[0]);
+ __m256i err1 = _mm256_load_si256((const __m256i*)terr[1]);
+
+ __m256i errLo = _mm256_permute2x128_si256(err0, err1, (0) | (2 << 4));
+ __m256i errHi = _mm256_permute2x128_si256(err0, err1, (1) | (3 << 4));
+
+ __m256i errMin0 = _mm256_min_epu32(errLo, errHi);
+
+ __m256i errMin1 = _mm256_shuffle_epi32(errMin0, _MM_SHUFFLE(2, 3, 0, 1));
+ __m256i errMin2 = _mm256_min_epu32(errMin0, errMin1);
+
+ __m256i errMin3 = _mm256_shuffle_epi32(errMin2, _MM_SHUFFLE(1, 0, 3, 2));
+ __m256i errMin4 = _mm256_min_epu32(errMin3, errMin2);
+
+ __m256i errMin5 = _mm256_permute2x128_si256(errMin4, errMin4, (0) | (0 << 4));
+ __m256i errMin6 = _mm256_permute2x128_si256(errMin4, errMin4, (1) | (1 << 4));
+
+ __m256i errMask0 = _mm256_cmpeq_epi32(errMin5, err0);
+ __m256i errMask1 = _mm256_cmpeq_epi32(errMin6, err1);
+
+ uint32_t mask0 = _mm256_movemask_epi8(errMask0);
+ uint32_t mask1 = _mm256_movemask_epi8(errMask1);
+
+ tidx[0] = _bit_scan_forward(mask0) >> 2;
+ tidx[1] = _bit_scan_forward(mask1) >> 2;
+
+ d |= tidx[0] << 26;
+ d |= tidx[1] << 29;
+
+ unsigned int t0 = tsel[tidx[0]];
+ unsigned int t1 = tsel[tidx[1]];
+
+ if (!rotate)
+ {
+ t0 &= 0xFF00FF00;
+ t1 &= 0x00FF00FF;
+ }
+ else
+ {
+ t0 &= 0xCCCCCCCC;
+ t1 &= 0x33333333;
+ }
+
+ // Flip selectors from sign bit
+ unsigned int t2 = (t0 | t1) ^ 0xFFFF0000;
+
+ return d | static_cast<uint64_t>(_bswap(t2)) << 32;
+}
+
+static etcpak_force_inline __m128i r6g7b6_AVX2(__m128 cof, __m128 chf, __m128 cvf) noexcept
+{
+ __m128i co = _mm_cvttps_epi32(cof);
+ __m128i ch = _mm_cvttps_epi32(chf);
+ __m128i cv = _mm_cvttps_epi32(cvf);
+
+ __m128i coh = _mm_packus_epi32(co, ch);
+ __m128i cv0 = _mm_packus_epi32(cv, _mm_setzero_si128());
+
+ __m256i cohv0 = _mm256_inserti128_si256(_mm256_castsi128_si256(coh), cv0, 1);
+ __m256i cohv1 = _mm256_min_epu16(cohv0, _mm256_set1_epi16(1023));
+
+ __m256i cohv2 = _mm256_sub_epi16(cohv1, _mm256_set1_epi16(15));
+ __m256i cohv3 = _mm256_srai_epi16(cohv2, 1);
+
+ __m256i cohvrb0 = _mm256_add_epi16(cohv3, _mm256_set1_epi16(11));
+ __m256i cohvrb1 = _mm256_add_epi16(cohv3, _mm256_set1_epi16(4));
+ __m256i cohvg0 = _mm256_add_epi16(cohv3, _mm256_set1_epi16(9));
+ __m256i cohvg1 = _mm256_add_epi16(cohv3, _mm256_set1_epi16(6));
+
+ __m256i cohvrb2 = _mm256_srai_epi16(cohvrb0, 7);
+ __m256i cohvrb3 = _mm256_srai_epi16(cohvrb1, 7);
+ __m256i cohvg2 = _mm256_srai_epi16(cohvg0, 8);
+ __m256i cohvg3 = _mm256_srai_epi16(cohvg1, 8);
+
+ __m256i cohvrb4 = _mm256_sub_epi16(cohvrb0, cohvrb2);
+ __m256i cohvrb5 = _mm256_sub_epi16(cohvrb4, cohvrb3);
+ __m256i cohvg4 = _mm256_sub_epi16(cohvg0, cohvg2);
+ __m256i cohvg5 = _mm256_sub_epi16(cohvg4, cohvg3);
+
+ __m256i cohvrb6 = _mm256_srai_epi16(cohvrb5, 3);
+ __m256i cohvg6 = _mm256_srai_epi16(cohvg5, 2);
+
+ __m256i cohv4 = _mm256_blend_epi16(cohvg6, cohvrb6, 0x55);
+
+ __m128i cohv5 = _mm_packus_epi16(_mm256_castsi256_si128(cohv4), _mm256_extracti128_si256(cohv4, 1));
+ return _mm_shuffle_epi8(cohv5, _mm_setr_epi8(6, 5, 4, -1, 2, 1, 0, -1, 10, 9, 8, -1, -1, -1, -1, -1));
+}
+
+struct Plane
+{
+ uint64_t plane;
+ uint64_t error;
+ __m256i sum4;
+};
+
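+// ETC2 planar mode: a color gradient defined by the three corner colors O, H and V
+// is fitted to the block, quantized to R6G7B6 and encoded. The returned Plane also
+// carries the squared error of that fit and the channel sums (sum4) for reuse by the caller.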
+static etcpak_force_inline Plane Planar_AVX2(const uint8_t* src)
+{
+ __m128i d0 = _mm_loadu_si128(((__m128i*)src) + 0);
+ __m128i d1 = _mm_loadu_si128(((__m128i*)src) + 1);
+ __m128i d2 = _mm_loadu_si128(((__m128i*)src) + 2);
+ __m128i d3 = _mm_loadu_si128(((__m128i*)src) + 3);
+
+ __m128i rgb0 = _mm_shuffle_epi8(d0, _mm_setr_epi8(0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, -1, -1, -1, -1));
+ __m128i rgb1 = _mm_shuffle_epi8(d1, _mm_setr_epi8(0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, -1, -1, -1, -1));
+ __m128i rgb2 = _mm_shuffle_epi8(d2, _mm_setr_epi8(0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, -1, -1, -1, -1));
+ __m128i rgb3 = _mm_shuffle_epi8(d3, _mm_setr_epi8(0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, -1, -1, -1, -1));
+
+ __m128i rg0 = _mm_unpacklo_epi32(rgb0, rgb1);
+ __m128i rg1 = _mm_unpacklo_epi32(rgb2, rgb3);
+ __m128i b0 = _mm_unpackhi_epi32(rgb0, rgb1);
+ __m128i b1 = _mm_unpackhi_epi32(rgb2, rgb3);
+
+ // swap channels
+ __m128i b8 = _mm_unpacklo_epi64(rg0, rg1);
+ __m128i g8 = _mm_unpackhi_epi64(rg0, rg1);
+ __m128i r8 = _mm_unpacklo_epi64(b0, b1);
+
+ __m128i t0 = _mm_sad_epu8(r8, _mm_setzero_si128());
+ __m128i t1 = _mm_sad_epu8(g8, _mm_setzero_si128());
+ __m128i t2 = _mm_sad_epu8(b8, _mm_setzero_si128());
+
+ __m128i r8s = _mm_shuffle_epi8(r8, _mm_set_epi8(0xF, 0xE, 0xB, 0xA, 0x7, 0x6, 0x3, 0x2, 0xD, 0xC, 0x9, 0x8, 0x5, 0x4, 0x1, 0x0));
+ __m128i g8s = _mm_shuffle_epi8(g8, _mm_set_epi8(0xF, 0xE, 0xB, 0xA, 0x7, 0x6, 0x3, 0x2, 0xD, 0xC, 0x9, 0x8, 0x5, 0x4, 0x1, 0x0));
+ __m128i b8s = _mm_shuffle_epi8(b8, _mm_set_epi8(0xF, 0xE, 0xB, 0xA, 0x7, 0x6, 0x3, 0x2, 0xD, 0xC, 0x9, 0x8, 0x5, 0x4, 0x1, 0x0));
+
+ __m128i s0 = _mm_sad_epu8(r8s, _mm_setzero_si128());
+ __m128i s1 = _mm_sad_epu8(g8s, _mm_setzero_si128());
+ __m128i s2 = _mm_sad_epu8(b8s, _mm_setzero_si128());
+
+ __m256i sr0 = _mm256_insertf128_si256(_mm256_castsi128_si256(t0), s0, 1);
+ __m256i sg0 = _mm256_insertf128_si256(_mm256_castsi128_si256(t1), s1, 1);
+ __m256i sb0 = _mm256_insertf128_si256(_mm256_castsi128_si256(t2), s2, 1);
+
+ __m256i sr1 = _mm256_slli_epi64(sr0, 32);
+ __m256i sg1 = _mm256_slli_epi64(sg0, 16);
+
+ __m256i srb = _mm256_or_si256(sr1, sb0);
+ __m256i srgb = _mm256_or_si256(srb, sg1);
+
+ __m128i t3 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(t0), _mm_castsi128_ps(t1), _MM_SHUFFLE(2, 0, 2, 0)));
+ __m128i t4 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(3, 1, 2, 0));
+ __m128i t5 = _mm_hadd_epi32(t3, t4);
+ __m128i t6 = _mm_shuffle_epi32(t5, _MM_SHUFFLE(1, 1, 1, 1));
+ __m128i t7 = _mm_shuffle_epi32(t5, _MM_SHUFFLE(2, 2, 2, 2));
+
+ __m256i sr = _mm256_broadcastw_epi16(t5);
+ __m256i sg = _mm256_broadcastw_epi16(t6);
+ __m256i sb = _mm256_broadcastw_epi16(t7);
+
+ __m256i r08 = _mm256_cvtepu8_epi16(r8);
+ __m256i g08 = _mm256_cvtepu8_epi16(g8);
+ __m256i b08 = _mm256_cvtepu8_epi16(b8);
+
+ __m256i r16 = _mm256_slli_epi16(r08, 4);
+ __m256i g16 = _mm256_slli_epi16(g08, 4);
+ __m256i b16 = _mm256_slli_epi16(b08, 4);
+
+ __m256i difR0 = _mm256_sub_epi16(r16, sr);
+ __m256i difG0 = _mm256_sub_epi16(g16, sg);
+ __m256i difB0 = _mm256_sub_epi16(b16, sb);
+
+ __m256i difRyz = _mm256_madd_epi16(difR0, _mm256_set_epi16(255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255));
+ __m256i difGyz = _mm256_madd_epi16(difG0, _mm256_set_epi16(255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255));
+ __m256i difByz = _mm256_madd_epi16(difB0, _mm256_set_epi16(255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255));
+
+ __m256i difRxz = _mm256_madd_epi16(difR0, _mm256_set_epi16(255, 255, 255, 255, 85, 85, 85, 85, -85, -85, -85, -85, -255, -255, -255, -255));
+ __m256i difGxz = _mm256_madd_epi16(difG0, _mm256_set_epi16(255, 255, 255, 255, 85, 85, 85, 85, -85, -85, -85, -85, -255, -255, -255, -255));
+ __m256i difBxz = _mm256_madd_epi16(difB0, _mm256_set_epi16(255, 255, 255, 255, 85, 85, 85, 85, -85, -85, -85, -85, -255, -255, -255, -255));
+
+ __m256i difRGyz = _mm256_hadd_epi32(difRyz, difGyz);
+ __m256i difByzxz = _mm256_hadd_epi32(difByz, difBxz);
+
+ __m256i difRGxz = _mm256_hadd_epi32(difRxz, difGxz);
+
+ __m128i sumRGyz = _mm_add_epi32(_mm256_castsi256_si128(difRGyz), _mm256_extracti128_si256(difRGyz, 1));
+ __m128i sumByzxz = _mm_add_epi32(_mm256_castsi256_si128(difByzxz), _mm256_extracti128_si256(difByzxz, 1));
+ __m128i sumRGxz = _mm_add_epi32(_mm256_castsi256_si128(difRGxz), _mm256_extracti128_si256(difRGxz, 1));
+
+ __m128i sumRGByz = _mm_hadd_epi32(sumRGyz, sumByzxz);
+ __m128i sumRGByzxz = _mm_hadd_epi32(sumRGxz, sumByzxz);
+
+ __m128i sumRGBxz = _mm_shuffle_epi32(sumRGByzxz, _MM_SHUFFLE(2, 3, 1, 0));
+
+ __m128 sumRGByzf = _mm_cvtepi32_ps(sumRGByz);
+ __m128 sumRGBxzf = _mm_cvtepi32_ps(sumRGBxz);
+
+ const float value = (255 * 255 * 8.0f + 85 * 85 * 8.0f) * 16.0f;
+
+ __m128 scale = _mm_set1_ps(-4.0f / value);
+
+ __m128 af = _mm_mul_ps(sumRGBxzf, scale);
+ __m128 bf = _mm_mul_ps(sumRGByzf, scale);
+
+ __m128 df = _mm_mul_ps(_mm_cvtepi32_ps(t5), _mm_set1_ps(4.0f / 16.0f));
+
+ // calculating the three colors RGBO, RGBH, and RGBV. RGB = df - af * x - bf * y;
+ __m128 cof0 = _mm_fnmadd_ps(af, _mm_set1_ps(-255.0f), _mm_fnmadd_ps(bf, _mm_set1_ps(-255.0f), df));
+ __m128 chf0 = _mm_fnmadd_ps(af, _mm_set1_ps( 425.0f), _mm_fnmadd_ps(bf, _mm_set1_ps(-255.0f), df));
+ __m128 cvf0 = _mm_fnmadd_ps(af, _mm_set1_ps(-255.0f), _mm_fnmadd_ps(bf, _mm_set1_ps( 425.0f), df));
+
+ // convert to r6g7b6
+ __m128i cohv = r6g7b6_AVX2(cof0, chf0, cvf0);
+
+ uint64_t rgbho = _mm_extract_epi64(cohv, 0);
+ uint32_t rgbv0 = _mm_extract_epi32(cohv, 2);
+
+ // Error calculation
+ auto ro0 = (rgbho >> 48) & 0x3F;
+ auto go0 = (rgbho >> 40) & 0x7F;
+ auto bo0 = (rgbho >> 32) & 0x3F;
+ auto ro1 = (ro0 >> 4) | (ro0 << 2);
+ auto go1 = (go0 >> 6) | (go0 << 1);
+ auto bo1 = (bo0 >> 4) | (bo0 << 2);
+ auto ro2 = (ro1 << 2) + 2;
+ auto go2 = (go1 << 2) + 2;
+ auto bo2 = (bo1 << 2) + 2;
+
+ __m256i ro3 = _mm256_set1_epi16(ro2);
+ __m256i go3 = _mm256_set1_epi16(go2);
+ __m256i bo3 = _mm256_set1_epi16(bo2);
+
+ auto rh0 = (rgbho >> 16) & 0x3F;
+ auto gh0 = (rgbho >> 8) & 0x7F;
+ auto bh0 = (rgbho >> 0) & 0x3F;
+ auto rh1 = (rh0 >> 4) | (rh0 << 2);
+ auto gh1 = (gh0 >> 6) | (gh0 << 1);
+ auto bh1 = (bh0 >> 4) | (bh0 << 2);
+
+ auto rh2 = rh1 - ro1;
+ auto gh2 = gh1 - go1;
+ auto bh2 = bh1 - bo1;
+
+ __m256i rh3 = _mm256_set1_epi16(rh2);
+ __m256i gh3 = _mm256_set1_epi16(gh2);
+ __m256i bh3 = _mm256_set1_epi16(bh2);
+
+ auto rv0 = (rgbv0 >> 16) & 0x3F;
+ auto gv0 = (rgbv0 >> 8) & 0x7F;
+ auto bv0 = (rgbv0 >> 0) & 0x3F;
+ auto rv1 = (rv0 >> 4) | (rv0 << 2);
+ auto gv1 = (gv0 >> 6) | (gv0 << 1);
+ auto bv1 = (bv0 >> 4) | (bv0 << 2);
+
+ auto rv2 = rv1 - ro1;
+ auto gv2 = gv1 - go1;
+ auto bv2 = bv1 - bo1;
+
+ __m256i rv3 = _mm256_set1_epi16(rv2);
+ __m256i gv3 = _mm256_set1_epi16(gv2);
+ __m256i bv3 = _mm256_set1_epi16(bv2);
+
+ __m256i x = _mm256_set_epi16(3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0);
+
+ __m256i rh4 = _mm256_mullo_epi16(rh3, x);
+ __m256i gh4 = _mm256_mullo_epi16(gh3, x);
+ __m256i bh4 = _mm256_mullo_epi16(bh3, x);
+
+ __m256i y = _mm256_set_epi16(3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0);
+
+ __m256i rv4 = _mm256_mullo_epi16(rv3, y);
+ __m256i gv4 = _mm256_mullo_epi16(gv3, y);
+ __m256i bv4 = _mm256_mullo_epi16(bv3, y);
+
+ __m256i rxy = _mm256_add_epi16(rh4, rv4);
+ __m256i gxy = _mm256_add_epi16(gh4, gv4);
+ __m256i bxy = _mm256_add_epi16(bh4, bv4);
+
+ __m256i rp0 = _mm256_add_epi16(rxy, ro3);
+ __m256i gp0 = _mm256_add_epi16(gxy, go3);
+ __m256i bp0 = _mm256_add_epi16(bxy, bo3);
+
+ __m256i rp1 = _mm256_srai_epi16(rp0, 2);
+ __m256i gp1 = _mm256_srai_epi16(gp0, 2);
+ __m256i bp1 = _mm256_srai_epi16(bp0, 2);
+
+ __m256i rp2 = _mm256_max_epi16(_mm256_min_epi16(rp1, _mm256_set1_epi16(255)), _mm256_setzero_si256());
+ __m256i gp2 = _mm256_max_epi16(_mm256_min_epi16(gp1, _mm256_set1_epi16(255)), _mm256_setzero_si256());
+ __m256i bp2 = _mm256_max_epi16(_mm256_min_epi16(bp1, _mm256_set1_epi16(255)), _mm256_setzero_si256());
+
+ __m256i rdif = _mm256_sub_epi16(r08, rp2);
+ __m256i gdif = _mm256_sub_epi16(g08, gp2);
+ __m256i bdif = _mm256_sub_epi16(b08, bp2);
+
+ __m256i rerr = _mm256_mullo_epi16(rdif, _mm256_set1_epi16(38));
+ __m256i gerr = _mm256_mullo_epi16(gdif, _mm256_set1_epi16(76));
+ __m256i berr = _mm256_mullo_epi16(bdif, _mm256_set1_epi16(14));
+
+ __m256i sum0 = _mm256_add_epi16(rerr, gerr);
+ __m256i sum1 = _mm256_add_epi16(sum0, berr);
+
+ __m256i sum2 = _mm256_madd_epi16(sum1, sum1);
+
+ __m128i sum3 = _mm_add_epi32(_mm256_castsi256_si128(sum2), _mm256_extracti128_si256(sum2, 1));
+
+ uint32_t err0 = _mm_extract_epi32(sum3, 0);
+ uint32_t err1 = _mm_extract_epi32(sum3, 1);
+ uint32_t err2 = _mm_extract_epi32(sum3, 2);
+ uint32_t err3 = _mm_extract_epi32(sum3, 3);
+
+ uint64_t error = err0 + err1 + err2 + err3;
+ /**/
+
+ uint32_t rgbv = ( rgbv0 & 0x3F ) | ( ( rgbv0 >> 2 ) & 0x1FC0 ) | ( ( rgbv0 >> 3 ) & 0x7E000 );
+ uint64_t rgbho0_ = ( rgbho & 0x3F0000003F ) | ( ( rgbho >> 2 ) & 0x1FC000001FC0 ) | ( ( rgbho >> 3 ) & 0x7E0000007E000 );
+ uint64_t rgbho0 = ( rgbho0_ & 0x7FFFF ) | ( ( rgbho0_ >> 13 ) & 0x3FFFF80000 );
+
+ uint32_t hi = rgbv | ((rgbho0 & 0x1FFF) << 19);
+ rgbho0 >>= 13;
+ uint32_t lo = ( rgbho0 & 0x1 ) | ( ( rgbho0 & 0x1FE ) << 1 ) | ( ( rgbho0 & 0x600 ) << 2 ) | ( ( rgbho0 & 0x3F800 ) << 5 ) | ( ( rgbho0 & 0x1FC0000 ) << 6 );
+
+ uint32_t idx = ( ( rgbho >> 33 ) & 0xF ) | ( ( rgbho >> 41 ) & 0x10 ) | ( ( rgbho >> 48 ) & 0x20 );
+ lo |= g_flags[idx];
+ uint64_t result = static_cast<uint32_t>(_bswap(lo));
+ result |= static_cast<uint64_t>(static_cast<uint32_t>(_bswap(hi))) << 32;
+
+ Plane plane;
+
+ plane.plane = result;
+ plane.error = error;
+ plane.sum4 = _mm256_permute4x64_epi64(srgb, _MM_SHUFFLE(2, 3, 0, 1));
+
+ return plane;
+}
+
+static etcpak_force_inline uint64_t EncodeSelectors_AVX2( uint64_t d, const uint32_t terr[2][8], const uint32_t tsel[8], const bool rotate, const uint64_t value, const uint32_t error) noexcept
+{
+ size_t tidx[2];
+
+ // Get index of minimum error (terr[0] and terr[1])
+ __m256i err0 = _mm256_load_si256((const __m256i*)terr[0]);
+ __m256i err1 = _mm256_load_si256((const __m256i*)terr[1]);
+
+ __m256i errLo = _mm256_permute2x128_si256(err0, err1, (0) | (2 << 4));
+ __m256i errHi = _mm256_permute2x128_si256(err0, err1, (1) | (3 << 4));
+
+ __m256i errMin0 = _mm256_min_epu32(errLo, errHi);
+
+ __m256i errMin1 = _mm256_shuffle_epi32(errMin0, _MM_SHUFFLE(2, 3, 0, 1));
+ __m256i errMin2 = _mm256_min_epu32(errMin0, errMin1);
+
+ __m256i errMin3 = _mm256_shuffle_epi32(errMin2, _MM_SHUFFLE(1, 0, 3, 2));
+ __m256i errMin4 = _mm256_min_epu32(errMin3, errMin2);
+
+ __m256i errMin5 = _mm256_permute2x128_si256(errMin4, errMin4, (0) | (0 << 4));
+ __m256i errMin6 = _mm256_permute2x128_si256(errMin4, errMin4, (1) | (1 << 4));
+
+ __m256i errMask0 = _mm256_cmpeq_epi32(errMin5, err0);
+ __m256i errMask1 = _mm256_cmpeq_epi32(errMin6, err1);
+
+ uint32_t mask0 = _mm256_movemask_epi8(errMask0);
+ uint32_t mask1 = _mm256_movemask_epi8(errMask1);
+
+ tidx[0] = _bit_scan_forward(mask0) >> 2;
+ tidx[1] = _bit_scan_forward(mask1) >> 2;
+
+ if ((terr[0][tidx[0]] + terr[1][tidx[1]]) >= error)
+ {
+ return value;
+ }
+
+ d |= tidx[0] << 26;
+ d |= tidx[1] << 29;
+
+ unsigned int t0 = tsel[tidx[0]];
+ unsigned int t1 = tsel[tidx[1]];
+
+ if (!rotate)
+ {
+ t0 &= 0xFF00FF00;
+ t1 &= 0x00FF00FF;
+ }
+ else
+ {
+ t0 &= 0xCCCCCCCC;
+ t1 &= 0x33333333;
+ }
+
+ // Flip selectors from sign bit
+ unsigned int t2 = (t0 | t1) ^ 0xFFFF0000;
+
+ return d | static_cast<uint64_t>(_bswap(t2)) << 32;
+}
+
+#endif
+
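+// Computes the average color of each candidate ETC1 sub-block: a[0]/a[1] are the
+// two 4x2 row halves and a[2]/a[3] the two 2x4 column halves, each averaged over
+// 8 pixels (see the scalar fallback for the exact pairing).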
+static etcpak_force_inline void Average( const uint8_t* data, v4i* a )
+{
+#ifdef __SSE4_1__
+ __m128i d0 = _mm_loadu_si128(((__m128i*)data) + 0);
+ __m128i d1 = _mm_loadu_si128(((__m128i*)data) + 1);
+ __m128i d2 = _mm_loadu_si128(((__m128i*)data) + 2);
+ __m128i d3 = _mm_loadu_si128(((__m128i*)data) + 3);
+
+ __m128i d0l = _mm_unpacklo_epi8(d0, _mm_setzero_si128());
+ __m128i d0h = _mm_unpackhi_epi8(d0, _mm_setzero_si128());
+ __m128i d1l = _mm_unpacklo_epi8(d1, _mm_setzero_si128());
+ __m128i d1h = _mm_unpackhi_epi8(d1, _mm_setzero_si128());
+ __m128i d2l = _mm_unpacklo_epi8(d2, _mm_setzero_si128());
+ __m128i d2h = _mm_unpackhi_epi8(d2, _mm_setzero_si128());
+ __m128i d3l = _mm_unpacklo_epi8(d3, _mm_setzero_si128());
+ __m128i d3h = _mm_unpackhi_epi8(d3, _mm_setzero_si128());
+
+ __m128i sum0 = _mm_add_epi16(d0l, d1l);
+ __m128i sum1 = _mm_add_epi16(d0h, d1h);
+ __m128i sum2 = _mm_add_epi16(d2l, d3l);
+ __m128i sum3 = _mm_add_epi16(d2h, d3h);
+
+ __m128i sum0l = _mm_unpacklo_epi16(sum0, _mm_setzero_si128());
+ __m128i sum0h = _mm_unpackhi_epi16(sum0, _mm_setzero_si128());
+ __m128i sum1l = _mm_unpacklo_epi16(sum1, _mm_setzero_si128());
+ __m128i sum1h = _mm_unpackhi_epi16(sum1, _mm_setzero_si128());
+ __m128i sum2l = _mm_unpacklo_epi16(sum2, _mm_setzero_si128());
+ __m128i sum2h = _mm_unpackhi_epi16(sum2, _mm_setzero_si128());
+ __m128i sum3l = _mm_unpacklo_epi16(sum3, _mm_setzero_si128());
+ __m128i sum3h = _mm_unpackhi_epi16(sum3, _mm_setzero_si128());
+
+ __m128i b0 = _mm_add_epi32(sum0l, sum0h);
+ __m128i b1 = _mm_add_epi32(sum1l, sum1h);
+ __m128i b2 = _mm_add_epi32(sum2l, sum2h);
+ __m128i b3 = _mm_add_epi32(sum3l, sum3h);
+
+ __m128i a0 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b2, b3), _mm_set1_epi32(4)), 3);
+ __m128i a1 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b0, b1), _mm_set1_epi32(4)), 3);
+ __m128i a2 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b1, b3), _mm_set1_epi32(4)), 3);
+ __m128i a3 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b0, b2), _mm_set1_epi32(4)), 3);
+
+ _mm_storeu_si128((__m128i*)&a[0], _mm_packus_epi32(_mm_shuffle_epi32(a0, _MM_SHUFFLE(3, 0, 1, 2)), _mm_shuffle_epi32(a1, _MM_SHUFFLE(3, 0, 1, 2))));
+ _mm_storeu_si128((__m128i*)&a[2], _mm_packus_epi32(_mm_shuffle_epi32(a2, _MM_SHUFFLE(3, 0, 1, 2)), _mm_shuffle_epi32(a3, _MM_SHUFFLE(3, 0, 1, 2))));
+#elif defined __ARM_NEON
+ uint8x16x2_t t0 = vzipq_u8(vld1q_u8(data + 0), uint8x16_t());
+ uint8x16x2_t t1 = vzipq_u8(vld1q_u8(data + 16), uint8x16_t());
+ uint8x16x2_t t2 = vzipq_u8(vld1q_u8(data + 32), uint8x16_t());
+ uint8x16x2_t t3 = vzipq_u8(vld1q_u8(data + 48), uint8x16_t());
+
+ uint16x8x2_t d0 = { vreinterpretq_u16_u8(t0.val[0]), vreinterpretq_u16_u8(t0.val[1]) };
+ uint16x8x2_t d1 = { vreinterpretq_u16_u8(t1.val[0]), vreinterpretq_u16_u8(t1.val[1]) };
+ uint16x8x2_t d2 = { vreinterpretq_u16_u8(t2.val[0]), vreinterpretq_u16_u8(t2.val[1]) };
+ uint16x8x2_t d3 = { vreinterpretq_u16_u8(t3.val[0]), vreinterpretq_u16_u8(t3.val[1]) };
+
+ uint16x8x2_t s0 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d0.val[0] ), vreinterpretq_s16_u16( d1.val[0] ) ) ), uint16x8_t());
+ uint16x8x2_t s1 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d0.val[1] ), vreinterpretq_s16_u16( d1.val[1] ) ) ), uint16x8_t());
+ uint16x8x2_t s2 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d2.val[0] ), vreinterpretq_s16_u16( d3.val[0] ) ) ), uint16x8_t());
+ uint16x8x2_t s3 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d2.val[1] ), vreinterpretq_s16_u16( d3.val[1] ) ) ), uint16x8_t());
+
+ uint32x4x2_t sum0 = { vreinterpretq_u32_u16(s0.val[0]), vreinterpretq_u32_u16(s0.val[1]) };
+ uint32x4x2_t sum1 = { vreinterpretq_u32_u16(s1.val[0]), vreinterpretq_u32_u16(s1.val[1]) };
+ uint32x4x2_t sum2 = { vreinterpretq_u32_u16(s2.val[0]), vreinterpretq_u32_u16(s2.val[1]) };
+ uint32x4x2_t sum3 = { vreinterpretq_u32_u16(s3.val[0]), vreinterpretq_u32_u16(s3.val[1]) };
+
+ uint32x4_t b0 = vaddq_u32(sum0.val[0], sum0.val[1]);
+ uint32x4_t b1 = vaddq_u32(sum1.val[0], sum1.val[1]);
+ uint32x4_t b2 = vaddq_u32(sum2.val[0], sum2.val[1]);
+ uint32x4_t b3 = vaddq_u32(sum3.val[0], sum3.val[1]);
+
+ uint32x4_t a0 = vshrq_n_u32(vqaddq_u32(vqaddq_u32(b2, b3), vdupq_n_u32(4)), 3);
+ uint32x4_t a1 = vshrq_n_u32(vqaddq_u32(vqaddq_u32(b0, b1), vdupq_n_u32(4)), 3);
+ uint32x4_t a2 = vshrq_n_u32(vqaddq_u32(vqaddq_u32(b1, b3), vdupq_n_u32(4)), 3);
+ uint32x4_t a3 = vshrq_n_u32(vqaddq_u32(vqaddq_u32(b0, b2), vdupq_n_u32(4)), 3);
+
+ uint16x8_t o0 = vcombine_u16(vqmovun_s32(vreinterpretq_s32_u32( a0 )), vqmovun_s32(vreinterpretq_s32_u32( a1 )));
+ uint16x8_t o1 = vcombine_u16(vqmovun_s32(vreinterpretq_s32_u32( a2 )), vqmovun_s32(vreinterpretq_s32_u32( a3 )));
+
+ a[0] = v4i{o0[2], o0[1], o0[0], 0};
+ a[1] = v4i{o0[6], o0[5], o0[4], 0};
+ a[2] = v4i{o1[2], o1[1], o1[0], 0};
+ a[3] = v4i{o1[6], o1[5], o1[4], 0};
+#else
+ uint32_t r[4];
+ uint32_t g[4];
+ uint32_t b[4];
+
+ memset(r, 0, sizeof(r));
+ memset(g, 0, sizeof(g));
+ memset(b, 0, sizeof(b));
+
+ for( int j=0; j<4; j++ )
+ {
+ for( int i=0; i<4; i++ )
+ {
+ int index = (j & 2) + (i >> 1);
+ b[index] += *data++;
+ g[index] += *data++;
+ r[index] += *data++;
+ data++;
+ }
+ }
+
+ a[0] = v4i{ uint16_t( (r[2] + r[3] + 4) / 8 ), uint16_t( (g[2] + g[3] + 4) / 8 ), uint16_t( (b[2] + b[3] + 4) / 8 ), 0};
+ a[1] = v4i{ uint16_t( (r[0] + r[1] + 4) / 8 ), uint16_t( (g[0] + g[1] + 4) / 8 ), uint16_t( (b[0] + b[1] + 4) / 8 ), 0};
+ a[2] = v4i{ uint16_t( (r[1] + r[3] + 4) / 8 ), uint16_t( (g[1] + g[3] + 4) / 8 ), uint16_t( (b[1] + b[3] + 4) / 8 ), 0};
+ a[3] = v4i{ uint16_t( (r[0] + r[2] + 4) / 8 ), uint16_t( (g[0] + g[2] + 4) / 8 ), uint16_t( (b[0] + b[2] + 4) / 8 ), 0};
+#endif
+}
+
+static etcpak_force_inline void CalcErrorBlock( const uint8_t* data, unsigned int err[4][4] )
+{
+#ifdef __SSE4_1__
+ __m128i d0 = _mm_loadu_si128(((__m128i*)data) + 0);
+ __m128i d1 = _mm_loadu_si128(((__m128i*)data) + 1);
+ __m128i d2 = _mm_loadu_si128(((__m128i*)data) + 2);
+ __m128i d3 = _mm_loadu_si128(((__m128i*)data) + 3);
+
+ __m128i dm0 = _mm_and_si128(d0, _mm_set1_epi32(0x00FFFFFF));
+ __m128i dm1 = _mm_and_si128(d1, _mm_set1_epi32(0x00FFFFFF));
+ __m128i dm2 = _mm_and_si128(d2, _mm_set1_epi32(0x00FFFFFF));
+ __m128i dm3 = _mm_and_si128(d3, _mm_set1_epi32(0x00FFFFFF));
+
+ __m128i d0l = _mm_unpacklo_epi8(dm0, _mm_setzero_si128());
+ __m128i d0h = _mm_unpackhi_epi8(dm0, _mm_setzero_si128());
+ __m128i d1l = _mm_unpacklo_epi8(dm1, _mm_setzero_si128());
+ __m128i d1h = _mm_unpackhi_epi8(dm1, _mm_setzero_si128());
+ __m128i d2l = _mm_unpacklo_epi8(dm2, _mm_setzero_si128());
+ __m128i d2h = _mm_unpackhi_epi8(dm2, _mm_setzero_si128());
+ __m128i d3l = _mm_unpacklo_epi8(dm3, _mm_setzero_si128());
+ __m128i d3h = _mm_unpackhi_epi8(dm3, _mm_setzero_si128());
+
+ __m128i sum0 = _mm_add_epi16(d0l, d1l);
+ __m128i sum1 = _mm_add_epi16(d0h, d1h);
+ __m128i sum2 = _mm_add_epi16(d2l, d3l);
+ __m128i sum3 = _mm_add_epi16(d2h, d3h);
+
+ __m128i sum0l = _mm_unpacklo_epi16(sum0, _mm_setzero_si128());
+ __m128i sum0h = _mm_unpackhi_epi16(sum0, _mm_setzero_si128());
+ __m128i sum1l = _mm_unpacklo_epi16(sum1, _mm_setzero_si128());
+ __m128i sum1h = _mm_unpackhi_epi16(sum1, _mm_setzero_si128());
+ __m128i sum2l = _mm_unpacklo_epi16(sum2, _mm_setzero_si128());
+ __m128i sum2h = _mm_unpackhi_epi16(sum2, _mm_setzero_si128());
+ __m128i sum3l = _mm_unpacklo_epi16(sum3, _mm_setzero_si128());
+ __m128i sum3h = _mm_unpackhi_epi16(sum3, _mm_setzero_si128());
+
+ __m128i b0 = _mm_add_epi32(sum0l, sum0h);
+ __m128i b1 = _mm_add_epi32(sum1l, sum1h);
+ __m128i b2 = _mm_add_epi32(sum2l, sum2h);
+ __m128i b3 = _mm_add_epi32(sum3l, sum3h);
+
+ __m128i a0 = _mm_add_epi32(b2, b3);
+ __m128i a1 = _mm_add_epi32(b0, b1);
+ __m128i a2 = _mm_add_epi32(b1, b3);
+ __m128i a3 = _mm_add_epi32(b0, b2);
+
+ _mm_storeu_si128((__m128i*)&err[0], a0);
+ _mm_storeu_si128((__m128i*)&err[1], a1);
+ _mm_storeu_si128((__m128i*)&err[2], a2);
+ _mm_storeu_si128((__m128i*)&err[3], a3);
+#elif defined __ARM_NEON
+ uint8x16x2_t t0 = vzipq_u8(vld1q_u8(data + 0), uint8x16_t());
+ uint8x16x2_t t1 = vzipq_u8(vld1q_u8(data + 16), uint8x16_t());
+ uint8x16x2_t t2 = vzipq_u8(vld1q_u8(data + 32), uint8x16_t());
+ uint8x16x2_t t3 = vzipq_u8(vld1q_u8(data + 48), uint8x16_t());
+
+ uint16x8x2_t d0 = { vreinterpretq_u16_u8(t0.val[0]), vreinterpretq_u16_u8(t0.val[1]) };
+ uint16x8x2_t d1 = { vreinterpretq_u16_u8(t1.val[0]), vreinterpretq_u16_u8(t1.val[1]) };
+ uint16x8x2_t d2 = { vreinterpretq_u16_u8(t2.val[0]), vreinterpretq_u16_u8(t2.val[1]) };
+ uint16x8x2_t d3 = { vreinterpretq_u16_u8(t3.val[0]), vreinterpretq_u16_u8(t3.val[1]) };
+
+ uint16x8x2_t s0 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d0.val[0] ), vreinterpretq_s16_u16( d1.val[0] ))), uint16x8_t());
+ uint16x8x2_t s1 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d0.val[1] ), vreinterpretq_s16_u16( d1.val[1] ))), uint16x8_t());
+ uint16x8x2_t s2 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d2.val[0] ), vreinterpretq_s16_u16( d3.val[0] ))), uint16x8_t());
+ uint16x8x2_t s3 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d2.val[1] ), vreinterpretq_s16_u16( d3.val[1] ))), uint16x8_t());
+
+ uint32x4x2_t sum0 = { vreinterpretq_u32_u16(s0.val[0]), vreinterpretq_u32_u16(s0.val[1]) };
+ uint32x4x2_t sum1 = { vreinterpretq_u32_u16(s1.val[0]), vreinterpretq_u32_u16(s1.val[1]) };
+ uint32x4x2_t sum2 = { vreinterpretq_u32_u16(s2.val[0]), vreinterpretq_u32_u16(s2.val[1]) };
+ uint32x4x2_t sum3 = { vreinterpretq_u32_u16(s3.val[0]), vreinterpretq_u32_u16(s3.val[1]) };
+
+ uint32x4_t b0 = vaddq_u32(sum0.val[0], sum0.val[1]);
+ uint32x4_t b1 = vaddq_u32(sum1.val[0], sum1.val[1]);
+ uint32x4_t b2 = vaddq_u32(sum2.val[0], sum2.val[1]);
+ uint32x4_t b3 = vaddq_u32(sum3.val[0], sum3.val[1]);
+
+ uint32x4_t a0 = vreinterpretq_u32_u8( vandq_u8(vreinterpretq_u8_u32( vqaddq_u32(b2, b3) ), vreinterpretq_u8_u32( vdupq_n_u32(0x00FFFFFF)) ) );
+ uint32x4_t a1 = vreinterpretq_u32_u8( vandq_u8(vreinterpretq_u8_u32( vqaddq_u32(b0, b1) ), vreinterpretq_u8_u32( vdupq_n_u32(0x00FFFFFF)) ) );
+ uint32x4_t a2 = vreinterpretq_u32_u8( vandq_u8(vreinterpretq_u8_u32( vqaddq_u32(b1, b3) ), vreinterpretq_u8_u32( vdupq_n_u32(0x00FFFFFF)) ) );
+ uint32x4_t a3 = vreinterpretq_u32_u8( vandq_u8(vreinterpretq_u8_u32( vqaddq_u32(b0, b2) ), vreinterpretq_u8_u32( vdupq_n_u32(0x00FFFFFF)) ) );
+
+ vst1q_u32(err[0], a0);
+ vst1q_u32(err[1], a1);
+ vst1q_u32(err[2], a2);
+ vst1q_u32(err[3], a3);
+#else
+ unsigned int terr[4][4];
+
+ memset(terr, 0, 16 * sizeof(unsigned int));
+
+ for( int j=0; j<4; j++ )
+ {
+ for( int i=0; i<4; i++ )
+ {
+ int index = (j & 2) + (i >> 1);
+ unsigned int d = *data++;
+ terr[index][0] += d;
+ d = *data++;
+ terr[index][1] += d;
+ d = *data++;
+ terr[index][2] += d;
+ data++;
+ }
+ }
+
+ for( int i=0; i<3; i++ )
+ {
+ err[0][i] = terr[2][i] + terr[3][i];
+ err[1][i] = terr[0][i] + terr[1][i];
+ err[2][i] = terr[1][i] + terr[3][i];
+ err[3][i] = terr[0][i] + terr[2][i];
+ }
+ for( int i=0; i<4; i++ )
+ {
+ err[i][3] = 0;
+ }
+#endif
+}
+
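+// Scores one candidate average against a sub-block. block[] holds the per-channel
+// sums over the 8 pixels of the sub-block (from CalcErrorBlock, in B/G/R order),
+// so this is the expansion of sum( (pixel - average)^2 ) with the constant
+// sum( pixel^2 ) term dropped and a large bias added to keep the value non-negative.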
+static etcpak_force_inline unsigned int CalcError( const unsigned int block[4], const v4i& average )
+{
+ unsigned int err = 0x3FFFFFFF; // Big value to prevent negative values, but small enough to prevent overflow
+ err -= block[0] * 2 * average[2];
+ err -= block[1] * 2 * average[1];
+ err -= block[2] * 2 * average[0];
+ err += 8 * ( sq( average[0] ) + sq( average[1] ) + sq( average[2] ) );
+ return err;
+}
+
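+// Turns the raw averages into the candidate base colors ETC1 can actually encode:
+// a[4..7] hold the RGB555 quantization with one sub-block clamped to a [-4, 3] delta
+// of the other (differential mode), while a[0..3] are overwritten with the RGB444
+// quantization (individual mode); both are expanded back to 8 bits per channel.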
+static etcpak_force_inline void ProcessAverages( v4i* a )
+{
+#ifdef __SSE4_1__
+ for( int i=0; i<2; i++ )
+ {
+ __m128i d = _mm_loadu_si128((__m128i*)a[i*2].data());
+
+ __m128i t = _mm_add_epi16(_mm_mullo_epi16(d, _mm_set1_epi16(31)), _mm_set1_epi16(128));
+
+ __m128i c = _mm_srli_epi16(_mm_add_epi16(t, _mm_srli_epi16(t, 8)), 8);
+
+ __m128i c1 = _mm_shuffle_epi32(c, _MM_SHUFFLE(3, 2, 3, 2));
+ __m128i diff = _mm_sub_epi16(c, c1);
+ diff = _mm_max_epi16(diff, _mm_set1_epi16(-4));
+ diff = _mm_min_epi16(diff, _mm_set1_epi16(3));
+
+ __m128i co = _mm_add_epi16(c1, diff);
+
+ c = _mm_blend_epi16(co, c, 0xF0);
+
+ __m128i a0 = _mm_or_si128(_mm_slli_epi16(c, 3), _mm_srli_epi16(c, 2));
+
+ _mm_storeu_si128((__m128i*)a[4+i*2].data(), a0);
+ }
+
+ for( int i=0; i<2; i++ )
+ {
+ __m128i d = _mm_loadu_si128((__m128i*)a[i*2].data());
+
+ __m128i t0 = _mm_add_epi16(_mm_mullo_epi16(d, _mm_set1_epi16(15)), _mm_set1_epi16(128));
+ __m128i t1 = _mm_srli_epi16(_mm_add_epi16(t0, _mm_srli_epi16(t0, 8)), 8);
+
+ __m128i t2 = _mm_or_si128(t1, _mm_slli_epi16(t1, 4));
+
+ _mm_storeu_si128((__m128i*)a[i*2].data(), t2);
+ }
+#elif defined __ARM_NEON
+ for( int i=0; i<2; i++ )
+ {
+ int16x8_t d = vld1q_s16((int16_t*)&a[i*2]);
+ int16x8_t t = vaddq_s16(vmulq_s16(d, vdupq_n_s16(31)), vdupq_n_s16(128));
+ int16x8_t c = vshrq_n_s16(vaddq_s16(t, vshrq_n_s16(t, 8)), 8);
+
+ int16x8_t c1 = vcombine_s16(vget_high_s16(c), vget_high_s16(c));
+ int16x8_t diff = vsubq_s16(c, c1);
+ diff = vmaxq_s16(diff, vdupq_n_s16(-4));
+ diff = vminq_s16(diff, vdupq_n_s16(3));
+
+ int16x8_t co = vaddq_s16(c1, diff);
+
+ c = vcombine_s16(vget_low_s16(co), vget_high_s16(c));
+
+ int16x8_t a0 = vorrq_s16(vshlq_n_s16(c, 3), vshrq_n_s16(c, 2));
+
+ vst1q_s16((int16_t*)&a[4+i*2], a0);
+ }
+
+ for( int i=0; i<2; i++ )
+ {
+ int16x8_t d = vld1q_s16((int16_t*)&a[i*2]);
+
+ int16x8_t t0 = vaddq_s16(vmulq_s16(d, vdupq_n_s16(15)), vdupq_n_s16(128));
+ int16x8_t t1 = vshrq_n_s16(vaddq_s16(t0, vshrq_n_s16(t0, 8)), 8);
+
+ int16x8_t t2 = vorrq_s16(t1, vshlq_n_s16(t1, 4));
+
+ vst1q_s16((int16_t*)&a[i*2], t2);
+ }
+#else
+ for( int i=0; i<2; i++ )
+ {
+ for( int j=0; j<3; j++ )
+ {
+ int32_t c1 = mul8bit( a[i*2+1][j], 31 );
+ int32_t c2 = mul8bit( a[i*2][j], 31 );
+
+ int32_t diff = c2 - c1;
+ if( diff > 3 ) diff = 3;
+ else if( diff < -4 ) diff = -4;
+
+ int32_t co = c1 + diff;
+
+ a[5+i*2][j] = ( c1 << 3 ) | ( c1 >> 2 );
+ a[4+i*2][j] = ( co << 3 ) | ( co >> 2 );
+ }
+ }
+
+ for( int i=0; i<4; i++ )
+ {
+ a[i][0] = g_avg2[mul8bit( a[i][0], 15 )];
+ a[i][1] = g_avg2[mul8bit( a[i][1], 15 )];
+ a[i][2] = g_avg2[mul8bit( a[i][2], 15 )];
+ }
+#endif
+}
+
+static etcpak_force_inline void EncodeAverages( uint64_t& _d, const v4i* a, size_t idx )
+{
+ auto d = _d;
+ d |= ( idx << 24 );
+ size_t base = idx << 1;
+
+ if( ( idx & 0x2 ) == 0 )
+ {
+ for( int i=0; i<3; i++ )
+ {
+ d |= uint64_t( a[base+0][i] >> 4 ) << ( i*8 );
+ d |= uint64_t( a[base+1][i] >> 4 ) << ( i*8 + 4 );
+ }
+ }
+ else
+ {
+ for( int i=0; i<3; i++ )
+ {
+ d |= uint64_t( a[base+1][i] & 0xF8 ) << ( i*8 );
+ int32_t c = ( ( a[base+0][i] & 0xF8 ) - ( a[base+1][i] & 0xF8 ) ) >> 3;
+ c &= ~0xFFFFFFF8;
+ d |= ((uint64_t)c) << ( i*8 );
+ }
+ }
+ _d = d;
+}
+
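+// Fast path for uniform blocks: if all 16 pixels equal the first one, return a
+// complete block encoding that solid color directly; a return of 0 means no
+// shortcut was taken.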
+static etcpak_force_inline uint64_t CheckSolid( const uint8_t* src )
+{
+#ifdef __SSE4_1__
+ __m128i d0 = _mm_loadu_si128(((__m128i*)src) + 0);
+ __m128i d1 = _mm_loadu_si128(((__m128i*)src) + 1);
+ __m128i d2 = _mm_loadu_si128(((__m128i*)src) + 2);
+ __m128i d3 = _mm_loadu_si128(((__m128i*)src) + 3);
+
+ __m128i c = _mm_shuffle_epi32(d0, _MM_SHUFFLE(0, 0, 0, 0));
+
+ __m128i c0 = _mm_cmpeq_epi8(d0, c);
+ __m128i c1 = _mm_cmpeq_epi8(d1, c);
+ __m128i c2 = _mm_cmpeq_epi8(d2, c);
+ __m128i c3 = _mm_cmpeq_epi8(d3, c);
+
+ __m128i m0 = _mm_and_si128(c0, c1);
+ __m128i m1 = _mm_and_si128(c2, c3);
+ __m128i m = _mm_and_si128(m0, m1);
+
+ if (!_mm_testc_si128(m, _mm_set1_epi32(-1)))
+ {
+ return 0;
+ }
+#elif defined __ARM_NEON
+ int32x4_t d0 = vld1q_s32((int32_t*)src + 0);
+ int32x4_t d1 = vld1q_s32((int32_t*)src + 4);
+ int32x4_t d2 = vld1q_s32((int32_t*)src + 8);
+ int32x4_t d3 = vld1q_s32((int32_t*)src + 12);
+
+ int32x4_t c = vdupq_n_s32(d0[0]);
+
+ int32x4_t c0 = vreinterpretq_s32_u32(vceqq_s32(d0, c));
+ int32x4_t c1 = vreinterpretq_s32_u32(vceqq_s32(d1, c));
+ int32x4_t c2 = vreinterpretq_s32_u32(vceqq_s32(d2, c));
+ int32x4_t c3 = vreinterpretq_s32_u32(vceqq_s32(d3, c));
+
+ int32x4_t m0 = vandq_s32(c0, c1);
+ int32x4_t m1 = vandq_s32(c2, c3);
+ int64x2_t m = vreinterpretq_s64_s32(vandq_s32(m0, m1));
+
+ if (m[0] != -1 || m[1] != -1)
+ {
+ return 0;
+ }
+#else
+ const uint8_t* ptr = src + 4;
+ for( int i=1; i<16; i++ )
+ {
+ if( memcmp( src, ptr, 4 ) != 0 )
+ {
+ return 0;
+ }
+ ptr += 4;
+ }
+#endif
+ return 0x02000000 |
+ ( (unsigned int)( src[0] & 0xF8 ) << 16 ) |
+ ( (unsigned int)( src[1] & 0xF8 ) << 8 ) |
+ ( (unsigned int)( src[2] & 0xF8 ) );
+}
+
+static etcpak_force_inline void PrepareAverages( v4i a[8], const uint8_t* src, unsigned int err[4] )
+{
+ Average( src, a );
+ ProcessAverages( a );
+
+ unsigned int errblock[4][4];
+ CalcErrorBlock( src, errblock );
+
+ for( int i=0; i<4; i++ )
+ {
+ err[i/2] += CalcError( errblock[i], a[i] );
+ err[2+i/2] += CalcError( errblock[i], a[i+4] );
+ }
+}
+
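+// Reference selector search: for every pixel, try all eight ETC1 modifier tables,
+// store the best selector per table in tsel[pixel][table] and accumulate the squared
+// error per table in terr[subblock][table], so the caller can pick the cheapest
+// table for each sub-block afterwards.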
+static etcpak_force_inline void FindBestFit( uint64_t terr[2][8], uint16_t tsel[16][8], v4i a[8], const uint32_t* id, const uint8_t* data )
+{
+ for( size_t i=0; i<16; i++ )
+ {
+ uint16_t* sel = tsel[i];
+ unsigned int bid = id[i];
+ uint64_t* ter = terr[bid%2];
+
+ uint8_t b = *data++;
+ uint8_t g = *data++;
+ uint8_t r = *data++;
+ data++;
+
+ int dr = a[bid][0] - r;
+ int dg = a[bid][1] - g;
+ int db = a[bid][2] - b;
+
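+ // The three channel differences are collapsed into one scalar with weights
+ // 77/151/28 (summing to 256, roughly the standard luma weights), and that
+ // value is compared against the modifier tables below.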
+#ifdef __SSE4_1__
+ // Reference implementation
+
+ __m128i pix = _mm_set1_epi32(dr * 77 + dg * 151 + db * 28);
+ // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
+ __m128i error0 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[0]));
+ __m128i error1 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[1]));
+ __m128i error2 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[0]));
+ __m128i error3 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[1]));
+
+ __m128i index0 = _mm_and_si128(_mm_cmplt_epi32(error1, error0), _mm_set1_epi32(1));
+ __m128i minError0 = _mm_min_epi32(error0, error1);
+
+ __m128i index1 = _mm_sub_epi32(_mm_set1_epi32(2), _mm_cmplt_epi32(error3, error2));
+ __m128i minError1 = _mm_min_epi32(error2, error3);
+
+ __m128i minIndex0 = _mm_blendv_epi8(index0, index1, _mm_cmplt_epi32(minError1, minError0));
+ __m128i minError = _mm_min_epi32(minError0, minError1);
+
+ // Squaring the minimum error to produce correct values when adding
+ __m128i minErrorLow = _mm_shuffle_epi32(minError, _MM_SHUFFLE(1, 1, 0, 0));
+ __m128i squareErrorLow = _mm_mul_epi32(minErrorLow, minErrorLow);
+ squareErrorLow = _mm_add_epi64(squareErrorLow, _mm_loadu_si128(((__m128i*)ter) + 0));
+ _mm_storeu_si128(((__m128i*)ter) + 0, squareErrorLow);
+ __m128i minErrorHigh = _mm_shuffle_epi32(minError, _MM_SHUFFLE(3, 3, 2, 2));
+ __m128i squareErrorHigh = _mm_mul_epi32(minErrorHigh, minErrorHigh);
+ squareErrorHigh = _mm_add_epi64(squareErrorHigh, _mm_loadu_si128(((__m128i*)ter) + 1));
+ _mm_storeu_si128(((__m128i*)ter) + 1, squareErrorHigh);
+
+ // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
+ error0 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[2]));
+ error1 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[3]));
+ error2 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[2]));
+ error3 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[3]));
+
+ index0 = _mm_and_si128(_mm_cmplt_epi32(error1, error0), _mm_set1_epi32(1));
+ minError0 = _mm_min_epi32(error0, error1);
+
+ index1 = _mm_sub_epi32(_mm_set1_epi32(2), _mm_cmplt_epi32(error3, error2));
+ minError1 = _mm_min_epi32(error2, error3);
+
+ __m128i minIndex1 = _mm_blendv_epi8(index0, index1, _mm_cmplt_epi32(minError1, minError0));
+ minError = _mm_min_epi32(minError0, minError1);
+
+ // Squaring the minimum error to produce correct values when adding
+ minErrorLow = _mm_shuffle_epi32(minError, _MM_SHUFFLE(1, 1, 0, 0));
+ squareErrorLow = _mm_mul_epi32(minErrorLow, minErrorLow);
+ squareErrorLow = _mm_add_epi64(squareErrorLow, _mm_loadu_si128(((__m128i*)ter) + 2));
+ _mm_storeu_si128(((__m128i*)ter) + 2, squareErrorLow);
+ minErrorHigh = _mm_shuffle_epi32(minError, _MM_SHUFFLE(3, 3, 2, 2));
+ squareErrorHigh = _mm_mul_epi32(minErrorHigh, minErrorHigh);
+ squareErrorHigh = _mm_add_epi64(squareErrorHigh, _mm_loadu_si128(((__m128i*)ter) + 3));
+ _mm_storeu_si128(((__m128i*)ter) + 3, squareErrorHigh);
+ __m128i minIndex = _mm_packs_epi32(minIndex0, minIndex1);
+ _mm_storeu_si128((__m128i*)sel, minIndex);
+#elif defined __ARM_NEON
+ int32x4_t pix = vdupq_n_s32(dr * 77 + dg * 151 + db * 28);
+
+ // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
+ uint32x4_t error0 = vreinterpretq_u32_s32(vabsq_s32(vaddq_s32(pix, g_table256_NEON[0])));
+ uint32x4_t error1 = vreinterpretq_u32_s32(vabsq_s32(vaddq_s32(pix, g_table256_NEON[1])));
+ uint32x4_t error2 = vreinterpretq_u32_s32(vabsq_s32(vsubq_s32(pix, g_table256_NEON[0])));
+ uint32x4_t error3 = vreinterpretq_u32_s32(vabsq_s32(vsubq_s32(pix, g_table256_NEON[1])));
+
+ uint32x4_t index0 = vandq_u32(vcltq_u32(error1, error0), vdupq_n_u32(1));
+ uint32x4_t minError0 = vminq_u32(error0, error1);
+
+ uint32x4_t index1 = vreinterpretq_u32_s32(vsubq_s32(vdupq_n_s32(2), vreinterpretq_s32_u32(vcltq_u32(error3, error2))));
+ uint32x4_t minError1 = vminq_u32(error2, error3);
+
+ uint32x4_t blendMask = vcltq_u32(minError1, minError0);
+ uint32x4_t minIndex0 = vorrq_u32(vbicq_u32(index0, blendMask), vandq_u32(index1, blendMask));
+ uint32x4_t minError = vminq_u32(minError0, minError1);
+
+ // Squaring the minimum error to produce correct values when adding
+ uint32x4_t squareErrorLow = vmulq_u32(minError, minError);
+ uint32x4_t squareErrorHigh = vshrq_n_u32(vreinterpretq_u32_s32(vqdmulhq_s32(vreinterpretq_s32_u32(minError), vreinterpretq_s32_u32(minError))), 1);
+ uint32x4x2_t squareErrorZip = vzipq_u32(squareErrorLow, squareErrorHigh);
+ uint64x2x2_t squareError = { vreinterpretq_u64_u32(squareErrorZip.val[0]), vreinterpretq_u64_u32(squareErrorZip.val[1]) };
+ squareError.val[0] = vaddq_u64(squareError.val[0], vld1q_u64(ter + 0));
+ squareError.val[1] = vaddq_u64(squareError.val[1], vld1q_u64(ter + 2));
+ vst1q_u64(ter + 0, squareError.val[0]);
+ vst1q_u64(ter + 2, squareError.val[1]);
+
+ // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
+ error0 = vreinterpretq_u32_s32( vabsq_s32(vaddq_s32(pix, g_table256_NEON[2])));
+ error1 = vreinterpretq_u32_s32( vabsq_s32(vaddq_s32(pix, g_table256_NEON[3])));
+ error2 = vreinterpretq_u32_s32( vabsq_s32(vsubq_s32(pix, g_table256_NEON[2])));
+ error3 = vreinterpretq_u32_s32( vabsq_s32(vsubq_s32(pix, g_table256_NEON[3])));
+
+ index0 = vandq_u32(vcltq_u32(error1, error0), vdupq_n_u32(1));
+ minError0 = vminq_u32(error0, error1);
+
+ index1 = vreinterpretq_u32_s32( vsubq_s32(vdupq_n_s32(2), vreinterpretq_s32_u32(vcltq_u32(error3, error2))) );
+ minError1 = vminq_u32(error2, error3);
+
+ blendMask = vcltq_u32(minError1, minError0);
+ uint32x4_t minIndex1 = vorrq_u32(vbicq_u32(index0, blendMask), vandq_u32(index1, blendMask));
+ minError = vminq_u32(minError0, minError1);
+
+ // Squaring the minimum error to produce correct values when adding
+ squareErrorLow = vmulq_u32(minError, minError);
+ squareErrorHigh = vshrq_n_u32(vreinterpretq_u32_s32( vqdmulhq_s32(vreinterpretq_s32_u32(minError), vreinterpretq_s32_u32(minError)) ), 1 );
+ squareErrorZip = vzipq_u32(squareErrorLow, squareErrorHigh);
+ squareError.val[0] = vaddq_u64(vreinterpretq_u64_u32( squareErrorZip.val[0] ), vld1q_u64(ter + 4));
+ squareError.val[1] = vaddq_u64(vreinterpretq_u64_u32( squareErrorZip.val[1] ), vld1q_u64(ter + 6));
+ vst1q_u64(ter + 4, squareError.val[0]);
+ vst1q_u64(ter + 6, squareError.val[1]);
+
+ uint16x8_t minIndex = vcombine_u16(vqmovn_u32(minIndex0), vqmovn_u32(minIndex1));
+ vst1q_u16(sel, minIndex);
+#else
+ int pix = dr * 77 + dg * 151 + db * 28;
+
+ for( int t=0; t<8; t++ )
+ {
+ const int64_t* tab = g_table256[t];
+ unsigned int idx = 0;
+ uint64_t err = sq( tab[0] + pix );
+ for( int j=1; j<4; j++ )
+ {
+ uint64_t local = sq( tab[j] + pix );
+ if( local < err )
+ {
+ err = local;
+ idx = j;
+ }
+ }
+ *sel++ = idx;
+ *ter++ += err;
+ }
+#endif
+ }
+}
+
+#if defined __SSE4_1__ || defined __ARM_NEON
+// Non-reference implementation, but faster. Produces the same results as the AVX2 version.
+static etcpak_force_inline void FindBestFit( uint32_t terr[2][8], uint16_t tsel[16][8], v4i a[8], const uint32_t* id, const uint8_t* data )
+{
+ for( size_t i=0; i<16; i++ )
+ {
+ uint16_t* sel = tsel[i];
+ unsigned int bid = id[i];
+ uint32_t* ter = terr[bid%2];
+
+ uint8_t b = *data++;
+ uint8_t g = *data++;
+ uint8_t r = *data++;
+ data++;
+
+ int dr = a[bid][0] - r;
+ int dg = a[bid][1] - g;
+ int db = a[bid][2] - b;
+
+#ifdef __SSE4_1__
+ // The scaling values are divided by two and rounded, to allow the differences to be in the range of signed int16
+ // This produces slightly different results, but is significantly faster
+ __m128i pixel = _mm_set1_epi16(dr * 38 + dg * 76 + db * 14);
+ __m128i pix = _mm_abs_epi16(pixel);
+
+ // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
+ // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
+ __m128i error0 = _mm_abs_epi16(_mm_sub_epi16(pix, g_table128_SIMD[0]));
+ __m128i error1 = _mm_abs_epi16(_mm_sub_epi16(pix, g_table128_SIMD[1]));
+
+ __m128i index = _mm_and_si128(_mm_cmplt_epi16(error1, error0), _mm_set1_epi16(1));
+ __m128i minError = _mm_min_epi16(error0, error1);
+
+ // Exploit the symmetry of the selector table and use the sign bit.
+ // This produces slightly different results, but is needed to produce the same results as the AVX2 implementation.
+ __m128i indexBit = _mm_andnot_si128(_mm_srli_epi16(pixel, 15), _mm_set1_epi8(-1));
+ __m128i minIndex = _mm_or_si128(index, _mm_add_epi16(indexBit, indexBit));
+
+ // Squaring the minimum error to produce correct values when adding
+ __m128i squareErrorLo = _mm_mullo_epi16(minError, minError);
+ __m128i squareErrorHi = _mm_mulhi_epi16(minError, minError);
+
+ __m128i squareErrorLow = _mm_unpacklo_epi16(squareErrorLo, squareErrorHi);
+ __m128i squareErrorHigh = _mm_unpackhi_epi16(squareErrorLo, squareErrorHi);
+
+ squareErrorLow = _mm_add_epi32(squareErrorLow, _mm_loadu_si128(((__m128i*)ter) + 0));
+ _mm_storeu_si128(((__m128i*)ter) + 0, squareErrorLow);
+ squareErrorHigh = _mm_add_epi32(squareErrorHigh, _mm_loadu_si128(((__m128i*)ter) + 1));
+ _mm_storeu_si128(((__m128i*)ter) + 1, squareErrorHigh);
+
+ _mm_storeu_si128((__m128i*)sel, minIndex);
+#elif defined __ARM_NEON
+ int16x8_t pixel = vdupq_n_s16( dr * 38 + dg * 76 + db * 14 );
+ int16x8_t pix = vabsq_s16( pixel );
+
+ int16x8_t error0 = vabsq_s16( vsubq_s16( pix, g_table128_NEON[0] ) );
+ int16x8_t error1 = vabsq_s16( vsubq_s16( pix, g_table128_NEON[1] ) );
+
+ int16x8_t index = vandq_s16( vreinterpretq_s16_u16( vcltq_s16( error1, error0 ) ), vdupq_n_s16( 1 ) );
+ int16x8_t minError = vminq_s16( error0, error1 );
+
+ int16x8_t indexBit = vandq_s16( vmvnq_s16( vshrq_n_s16( pixel, 15 ) ), vdupq_n_s16( -1 ) );
+ int16x8_t minIndex = vorrq_s16( index, vaddq_s16( indexBit, indexBit ) );
+
+ int16x4_t minErrorLow = vget_low_s16( minError );
+ int16x4_t minErrorHigh = vget_high_s16( minError );
+
+ int32x4_t squareErrorLow = vmull_s16( minErrorLow, minErrorLow );
+ int32x4_t squareErrorHigh = vmull_s16( minErrorHigh, minErrorHigh );
+
+ int32x4_t squareErrorSumLow = vaddq_s32( squareErrorLow, vld1q_s32( (int32_t*)ter ) );
+ int32x4_t squareErrorSumHigh = vaddq_s32( squareErrorHigh, vld1q_s32( (int32_t*)ter + 4 ) );
+
+ vst1q_s32( (int32_t*)ter, squareErrorSumLow );
+ vst1q_s32( (int32_t*)ter + 4, squareErrorSumHigh );
+
+ vst1q_s16( (int16_t*)sel, minIndex );
+#endif
+ }
+}
+#endif
+
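+// Note: convert6()/convert7() below quantize a planar base-color component (clamped to
+// 0..1023) down to the 6-bit and 7-bit endpoint fields of the ETC2 planar mode, with the
+// rounding folded into the shift arithmetic.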
+static etcpak_force_inline uint8_t convert6(float f)
+{
+ int i = (std::min(std::max(static_cast<int>(f), 0), 1023) - 15) >> 1;
+ return (i + 11 - ((i + 11) >> 7) - ((i + 4) >> 7)) >> 3;
+}
+
+static etcpak_force_inline uint8_t convert7(float f)
+{
+ int i = (std::min(std::max(static_cast<int>(f), 0), 1023) - 15) >> 1;
+ return (i + 9 - ((i + 9) >> 8) - ((i + 6) >> 8)) >> 2;
+}
+
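+// Reference estimator for the ETC2 planar mode: it sums the block, fits horizontal and
+// vertical color gradients with the fixed -255/-85/85/255 weights, quantizes the O/H/V
+// corner colors to RGB676 via convert6()/convert7(), accumulates a luma-weighted
+// (38/76/14) squared error against the source pixels, and packs the planar block so the
+// caller can compare it against the table-based encoding.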
+static etcpak_force_inline std::pair<uint64_t, uint64_t> Planar(const uint8_t* src)
+{
+ int32_t r = 0;
+ int32_t g = 0;
+ int32_t b = 0;
+
+ for (int i = 0; i < 16; ++i)
+ {
+ b += src[i * 4 + 0];
+ g += src[i * 4 + 1];
+ r += src[i * 4 + 2];
+ }
+
+ int32_t difRyz = 0;
+ int32_t difGyz = 0;
+ int32_t difByz = 0;
+ int32_t difRxz = 0;
+ int32_t difGxz = 0;
+ int32_t difBxz = 0;
+
+ const int32_t scaling[] = { -255, -85, 85, 255 };
+
+ for (int i = 0; i < 16; ++i)
+ {
+ int32_t difB = (static_cast<int>(src[i * 4 + 0]) << 4) - b;
+ int32_t difG = (static_cast<int>(src[i * 4 + 1]) << 4) - g;
+ int32_t difR = (static_cast<int>(src[i * 4 + 2]) << 4) - r;
+
+ difRyz += difR * scaling[i % 4];
+ difGyz += difG * scaling[i % 4];
+ difByz += difB * scaling[i % 4];
+
+ difRxz += difR * scaling[i / 4];
+ difGxz += difG * scaling[i / 4];
+ difBxz += difB * scaling[i / 4];
+ }
+
+ const float scale = -4.0f / ((255 * 255 * 8.0f + 85 * 85 * 8.0f) * 16.0f);
+
+ float aR = difRxz * scale;
+ float aG = difGxz * scale;
+ float aB = difBxz * scale;
+
+ float bR = difRyz * scale;
+ float bG = difGyz * scale;
+ float bB = difByz * scale;
+
+ float dR = r * (4.0f / 16.0f);
+ float dG = g * (4.0f / 16.0f);
+ float dB = b * (4.0f / 16.0f);
+
+ // calculating the three colors RGBO, RGBH, and RGBV. RGB = df - af * x - bf * y;
+ float cofR = std::fma(aR, 255.0f, std::fma(bR, 255.0f, dR));
+ float cofG = std::fma(aG, 255.0f, std::fma(bG, 255.0f, dG));
+ float cofB = std::fma(aB, 255.0f, std::fma(bB, 255.0f, dB));
+ float chfR = std::fma(aR, -425.0f, std::fma(bR, 255.0f, dR));
+ float chfG = std::fma(aG, -425.0f, std::fma(bG, 255.0f, dG));
+ float chfB = std::fma(aB, -425.0f, std::fma(bB, 255.0f, dB));
+ float cvfR = std::fma(aR, 255.0f, std::fma(bR, -425.0f, dR));
+ float cvfG = std::fma(aG, 255.0f, std::fma(bG, -425.0f, dG));
+ float cvfB = std::fma(aB, 255.0f, std::fma(bB, -425.0f, dB));
+
+ // convert to r6g7b6
+ int32_t coR = convert6(cofR);
+ int32_t coG = convert7(cofG);
+ int32_t coB = convert6(cofB);
+ int32_t chR = convert6(chfR);
+ int32_t chG = convert7(chfG);
+ int32_t chB = convert6(chfB);
+ int32_t cvR = convert6(cvfR);
+ int32_t cvG = convert7(cvfG);
+ int32_t cvB = convert6(cvfB);
+
+ // Error calculation
+ auto ro0 = coR;
+ auto go0 = coG;
+ auto bo0 = coB;
+ auto ro1 = (ro0 >> 4) | (ro0 << 2);
+ auto go1 = (go0 >> 6) | (go0 << 1);
+ auto bo1 = (bo0 >> 4) | (bo0 << 2);
+ auto ro2 = (ro1 << 2) + 2;
+ auto go2 = (go1 << 2) + 2;
+ auto bo2 = (bo1 << 2) + 2;
+
+ auto rh0 = chR;
+ auto gh0 = chG;
+ auto bh0 = chB;
+ auto rh1 = (rh0 >> 4) | (rh0 << 2);
+ auto gh1 = (gh0 >> 6) | (gh0 << 1);
+ auto bh1 = (bh0 >> 4) | (bh0 << 2);
+
+ auto rh2 = rh1 - ro1;
+ auto gh2 = gh1 - go1;
+ auto bh2 = bh1 - bo1;
+
+ auto rv0 = cvR;
+ auto gv0 = cvG;
+ auto bv0 = cvB;
+ auto rv1 = (rv0 >> 4) | (rv0 << 2);
+ auto gv1 = (gv0 >> 6) | (gv0 << 1);
+ auto bv1 = (bv0 >> 4) | (bv0 << 2);
+
+ auto rv2 = rv1 - ro1;
+ auto gv2 = gv1 - go1;
+ auto bv2 = bv1 - bo1;
+
+ uint64_t error = 0;
+
+ for (int i = 0; i < 16; ++i)
+ {
+ int32_t cR = clampu8((rh2 * (i / 4) + rv2 * (i % 4) + ro2) >> 2);
+ int32_t cG = clampu8((gh2 * (i / 4) + gv2 * (i % 4) + go2) >> 2);
+ int32_t cB = clampu8((bh2 * (i / 4) + bv2 * (i % 4) + bo2) >> 2);
+
+ int32_t difB = static_cast<int>(src[i * 4 + 0]) - cB;
+ int32_t difG = static_cast<int>(src[i * 4 + 1]) - cG;
+ int32_t difR = static_cast<int>(src[i * 4 + 2]) - cR;
+
+ int32_t dif = difR * 38 + difG * 76 + difB * 14;
+
+ error += dif * dif;
+ }
+
+ /**/
+ uint32_t rgbv = cvB | (cvG << 6) | (cvR << 13);
+ uint32_t rgbh = chB | (chG << 6) | (chR << 13);
+ uint32_t hi = rgbv | ((rgbh & 0x1FFF) << 19);
+ uint32_t lo = (chR & 0x1) | 0x2 | ((chR << 1) & 0x7C);
+ lo |= ((coB & 0x07) << 7) | ((coB & 0x18) << 8) | ((coB & 0x20) << 11);
+ lo |= ((coG & 0x3F) << 17) | ((coG & 0x40) << 18);
+ lo |= coR << 25;
+
+ const auto idx = (coR & 0x20) | ((coG & 0x20) >> 1) | ((coB & 0x1E) >> 1);
+
+ lo |= g_flags[idx];
+
+ uint64_t result = static_cast<uint32_t>(_bswap(lo));
+ result |= static_cast<uint64_t>(static_cast<uint32_t>(_bswap(hi))) << 32;
+
+ return std::make_pair(result, error);
+}
+
+#ifdef __ARM_NEON
+
+static etcpak_force_inline int32x2_t Planar_NEON_DifXZ( int16x8_t dif_lo, int16x8_t dif_hi )
+{
+ int32x4_t dif0 = vmull_n_s16( vget_low_s16( dif_lo ), -255 );
+ int32x4_t dif1 = vmull_n_s16( vget_high_s16( dif_lo ), -85 );
+ int32x4_t dif2 = vmull_n_s16( vget_low_s16( dif_hi ), 85 );
+ int32x4_t dif3 = vmull_n_s16( vget_high_s16( dif_hi ), 255 );
+ int32x4_t dif4 = vaddq_s32( vaddq_s32( dif0, dif1 ), vaddq_s32( dif2, dif3 ) );
+
+#ifndef __aarch64__
+ int32x2_t dif5 = vpadd_s32( vget_low_s32( dif4 ), vget_high_s32( dif4 ) );
+ return vpadd_s32( dif5, dif5 );
+#else
+ return vdup_n_s32( vaddvq_s32( dif4 ) );
+#endif
+}
+
+static etcpak_force_inline int32x2_t Planar_NEON_DifYZ( int16x8_t dif_lo, int16x8_t dif_hi )
+{
+ int16x4_t scaling = { -255, -85, 85, 255 };
+ int32x4_t dif0 = vmull_s16( vget_low_s16( dif_lo ), scaling );
+ int32x4_t dif1 = vmull_s16( vget_high_s16( dif_lo ), scaling );
+ int32x4_t dif2 = vmull_s16( vget_low_s16( dif_hi ), scaling );
+ int32x4_t dif3 = vmull_s16( vget_high_s16( dif_hi ), scaling );
+ int32x4_t dif4 = vaddq_s32( vaddq_s32( dif0, dif1 ), vaddq_s32( dif2, dif3 ) );
+
+#ifndef __aarch64__
+ int32x2_t dif5 = vpadd_s32( vget_low_s32( dif4 ), vget_high_s32( dif4 ) );
+ return vpadd_s32( dif5, dif5 );
+#else
+ return vdup_n_s32( vaddvq_s32( dif4 ) );
+#endif
+}
+
+static etcpak_force_inline int16x8_t Planar_NEON_SumWide( uint8x16_t src )
+{
+ uint16x8_t accu8 = vpaddlq_u8( src );
+#ifndef __aarch64__
+ uint16x4_t accu4 = vpadd_u16( vget_low_u16( accu8 ), vget_high_u16( accu8 ) );
+ uint16x4_t accu2 = vpadd_u16( accu4, accu4 );
+ uint16x4_t accu1 = vpadd_u16( accu2, accu2 );
+ return vreinterpretq_s16_u16( vcombine_u16( accu1, accu1 ) );
+#else
+ return vdupq_n_s16( vaddvq_u16( accu8 ) );
+#endif
+}
+
+static etcpak_force_inline int16x8_t convert6_NEON( int32x4_t lo, int32x4_t hi )
+{
+ uint16x8_t x = vcombine_u16( vqmovun_s32( lo ), vqmovun_s32( hi ) );
+ int16x8_t i = vreinterpretq_s16_u16( vshrq_n_u16( vqshlq_n_u16( x, 6 ), 6) ); // clamp 0-1023
+ i = vhsubq_s16( i, vdupq_n_s16( 15 ) );
+
+ int16x8_t ip11 = vaddq_s16( i, vdupq_n_s16( 11 ) );
+ int16x8_t ip4 = vaddq_s16( i, vdupq_n_s16( 4 ) );
+
+ return vshrq_n_s16( vsubq_s16( vsubq_s16( ip11, vshrq_n_s16( ip11, 7 ) ), vshrq_n_s16( ip4, 7) ), 3 );
+}
+
+static etcpak_force_inline int16x4_t convert7_NEON( int32x4_t x )
+{
+ int16x4_t i = vreinterpret_s16_u16( vshr_n_u16( vqshl_n_u16( vqmovun_s32( x ), 6 ), 6 ) ); // clamp 0-1023
+ i = vhsub_s16( i, vdup_n_s16( 15 ) );
+
+ int16x4_t p9 = vadd_s16( i, vdup_n_s16( 9 ) );
+ int16x4_t p6 = vadd_s16( i, vdup_n_s16( 6 ) );
+ return vshr_n_s16( vsub_s16( vsub_s16( p9, vshr_n_s16( p9, 8 ) ), vshr_n_s16( p6, 8 ) ), 2 );
+}
+
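+// NEON port of Planar() above; it performs the same plane fit with vector arithmetic and
+// returns a block/error pair in the same layout as the scalar version.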
+static etcpak_force_inline std::pair<uint64_t, uint64_t> Planar_NEON( const uint8_t* src )
+{
+ uint8x16x4_t srcBlock = vld4q_u8( src );
+
+ int16x8_t bSumWide = Planar_NEON_SumWide( srcBlock.val[0] );
+ int16x8_t gSumWide = Planar_NEON_SumWide( srcBlock.val[1] );
+ int16x8_t rSumWide = Planar_NEON_SumWide( srcBlock.val[2] );
+
+ int16x8_t dif_R_lo = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_low_u8( srcBlock.val[2] ), 4) ), rSumWide );
+ int16x8_t dif_R_hi = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_high_u8( srcBlock.val[2] ), 4) ), rSumWide );
+
+ int16x8_t dif_G_lo = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_low_u8( srcBlock.val[1] ), 4 ) ), gSumWide );
+ int16x8_t dif_G_hi = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_high_u8( srcBlock.val[1] ), 4 ) ), gSumWide );
+
+ int16x8_t dif_B_lo = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_low_u8( srcBlock.val[0] ), 4) ), bSumWide );
+ int16x8_t dif_B_hi = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_high_u8( srcBlock.val[0] ), 4) ), bSumWide );
+
+ int32x2x2_t dif_xz_z = vzip_s32( vzip_s32( Planar_NEON_DifXZ( dif_B_lo, dif_B_hi ), Planar_NEON_DifXZ( dif_R_lo, dif_R_hi ) ).val[0], Planar_NEON_DifXZ( dif_G_lo, dif_G_hi ) );
+ int32x4_t dif_xz = vcombine_s32( dif_xz_z.val[0], dif_xz_z.val[1] );
+ int32x2x2_t dif_yz_z = vzip_s32( vzip_s32( Planar_NEON_DifYZ( dif_B_lo, dif_B_hi ), Planar_NEON_DifYZ( dif_R_lo, dif_R_hi ) ).val[0], Planar_NEON_DifYZ( dif_G_lo, dif_G_hi ) );
+ int32x4_t dif_yz = vcombine_s32( dif_yz_z.val[0], dif_yz_z.val[1] );
+
+ const float fscale = -4.0f / ( (255 * 255 * 8.0f + 85 * 85 * 8.0f ) * 16.0f );
+ float32x4_t fa = vmulq_n_f32( vcvtq_f32_s32( dif_xz ), fscale );
+ float32x4_t fb = vmulq_n_f32( vcvtq_f32_s32( dif_yz ), fscale );
+ int16x4_t bgrgSum = vzip_s16( vzip_s16( vget_low_s16( bSumWide ), vget_low_s16( rSumWide ) ).val[0], vget_low_s16( gSumWide ) ).val[0];
+ float32x4_t fd = vmulq_n_f32( vcvtq_f32_s32( vmovl_s16( bgrgSum ) ), 4.0f / 16.0f);
+
+ float32x4_t cof = vmlaq_n_f32( vmlaq_n_f32( fd, fb, 255.0f ), fa, 255.0f );
+ float32x4_t chf = vmlaq_n_f32( vmlaq_n_f32( fd, fb, 255.0f ), fa, -425.0f );
+ float32x4_t cvf = vmlaq_n_f32( vmlaq_n_f32( fd, fb, -425.0f ), fa, 255.0f );
+
+ int32x4_t coi = vcvtq_s32_f32( cof );
+ int32x4_t chi = vcvtq_s32_f32( chf );
+ int32x4_t cvi = vcvtq_s32_f32( cvf );
+
+ int32x4x2_t tr_hv = vtrnq_s32( chi, cvi );
+ int32x4x2_t tr_o = vtrnq_s32( coi, coi );
+
+ int16x8_t c_hvoo_br_6 = convert6_NEON( tr_hv.val[0], tr_o.val[0] );
+ int16x4_t c_hvox_g_7 = convert7_NEON( vcombine_s32( vget_low_s32( tr_hv.val[1] ), vget_low_s32( tr_o.val[1] ) ) );
+ int16x8_t c_hvoo_br_8 = vorrq_s16( vshrq_n_s16( c_hvoo_br_6, 4 ), vshlq_n_s16( c_hvoo_br_6, 2 ) );
+ int16x4_t c_hvox_g_8 = vorr_s16( vshr_n_s16( c_hvox_g_7, 6 ), vshl_n_s16( c_hvox_g_7, 1 ) );
+
+ int16x4_t rec_gxbr_o = vext_s16( c_hvox_g_8, vget_high_s16( c_hvoo_br_8 ), 3 );
+
+ rec_gxbr_o = vadd_s16( vshl_n_s16( rec_gxbr_o, 2 ), vdup_n_s16( 2 ) );
+ int16x8_t rec_ro_wide = vdupq_lane_s16( rec_gxbr_o, 3 );
+ int16x8_t rec_go_wide = vdupq_lane_s16( rec_gxbr_o, 0 );
+ int16x8_t rec_bo_wide = vdupq_lane_s16( rec_gxbr_o, 1 );
+
+ int16x4_t br_hv2 = vsub_s16( vget_low_s16( c_hvoo_br_8 ), vget_high_s16( c_hvoo_br_8 ) );
+ int16x4_t gg_hv2 = vsub_s16( c_hvox_g_8, vdup_lane_s16( c_hvox_g_8, 2 ) );
+
+ int16x8_t scaleh_lo = { 0, 0, 0, 0, 1, 1, 1, 1 };
+ int16x8_t scaleh_hi = { 2, 2, 2, 2, 3, 3, 3, 3 };
+ int16x8_t scalev = { 0, 1, 2, 3, 0, 1, 2, 3 };
+
+ int16x8_t rec_r_1 = vmlaq_lane_s16( rec_ro_wide, scalev, br_hv2, 3 );
+ int16x8_t rec_r_lo = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_r_1, scaleh_lo, br_hv2, 2 ), 2 ) ) );
+ int16x8_t rec_r_hi = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_r_1, scaleh_hi, br_hv2, 2 ), 2 ) ) );
+
+ int16x8_t rec_b_1 = vmlaq_lane_s16( rec_bo_wide, scalev, br_hv2, 1 );
+ int16x8_t rec_b_lo = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_b_1, scaleh_lo, br_hv2, 0 ), 2 ) ) );
+ int16x8_t rec_b_hi = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_b_1, scaleh_hi, br_hv2, 0 ), 2 ) ) );
+
+ int16x8_t rec_g_1 = vmlaq_lane_s16( rec_go_wide, scalev, gg_hv2, 1 );
+ int16x8_t rec_g_lo = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_g_1, scaleh_lo, gg_hv2, 0 ), 2 ) ) );
+ int16x8_t rec_g_hi = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_g_1, scaleh_hi, gg_hv2, 0 ), 2 ) ) );
+
+ int16x8_t dif_r_lo = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_low_u8( srcBlock.val[2] ) ) ), rec_r_lo );
+ int16x8_t dif_r_hi = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_high_u8( srcBlock.val[2] ) ) ), rec_r_hi );
+
+ int16x8_t dif_g_lo = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_low_u8( srcBlock.val[1] ) ) ), rec_g_lo );
+ int16x8_t dif_g_hi = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_high_u8( srcBlock.val[1] ) ) ), rec_g_hi );
+
+ int16x8_t dif_b_lo = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_low_u8( srcBlock.val[0] ) ) ), rec_b_lo );
+ int16x8_t dif_b_hi = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_high_u8( srcBlock.val[0] ) ) ), rec_b_hi );
+
+ int16x8_t dif_lo = vmlaq_n_s16( vmlaq_n_s16( vmulq_n_s16( dif_r_lo, 38 ), dif_g_lo, 76 ), dif_b_lo, 14 );
+ int16x8_t dif_hi = vmlaq_n_s16( vmlaq_n_s16( vmulq_n_s16( dif_r_hi, 38 ), dif_g_hi, 76 ), dif_b_hi, 14 );
+
+ int16x4_t tmpDif = vget_low_s16( dif_lo );
+ int32x4_t difsq_0 = vmull_s16( tmpDif, tmpDif );
+ tmpDif = vget_high_s16( dif_lo );
+ int32x4_t difsq_1 = vmull_s16( tmpDif, tmpDif );
+ tmpDif = vget_low_s16( dif_hi );
+ int32x4_t difsq_2 = vmull_s16( tmpDif, tmpDif );
+ tmpDif = vget_high_s16( dif_hi );
+ int32x4_t difsq_3 = vmull_s16( tmpDif, tmpDif );
+
+ uint32x4_t difsq_5 = vaddq_u32( vreinterpretq_u32_s32( difsq_0 ), vreinterpretq_u32_s32( difsq_1 ) );
+ uint32x4_t difsq_6 = vaddq_u32( vreinterpretq_u32_s32( difsq_2 ), vreinterpretq_u32_s32( difsq_3) );
+
+ uint64x2_t difsq_7 = vaddl_u32( vget_low_u32( difsq_5 ), vget_high_u32( difsq_5 ) );
+ uint64x2_t difsq_8 = vaddl_u32( vget_low_u32( difsq_6 ), vget_high_u32( difsq_6 ) );
+
+ uint64x2_t difsq_9 = vaddq_u64( difsq_7, difsq_8 );
+
+#ifdef __aarch64__
+ uint64_t error = vaddvq_u64( difsq_9 );
+#else
+ uint64_t error = vgetq_lane_u64( difsq_9, 0 ) + vgetq_lane_u64( difsq_9, 1 );
+#endif
+
+ int32_t coR = c_hvoo_br_6[6];
+ int32_t coG = c_hvox_g_7[2];
+ int32_t coB = c_hvoo_br_6[4];
+
+ int32_t chR = c_hvoo_br_6[2];
+ int32_t chG = c_hvox_g_7[0];
+ int32_t chB = c_hvoo_br_6[0];
+
+ int32_t cvR = c_hvoo_br_6[3];
+ int32_t cvG = c_hvox_g_7[1];
+ int32_t cvB = c_hvoo_br_6[1];
+
+ uint32_t rgbv = cvB | ( cvG << 6 ) | ( cvR << 13 );
+ uint32_t rgbh = chB | ( chG << 6 ) | ( chR << 13 );
+ uint32_t hi = rgbv | ( ( rgbh & 0x1FFF ) << 19 );
+ uint32_t lo = ( chR & 0x1 ) | 0x2 | ( ( chR << 1 ) & 0x7C );
+ lo |= ( ( coB & 0x07 ) << 7 ) | ( ( coB & 0x18 ) << 8 ) | ( ( coB & 0x20 ) << 11 );
+ lo |= ( ( coG & 0x3F) << 17) | ( (coG & 0x40 ) << 18 );
+ lo |= coR << 25;
+
+ const auto idx = ( coR & 0x20 ) | ( ( coG & 0x20 ) >> 1 ) | ( ( coB & 0x1E ) >> 1 );
+
+ lo |= g_flags[idx];
+
+ uint64_t result = static_cast<uint32_t>( _bswap(lo) );
+ result |= static_cast<uint64_t>( static_cast<uint32_t>( _bswap( hi ) ) ) << 32;
+
+ return std::make_pair( result, error );
+}
+
+#endif
+
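+// This overload picks the lowest-error modifier table for each half of the block; if the
+// combined table error is not better than the supplied planar error it returns the planar
+// block unchanged, otherwise it packs the table indices and the per-pixel selector bits.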
+template<class T, class S>
+static etcpak_force_inline uint64_t EncodeSelectors( uint64_t d, const T terr[2][8], const S tsel[16][8], const uint32_t* id, const uint64_t value, const uint64_t error)
+{
+ size_t tidx[2];
+ tidx[0] = GetLeastError( terr[0], 8 );
+ tidx[1] = GetLeastError( terr[1], 8 );
+
+ if ((terr[0][tidx[0]] + terr[1][tidx[1]]) >= error)
+ {
+ return value;
+ }
+
+ d |= tidx[0] << 26;
+ d |= tidx[1] << 29;
+ for( int i=0; i<16; i++ )
+ {
+ uint64_t t = tsel[i][tidx[id[i]%2]];
+ d |= ( t & 0x1 ) << ( i + 32 );
+ d |= ( t & 0x2 ) << ( i + 47 );
+ }
+
+ return FixByteOrder(d);
+}
+
+}
+
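+// Top-level ETC1 block encoder: early out for solid-color blocks, pick the lowest-error
+// of the four base-color configurations produced by the averaging step, encode the base
+// colors, then search the modifier tables for the selectors that minimize the error.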
+static etcpak_force_inline uint64_t ProcessRGB( const uint8_t* src )
+{
+#ifdef __AVX2__
+ uint64_t d = CheckSolid_AVX2( src );
+ if( d != 0 ) return d;
+
+ alignas(32) v4i a[8];
+
+ __m128i err0 = PrepareAverages_AVX2( a, src );
+
+ // Get index of minimum error (err0)
+ __m128i err1 = _mm_shuffle_epi32(err0, _MM_SHUFFLE(2, 3, 0, 1));
+ __m128i errMin0 = _mm_min_epu32(err0, err1);
+
+ __m128i errMin1 = _mm_shuffle_epi32(errMin0, _MM_SHUFFLE(1, 0, 3, 2));
+ __m128i errMin2 = _mm_min_epu32(errMin1, errMin0);
+
+ __m128i errMask = _mm_cmpeq_epi32(errMin2, err0);
+
+ uint32_t mask = _mm_movemask_epi8(errMask);
+
+ uint32_t idx = _bit_scan_forward(mask) >> 2;
+
+ d |= EncodeAverages_AVX2( a, idx );
+
+ alignas(32) uint32_t terr[2][8] = {};
+ alignas(32) uint32_t tsel[8];
+
+ if ((idx == 0) || (idx == 2))
+ {
+ FindBestFit_4x2_AVX2( terr, tsel, a, idx * 2, src );
+ }
+ else
+ {
+ FindBestFit_2x4_AVX2( terr, tsel, a, idx * 2, src );
+ }
+
+ return EncodeSelectors_AVX2( d, terr, tsel, (idx % 2) == 1 );
+#else
+ uint64_t d = CheckSolid( src );
+ if( d != 0 ) return d;
+
+ v4i a[8];
+ unsigned int err[4] = {};
+ PrepareAverages( a, src, err );
+ size_t idx = GetLeastError( err, 4 );
+ EncodeAverages( d, a, idx );
+
+#if ( defined __SSE4_1__ || defined __ARM_NEON ) && !defined REFERENCE_IMPLEMENTATION
+ uint32_t terr[2][8] = {};
+#else
+ uint64_t terr[2][8] = {};
+#endif
+ uint16_t tsel[16][8];
+ auto id = g_id[idx];
+ FindBestFit( terr, tsel, a, id, src );
+
+ return FixByteOrder( EncodeSelectors( d, terr, tsel, id ) );
+#endif
+}
+
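+// ETC2 variant of ProcessRGB(): it additionally evaluates the planar mode, and the
+// selector encoder keeps whichever encoding (table-based or planar) has the lower error.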
+static etcpak_force_inline uint64_t ProcessRGB_ETC2( const uint8_t* src )
+{
+#ifdef __AVX2__
+ uint64_t d = CheckSolid_AVX2( src );
+ if( d != 0 ) return d;
+
+ auto plane = Planar_AVX2( src );
+
+ alignas(32) v4i a[8];
+
+ __m128i err0 = PrepareAverages_AVX2( a, plane.sum4 );
+
+ // Get index of minimum error (err0)
+ __m128i err1 = _mm_shuffle_epi32(err0, _MM_SHUFFLE(2, 3, 0, 1));
+ __m128i errMin0 = _mm_min_epu32(err0, err1);
+
+ __m128i errMin1 = _mm_shuffle_epi32(errMin0, _MM_SHUFFLE(1, 0, 3, 2));
+ __m128i errMin2 = _mm_min_epu32(errMin1, errMin0);
+
+ __m128i errMask = _mm_cmpeq_epi32(errMin2, err0);
+
+ uint32_t mask = _mm_movemask_epi8(errMask);
+
+ size_t idx = _bit_scan_forward(mask) >> 2;
+
+ d = EncodeAverages_AVX2( a, idx );
+
+ alignas(32) uint32_t terr[2][8] = {};
+ alignas(32) uint32_t tsel[8];
+
+ if ((idx == 0) || (idx == 2))
+ {
+ FindBestFit_4x2_AVX2( terr, tsel, a, idx * 2, src );
+ }
+ else
+ {
+ FindBestFit_2x4_AVX2( terr, tsel, a, idx * 2, src );
+ }
+
+ return EncodeSelectors_AVX2( d, terr, tsel, (idx % 2) == 1, plane.plane, plane.error );
+#else
+ uint64_t d = CheckSolid( src );
+ if (d != 0) return d;
+
+#ifdef __ARM_NEON
+ auto result = Planar_NEON( src );
+#else
+ auto result = Planar( src );
+#endif
+
+ v4i a[8];
+ unsigned int err[4] = {};
+ PrepareAverages( a, src, err );
+ size_t idx = GetLeastError( err, 4 );
+ EncodeAverages( d, a, idx );
+
+#if ( defined __SSE4_1__ || defined __ARM_NEON ) && !defined REFERENCE_IMPLEMENTATION
+ uint32_t terr[2][8] = {};
+#else
+ uint64_t terr[2][8] = {};
+#endif
+ uint16_t tsel[16][8];
+ auto id = g_id[idx];
+ FindBestFit( terr, tsel, a, id, src );
+
+ return EncodeSelectors( d, terr, tsel, id, result.first, result.second );
+#endif
+}
+
+#ifdef __SSE4_1__
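+// Widen<K>() broadcasts the K-th 16-bit lane of the source vector across all eight
+// lanes; for any constant K the switch compiles down to two shuffles.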
+template<int K>
+static etcpak_force_inline __m128i Widen( const __m128i src )
+{
+ static_assert( K >= 0 && K <= 7, "Index out of range" );
+
+ __m128i tmp;
+ switch( K )
+ {
+ case 0:
+ tmp = _mm_shufflelo_epi16( src, _MM_SHUFFLE( 0, 0, 0, 0 ) );
+ return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 0, 0, 0, 0 ) );
+ case 1:
+ tmp = _mm_shufflelo_epi16( src, _MM_SHUFFLE( 1, 1, 1, 1 ) );
+ return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 0, 0, 0, 0 ) );
+ case 2:
+ tmp = _mm_shufflelo_epi16( src, _MM_SHUFFLE( 2, 2, 2, 2 ) );
+ return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 0, 0, 0, 0 ) );
+ case 3:
+ tmp = _mm_shufflelo_epi16( src, _MM_SHUFFLE( 3, 3, 3, 3 ) );
+ return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 0, 0, 0, 0 ) );
+ case 4:
+ tmp = _mm_shufflehi_epi16( src, _MM_SHUFFLE( 0, 0, 0, 0 ) );
+ return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 2, 2, 2, 2 ) );
+ case 5:
+ tmp = _mm_shufflehi_epi16( src, _MM_SHUFFLE( 1, 1, 1, 1 ) );
+ return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 2, 2, 2, 2 ) );
+ case 6:
+ tmp = _mm_shufflehi_epi16( src, _MM_SHUFFLE( 2, 2, 2, 2 ) );
+ return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 2, 2, 2, 2 ) );
+ case 7:
+ tmp = _mm_shufflehi_epi16( src, _MM_SHUFFLE( 3, 3, 3, 3 ) );
+ return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 2, 2, 2, 2 ) );
+ }
+}
+
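+// GetMulSel() maps an EAC modifier-table index (0..15) onto one of the six multiplier
+// lanes used by the reconstruction below; groups of tables share the same multiplier.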
+static etcpak_force_inline int GetMulSel( int sel )
+{
+ switch( sel )
+ {
+ case 0:
+ return 0;
+ case 1:
+ case 2:
+ case 3:
+ return 1;
+ case 4:
+ return 2;
+ case 5:
+ case 6:
+ case 7:
+ return 3;
+ case 8:
+ case 9:
+ case 10:
+ case 11:
+ case 12:
+ case 13:
+ return 4;
+ case 14:
+ case 15:
+ return 5;
+ }
+}
+
+#endif
+
+#ifdef __ARM_NEON
+
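+// NEON helpers for the EAC alpha encoder: ErrorProbe_EAC_NEON() squares the distance of
+// one source alpha value against the eight reconstructed values, MinError_EAC_NEON()
+// reduces that to the smallest error, MinErrorIndex_EAC_NEON() turns the winning lane
+// into the 3-bit selector at its bit position, and WidenMultiplier_EAC_NEON() broadcasts
+// the multiplier lane belonging to a given table.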
+static constexpr etcpak_force_inline int GetMulSel(int sel)
+{
+ return ( sel < 1 ) ? 0 : ( sel < 4 ) ? 1 : ( sel < 5 ) ? 2 : ( sel < 8 ) ? 3 : ( sel < 14 ) ? 4 : 5;
+}
+
+static constexpr int ClampConstant( int x, int min, int max )
+{
+ return x < min ? min : x > max ? max : x;
+}
+
+template <int Index>
+etcpak_force_inline static uint16x8_t ErrorProbe_EAC_NEON( uint8x8_t recVal, uint8x16_t alphaBlock )
+{
+ uint8x8_t srcValWide;
+#ifndef __aarch64__
+ if( Index < 8 )
+ srcValWide = vdup_lane_u8( vget_low_u8( alphaBlock ), ClampConstant( Index, 0, 8 ) );
+ else
+ srcValWide = vdup_lane_u8( vget_high_u8( alphaBlock ), ClampConstant( Index - 8, 0, 8 ) );
+#else
+ srcValWide = vdup_laneq_u8( alphaBlock, Index );
+#endif
+
+ uint8x8_t deltaVal = vabd_u8( srcValWide, recVal );
+ return vmull_u8( deltaVal, deltaVal );
+}
+
+etcpak_force_inline static uint16_t MinError_EAC_NEON( uint16x8_t errProbe )
+{
+#ifndef __aarch64__
+ uint16x4_t tmpErr = vpmin_u16( vget_low_u16( errProbe ), vget_high_u16( errProbe ) );
+ tmpErr = vpmin_u16( tmpErr, tmpErr );
+ return vpmin_u16( tmpErr, tmpErr )[0];
+#else
+ return vminvq_u16( errProbe );
+#endif
+}
+
+template <int Index>
+etcpak_force_inline static uint64_t MinErrorIndex_EAC_NEON( uint8x8_t recVal, uint8x16_t alphaBlock )
+{
+ uint16x8_t errProbe = ErrorProbe_EAC_NEON<Index>( recVal, alphaBlock );
+ uint16x8_t minErrMask = vceqq_u16( errProbe, vdupq_n_u16( MinError_EAC_NEON( errProbe ) ) );
+ uint64_t idx = __builtin_ctzll( vget_lane_u64( vreinterpret_u64_u8( vqmovn_u16( minErrMask ) ), 0 ) );
+ idx >>= 3;
+ idx <<= 45 - Index * 3;
+
+ return idx;
+}
+
+template <int Index>
+etcpak_force_inline static int16x8_t WidenMultiplier_EAC_NEON( int16x8_t multipliers )
+{
+ constexpr int Lane = GetMulSel( Index );
+#ifndef __aarch64__
+ if( Lane < 4 )
+ return vdupq_lane_s16( vget_low_s16( multipliers ), ClampConstant( Lane, 0, 4 ) );
+ else
+ return vdupq_lane_s16( vget_high_s16( multipliers ), ClampConstant( Lane - 4, 0, 4 ) );
+#else
+ return vdupq_laneq_s16( multipliers, Lane );
+#endif
+}
+
+#endif
+
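+// EAC alpha block encoder: after a solid-color early out it derives the midpoint and a
+// per-table multiplier from the block's min/max range, tries all 16 modifier tables,
+// keeps the one with the lowest summed per-pixel error, and packs the base value,
+// multiplier, table index and the 16 3-bit selectors into a byte-swapped 64-bit block.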
+static etcpak_force_inline uint64_t ProcessAlpha_ETC2( const uint8_t* src )
+{
+#if defined __SSE4_1__
+ // Check solid
+ __m128i s = _mm_loadu_si128( (__m128i*)src );
+ __m128i solidCmp = _mm_set1_epi8( src[0] );
+ __m128i cmpRes = _mm_cmpeq_epi8( s, solidCmp );
+ if( _mm_testc_si128( cmpRes, _mm_set1_epi32( -1 ) ) )
+ {
+ return src[0];
+ }
+
+ // Calculate min, max
+ __m128i s1 = _mm_shuffle_epi32( s, _MM_SHUFFLE( 2, 3, 0, 1 ) );
+ __m128i max1 = _mm_max_epu8( s, s1 );
+ __m128i min1 = _mm_min_epu8( s, s1 );
+ __m128i smax2 = _mm_shuffle_epi32( max1, _MM_SHUFFLE( 0, 0, 2, 2 ) );
+ __m128i smin2 = _mm_shuffle_epi32( min1, _MM_SHUFFLE( 0, 0, 2, 2 ) );
+ __m128i max2 = _mm_max_epu8( max1, smax2 );
+ __m128i min2 = _mm_min_epu8( min1, smin2 );
+ __m128i smax3 = _mm_alignr_epi8( max2, max2, 2 );
+ __m128i smin3 = _mm_alignr_epi8( min2, min2, 2 );
+ __m128i max3 = _mm_max_epu8( max2, smax3 );
+ __m128i min3 = _mm_min_epu8( min2, smin3 );
+ __m128i smax4 = _mm_alignr_epi8( max3, max3, 1 );
+ __m128i smin4 = _mm_alignr_epi8( min3, min3, 1 );
+ __m128i max = _mm_max_epu8( max3, smax4 );
+ __m128i min = _mm_min_epu8( min3, smin4 );
+ __m128i max16 = _mm_unpacklo_epi8( max, _mm_setzero_si128() );
+ __m128i min16 = _mm_unpacklo_epi8( min, _mm_setzero_si128() );
+
+ // src range, mid
+ __m128i srcRange = _mm_sub_epi16( max16, min16 );
+ __m128i srcRangeHalf = _mm_srli_epi16( srcRange, 1 );
+ __m128i srcMid = _mm_add_epi16( min16, srcRangeHalf );
+
+ // multiplier
+ __m128i mul1 = _mm_mulhi_epi16( srcRange, g_alphaRange_SIMD );
+ __m128i mul = _mm_add_epi16( mul1, _mm_set1_epi16( 1 ) );
+
+ // wide source
+ __m128i s16_1 = _mm_shuffle_epi32( s, _MM_SHUFFLE( 3, 2, 3, 2 ) );
+ __m128i s16[2] = { _mm_unpacklo_epi8( s, _mm_setzero_si128() ), _mm_unpacklo_epi8( s16_1, _mm_setzero_si128() ) };
+
+ __m128i sr[16] = {
+ Widen<0>( s16[0] ),
+ Widen<1>( s16[0] ),
+ Widen<2>( s16[0] ),
+ Widen<3>( s16[0] ),
+ Widen<4>( s16[0] ),
+ Widen<5>( s16[0] ),
+ Widen<6>( s16[0] ),
+ Widen<7>( s16[0] ),
+ Widen<0>( s16[1] ),
+ Widen<1>( s16[1] ),
+ Widen<2>( s16[1] ),
+ Widen<3>( s16[1] ),
+ Widen<4>( s16[1] ),
+ Widen<5>( s16[1] ),
+ Widen<6>( s16[1] ),
+ Widen<7>( s16[1] )
+ };
+
+#ifdef __AVX2__
+ __m256i srcRangeWide = _mm256_broadcastsi128_si256( srcRange );
+ __m256i srcMidWide = _mm256_broadcastsi128_si256( srcMid );
+
+ __m256i mulWide1 = _mm256_mulhi_epi16( srcRangeWide, g_alphaRange_AVX );
+ __m256i mulWide = _mm256_add_epi16( mulWide1, _mm256_set1_epi16( 1 ) );
+
+ __m256i modMul[8] = {
+ _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[0] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[0] ) ) ), _mm256_setzero_si256() ),
+ _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[1] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[1] ) ) ), _mm256_setzero_si256() ),
+ _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[2] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[2] ) ) ), _mm256_setzero_si256() ),
+ _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[3] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[3] ) ) ), _mm256_setzero_si256() ),
+ _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[4] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[4] ) ) ), _mm256_setzero_si256() ),
+ _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[5] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[5] ) ) ), _mm256_setzero_si256() ),
+ _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[6] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[6] ) ) ), _mm256_setzero_si256() ),
+ _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[7] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[7] ) ) ), _mm256_setzero_si256() ),
+ };
+
+ // find selector
+ __m256i mulErr = _mm256_setzero_si256();
+ for( int j=0; j<16; j++ )
+ {
+ __m256i s16Wide = _mm256_broadcastsi128_si256( sr[j] );
+ __m256i err1, err2;
+
+ err1 = _mm256_sub_epi16( s16Wide, modMul[0] );
+ __m256i localErr = _mm256_mullo_epi16( err1, err1 );
+
+ err1 = _mm256_sub_epi16( s16Wide, modMul[1] );
+ err2 = _mm256_mullo_epi16( err1, err1 );
+ localErr = _mm256_min_epu16( localErr, err2 );
+
+ err1 = _mm256_sub_epi16( s16Wide, modMul[2] );
+ err2 = _mm256_mullo_epi16( err1, err1 );
+ localErr = _mm256_min_epu16( localErr, err2 );
+
+ err1 = _mm256_sub_epi16( s16Wide, modMul[3] );
+ err2 = _mm256_mullo_epi16( err1, err1 );
+ localErr = _mm256_min_epu16( localErr, err2 );
+
+ err1 = _mm256_sub_epi16( s16Wide, modMul[4] );
+ err2 = _mm256_mullo_epi16( err1, err1 );
+ localErr = _mm256_min_epu16( localErr, err2 );
+
+ err1 = _mm256_sub_epi16( s16Wide, modMul[5] );
+ err2 = _mm256_mullo_epi16( err1, err1 );
+ localErr = _mm256_min_epu16( localErr, err2 );
+
+ err1 = _mm256_sub_epi16( s16Wide, modMul[6] );
+ err2 = _mm256_mullo_epi16( err1, err1 );
+ localErr = _mm256_min_epu16( localErr, err2 );
+
+ err1 = _mm256_sub_epi16( s16Wide, modMul[7] );
+ err2 = _mm256_mullo_epi16( err1, err1 );
+ localErr = _mm256_min_epu16( localErr, err2 );
+
+ // note that this can overflow, but since we're looking for the smallest error, it shouldn't matter
+ mulErr = _mm256_adds_epu16( mulErr, localErr );
+ }
+ uint64_t minPos1 = _mm_cvtsi128_si64( _mm_minpos_epu16( _mm256_castsi256_si128( mulErr ) ) );
+ uint64_t minPos2 = _mm_cvtsi128_si64( _mm_minpos_epu16( _mm256_extracti128_si256( mulErr, 1 ) ) );
+ int sel = ( ( minPos1 & 0xFFFF ) < ( minPos2 & 0xFFFF ) ) ? ( minPos1 >> 16 ) : ( 8 + ( minPos2 >> 16 ) );
+
+ __m128i recVal16;
+ switch( sel )
+ {
+ case 0:
+ recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<0>( mul ), g_alpha_SIMD[0] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<0>( mul ), g_alpha_SIMD[0] ) ) ), _mm_setzero_si128() );
+ break;
+ case 1:
+ recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[1] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[1] ) ) ), _mm_setzero_si128() );
+ break;
+ case 2:
+ recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[2] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[2] ) ) ), _mm_setzero_si128() );
+ break;
+ case 3:
+ recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[3] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[3] ) ) ), _mm_setzero_si128() );
+ break;
+ case 4:
+ recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<2>( mul ), g_alpha_SIMD[4] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<2>( mul ), g_alpha_SIMD[4] ) ) ), _mm_setzero_si128() );
+ break;
+ case 5:
+ recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[5] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[5] ) ) ), _mm_setzero_si128() );
+ break;
+ case 6:
+ recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[6] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[6] ) ) ), _mm_setzero_si128() );
+ break;
+ case 7:
+ recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[7] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[7] ) ) ), _mm_setzero_si128() );
+ break;
+ case 8:
+ recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[8] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[8] ) ) ), _mm_setzero_si128() );
+ break;
+ case 9:
+ recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[9] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[9] ) ) ), _mm_setzero_si128() );
+ break;
+ case 10:
+ recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[10] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[10] ) ) ), _mm_setzero_si128() );
+ break;
+ case 11:
+ recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[11] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[11] ) ) ), _mm_setzero_si128() );
+ break;
+ case 12:
+ recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[12] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[12] ) ) ), _mm_setzero_si128() );
+ break;
+ case 13:
+ recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[13] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[13] ) ) ), _mm_setzero_si128() );
+ break;
+ case 14:
+ recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[14] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[14] ) ) ), _mm_setzero_si128() );
+ break;
+ case 15:
+ recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[15] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[15] ) ) ), _mm_setzero_si128() );
+ break;
+ default:
+ assert( false );
+ break;
+ }
+#else
+ // wide multiplier
+ __m128i rangeMul[16] = {
+ _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<0>( mul ), g_alpha_SIMD[0] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<0>( mul ), g_alpha_SIMD[0] ) ) ), _mm_setzero_si128() ),
+ _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[1] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[1] ) ) ), _mm_setzero_si128() ),
+ _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[2] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[2] ) ) ), _mm_setzero_si128() ),
+ _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[3] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[3] ) ) ), _mm_setzero_si128() ),
+ _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<2>( mul ), g_alpha_SIMD[4] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<2>( mul ), g_alpha_SIMD[4] ) ) ), _mm_setzero_si128() ),
+ _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[5] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[5] ) ) ), _mm_setzero_si128() ),
+ _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[6] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[6] ) ) ), _mm_setzero_si128() ),
+ _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[7] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[7] ) ) ), _mm_setzero_si128() ),
+ _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[8] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[8] ) ) ), _mm_setzero_si128() ),
+ _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[9] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[9] ) ) ), _mm_setzero_si128() ),
+ _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[10] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[10] ) ) ), _mm_setzero_si128() ),
+ _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[11] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[11] ) ) ), _mm_setzero_si128() ),
+ _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[12] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[12] ) ) ), _mm_setzero_si128() ),
+ _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[13] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[13] ) ) ), _mm_setzero_si128() ),
+ _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[14] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[14] ) ) ), _mm_setzero_si128() ),
+ _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[15] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[15] ) ) ), _mm_setzero_si128() )
+ };
+
+ // find selector
+ int err = std::numeric_limits<int>::max();
+ int sel;
+ for( int r=0; r<16; r++ )
+ {
+ __m128i err1, err2, minerr;
+ __m128i recVal16 = rangeMul[r];
+ int rangeErr;
+
+ err1 = _mm_sub_epi16( sr[0], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ rangeErr = _mm_cvtsi128_si64( minerr ) & 0xFFFF;
+
+ err1 = _mm_sub_epi16( sr[1], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
+
+ err1 = _mm_sub_epi16( sr[2], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
+
+ err1 = _mm_sub_epi16( sr[3], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
+
+ err1 = _mm_sub_epi16( sr[4], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
+
+ err1 = _mm_sub_epi16( sr[5], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
+
+ err1 = _mm_sub_epi16( sr[6], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
+
+ err1 = _mm_sub_epi16( sr[7], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
+
+ err1 = _mm_sub_epi16( sr[8], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
+
+ err1 = _mm_sub_epi16( sr[9], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
+
+ err1 = _mm_sub_epi16( sr[10], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
+
+ err1 = _mm_sub_epi16( sr[11], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
+
+ err1 = _mm_sub_epi16( sr[12], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
+
+ err1 = _mm_sub_epi16( sr[13], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
+
+ err1 = _mm_sub_epi16( sr[14], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
+
+ err1 = _mm_sub_epi16( sr[15], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
+
+ if( rangeErr < err )
+ {
+ err = rangeErr;
+ sel = r;
+ if( err == 0 ) break;
+ }
+ }
+
+ __m128i recVal16 = rangeMul[sel];
+#endif
+
+ // find indices
+ __m128i err1, err2, minerr;
+ uint64_t idx = 0, tmp;
+
+ err1 = _mm_sub_epi16( sr[0], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ tmp = _mm_cvtsi128_si64( minerr );
+ idx |= ( tmp >> 16 ) << 15*3;
+
+ err1 = _mm_sub_epi16( sr[1], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ tmp = _mm_cvtsi128_si64( minerr );
+ idx |= ( tmp >> 16 ) << 14*3;
+
+ err1 = _mm_sub_epi16( sr[2], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ tmp = _mm_cvtsi128_si64( minerr );
+ idx |= ( tmp >> 16 ) << 13*3;
+
+ err1 = _mm_sub_epi16( sr[3], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ tmp = _mm_cvtsi128_si64( minerr );
+ idx |= ( tmp >> 16 ) << 12*3;
+
+ err1 = _mm_sub_epi16( sr[4], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ tmp = _mm_cvtsi128_si64( minerr );
+ idx |= ( tmp >> 16 ) << 11*3;
+
+ err1 = _mm_sub_epi16( sr[5], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ tmp = _mm_cvtsi128_si64( minerr );
+ idx |= ( tmp >> 16 ) << 10*3;
+
+ err1 = _mm_sub_epi16( sr[6], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ tmp = _mm_cvtsi128_si64( minerr );
+ idx |= ( tmp >> 16 ) << 9*3;
+
+ err1 = _mm_sub_epi16( sr[7], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ tmp = _mm_cvtsi128_si64( minerr );
+ idx |= ( tmp >> 16 ) << 8*3;
+
+ err1 = _mm_sub_epi16( sr[8], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ tmp = _mm_cvtsi128_si64( minerr );
+ idx |= ( tmp >> 16 ) << 7*3;
+
+ err1 = _mm_sub_epi16( sr[9], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ tmp = _mm_cvtsi128_si64( minerr );
+ idx |= ( tmp >> 16 ) << 6*3;
+
+ err1 = _mm_sub_epi16( sr[10], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ tmp = _mm_cvtsi128_si64( minerr );
+ idx |= ( tmp >> 16 ) << 5*3;
+
+ err1 = _mm_sub_epi16( sr[11], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ tmp = _mm_cvtsi128_si64( minerr );
+ idx |= ( tmp >> 16 ) << 4*3;
+
+ err1 = _mm_sub_epi16( sr[12], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ tmp = _mm_cvtsi128_si64( minerr );
+ idx |= ( tmp >> 16 ) << 3*3;
+
+ err1 = _mm_sub_epi16( sr[13], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ tmp = _mm_cvtsi128_si64( minerr );
+ idx |= ( tmp >> 16 ) << 2*3;
+
+ err1 = _mm_sub_epi16( sr[14], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ tmp = _mm_cvtsi128_si64( minerr );
+ idx |= ( tmp >> 16 ) << 1*3;
+
+ err1 = _mm_sub_epi16( sr[15], recVal16 );
+ err2 = _mm_mullo_epi16( err1, err1 );
+ minerr = _mm_minpos_epu16( err2 );
+ tmp = _mm_cvtsi128_si64( minerr );
+ idx |= ( tmp >> 16 ) << 0*3;
+
+ uint16_t rm[8];
+ _mm_storeu_si128( (__m128i*)rm, mul );
+ uint16_t sm = _mm_cvtsi128_si64( srcMid );
+
+ uint64_t d = ( uint64_t( sm ) << 56 ) |
+ ( uint64_t( rm[GetMulSel( sel )] ) << 52 ) |
+ ( uint64_t( sel ) << 48 ) |
+ idx;
+
+ return _bswap64( d );
+#elif defined __ARM_NEON
+
+ int16x8_t srcMidWide, multipliers;
+ int srcMid;
+ uint8x16_t srcAlphaBlock = vld1q_u8( src );
+ {
+ uint8_t ref = src[0];
+ uint8x16_t a0 = vdupq_n_u8( ref );
+ uint8x16_t r = vceqq_u8( srcAlphaBlock, a0 );
+ int64x2_t m = vreinterpretq_s64_u8( r );
+ if( m[0] == -1 && m[1] == -1 )
+ return ref;
+
+ // srcRange
+#ifdef __aarch64__
+ uint8_t min = vminvq_u8( srcAlphaBlock );
+ uint8_t max = vmaxvq_u8( srcAlphaBlock );
+ uint8_t srcRange = max - min;
+ multipliers = vqaddq_s16( vshrq_n_s16( vqdmulhq_n_s16( g_alphaRange_NEON, srcRange ), 1 ), vdupq_n_s16( 1 ) );
+ srcMid = min + srcRange / 2;
+ srcMidWide = vdupq_n_s16( srcMid );
+#else
+ uint8x8_t vmin = vpmin_u8( vget_low_u8( srcAlphaBlock ), vget_high_u8( srcAlphaBlock ) );
+ vmin = vpmin_u8( vmin, vmin );
+ vmin = vpmin_u8( vmin, vmin );
+ vmin = vpmin_u8( vmin, vmin );
+ uint8x8_t vmax = vpmax_u8( vget_low_u8( srcAlphaBlock ), vget_high_u8( srcAlphaBlock ) );
+ vmax = vpmax_u8( vmax, vmax );
+ vmax = vpmax_u8( vmax, vmax );
+ vmax = vpmax_u8( vmax, vmax );
+
+ int16x8_t srcRangeWide = vreinterpretq_s16_u16( vsubl_u8( vmax, vmin ) );
+ multipliers = vqaddq_s16( vshrq_n_s16( vqdmulhq_s16( g_alphaRange_NEON, srcRangeWide ), 1 ), vdupq_n_s16( 1 ) );
+ srcMidWide = vsraq_n_s16( vreinterpretq_s16_u16(vmovl_u8(vmin)), srcRangeWide, 1);
+ srcMid = vgetq_lane_s16( srcMidWide, 0 );
+#endif
+ }
+
+ // calculate reconstructed values
+#define EAC_APPLY_16X( m ) m( 0 ) m( 1 ) m( 2 ) m( 3 ) m( 4 ) m( 5 ) m( 6 ) m( 7 ) m( 8 ) m( 9 ) m( 10 ) m( 11 ) m( 12 ) m( 13 ) m( 14 ) m( 15 )
+
+#define EAC_RECONSTRUCT_VALUE( n ) vqmovun_s16( vmlaq_s16( srcMidWide, g_alpha_NEON[n], WidenMultiplier_EAC_NEON<n>( multipliers ) ) ),
+ uint8x8_t recVals[16] = { EAC_APPLY_16X( EAC_RECONSTRUCT_VALUE ) };
+
+ // find selector
+ int err = std::numeric_limits<int>::max();
+ int sel = 0;
+ for( int r = 0; r < 16; r++ )
+ {
+ uint8x8_t recVal = recVals[r];
+
+ int rangeErr = 0;
+#define EAC_ACCUMULATE_ERROR( n ) rangeErr += MinError_EAC_NEON( ErrorProbe_EAC_NEON<n>( recVal, srcAlphaBlock ) );
+ EAC_APPLY_16X( EAC_ACCUMULATE_ERROR )
+
+ if( rangeErr < err )
+ {
+ err = rangeErr;
+ sel = r;
+ if ( err == 0 ) break;
+ }
+ }
+
+ // combine results
+ uint64_t d = ( uint64_t( srcMid ) << 56 ) |
+ ( uint64_t( multipliers[GetMulSel( sel )] ) << 52 ) |
+ ( uint64_t( sel ) << 48);
+
+ // generate indices
+ uint8x8_t recVal = recVals[sel];
+#define EAC_INSERT_INDEX(n) d |= MinErrorIndex_EAC_NEON<n>( recVal, srcAlphaBlock );
+ EAC_APPLY_16X( EAC_INSERT_INDEX )
+
+ return _bswap64( d );
+
+#undef EAC_APPLY_16X
+#undef EAC_INSERT_INDEX
+#undef EAC_ACCUMULATE_ERROR
+#undef EAC_RECONSTRUCT_VALUE
+
+#else
+ {
+ bool solid = true;
+ const uint8_t* ptr = src + 1;
+ const uint8_t ref = *src;
+ for( int i=1; i<16; i++ )
+ {
+ if( ref != *ptr++ )
+ {
+ solid = false;
+ break;
+ }
+ }
+ if( solid )
+ {
+ return ref;
+ }
+ }
+
+ uint8_t min = src[0];
+ uint8_t max = src[0];
+ for( int i=1; i<16; i++ )
+ {
+ if( min > src[i] ) min = src[i];
+ else if( max < src[i] ) max = src[i];
+ }
+ int srcRange = max - min;
+ int srcMid = min + srcRange / 2;
+
+ uint8_t buf[16][16];
+ int err = std::numeric_limits<int>::max();
+ int sel;
+ int selmul;
+ for( int r=0; r<16; r++ )
+ {
+ int mul = ( ( srcRange * g_alphaRange[r] ) >> 16 ) + 1;
+
+ int rangeErr = 0;
+ for( int i=0; i<16; i++ )
+ {
+ const auto srcVal = src[i];
+
+ int idx = 0;
+ const auto modVal = g_alpha[r][0] * mul;
+ const auto recVal = clampu8( srcMid + modVal );
+ int localErr = sq( srcVal - recVal );
+
+ if( localErr != 0 )
+ {
+ for( int j=1; j<8; j++ )
+ {
+ const auto modVal = g_alpha[r][j] * mul;
+ const auto recVal = clampu8( srcMid + modVal );
+ const auto errProbe = sq( srcVal - recVal );
+ if( errProbe < localErr )
+ {
+ localErr = errProbe;
+ idx = j;
+ }
+ }
+ }
+
+ buf[r][i] = idx;
+ rangeErr += localErr;
+ }
+
+ if( rangeErr < err )
+ {
+ err = rangeErr;
+ sel = r;
+ selmul = mul;
+ if( err == 0 ) break;
+ }
+ }
+
+ uint64_t d = ( uint64_t( srcMid ) << 56 ) |
+ ( uint64_t( selmul ) << 52 ) |
+ ( uint64_t( sel ) << 48 );
+
+ int offset = 45;
+ auto ptr = buf[sel];
+ for( int i=0; i<16; i++ )
+ {
+ d |= uint64_t( *ptr++ ) << offset;
+ offset -= 3;
+ }
+
+ return _bswap64( d );
+#endif
+}
+
+
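+// The Compress* entry points below walk the source image one 4x4 block at a time, moving
+// to the next row of blocks every width/4 iterations. The SSE4.1 path loads four rows and
+// transposes them so the block becomes contiguous in `buf`; the scalar path gathers the
+// pixels manually. The *Alpha variants replicate the alpha channel into RGB and reuse the
+// color encoders.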
+void CompressEtc1Alpha( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
+{
+ int w = 0;
+ uint32_t buf[4*4];
+ do
+ {
+#ifdef __SSE4_1__
+ __m128 px0 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 0 ) ) );
+ __m128 px1 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 1 ) ) );
+ __m128 px2 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 2 ) ) );
+ __m128 px3 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 3 ) ) );
+
+ _MM_TRANSPOSE4_PS( px0, px1, px2, px3 );
+
+ __m128i c0 = _mm_castps_si128( px0 );
+ __m128i c1 = _mm_castps_si128( px1 );
+ __m128i c2 = _mm_castps_si128( px2 );
+ __m128i c3 = _mm_castps_si128( px3 );
+
+ __m128i mask = _mm_setr_epi32( 0x03030303, 0x07070707, 0x0b0b0b0b, 0x0f0f0f0f );
+ __m128i p0 = _mm_shuffle_epi8( c0, mask );
+ __m128i p1 = _mm_shuffle_epi8( c1, mask );
+ __m128i p2 = _mm_shuffle_epi8( c2, mask );
+ __m128i p3 = _mm_shuffle_epi8( c3, mask );
+
+ _mm_store_si128( (__m128i*)(buf + 0), p0 );
+ _mm_store_si128( (__m128i*)(buf + 4), p1 );
+ _mm_store_si128( (__m128i*)(buf + 8), p2 );
+ _mm_store_si128( (__m128i*)(buf + 12), p3 );
+
+ src += 4;
+#else
+ auto ptr = buf;
+ for( int x=0; x<4; x++ )
+ {
+ unsigned int a = *src >> 24;
+ *ptr++ = a | ( a << 8 ) | ( a << 16 );
+ src += width;
+ a = *src >> 24;
+ *ptr++ = a | ( a << 8 ) | ( a << 16 );
+ src += width;
+ a = *src >> 24;
+ *ptr++ = a | ( a << 8 ) | ( a << 16 );
+ src += width;
+ a = *src >> 24;
+ *ptr++ = a | ( a << 8 ) | ( a << 16 );
+ src -= width * 3 - 1;
+ }
+#endif
+ if( ++w == width/4 )
+ {
+ src += width * 3;
+ w = 0;
+ }
+ *dst++ = ProcessRGB( (uint8_t*)buf );
+ }
+ while( --blocks );
+}
+
+void CompressEtc2Alpha( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
+{
+ int w = 0;
+ uint32_t buf[4*4];
+ do
+ {
+#ifdef __SSE4_1__
+ __m128 px0 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 0 ) ) );
+ __m128 px1 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 1 ) ) );
+ __m128 px2 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 2 ) ) );
+ __m128 px3 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 3 ) ) );
+
+ _MM_TRANSPOSE4_PS( px0, px1, px2, px3 );
+
+ __m128i c0 = _mm_castps_si128( px0 );
+ __m128i c1 = _mm_castps_si128( px1 );
+ __m128i c2 = _mm_castps_si128( px2 );
+ __m128i c3 = _mm_castps_si128( px3 );
+
+ __m128i mask = _mm_setr_epi32( 0x03030303, 0x07070707, 0x0b0b0b0b, 0x0f0f0f0f );
+ __m128i p0 = _mm_shuffle_epi8( c0, mask );
+ __m128i p1 = _mm_shuffle_epi8( c1, mask );
+ __m128i p2 = _mm_shuffle_epi8( c2, mask );
+ __m128i p3 = _mm_shuffle_epi8( c3, mask );
+
+ _mm_store_si128( (__m128i*)(buf + 0), p0 );
+ _mm_store_si128( (__m128i*)(buf + 4), p1 );
+ _mm_store_si128( (__m128i*)(buf + 8), p2 );
+ _mm_store_si128( (__m128i*)(buf + 12), p3 );
+
+ src += 4;
+#else
+ auto ptr = buf;
+ for( int x=0; x<4; x++ )
+ {
+ unsigned int a = *src >> 24;
+ *ptr++ = a | ( a << 8 ) | ( a << 16 );
+ src += width;
+ a = *src >> 24;
+ *ptr++ = a | ( a << 8 ) | ( a << 16 );
+ src += width;
+ a = *src >> 24;
+ *ptr++ = a | ( a << 8 ) | ( a << 16 );
+ src += width;
+ a = *src >> 24;
+ *ptr++ = a | ( a << 8 ) | ( a << 16 );
+ src -= width * 3 - 1;
+ }
+#endif
+ if( ++w == width/4 )
+ {
+ src += width * 3;
+ w = 0;
+ }
+ *dst++ = ProcessRGB_ETC2( (uint8_t*)buf );
+ }
+ while( --blocks );
+}
+
+#include <chrono>
+#include <thread>
+
+void CompressEtc1Rgb( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
+{
+ int w = 0;
+ uint32_t buf[4*4];
+ do
+ {
+#ifdef __SSE4_1__
+ __m128 px0 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 0 ) ) );
+ __m128 px1 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 1 ) ) );
+ __m128 px2 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 2 ) ) );
+ __m128 px3 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 3 ) ) );
+
+ _MM_TRANSPOSE4_PS( px0, px1, px2, px3 );
+
+ _mm_store_si128( (__m128i*)(buf + 0), _mm_castps_si128( px0 ) );
+ _mm_store_si128( (__m128i*)(buf + 4), _mm_castps_si128( px1 ) );
+ _mm_store_si128( (__m128i*)(buf + 8), _mm_castps_si128( px2 ) );
+ _mm_store_si128( (__m128i*)(buf + 12), _mm_castps_si128( px3 ) );
+
+ src += 4;
+#else
+ auto ptr = buf;
+ for( int x=0; x<4; x++ )
+ {
+ *ptr++ = *src;
+ src += width;
+ *ptr++ = *src;
+ src += width;
+ *ptr++ = *src;
+ src += width;
+ *ptr++ = *src;
+ src -= width * 3 - 1;
+ }
+#endif
+ if( ++w == width/4 )
+ {
+ src += width * 3;
+ w = 0;
+ }
+ *dst++ = ProcessRGB( (uint8_t*)buf );
+ }
+ while( --blocks );
+}
+
+void CompressEtc1RgbDither( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
+{
+ int w = 0;
+ uint32_t buf[4*4];
+ do
+ {
+#ifdef __SSE4_1__
+ __m128 px0 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 0 ) ) );
+ __m128 px1 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 1 ) ) );
+ __m128 px2 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 2 ) ) );
+ __m128 px3 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 3 ) ) );
+
+ _MM_TRANSPOSE4_PS( px0, px1, px2, px3 );
+
+# ifdef __AVX2__
+ DitherAvx2( (uint8_t*)buf, _mm_castps_si128( px0 ), _mm_castps_si128( px1 ), _mm_castps_si128( px2 ), _mm_castps_si128( px3 ) );
+# else
+ _mm_store_si128( (__m128i*)(buf + 0), _mm_castps_si128( px0 ) );
+ _mm_store_si128( (__m128i*)(buf + 4), _mm_castps_si128( px1 ) );
+ _mm_store_si128( (__m128i*)(buf + 8), _mm_castps_si128( px2 ) );
+ _mm_store_si128( (__m128i*)(buf + 12), _mm_castps_si128( px3 ) );
+
+ Dither( (uint8_t*)buf );
+# endif
+
+ src += 4;
+#else
+ auto ptr = buf;
+ for( int x=0; x<4; x++ )
+ {
+ *ptr++ = *src;
+ src += width;
+ *ptr++ = *src;
+ src += width;
+ *ptr++ = *src;
+ src += width;
+ *ptr++ = *src;
+ src -= width * 3 - 1;
+ }
+#endif
+ if( ++w == width/4 )
+ {
+ src += width * 3;
+ w = 0;
+ }
+ *dst++ = ProcessRGB( (uint8_t*)buf );
+ }
+ while( --blocks );
+}
+
+void CompressEtc2Rgb( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
+{
+ int w = 0;
+ uint32_t buf[4*4];
+ do
+ {
+#ifdef __SSE4_1__
+ __m128 px0 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 0 ) ) );
+ __m128 px1 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 1 ) ) );
+ __m128 px2 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 2 ) ) );
+ __m128 px3 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 3 ) ) );
+
+ _MM_TRANSPOSE4_PS( px0, px1, px2, px3 );
+
+ _mm_store_si128( (__m128i*)(buf + 0), _mm_castps_si128( px0 ) );
+ _mm_store_si128( (__m128i*)(buf + 4), _mm_castps_si128( px1 ) );
+ _mm_store_si128( (__m128i*)(buf + 8), _mm_castps_si128( px2 ) );
+ _mm_store_si128( (__m128i*)(buf + 12), _mm_castps_si128( px3 ) );
+
+ src += 4;
+#else
+ auto ptr = buf;
+ for( int x=0; x<4; x++ )
+ {
+ *ptr++ = *src;
+ src += width;
+ *ptr++ = *src;
+ src += width;
+ *ptr++ = *src;
+ src += width;
+ *ptr++ = *src;
+ src -= width * 3 - 1;
+ }
+#endif
+ if( ++w == width/4 )
+ {
+ src += width * 3;
+ w = 0;
+ }
+ *dst++ = ProcessRGB_ETC2( (uint8_t*)buf );
+ }
+ while( --blocks );
+}
+
+void CompressEtc2Rgba( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
+{
+ int w = 0;
+ uint32_t rgba[4*4];
+ uint8_t alpha[4*4];
+ do
+ {
+#ifdef __SSE4_1__
+ __m128 px0 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 0 ) ) );
+ __m128 px1 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 1 ) ) );
+ __m128 px2 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 2 ) ) );
+ __m128 px3 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 3 ) ) );
+
+ _MM_TRANSPOSE4_PS( px0, px1, px2, px3 );
+
+ __m128i c0 = _mm_castps_si128( px0 );
+ __m128i c1 = _mm_castps_si128( px1 );
+ __m128i c2 = _mm_castps_si128( px2 );
+ __m128i c3 = _mm_castps_si128( px3 );
+
+ _mm_store_si128( (__m128i*)(rgba + 0), c0 );
+ _mm_store_si128( (__m128i*)(rgba + 4), c1 );
+ _mm_store_si128( (__m128i*)(rgba + 8), c2 );
+ _mm_store_si128( (__m128i*)(rgba + 12), c3 );
+
+ __m128i mask = _mm_setr_epi32( 0x0f0b0703, -1, -1, -1 );
+
+ __m128i a0 = _mm_shuffle_epi8( c0, mask );
+ __m128i a1 = _mm_shuffle_epi8( c1, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 3, 3, 0, 3 ) ) );
+ __m128i a2 = _mm_shuffle_epi8( c2, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 3, 0, 3, 3 ) ) );
+ __m128i a3 = _mm_shuffle_epi8( c3, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 0, 3, 3, 3 ) ) );
+
+ __m128i s0 = _mm_or_si128( a0, a1 );
+ __m128i s1 = _mm_or_si128( a2, a3 );
+ __m128i s2 = _mm_or_si128( s0, s1 );
+
+ _mm_store_si128( (__m128i*)alpha, s2 );
+
+ src += 4;
+#else
+ auto ptr = rgba;
+ auto ptr8 = alpha;
+ for( int x=0; x<4; x++ )
+ {
+ auto v = *src;
+ *ptr++ = v;
+ *ptr8++ = v >> 24;
+ src += width;
+ v = *src;
+ *ptr++ = v;
+ *ptr8++ = v >> 24;
+ src += width;
+ v = *src;
+ *ptr++ = v;
+ *ptr8++ = v >> 24;
+ src += width;
+ v = *src;
+ *ptr++ = v;
+ *ptr8++ = v >> 24;
+ src -= width * 3 - 1;
+ }
+#endif
+ if( ++w == width/4 )
+ {
+ src += width * 3;
+ w = 0;
+ }
+ *dst++ = ProcessAlpha_ETC2( alpha );
+ *dst++ = ProcessRGB_ETC2( (uint8_t*)rgba );
+ }
+ while( --blocks );
+}
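For reference, the strip compressors above share one calling convention: src points at RGBA8 pixels, width is the image width in pixels (a multiple of 4), and blocks is the number of 4x4 blocks to emit, so passing (width/4)*(height/4) covers a whole image in one call; the ETC2 RGBA variant writes two 64-bit words per block (alpha word, then color word). A minimal sketch of such a driver, with CompressImageEtc2Rgba as a hypothetical helper that is not part of etcpak:

    // Hypothetical driver, not part of the upstream sources.
    // Assumes width and height are multiples of 4 and pixels holds RGBA8 data.
    #include <cstddef>
    #include <cstdint>
    #include <vector>
    #include "ProcessRGB.hpp"

    std::vector<uint64_t> CompressImageEtc2Rgba( const uint32_t* pixels, size_t width, size_t height )
    {
        const uint32_t blocks = uint32_t( ( width / 4 ) * ( height / 4 ) );
        std::vector<uint64_t> out( blocks * 2 );   // 16 bytes per 4x4 block: alpha + color
        CompressEtc2Rgba( pixels, out.data(), blocks, width );
        return out;
    }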
diff --git a/thirdparty/etcpak/ProcessRGB.hpp b/thirdparty/etcpak/ProcessRGB.hpp
new file mode 100644
index 0000000000..c5555a5bb1
--- /dev/null
+++ b/thirdparty/etcpak/ProcessRGB.hpp
@@ -0,0 +1,13 @@
+#ifndef __PROCESSRGB_HPP__
+#define __PROCESSRGB_HPP__
+
+#include <stdint.h>
+
+void CompressEtc1Alpha( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width );
+void CompressEtc2Alpha( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width );
+void CompressEtc1Rgb( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width );
+void CompressEtc1RgbDither( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width );
+void CompressEtc2Rgb( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width );
+void CompressEtc2Rgba( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width );
+
+#endif
diff --git a/thirdparty/etcpak/Semaphore.hpp b/thirdparty/etcpak/Semaphore.hpp
new file mode 100644
index 0000000000..9e42dbb9e0
--- /dev/null
+++ b/thirdparty/etcpak/Semaphore.hpp
@@ -0,0 +1,46 @@
+#ifndef __DARKRL__SEMAPHORE_HPP__
+#define __DARKRL__SEMAPHORE_HPP__
+
+#include <condition_variable>
+#include <mutex>
+
+class Semaphore
+{
+public:
+ Semaphore( int count ) : m_count( count ) {}
+
+ void lock()
+ {
+ std::unique_lock<std::mutex> lock( m_mutex );
+ m_cv.wait( lock, [this](){ return m_count != 0; } );
+ m_count--;
+ }
+
+ void unlock()
+ {
+ std::lock_guard<std::mutex> lock( m_mutex );
+ m_count++;
+ m_cv.notify_one();
+ }
+
+ bool try_lock()
+ {
+ std::lock_guard<std::mutex> lock( m_mutex );
+ if( m_count == 0 )
+ {
+ return false;
+ }
+ else
+ {
+ m_count--;
+ return true;
+ }
+ }
+
+private:
+ std::mutex m_mutex;
+ std::condition_variable m_cv;
+ unsigned int m_count;
+};
+
+#endif
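Since Semaphore exposes lock(), unlock() and try_lock(), it satisfies the standard Lockable requirements, so the usual RAII guards can scope the acquisition of a slot. A short sketch, with UseSlot as a hypothetical caller that is not part of etcpak:

    // Hypothetical usage, not part of the upstream sources: hold one of the
    // N slots for the duration of a scope.
    #include <mutex>
    #include "Semaphore.hpp"

    void UseSlot( Semaphore& sem )
    {
        std::lock_guard<Semaphore> slot( sem );   // blocks until a slot is free
        // ... bounded work while one of the N slots is held ...
    }                                             // slot released when the guard is destroyed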
diff --git a/thirdparty/etcpak/System.cpp b/thirdparty/etcpak/System.cpp
new file mode 100644
index 0000000000..a09b289cb2
--- /dev/null
+++ b/thirdparty/etcpak/System.cpp
@@ -0,0 +1,68 @@
+#include <algorithm>
+#ifdef _WIN32
+# include <windows.h>
+#else
+# include <pthread.h>
+# include <unistd.h>
+#endif
+
+#include "System.hpp"
+
+unsigned int System::CPUCores()
+{
+ static unsigned int cores = 0;
+ if( cores == 0 )
+ {
+ int tmp;
+#ifdef _WIN32
+ SYSTEM_INFO info;
+ GetSystemInfo( &info );
+ tmp = (int)info.dwNumberOfProcessors;
+#else
+# ifndef _SC_NPROCESSORS_ONLN
+# ifdef _SC_NPROC_ONLN
+# define _SC_NPROCESSORS_ONLN _SC_NPROC_ONLN
+# elif defined _SC_CRAY_NCPU
+# define _SC_NPROCESSORS_ONLN _SC_CRAY_NCPU
+# endif
+# endif
+ tmp = (int)(long)sysconf( _SC_NPROCESSORS_ONLN );
+#endif
+ cores = (unsigned int)std::max( tmp, 1 );
+ }
+ return cores;
+}
+
+void System::SetThreadName( std::thread& thread, const char* name )
+{
+#ifdef _MSC_VER
+ const DWORD MS_VC_EXCEPTION=0x406D1388;
+
+# pragma pack( push, 8 )
+ struct THREADNAME_INFO
+ {
+ DWORD dwType;
+ LPCSTR szName;
+ DWORD dwThreadID;
+ DWORD dwFlags;
+ };
+# pragma pack(pop)
+
+ DWORD ThreadId = GetThreadId( static_cast<HANDLE>( thread.native_handle() ) );
+ THREADNAME_INFO info;
+ info.dwType = 0x1000;
+ info.szName = name;
+ info.dwThreadID = ThreadId;
+ info.dwFlags = 0;
+
+ __try
+ {
+ RaiseException( MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(ULONG_PTR), (ULONG_PTR*)&info );
+ }
+ __except(EXCEPTION_EXECUTE_HANDLER)
+ {
+ }
+#elif !defined(__APPLE__)
+ pthread_setname_np( thread.native_handle(), name );
+#endif
+}
diff --git a/thirdparty/etcpak/System.hpp b/thirdparty/etcpak/System.hpp
new file mode 100644
index 0000000000..1a09bb15e1
--- /dev/null
+++ b/thirdparty/etcpak/System.hpp
@@ -0,0 +1,15 @@
+#ifndef __DARKRL__SYSTEM_HPP__
+#define __DARKRL__SYSTEM_HPP__
+
+#include <thread>
+
+class System
+{
+public:
+ System() = delete;
+
+ static unsigned int CPUCores();
+ static void SetThreadName( std::thread& thread, const char* name );
+};
+
+#endif
diff --git a/thirdparty/etcpak/Tables.cpp b/thirdparty/etcpak/Tables.cpp
new file mode 100644
index 0000000000..5c7fd9cf61
--- /dev/null
+++ b/thirdparty/etcpak/Tables.cpp
@@ -0,0 +1,221 @@
+#include "Tables.hpp"
+
+const int32_t g_table[8][4] = {
+ { 2, 8, -2, -8 },
+ { 5, 17, -5, -17 },
+ { 9, 29, -9, -29 },
+ { 13, 42, -13, -42 },
+ { 18, 60, -18, -60 },
+ { 24, 80, -24, -80 },
+ { 33, 106, -33, -106 },
+ { 47, 183, -47, -183 }
+};
+
+const int64_t g_table256[8][4] = {
+ { 2*256, 8*256, -2*256, -8*256 },
+ { 5*256, 17*256, -5*256, -17*256 },
+ { 9*256, 29*256, -9*256, -29*256 },
+ { 13*256, 42*256, -13*256, -42*256 },
+ { 18*256, 60*256, -18*256, -60*256 },
+ { 24*256, 80*256, -24*256, -80*256 },
+ { 33*256, 106*256, -33*256, -106*256 },
+ { 47*256, 183*256, -47*256, -183*256 }
+};
+
+const uint32_t g_id[4][16] = {
+ { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 3, 3, 2, 2, 3, 3, 2, 2, 3, 3, 2, 2, 3, 3, 2, 2 },
+ { 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4 },
+ { 7, 7, 6, 6, 7, 7, 6, 6, 7, 7, 6, 6, 7, 7, 6, 6 }
+};
+
+const uint32_t g_avg2[16] = {
+ 0x00,
+ 0x11,
+ 0x22,
+ 0x33,
+ 0x44,
+ 0x55,
+ 0x66,
+ 0x77,
+ 0x88,
+ 0x99,
+ 0xAA,
+ 0xBB,
+ 0xCC,
+ 0xDD,
+ 0xEE,
+ 0xFF
+};
+
+const uint32_t g_flags[64] = {
+ 0x80800402, 0x80800402, 0x80800402, 0x80800402,
+ 0x80800402, 0x80800402, 0x80800402, 0x8080E002,
+ 0x80800402, 0x80800402, 0x8080E002, 0x8080E002,
+ 0x80800402, 0x8080E002, 0x8080E002, 0x8080E002,
+ 0x80000402, 0x80000402, 0x80000402, 0x80000402,
+ 0x80000402, 0x80000402, 0x80000402, 0x8000E002,
+ 0x80000402, 0x80000402, 0x8000E002, 0x8000E002,
+ 0x80000402, 0x8000E002, 0x8000E002, 0x8000E002,
+ 0x00800402, 0x00800402, 0x00800402, 0x00800402,
+ 0x00800402, 0x00800402, 0x00800402, 0x0080E002,
+ 0x00800402, 0x00800402, 0x0080E002, 0x0080E002,
+ 0x00800402, 0x0080E002, 0x0080E002, 0x0080E002,
+ 0x00000402, 0x00000402, 0x00000402, 0x00000402,
+ 0x00000402, 0x00000402, 0x00000402, 0x0000E002,
+ 0x00000402, 0x00000402, 0x0000E002, 0x0000E002,
+ 0x00000402, 0x0000E002, 0x0000E002, 0x0000E002
+};
+
+const int32_t g_alpha[16][8] = {
+ { -3, -6, -9, -15, 2, 5, 8, 14 },
+ { -3, -7, -10, -13, 2, 6, 9, 12 },
+ { -2, -5, -8, -13, 1, 4, 7, 12 },
+ { -2, -4, -6, -13, 1, 3, 5, 12 },
+ { -3, -6, -8, -12, 2, 5, 7, 11 },
+ { -3, -7, -9, -11, 2, 6, 8, 10 },
+ { -4, -7, -8, -11, 3, 6, 7, 10 },
+ { -3, -5, -8, -11, 2, 4, 7, 10 },
+ { -2, -6, -8, -10, 1, 5, 7, 9 },
+ { -2, -5, -8, -10, 1, 4, 7, 9 },
+ { -2, -4, -8, -10, 1, 3, 7, 9 },
+ { -2, -5, -7, -10, 1, 4, 6, 9 },
+ { -3, -4, -7, -10, 2, 3, 6, 9 },
+ { -1, -2, -3, -10, 0, 1, 2, 9 },
+ { -4, -6, -8, -9, 3, 5, 7, 8 },
+ { -3, -5, -7, -9, 2, 4, 6, 8 }
+};
+
+const int32_t g_alphaRange[16] = {
+ 0x100FF / ( 1 + g_alpha[0][7] - g_alpha[0][3] ),
+ 0x100FF / ( 1 + g_alpha[1][7] - g_alpha[1][3] ),
+ 0x100FF / ( 1 + g_alpha[2][7] - g_alpha[2][3] ),
+ 0x100FF / ( 1 + g_alpha[3][7] - g_alpha[3][3] ),
+ 0x100FF / ( 1 + g_alpha[4][7] - g_alpha[4][3] ),
+ 0x100FF / ( 1 + g_alpha[5][7] - g_alpha[5][3] ),
+ 0x100FF / ( 1 + g_alpha[6][7] - g_alpha[6][3] ),
+ 0x100FF / ( 1 + g_alpha[7][7] - g_alpha[7][3] ),
+ 0x100FF / ( 1 + g_alpha[8][7] - g_alpha[8][3] ),
+ 0x100FF / ( 1 + g_alpha[9][7] - g_alpha[9][3] ),
+ 0x100FF / ( 1 + g_alpha[10][7] - g_alpha[10][3] ),
+ 0x100FF / ( 1 + g_alpha[11][7] - g_alpha[11][3] ),
+ 0x100FF / ( 1 + g_alpha[12][7] - g_alpha[12][3] ),
+ 0x100FF / ( 1 + g_alpha[13][7] - g_alpha[13][3] ),
+ 0x100FF / ( 1 + g_alpha[14][7] - g_alpha[14][3] ),
+ 0x100FF / ( 1 + g_alpha[15][7] - g_alpha[15][3] ),
+};
+
+#ifdef __SSE4_1__
+const __m128i g_table_SIMD[2] =
+{
+ _mm_setr_epi16( 2, 5, 9, 13, 18, 24, 33, 47),
+ _mm_setr_epi16( 8, 17, 29, 42, 60, 80, 106, 183)
+};
+const __m128i g_table128_SIMD[2] =
+{
+ _mm_setr_epi16( 2*128, 5*128, 9*128, 13*128, 18*128, 24*128, 33*128, 47*128),
+ _mm_setr_epi16( 8*128, 17*128, 29*128, 42*128, 60*128, 80*128, 106*128, 183*128)
+};
+const __m128i g_table256_SIMD[4] =
+{
+ _mm_setr_epi32( 2*256, 5*256, 9*256, 13*256),
+ _mm_setr_epi32( 8*256, 17*256, 29*256, 42*256),
+ _mm_setr_epi32( 18*256, 24*256, 33*256, 47*256),
+ _mm_setr_epi32( 60*256, 80*256, 106*256, 183*256)
+};
+
+const __m128i g_alpha_SIMD[16] = {
+ _mm_setr_epi16( g_alpha[ 0][0], g_alpha[ 0][1], g_alpha[ 0][2], g_alpha[ 0][3], g_alpha[ 0][4], g_alpha[ 0][5], g_alpha[ 0][6], g_alpha[ 0][7] ),
+ _mm_setr_epi16( g_alpha[ 1][0], g_alpha[ 1][1], g_alpha[ 1][2], g_alpha[ 1][3], g_alpha[ 1][4], g_alpha[ 1][5], g_alpha[ 1][6], g_alpha[ 1][7] ),
+ _mm_setr_epi16( g_alpha[ 2][0], g_alpha[ 2][1], g_alpha[ 2][2], g_alpha[ 2][3], g_alpha[ 2][4], g_alpha[ 2][5], g_alpha[ 2][6], g_alpha[ 2][7] ),
+ _mm_setr_epi16( g_alpha[ 3][0], g_alpha[ 3][1], g_alpha[ 3][2], g_alpha[ 3][3], g_alpha[ 3][4], g_alpha[ 3][5], g_alpha[ 3][6], g_alpha[ 3][7] ),
+ _mm_setr_epi16( g_alpha[ 4][0], g_alpha[ 4][1], g_alpha[ 4][2], g_alpha[ 4][3], g_alpha[ 4][4], g_alpha[ 4][5], g_alpha[ 4][6], g_alpha[ 4][7] ),
+ _mm_setr_epi16( g_alpha[ 5][0], g_alpha[ 5][1], g_alpha[ 5][2], g_alpha[ 5][3], g_alpha[ 5][4], g_alpha[ 5][5], g_alpha[ 5][6], g_alpha[ 5][7] ),
+ _mm_setr_epi16( g_alpha[ 6][0], g_alpha[ 6][1], g_alpha[ 6][2], g_alpha[ 6][3], g_alpha[ 6][4], g_alpha[ 6][5], g_alpha[ 6][6], g_alpha[ 6][7] ),
+ _mm_setr_epi16( g_alpha[ 7][0], g_alpha[ 7][1], g_alpha[ 7][2], g_alpha[ 7][3], g_alpha[ 7][4], g_alpha[ 7][5], g_alpha[ 7][6], g_alpha[ 7][7] ),
+ _mm_setr_epi16( g_alpha[ 8][0], g_alpha[ 8][1], g_alpha[ 8][2], g_alpha[ 8][3], g_alpha[ 8][4], g_alpha[ 8][5], g_alpha[ 8][6], g_alpha[ 8][7] ),
+ _mm_setr_epi16( g_alpha[ 9][0], g_alpha[ 9][1], g_alpha[ 9][2], g_alpha[ 9][3], g_alpha[ 9][4], g_alpha[ 9][5], g_alpha[ 9][6], g_alpha[ 9][7] ),
+ _mm_setr_epi16( g_alpha[10][0], g_alpha[10][1], g_alpha[10][2], g_alpha[10][3], g_alpha[10][4], g_alpha[10][5], g_alpha[10][6], g_alpha[10][7] ),
+ _mm_setr_epi16( g_alpha[11][0], g_alpha[11][1], g_alpha[11][2], g_alpha[11][3], g_alpha[11][4], g_alpha[11][5], g_alpha[11][6], g_alpha[11][7] ),
+ _mm_setr_epi16( g_alpha[12][0], g_alpha[12][1], g_alpha[12][2], g_alpha[12][3], g_alpha[12][4], g_alpha[12][5], g_alpha[12][6], g_alpha[12][7] ),
+ _mm_setr_epi16( g_alpha[13][0], g_alpha[13][1], g_alpha[13][2], g_alpha[13][3], g_alpha[13][4], g_alpha[13][5], g_alpha[13][6], g_alpha[13][7] ),
+ _mm_setr_epi16( g_alpha[14][0], g_alpha[14][1], g_alpha[14][2], g_alpha[14][3], g_alpha[14][4], g_alpha[14][5], g_alpha[14][6], g_alpha[14][7] ),
+ _mm_setr_epi16( g_alpha[15][0], g_alpha[15][1], g_alpha[15][2], g_alpha[15][3], g_alpha[15][4], g_alpha[15][5], g_alpha[15][6], g_alpha[15][7] ),
+};
+
+const __m128i g_alphaRange_SIMD = _mm_setr_epi16(
+ g_alphaRange[0],
+ g_alphaRange[1],
+ g_alphaRange[4],
+ g_alphaRange[5],
+ g_alphaRange[8],
+ g_alphaRange[14],
+ 0,
+ 0 );
+#endif
+
+#ifdef __AVX2__
+const __m256i g_alpha_AVX[8] = {
+ _mm256_setr_epi16( g_alpha[ 0][0], g_alpha[ 1][0], g_alpha[ 2][0], g_alpha[ 3][0], g_alpha[ 4][0], g_alpha[ 5][0], g_alpha[ 6][0], g_alpha[ 7][0], g_alpha[ 8][0], g_alpha[ 9][0], g_alpha[10][0], g_alpha[11][0], g_alpha[12][0], g_alpha[13][0], g_alpha[14][0], g_alpha[15][0] ),
+ _mm256_setr_epi16( g_alpha[ 0][1], g_alpha[ 1][1], g_alpha[ 2][1], g_alpha[ 3][1], g_alpha[ 4][1], g_alpha[ 5][1], g_alpha[ 6][1], g_alpha[ 7][1], g_alpha[ 8][1], g_alpha[ 9][1], g_alpha[10][1], g_alpha[11][1], g_alpha[12][1], g_alpha[13][1], g_alpha[14][1], g_alpha[15][1] ),
+ _mm256_setr_epi16( g_alpha[ 0][2], g_alpha[ 1][2], g_alpha[ 2][2], g_alpha[ 3][2], g_alpha[ 4][2], g_alpha[ 5][2], g_alpha[ 6][2], g_alpha[ 7][2], g_alpha[ 8][2], g_alpha[ 9][2], g_alpha[10][2], g_alpha[11][2], g_alpha[12][2], g_alpha[13][2], g_alpha[14][2], g_alpha[15][2] ),
+ _mm256_setr_epi16( g_alpha[ 0][3], g_alpha[ 1][3], g_alpha[ 2][3], g_alpha[ 3][3], g_alpha[ 4][3], g_alpha[ 5][3], g_alpha[ 6][3], g_alpha[ 7][3], g_alpha[ 8][3], g_alpha[ 9][3], g_alpha[10][3], g_alpha[11][3], g_alpha[12][3], g_alpha[13][3], g_alpha[14][3], g_alpha[15][3] ),
+ _mm256_setr_epi16( g_alpha[ 0][4], g_alpha[ 1][4], g_alpha[ 2][4], g_alpha[ 3][4], g_alpha[ 4][4], g_alpha[ 5][4], g_alpha[ 6][4], g_alpha[ 7][4], g_alpha[ 8][4], g_alpha[ 9][4], g_alpha[10][4], g_alpha[11][4], g_alpha[12][4], g_alpha[13][4], g_alpha[14][4], g_alpha[15][4] ),
+ _mm256_setr_epi16( g_alpha[ 0][5], g_alpha[ 1][5], g_alpha[ 2][5], g_alpha[ 3][5], g_alpha[ 4][5], g_alpha[ 5][5], g_alpha[ 6][5], g_alpha[ 7][5], g_alpha[ 8][5], g_alpha[ 9][5], g_alpha[10][5], g_alpha[11][5], g_alpha[12][5], g_alpha[13][5], g_alpha[14][5], g_alpha[15][5] ),
+ _mm256_setr_epi16( g_alpha[ 0][6], g_alpha[ 1][6], g_alpha[ 2][6], g_alpha[ 3][6], g_alpha[ 4][6], g_alpha[ 5][6], g_alpha[ 6][6], g_alpha[ 7][6], g_alpha[ 8][6], g_alpha[ 9][6], g_alpha[10][6], g_alpha[11][6], g_alpha[12][6], g_alpha[13][6], g_alpha[14][6], g_alpha[15][6] ),
+ _mm256_setr_epi16( g_alpha[ 0][7], g_alpha[ 1][7], g_alpha[ 2][7], g_alpha[ 3][7], g_alpha[ 4][7], g_alpha[ 5][7], g_alpha[ 6][7], g_alpha[ 7][7], g_alpha[ 8][7], g_alpha[ 9][7], g_alpha[10][7], g_alpha[11][7], g_alpha[12][7], g_alpha[13][7], g_alpha[14][7], g_alpha[15][7] ),
+};
+
+const __m256i g_alphaRange_AVX = _mm256_setr_epi16(
+ g_alphaRange[ 0], g_alphaRange[ 1], g_alphaRange[ 2], g_alphaRange[ 3], g_alphaRange[ 4], g_alphaRange[ 5], g_alphaRange[ 6], g_alphaRange[ 7],
+ g_alphaRange[ 8], g_alphaRange[ 9], g_alphaRange[10], g_alphaRange[11], g_alphaRange[12], g_alphaRange[13], g_alphaRange[14], g_alphaRange[15]
+);
+#endif
+
+#ifdef __ARM_NEON
+const int16x8_t g_table128_NEON[2] =
+{
+ { 2*128, 5*128, 9*128, 13*128, 18*128, 24*128, 33*128, 47*128 },
+ { 8*128, 17*128, 29*128, 42*128, 60*128, 80*128, 106*128, 183*128 }
+};
+
+const int32x4_t g_table256_NEON[4] =
+{
+ { 2*256, 5*256, 9*256, 13*256 },
+ { 8*256, 17*256, 29*256, 42*256 },
+ { 18*256, 24*256, 33*256, 47*256 },
+ { 60*256, 80*256, 106*256, 183*256 }
+};
+
+const int16x8_t g_alpha_NEON[16] =
+{
+ { -3, -6, -9, -15, 2, 5, 8, 14 },
+ { -3, -7, -10, -13, 2, 6, 9, 12 },
+ { -2, -5, -8, -13, 1, 4, 7, 12 },
+ { -2, -4, -6, -13, 1, 3, 5, 12 },
+ { -3, -6, -8, -12, 2, 5, 7, 11 },
+ { -3, -7, -9, -11, 2, 6, 8, 10 },
+ { -4, -7, -8, -11, 3, 6, 7, 10 },
+ { -3, -5, -8, -11, 2, 4, 7, 10 },
+ { -2, -6, -8, -10, 1, 5, 7, 9 },
+ { -2, -5, -8, -10, 1, 4, 7, 9 },
+ { -2, -4, -8, -10, 1, 3, 7, 9 },
+ { -2, -5, -7, -10, 1, 4, 6, 9 },
+ { -3, -4, -7, -10, 2, 3, 6, 9 },
+ { -1, -2, -3, -10, 0, 1, 2, 9 },
+ { -4, -6, -8, -9, 3, 5, 7, 8 },
+ { -3, -5, -7, -9, 2, 4, 6, 8 }
+};
+
+const int16x8_t g_alphaRange_NEON =
+{
+ (int16_t)g_alphaRange[0],
+ (int16_t)g_alphaRange[1],
+ (int16_t)g_alphaRange[4],
+ (int16_t)g_alphaRange[5],
+ (int16_t)g_alphaRange[8],
+ (int16_t)g_alphaRange[14],
+ 0,
+ 0
+};
+#endif
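g_table above is the standard ETC1/ETC2 intensity modifier table: each encoded sub-block stores a base color, a 3-bit table index and a 2-bit per-pixel index, and a decoded channel is the base value plus the selected signed modifier, clamped to 0..255. A hedged sketch of that relationship, with DecodeChannel as a hypothetical helper that is not part of etcpak:

    // Hypothetical decode helper, not part of the upstream sources; shown only
    // to illustrate what the g_table entries mean.
    #include <algorithm>
    #include <stdint.h>
    #include "Tables.hpp"

    static inline uint8_t DecodeChannel( int base, int tableIdx, int pixelIdx )
    {
        // tableIdx selects one of the 8 rows, pixelIdx one of its 4 modifiers.
        return (uint8_t)std::min( 255, std::max( 0, base + (int)g_table[tableIdx][pixelIdx] ) );
    }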
diff --git a/thirdparty/etcpak/Tables.hpp b/thirdparty/etcpak/Tables.hpp
new file mode 100644
index 0000000000..69d7e8aa07
--- /dev/null
+++ b/thirdparty/etcpak/Tables.hpp
@@ -0,0 +1,49 @@
+#ifndef __TABLES_HPP__
+#define __TABLES_HPP__
+
+#include <stdint.h>
+
+#ifdef __AVX2__
+# include <immintrin.h>
+#endif
+#ifdef __SSE4_1__
+# include <smmintrin.h>
+#endif
+#ifdef __ARM_NEON
+# include <arm_neon.h>
+#endif
+
+extern const int32_t g_table[8][4];
+extern const int64_t g_table256[8][4];
+
+extern const uint32_t g_id[4][16];
+
+extern const uint32_t g_avg2[16];
+
+extern const uint32_t g_flags[64];
+
+extern const int32_t g_alpha[16][8];
+extern const int32_t g_alphaRange[16];
+
+#ifdef __SSE4_1__
+extern const __m128i g_table_SIMD[2];
+extern const __m128i g_table128_SIMD[2];
+extern const __m128i g_table256_SIMD[4];
+
+extern const __m128i g_alpha_SIMD[16];
+extern const __m128i g_alphaRange_SIMD;
+#endif
+
+#ifdef __AVX2__
+extern const __m256i g_alpha_AVX[8];
+extern const __m256i g_alphaRange_AVX;
+#endif
+
+#ifdef __ARM_NEON
+extern const int16x8_t g_table128_NEON[2];
+extern const int32x4_t g_table256_NEON[4];
+extern const int16x8_t g_alpha_NEON[16];
+extern const int16x8_t g_alphaRange_NEON;
+#endif
+
+#endif
diff --git a/thirdparty/etcpak/TaskDispatch.cpp b/thirdparty/etcpak/TaskDispatch.cpp
new file mode 100644
index 0000000000..7287da4de2
--- /dev/null
+++ b/thirdparty/etcpak/TaskDispatch.cpp
@@ -0,0 +1,115 @@
+#include <assert.h>
+#include <stdio.h>
+
+#include "Debug.hpp"
+#include "System.hpp"
+#include "TaskDispatch.hpp"
+
+static TaskDispatch* s_instance = nullptr;
+
+TaskDispatch::TaskDispatch( size_t workers )
+ : m_exit( false )
+ , m_jobs( 0 )
+{
+ assert( !s_instance );
+ s_instance = this;
+
+ assert( workers >= 1 );
+ workers--;
+
+ m_workers.reserve( workers );
+ for( size_t i=0; i<workers; i++ )
+ {
+ char tmp[16];
+ sprintf( tmp, "Worker %zu", i );
+#ifdef __APPLE__
+ auto worker = std::thread( [this, tmp]{
+ pthread_setname_np( tmp );
+ Worker();
+ } );
+#else
+ auto worker = std::thread( [this]{ Worker(); } );
+#endif
+ System::SetThreadName( worker, tmp );
+ m_workers.emplace_back( std::move( worker ) );
+ }
+
+ DBGPRINT( "Task dispatcher with " << m_workers.size() + 1 << " workers" );
+}
+
+TaskDispatch::~TaskDispatch()
+{
+ m_exit = true;
+ m_queueLock.lock();
+ m_cvWork.notify_all();
+ m_queueLock.unlock();
+
+ for( auto& worker : m_workers )
+ {
+ worker.join();
+ }
+
+ assert( s_instance );
+ s_instance = nullptr;
+}
+
+void TaskDispatch::Queue( const std::function<void(void)>& f )
+{
+ std::unique_lock<std::mutex> lock( s_instance->m_queueLock );
+ s_instance->m_queue.emplace_back( f );
+ const auto size = s_instance->m_queue.size();
+ lock.unlock();
+ if( size > 1 )
+ {
+ s_instance->m_cvWork.notify_one();
+ }
+}
+
+void TaskDispatch::Queue( std::function<void(void)>&& f )
+{
+ std::unique_lock<std::mutex> lock( s_instance->m_queueLock );
+ s_instance->m_queue.emplace_back( std::move( f ) );
+ const auto size = s_instance->m_queue.size();
+ lock.unlock();
+ if( size > 1 )
+ {
+ s_instance->m_cvWork.notify_one();
+ }
+}
+
+void TaskDispatch::Sync()
+{
+ std::unique_lock<std::mutex> lock( s_instance->m_queueLock );
+ while( !s_instance->m_queue.empty() )
+ {
+ auto f = s_instance->m_queue.back();
+ s_instance->m_queue.pop_back();
+ lock.unlock();
+ f();
+ lock.lock();
+ }
+ s_instance->m_cvJobs.wait( lock, []{ return s_instance->m_jobs == 0; } );
+}
+
+void TaskDispatch::Worker()
+{
+ for(;;)
+ {
+ std::unique_lock<std::mutex> lock( m_queueLock );
+ m_cvWork.wait( lock, [this]{ return !m_queue.empty() || m_exit; } );
+ if( m_exit ) return;
+ auto f = m_queue.back();
+ m_queue.pop_back();
+ m_jobs++;
+ lock.unlock();
+ f();
+ lock.lock();
+ m_jobs--;
+ bool notify = m_jobs == 0 && m_queue.empty();
+ lock.unlock();
+ if( notify )
+ {
+ m_cvJobs.notify_all();
+ }
+ }
+}
diff --git a/thirdparty/etcpak/TaskDispatch.hpp b/thirdparty/etcpak/TaskDispatch.hpp
new file mode 100644
index 0000000000..b513de4c0c
--- /dev/null
+++ b/thirdparty/etcpak/TaskDispatch.hpp
@@ -0,0 +1,34 @@
+#ifndef __DARKRL__TASKDISPATCH_HPP__
+#define __DARKRL__TASKDISPATCH_HPP__
+
+#include <atomic>
+#include <condition_variable>
+#include <functional>
+#include <mutex>
+#include <thread>
+#include <vector>
+
+class TaskDispatch
+{
+public:
+ TaskDispatch( size_t workers );
+ ~TaskDispatch();
+
+ static void Queue( const std::function<void(void)>& f );
+ static void Queue( std::function<void(void)>&& f );
+
+ static void Sync();
+
+private:
+ void Worker();
+
+ std::vector<std::function<void(void)>> m_queue;
+ std::mutex m_queueLock;
+ std::condition_variable m_cvWork, m_cvJobs;
+ std::atomic<bool> m_exit;
+ size_t m_jobs;
+
+ std::vector<std::thread> m_workers;
+};
+
+#endif
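TaskDispatch is a process-wide singleton: it is constructed once with a worker count (typically System::CPUCores()), fed work through the static Queue() calls, and Sync() both drains the queue on the calling thread and waits for the workers to finish. A minimal sketch, where CompressAllLevels and the lambda body are hypothetical and not part of etcpak:

    // Hypothetical usage, not part of the upstream sources.
    #include "System.hpp"
    #include "TaskDispatch.hpp"

    void CompressAllLevels( int levels )
    {
        TaskDispatch dispatch( System::CPUCores() );
        for( int i = 0; i < levels; i++ )
        {
            TaskDispatch::Queue( [i]{ /* compress mip level i */ } );
        }
        TaskDispatch::Sync();   // returns once every queued job has completed
    }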
diff --git a/thirdparty/etcpak/Timing.cpp b/thirdparty/etcpak/Timing.cpp
new file mode 100644
index 0000000000..2af851f9a9
--- /dev/null
+++ b/thirdparty/etcpak/Timing.cpp
@@ -0,0 +1,8 @@
+#include <chrono>
+
+#include "Timing.hpp"
+
+uint64_t GetTime()
+{
+ return std::chrono::time_point_cast<std::chrono::microseconds>( std::chrono::high_resolution_clock::now() ).time_since_epoch().count();
+}
diff --git a/thirdparty/etcpak/Timing.hpp b/thirdparty/etcpak/Timing.hpp
new file mode 100644
index 0000000000..3767e20f24
--- /dev/null
+++ b/thirdparty/etcpak/Timing.hpp
@@ -0,0 +1,8 @@
+#ifndef __DARKRL__TIMING_HPP__
+#define __DARKRL__TIMING_HPP__
+
+#include <stdint.h>
+
+uint64_t GetTime();
+
+#endif
diff --git a/thirdparty/etcpak/Vector.hpp b/thirdparty/etcpak/Vector.hpp
new file mode 100644
index 0000000000..3370a88aea
--- /dev/null
+++ b/thirdparty/etcpak/Vector.hpp
@@ -0,0 +1,222 @@
+#ifndef __DARKRL__VECTOR_HPP__
+#define __DARKRL__VECTOR_HPP__
+
+#include <assert.h>
+#include <algorithm>
+#include <math.h>
+#include <stdint.h>
+
+#include "Math.hpp"
+
+template<class T>
+struct Vector2
+{
+ Vector2() : x( 0 ), y( 0 ) {}
+ Vector2( T v ) : x( v ), y( v ) {}
+ Vector2( T _x, T _y ) : x( _x ), y( _y ) {}
+
+ bool operator==( const Vector2<T>& rhs ) const { return x == rhs.x && y == rhs.y; }
+ bool operator!=( const Vector2<T>& rhs ) const { return !( *this == rhs ); }
+
+ Vector2<T>& operator+=( const Vector2<T>& rhs )
+ {
+ x += rhs.x;
+ y += rhs.y;
+ return *this;
+ }
+ Vector2<T>& operator-=( const Vector2<T>& rhs )
+ {
+ x -= rhs.x;
+ y -= rhs.y;
+ return *this;
+ }
+ Vector2<T>& operator*=( const Vector2<T>& rhs )
+ {
+ x *= rhs.x;
+ y *= rhs.y;
+ return *this;
+ }
+
+ T x, y;
+};
+
+template<class T>
+Vector2<T> operator+( const Vector2<T>& lhs, const Vector2<T>& rhs )
+{
+ return Vector2<T>( lhs.x + rhs.x, lhs.y + rhs.y );
+}
+
+template<class T>
+Vector2<T> operator-( const Vector2<T>& lhs, const Vector2<T>& rhs )
+{
+ return Vector2<T>( lhs.x - rhs.x, lhs.y - rhs.y );
+}
+
+template<class T>
+Vector2<T> operator*( const Vector2<T>& lhs, const float& rhs )
+{
+ return Vector2<T>( lhs.x * rhs, lhs.y * rhs );
+}
+
+template<class T>
+Vector2<T> operator/( const Vector2<T>& lhs, const T& rhs )
+{
+ return Vector2<T>( lhs.x / rhs, lhs.y / rhs );
+}
+
+
+typedef Vector2<int32_t> v2i;
+typedef Vector2<float> v2f;
+
+
+template<class T>
+struct Vector3
+{
+ Vector3() : x( 0 ), y( 0 ), z( 0 ) {}
+ Vector3( T v ) : x( v ), y( v ), z( v ) {}
+ Vector3( T _x, T _y, T _z ) : x( _x ), y( _y ), z( _z ) {}
+ template<class Y>
+ Vector3( const Vector3<Y>& v ) : x( T( v.x ) ), y( T( v.y ) ), z( T( v.z ) ) {}
+
+ T Luminance() const { return T( x * 0.3f + y * 0.59f + z * 0.11f ); }
+ void Clamp()
+ {
+ x = std::min( T(1), std::max( T(0), x ) );
+ y = std::min( T(1), std::max( T(0), y ) );
+ z = std::min( T(1), std::max( T(0), z ) );
+ }
+
+ bool operator==( const Vector3<T>& rhs ) const { return x == rhs.x && y == rhs.y && z == rhs.z; }
+ bool operator!=( const Vector3<T>& rhs ) const { return !( *this == rhs ); }
+
+ T& operator[]( unsigned int idx ) { assert( idx < 3 ); return ((T*)this)[idx]; }
+ const T& operator[]( unsigned int idx ) const { assert( idx < 3 ); return ((T*)this)[idx]; }
+
+ Vector3<T> operator+=( const Vector3<T>& rhs )
+ {
+ x += rhs.x;
+ y += rhs.y;
+ z += rhs.z;
+ return *this;
+ }
+
+ Vector3<T> operator*=( const Vector3<T>& rhs )
+ {
+ x *= rhs.x;
+ y *= rhs.y;
+ z *= rhs.z;
+ return *this;
+ }
+
+ Vector3<T> operator*=( const float& rhs )
+ {
+ x *= rhs;
+ y *= rhs;
+ z *= rhs;
+ return *this;
+ }
+
+ T x, y, z;
+ T padding;
+};
+
+template<class T>
+Vector3<T> operator+( const Vector3<T>& lhs, const Vector3<T>& rhs )
+{
+ return Vector3<T>( lhs.x + rhs.x, lhs.y + rhs.y, lhs.z + rhs.z );
+}
+
+template<class T>
+Vector3<T> operator-( const Vector3<T>& lhs, const Vector3<T>& rhs )
+{
+ return Vector3<T>( lhs.x - rhs.x, lhs.y - rhs.y, lhs.z - rhs.z );
+}
+
+template<class T>
+Vector3<T> operator*( const Vector3<T>& lhs, const Vector3<T>& rhs )
+{
+ return Vector3<T>( lhs.x * rhs.x, lhs.y * rhs.y, lhs.z * rhs.z );
+}
+
+template<class T>
+Vector3<T> operator*( const Vector3<T>& lhs, const float& rhs )
+{
+ return Vector3<T>( T( lhs.x * rhs ), T( lhs.y * rhs ), T( lhs.z * rhs ) );
+}
+
+template<class T>
+Vector3<T> operator/( const Vector3<T>& lhs, const T& rhs )
+{
+ return Vector3<T>( lhs.x / rhs, lhs.y / rhs, lhs.z / rhs );
+}
+
+template<class T>
+bool operator<( const Vector3<T>& lhs, const Vector3<T>& rhs )
+{
+ return lhs.Luminance() < rhs.Luminance();
+}
+
+typedef Vector3<int32_t> v3i;
+typedef Vector3<float> v3f;
+typedef Vector3<uint8_t> v3b;
+
+
+static inline v3b v3f_to_v3b( const v3f& v )
+{
+ return v3b( uint8_t( std::min( 1.f, v.x ) * 255 ), uint8_t( std::min( 1.f, v.y ) * 255 ), uint8_t( std::min( 1.f, v.z ) * 255 ) );
+}
+
+template<class T>
+Vector3<T> Mix( const Vector3<T>& v1, const Vector3<T>& v2, float amount )
+{
+ return v1 + ( v2 - v1 ) * amount;
+}
+
+template<>
+inline v3b Mix( const v3b& v1, const v3b& v2, float amount )
+{
+ return v3b( v3f( v1 ) + ( v3f( v2 ) - v3f( v1 ) ) * amount );
+}
+
+template<class T>
+Vector3<T> Desaturate( const Vector3<T>& v )
+{
+ T l = v.Luminance();
+ return Vector3<T>( l, l, l );
+}
+
+template<class T>
+Vector3<T> Desaturate( const Vector3<T>& v, float mul )
+{
+ T l = T( v.Luminance() * mul );
+ return Vector3<T>( l, l, l );
+}
+
+template<class T>
+Vector3<T> pow( const Vector3<T>& base, float exponent )
+{
+ return Vector3<T>(
+ pow( base.x, exponent ),
+ pow( base.y, exponent ),
+ pow( base.z, exponent ) );
+}
+
+template<class T>
+Vector3<T> sRGB2linear( const Vector3<T>& v )
+{
+ return Vector3<T>(
+ sRGB2linear( v.x ),
+ sRGB2linear( v.y ),
+ sRGB2linear( v.z ) );
+}
+
+template<class T>
+Vector3<T> linear2sRGB( const Vector3<T>& v )
+{
+ return Vector3<T>(
+ linear2sRGB( v.x ),
+ linear2sRGB( v.y ),
+ linear2sRGB( v.z ) );
+}
+
+#endif
diff --git a/thirdparty/etcpak/lz4/lz4.c b/thirdparty/etcpak/lz4/lz4.c
new file mode 100644
index 0000000000..08cf6b5cd7
--- /dev/null
+++ b/thirdparty/etcpak/lz4/lz4.c
@@ -0,0 +1,1516 @@
+/*
+ LZ4 - Fast LZ compression algorithm
+ Copyright (C) 2011-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 source repository : https://github.com/Cyan4973/lz4
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+
+/**************************************
+* Tuning parameters
+**************************************/
+/*
+ * HEAPMODE :
+ * Select how default compression functions will allocate memory for their hash table,
+ * on the stack (0: default, fastest) or on the heap (1: requires malloc()).
+ */
+#define HEAPMODE 0
+
+/*
+ * ACCELERATION_DEFAULT :
+ * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
+ */
+#define ACCELERATION_DEFAULT 1
+
+
+/**************************************
+* CPU Feature Detection
+**************************************/
+/*
+ * LZ4_FORCE_SW_BITCOUNT
+ * Define this parameter if your target system or compiler does not support hardware bit count
+ */
+#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support Hardware bit count */
+# define LZ4_FORCE_SW_BITCOUNT
+#endif
+
+
+/**************************************
+* Includes
+**************************************/
+#include "lz4.h"
+
+
+/**************************************
+* Compiler Options
+**************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# define FORCE_INLINE static __forceinline
+# include <intrin.h>
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4293) /* disable: C4293: too large shift (32-bits) */
+#else
+# if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */
+# if defined(__GNUC__) || defined(__clang__)
+# define FORCE_INLINE static inline __attribute__((always_inline))
+# else
+# define FORCE_INLINE static inline
+# endif
+# else
+# define FORCE_INLINE static
+# endif /* __STDC_VERSION__ */
+#endif /* _MSC_VER */
+
+/* LZ4_GCC_VERSION is defined into lz4.h */
+#if (LZ4_GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
+# define expect(expr,value) (__builtin_expect ((expr),(value)) )
+#else
+# define expect(expr,value) (expr)
+#endif
+
+#define likely(expr) expect((expr) != 0, 1)
+#define unlikely(expr) expect((expr) != 0, 0)
+
+
+/**************************************
+* Memory routines
+**************************************/
+#include <stdlib.h> /* malloc, calloc, free */
+#define ALLOCATOR(n,s) calloc(n,s)
+#define FREEMEM free
+#include <string.h> /* memset, memcpy */
+#define MEM_INIT memset
+
+
+/**************************************
+* Basic Types
+**************************************/
+#if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+#else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
+#endif
+
+
+/**************************************
+* Reading and writing into memory
+**************************************/
+#define STEPSIZE sizeof(size_t)
+
+static unsigned LZ4_64bits(void) { return sizeof(void*)==8; }
+
+static unsigned LZ4_isLittleEndian(void)
+{
+ const union { U32 i; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+
+
+static U16 LZ4_read16(const void* memPtr)
+{
+ U16 val16;
+ memcpy(&val16, memPtr, 2);
+ return val16;
+}
+
+static U16 LZ4_readLE16(const void* memPtr)
+{
+ if (LZ4_isLittleEndian())
+ {
+ return LZ4_read16(memPtr);
+ }
+ else
+ {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U16)((U16)p[0] + (p[1]<<8));
+ }
+}
+
+static void LZ4_writeLE16(void* memPtr, U16 value)
+{
+ if (LZ4_isLittleEndian())
+ {
+ memcpy(memPtr, &value, 2);
+ }
+ else
+ {
+ BYTE* p = (BYTE*)memPtr;
+ p[0] = (BYTE) value;
+ p[1] = (BYTE)(value>>8);
+ }
+}
+
+static U32 LZ4_read32(const void* memPtr)
+{
+ U32 val32;
+ memcpy(&val32, memPtr, 4);
+ return val32;
+}
+
+static U64 LZ4_read64(const void* memPtr)
+{
+ U64 val64;
+ memcpy(&val64, memPtr, 8);
+ return val64;
+}
+
+static size_t LZ4_read_ARCH(const void* p)
+{
+ if (LZ4_64bits())
+ return (size_t)LZ4_read64(p);
+ else
+ return (size_t)LZ4_read32(p);
+}
+
+
+static void LZ4_copy4(void* dstPtr, const void* srcPtr) { memcpy(dstPtr, srcPtr, 4); }
+
+static void LZ4_copy8(void* dstPtr, const void* srcPtr) { memcpy(dstPtr, srcPtr, 8); }
+
+/* customized version of memcpy, which may overwrite up to 7 bytes beyond dstEnd */
+static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
+{
+ BYTE* d = (BYTE*)dstPtr;
+ const BYTE* s = (const BYTE*)srcPtr;
+ BYTE* e = (BYTE*)dstEnd;
+ do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
+}
+
+
+/**************************************
+* Common Constants
+**************************************/
+#define MINMATCH 4
+
+#define COPYLENGTH 8
+#define LASTLITERALS 5
+#define MFLIMIT (COPYLENGTH+MINMATCH)
+static const int LZ4_minLength = (MFLIMIT+1);
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define MAXD_LOG 16
+#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
+
+#define ML_BITS 4
+#define ML_MASK ((1U<<ML_BITS)-1)
+#define RUN_BITS (8-ML_BITS)
+#define RUN_MASK ((1U<<RUN_BITS)-1)
+
+
+/**************************************
+* Common Utils
+**************************************/
+#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+
+/**************************************
+* Common functions
+**************************************/
+static unsigned LZ4_NbCommonBytes (register size_t val)
+{
+ if (LZ4_isLittleEndian())
+ {
+ if (LZ4_64bits())
+ {
+# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r = 0;
+ _BitScanForward64( &r, (U64)val );
+ return (int)(r>>3);
+# elif (defined(__clang__) || (LZ4_GCC_VERSION >= 304)) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (__builtin_ctzll((U64)val) >> 3);
+# else
+ static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
+ return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+# endif
+ }
+ else /* 32 bits */
+ {
+# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r;
+ _BitScanForward( &r, (U32)val );
+ return (int)(r>>3);
+# elif (defined(__clang__) || (LZ4_GCC_VERSION >= 304)) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (__builtin_ctz((U32)val) >> 3);
+# else
+ static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
+ return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+# endif
+ }
+ }
+ else /* Big Endian CPU */
+ {
+ if (LZ4_64bits())
+ {
+# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r = 0;
+ _BitScanReverse64( &r, val );
+ return (unsigned)(r>>3);
+# elif (defined(__clang__) || (LZ4_GCC_VERSION >= 304)) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (__builtin_clzll((U64)val) >> 3);
+# else
+ unsigned r;
+ if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
+ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+ r += (!val);
+ return r;
+# endif
+ }
+ else /* 32 bits */
+ {
+# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r = 0;
+ _BitScanReverse( &r, (unsigned long)val );
+ return (unsigned)(r>>3);
+# elif (defined(__clang__) || (LZ4_GCC_VERSION >= 304)) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (__builtin_clz((U32)val) >> 3);
+# else
+ unsigned r;
+ if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+ r += (!val);
+ return r;
+# endif
+ }
+ }
+}
+
+static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
+{
+ const BYTE* const pStart = pIn;
+
+ while (likely(pIn<pInLimit-(STEPSIZE-1)))
+ {
+ size_t diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+ if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
+ pIn += LZ4_NbCommonBytes(diff);
+ return (unsigned)(pIn - pStart);
+ }
+
+ if (LZ4_64bits()) if ((pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
+ if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
+ if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
+ return (unsigned)(pIn - pStart);
+}
+
+
+#ifndef LZ4_COMMONDEFS_ONLY
+/**************************************
+* Local Constants
+**************************************/
+#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
+#define HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
+#define HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */
+
+static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
+static const U32 LZ4_skipTrigger = 6; /* Increase this value ==> compression runs slower on incompressible data */
+
+
+/**************************************
+* Local Structures and types
+**************************************/
+typedef struct {
+ U32 hashTable[HASH_SIZE_U32];
+ U32 currentOffset;
+ U32 initCheck;
+ const BYTE* dictionary;
+ BYTE* bufferStart; /* obsolete, used for slideInputBuffer */
+ U32 dictSize;
+} LZ4_stream_t_internal;
+
+typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
+typedef enum { byPtr, byU32, byU16 } tableType_t;
+
+typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
+typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
+
+typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
+typedef enum { full = 0, partial = 1 } earlyEnd_directive;
+
+
+/**************************************
+* Local Utils
+**************************************/
+int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
+int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
+int LZ4_sizeofState() { return LZ4_STREAMSIZE; }
+
+
+
+/********************************
+* Compression functions
+********************************/
+
+static U32 LZ4_hashSequence(U32 sequence, tableType_t const tableType)
+{
+ if (tableType == byU16)
+ return (((sequence) * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
+ else
+ return (((sequence) * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
+}
+
+static const U64 prime5bytes = 889523592379ULL;
+static U32 LZ4_hashSequence64(size_t sequence, tableType_t const tableType)
+{
+ const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
+ const U32 hashMask = (1<<hashLog) - 1;
+ return ((sequence * prime5bytes) >> (40 - hashLog)) & hashMask;
+}
+
+static U32 LZ4_hashSequenceT(size_t sequence, tableType_t const tableType)
+{
+ if (LZ4_64bits())
+ return LZ4_hashSequence64(sequence, tableType);
+ return LZ4_hashSequence((U32)sequence, tableType);
+}
+
+static U32 LZ4_hashPosition(const void* p, tableType_t tableType) { return LZ4_hashSequenceT(LZ4_read_ARCH(p), tableType); }
+
+static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase)
+{
+ switch (tableType)
+ {
+ case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
+ case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
+ case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
+ }
+}
+
+static void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+{
+ U32 h = LZ4_hashPosition(p, tableType);
+ LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
+}
+
+static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+{
+ if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; }
+ if (tableType == byU32) { U32* hashTable = (U32*) tableBase; return hashTable[h] + srcBase; }
+ { U16* hashTable = (U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
+}
+
+static const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+{
+ U32 h = LZ4_hashPosition(p, tableType);
+ return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
+}
+
+FORCE_INLINE int LZ4_compress_generic(
+ void* const ctx,
+ const char* const source,
+ char* const dest,
+ const int inputSize,
+ const int maxOutputSize,
+ const limitedOutput_directive outputLimited,
+ const tableType_t tableType,
+ const dict_directive dict,
+ const dictIssue_directive dictIssue,
+ const U32 acceleration)
+{
+ LZ4_stream_t_internal* const dictPtr = (LZ4_stream_t_internal*)ctx;
+
+ const BYTE* ip = (const BYTE*) source;
+ const BYTE* base;
+ const BYTE* lowLimit;
+ const BYTE* const lowRefLimit = ip - dictPtr->dictSize;
+ const BYTE* const dictionary = dictPtr->dictionary;
+ const BYTE* const dictEnd = dictionary + dictPtr->dictSize;
+ const size_t dictDelta = dictEnd - (const BYTE*)source;
+ const BYTE* anchor = (const BYTE*) source;
+ const BYTE* const iend = ip + inputSize;
+ const BYTE* const mflimit = iend - MFLIMIT;
+ const BYTE* const matchlimit = iend - LASTLITERALS;
+
+ BYTE* op = (BYTE*) dest;
+ BYTE* const olimit = op + maxOutputSize;
+
+ U32 forwardH;
+ size_t refDelta=0;
+
+ /* Init conditions */
+ if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */
+ switch(dict)
+ {
+ case noDict:
+ default:
+ base = (const BYTE*)source;
+ lowLimit = (const BYTE*)source;
+ break;
+ case withPrefix64k:
+ base = (const BYTE*)source - dictPtr->currentOffset;
+ lowLimit = (const BYTE*)source - dictPtr->dictSize;
+ break;
+ case usingExtDict:
+ base = (const BYTE*)source - dictPtr->currentOffset;
+ lowLimit = (const BYTE*)source;
+ break;
+ }
+ if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */
+ if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
+
+ /* First Byte */
+ LZ4_putPosition(ip, ctx, tableType, base);
+ ip++; forwardH = LZ4_hashPosition(ip, tableType);
+
+ /* Main Loop */
+ for ( ; ; )
+ {
+ const BYTE* match;
+ BYTE* token;
+ {
+ const BYTE* forwardIp = ip;
+ unsigned step = 1;
+ unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
+
+ /* Find a match */
+ do {
+ U32 h = forwardH;
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+ if (unlikely(forwardIp > mflimit)) goto _last_literals;
+
+ match = LZ4_getPositionOnHash(h, ctx, tableType, base);
+ if (dict==usingExtDict)
+ {
+ if (match<(const BYTE*)source)
+ {
+ refDelta = dictDelta;
+ lowLimit = dictionary;
+ }
+ else
+ {
+ refDelta = 0;
+ lowLimit = (const BYTE*)source;
+ }
+ }
+ forwardH = LZ4_hashPosition(forwardIp, tableType);
+ LZ4_putPositionOnHash(ip, h, ctx, tableType, base);
+
+ } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0)
+ || ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
+ || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) );
+ }
+
+ /* Catch up */
+ while ((ip>anchor) && (match+refDelta > lowLimit) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; }
+
+ {
+ /* Encode Literal length */
+ unsigned litLength = (unsigned)(ip - anchor);
+ token = op++;
+ if ((outputLimited) && (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
+ return 0; /* Check output limit */
+ if (litLength>=RUN_MASK)
+ {
+ int len = (int)litLength-RUN_MASK;
+ *token=(RUN_MASK<<ML_BITS);
+ for(; len >= 255 ; len-=255) *op++ = 255;
+ *op++ = (BYTE)len;
+ }
+ else *token = (BYTE)(litLength<<ML_BITS);
+
+ /* Copy Literals */
+ LZ4_wildCopy(op, anchor, op+litLength);
+ op+=litLength;
+ }
+
+_next_match:
+ /* Encode Offset */
+ LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
+
+ /* Encode MatchLength */
+ {
+ unsigned matchLength;
+
+ if ((dict==usingExtDict) && (lowLimit==dictionary))
+ {
+ const BYTE* limit;
+ match += refDelta;
+ limit = ip + (dictEnd-match);
+ if (limit > matchlimit) limit = matchlimit;
+ matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
+ ip += MINMATCH + matchLength;
+ if (ip==limit)
+ {
+ unsigned more = LZ4_count(ip, (const BYTE*)source, matchlimit);
+ matchLength += more;
+ ip += more;
+ }
+ }
+ else
+ {
+ matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
+ ip += MINMATCH + matchLength;
+ }
+
+ if ((outputLimited) && (unlikely(op + (1 + LASTLITERALS) + (matchLength>>8) > olimit)))
+ return 0; /* Check output limit */
+ if (matchLength>=ML_MASK)
+ {
+ *token += ML_MASK;
+ matchLength -= ML_MASK;
+ for (; matchLength >= 510 ; matchLength-=510) { *op++ = 255; *op++ = 255; }
+ if (matchLength >= 255) { matchLength-=255; *op++ = 255; }
+ *op++ = (BYTE)matchLength;
+ }
+ else *token += (BYTE)(matchLength);
+ }
+
+ anchor = ip;
+
+ /* Test end of chunk */
+ if (ip > mflimit) break;
+
+ /* Fill table */
+ LZ4_putPosition(ip-2, ctx, tableType, base);
+
+ /* Test next position */
+ match = LZ4_getPosition(ip, ctx, tableType, base);
+ if (dict==usingExtDict)
+ {
+ if (match<(const BYTE*)source)
+ {
+ refDelta = dictDelta;
+ lowLimit = dictionary;
+ }
+ else
+ {
+ refDelta = 0;
+ lowLimit = (const BYTE*)source;
+ }
+ }
+ LZ4_putPosition(ip, ctx, tableType, base);
+ if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1)
+ && (match+MAX_DISTANCE>=ip)
+ && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) )
+ { token=op++; *token=0; goto _next_match; }
+
+ /* Prepare next loop */
+ forwardH = LZ4_hashPosition(++ip, tableType);
+ }
+
+_last_literals:
+ /* Encode Last Literals */
+ {
+ const size_t lastRun = (size_t)(iend - anchor);
+ if ((outputLimited) && ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize))
+ return 0; /* Check output limit */
+ if (lastRun >= RUN_MASK)
+ {
+ size_t accumulator = lastRun - RUN_MASK;
+ *op++ = RUN_MASK << ML_BITS;
+ for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ }
+ else
+ {
+ *op++ = (BYTE)(lastRun<<ML_BITS);
+ }
+ memcpy(op, anchor, lastRun);
+ op += lastRun;
+ }
+
+ /* End */
+ return (int) (((char*)op)-dest);
+}
+
+
+int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+{
+ LZ4_resetStream((LZ4_stream_t*)state);
+ if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
+
+ if (maxOutputSize >= LZ4_compressBound(inputSize))
+ {
+ if (inputSize < LZ4_64Klimit)
+ return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
+ else
+ return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue, acceleration);
+ }
+ else
+ {
+ if (inputSize < LZ4_64Klimit)
+ return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
+ else
+ return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limitedOutput, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue, acceleration);
+ }
+}
+
+
+int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+{
+#if (HEAPMODE)
+ void* ctxPtr = ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
+#else
+ LZ4_stream_t ctx;
+ void* ctxPtr = &ctx;
+#endif
+
+ int result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
+
+#if (HEAPMODE)
+ FREEMEM(ctxPtr);
+#endif
+ return result;
+}
+
+
+int LZ4_compress_default(const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+ return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, 1);
+}
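A minimal round trip through the one-shot API above: LZ4_compressBound() gives the worst-case output size, LZ4_compress_fast() returns the number of bytes written (0 on failure), and an acceleration value below 1 falls back to ACCELERATION_DEFAULT. The wrapper below is a hypothetical sketch, not part of LZ4:

    /* Hypothetical usage sketch, not part of the upstream sources. */
    #include <stdlib.h>
    #include "lz4.h"

    int CompressBuffer( const char* src, int srcSize, char** dstOut )
    {
        int bound = LZ4_compressBound( srcSize );
        char* dst = (char*)malloc( (size_t)bound );
        if( !dst ) return 0;
        int written = LZ4_compress_fast( src, dst, srcSize, bound, 1 );  /* 0 means failure */
        *dstOut = dst;
        return written;
    }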
+
+
+/* hidden debug function */
+/* strangely enough, gcc generates faster code when this function is uncommented, even if unused */
+int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+{
+ LZ4_stream_t ctx;
+
+ LZ4_resetStream(&ctx);
+
+ if (inputSize < LZ4_64Klimit)
+ return LZ4_compress_generic(&ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
+ else
+ return LZ4_compress_generic(&ctx, source, dest, inputSize, maxOutputSize, limitedOutput, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue, acceleration);
+}
+
+
+/********************************
+* destSize variant
+********************************/
+
+static int LZ4_compress_destSize_generic(
+ void* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ const int targetDstSize,
+ const tableType_t tableType)
+{
+ const BYTE* ip = (const BYTE*) src;
+ const BYTE* base = (const BYTE*) src;
+ const BYTE* lowLimit = (const BYTE*) src;
+ const BYTE* anchor = ip;
+ const BYTE* const iend = ip + *srcSizePtr;
+ const BYTE* const mflimit = iend - MFLIMIT;
+ const BYTE* const matchlimit = iend - LASTLITERALS;
+
+ BYTE* op = (BYTE*) dst;
+ BYTE* const oend = op + targetDstSize;
+ BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */;
+ BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */);
+ BYTE* const oMaxSeq = oMaxLit - 1 /* token */;
+
+ U32 forwardH;
+
+
+ /* Init conditions */
+ if (targetDstSize < 1) return 0; /* Impossible to store anything */
+ if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */
+ if ((tableType == byU16) && (*srcSizePtr>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */
+ if (*srcSizePtr<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
+
+ /* First Byte */
+ *srcSizePtr = 0;
+ LZ4_putPosition(ip, ctx, tableType, base);
+ ip++; forwardH = LZ4_hashPosition(ip, tableType);
+
+ /* Main Loop */
+ for ( ; ; )
+ {
+ const BYTE* match;
+ BYTE* token;
+ {
+ const BYTE* forwardIp = ip;
+ unsigned step = 1;
+ unsigned searchMatchNb = 1 << LZ4_skipTrigger;
+
+ /* Find a match */
+ do {
+ U32 h = forwardH;
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+ if (unlikely(forwardIp > mflimit))
+ goto _last_literals;
+
+ match = LZ4_getPositionOnHash(h, ctx, tableType, base);
+ forwardH = LZ4_hashPosition(forwardIp, tableType);
+ LZ4_putPositionOnHash(ip, h, ctx, tableType, base);
+
+ } while ( ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
+ || (LZ4_read32(match) != LZ4_read32(ip)) );
+ }
+
+ /* Catch up */
+ while ((ip>anchor) && (match > lowLimit) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
+
+ {
+ /* Encode Literal length */
+ unsigned litLength = (unsigned)(ip - anchor);
+ token = op++;
+ if (op + ((litLength+240)/255) + litLength > oMaxLit)
+ {
+ /* Not enough space for a last match */
+ op--;
+ goto _last_literals;
+ }
+ if (litLength>=RUN_MASK)
+ {
+ unsigned len = litLength - RUN_MASK;
+ *token=(RUN_MASK<<ML_BITS);
+ for(; len >= 255 ; len-=255) *op++ = 255;
+ *op++ = (BYTE)len;
+ }
+ else *token = (BYTE)(litLength<<ML_BITS);
+
+ /* Copy Literals */
+ LZ4_wildCopy(op, anchor, op+litLength);
+ op += litLength;
+ }
+
+_next_match:
+ /* Encode Offset */
+ LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
+
+ /* Encode MatchLength */
+ {
+ size_t matchLength;
+
+ matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
+
+ if (op + ((matchLength+240)/255) > oMaxMatch)
+ {
+ /* Match description too long : reduce it */
+ matchLength = (15-1) + (oMaxMatch-op) * 255;
+ }
+ //printf("offset %5i, matchLength%5i \n", (int)(ip-match), matchLength + MINMATCH);
+ ip += MINMATCH + matchLength;
+
+ if (matchLength>=ML_MASK)
+ {
+ *token += ML_MASK;
+ matchLength -= ML_MASK;
+ while (matchLength >= 255) { matchLength-=255; *op++ = 255; }
+ *op++ = (BYTE)matchLength;
+ }
+ else *token += (BYTE)(matchLength);
+ }
+
+ anchor = ip;
+
+ /* Test end of block */
+ if (ip > mflimit) break;
+ if (op > oMaxSeq) break;
+
+ /* Fill table */
+ LZ4_putPosition(ip-2, ctx, tableType, base);
+
+ /* Test next position */
+ match = LZ4_getPosition(ip, ctx, tableType, base);
+ LZ4_putPosition(ip, ctx, tableType, base);
+ if ( (match+MAX_DISTANCE>=ip)
+ && (LZ4_read32(match)==LZ4_read32(ip)) )
+ { token=op++; *token=0; goto _next_match; }
+
+ /* Prepare next loop */
+ forwardH = LZ4_hashPosition(++ip, tableType);
+ }
+
+_last_literals:
+ /* Encode Last Literals */
+ {
+ size_t lastRunSize = (size_t)(iend - anchor);
+ if (op + 1 /* token */ + ((lastRunSize+240)/255) /* litLength */ + lastRunSize /* literals */ > oend)
+ {
+ /* adapt lastRunSize to fill 'dst' */
+ lastRunSize = (oend-op) - 1;
+ lastRunSize -= (lastRunSize+240)/255;
+ }
+ ip = anchor + lastRunSize;
+
+ if (lastRunSize >= RUN_MASK)
+ {
+ size_t accumulator = lastRunSize - RUN_MASK;
+ *op++ = RUN_MASK << ML_BITS;
+ for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ }
+ else
+ {
+ *op++ = (BYTE)(lastRunSize<<ML_BITS);
+ }
+ memcpy(op, anchor, lastRunSize);
+ op += lastRunSize;
+ }
+
+ /* End */
+ *srcSizePtr = (int) (((const char*)ip)-src);
+ return (int) (((char*)op)-dst);
+}
+
+
+static int LZ4_compress_destSize_extState (void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
+{
+ LZ4_resetStream((LZ4_stream_t*)state);
+
+ if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) /* compression success is guaranteed */
+ {
+ return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
+ }
+ else
+ {
+ if (*srcSizePtr < LZ4_64Klimit)
+ return LZ4_compress_destSize_generic(state, src, dst, srcSizePtr, targetDstSize, byU16);
+ else
+ return LZ4_compress_destSize_generic(state, src, dst, srcSizePtr, targetDstSize, LZ4_64bits() ? byU32 : byPtr);
+ }
+}
+
+
+int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
+{
+#if (HEAPMODE)
+ void* ctx = ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
+#else
+ LZ4_stream_t ctxBody;
+ void* ctx = &ctxBody;
+#endif
+
+ int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
+
+#if (HEAPMODE)
+ FREEMEM(ctx);
+#endif
+ return result;
+}
+
+
+
+/********************************
+* Streaming functions
+********************************/
+
+LZ4_stream_t* LZ4_createStream(void)
+{
+ LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64);
+ LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */
+ LZ4_resetStream(lz4s);
+ return lz4s;
+}
+
+void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
+{
+ MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
+}
+
+int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
+{
+ FREEMEM(LZ4_stream);
+ return (0);
+}
+
+
+#define HASH_UNIT sizeof(size_t)
+int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
+{
+ LZ4_stream_t_internal* dict = (LZ4_stream_t_internal*) LZ4_dict;
+ const BYTE* p = (const BYTE*)dictionary;
+ const BYTE* const dictEnd = p + dictSize;
+ const BYTE* base;
+
+ if ((dict->initCheck) || (dict->currentOffset > 1 GB)) /* Uninitialized structure, or reuse overflow */
+ LZ4_resetStream(LZ4_dict);
+
+ if (dictSize < (int)HASH_UNIT)
+ {
+ dict->dictionary = NULL;
+ dict->dictSize = 0;
+ return 0;
+ }
+
+ if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
+ dict->currentOffset += 64 KB;
+ base = p - dict->currentOffset;
+ dict->dictionary = p;
+ dict->dictSize = (U32)(dictEnd - p);
+ dict->currentOffset += dict->dictSize;
+
+ while (p <= dictEnd-HASH_UNIT)
+ {
+ LZ4_putPosition(p, dict->hashTable, byU32, base);
+ p+=3;
+ }
+
+ return dict->dictSize;
+}
+
+
+static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
+{
+ if ((LZ4_dict->currentOffset > 0x80000000) ||
+ ((size_t)LZ4_dict->currentOffset > (size_t)src)) /* address space overflow */
+ {
+ /* rescale hash table */
+ U32 delta = LZ4_dict->currentOffset - 64 KB;
+ const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
+ int i;
+ for (i=0; i<HASH_SIZE_U32; i++)
+ {
+ if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
+ else LZ4_dict->hashTable[i] -= delta;
+ }
+ LZ4_dict->currentOffset = 64 KB;
+ if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
+ LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
+ }
+}
+
+
+int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+{
+ LZ4_stream_t_internal* streamPtr = (LZ4_stream_t_internal*)LZ4_stream;
+ const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
+
+ const BYTE* smallest = (const BYTE*) source;
+ if (streamPtr->initCheck) return 0; /* Uninitialized structure detected */
+ if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
+ LZ4_renormDictT(streamPtr, smallest);
+ if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
+
+ /* Check overlapping input/dictionary space */
+ {
+ const BYTE* sourceEnd = (const BYTE*) source + inputSize;
+ if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd))
+ {
+ streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
+ if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
+ if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
+ streamPtr->dictionary = dictEnd - streamPtr->dictSize;
+ }
+ }
+
+ /* prefix mode : source data follows dictionary */
+ if (dictEnd == (const BYTE*)source)
+ {
+ int result;
+ if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
+ result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, dictSmall, acceleration);
+ else
+ result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, noDictIssue, acceleration);
+ streamPtr->dictSize += (U32)inputSize;
+ streamPtr->currentOffset += (U32)inputSize;
+ return result;
+ }
+
+ /* external dictionary mode */
+ {
+ int result;
+ if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
+ result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, dictSmall, acceleration);
+ else
+ result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, noDictIssue, acceleration);
+ streamPtr->dictionary = (const BYTE*)source;
+ streamPtr->dictSize = (U32)inputSize;
+ streamPtr->currentOffset += (U32)inputSize;
+ return result;
+ }
+}
+
+
+/* Hidden debug function, to force external dictionary mode */
+int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize)
+{
+ LZ4_stream_t_internal* streamPtr = (LZ4_stream_t_internal*)LZ4_dict;
+ int result;
+ const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
+
+ const BYTE* smallest = dictEnd;
+ if (smallest > (const BYTE*) source) smallest = (const BYTE*) source;
+ LZ4_renormDictT((LZ4_stream_t_internal*)LZ4_dict, smallest);
+
+ result = LZ4_compress_generic(LZ4_dict, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
+
+ streamPtr->dictionary = (const BYTE*)source;
+ streamPtr->dictSize = (U32)inputSize;
+ streamPtr->currentOffset += (U32)inputSize;
+
+ return result;
+}
+
+
+int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
+{
+ LZ4_stream_t_internal* dict = (LZ4_stream_t_internal*) LZ4_dict;
+ const BYTE* previousDictEnd = dict->dictionary + dict->dictSize;
+
+ if ((U32)dictSize > 64 KB) dictSize = 64 KB; /* useless to define a dictionary > 64 KB */
+ if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize;
+
+ memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
+
+ dict->dictionary = (const BYTE*)safeBuffer;
+ dict->dictSize = (U32)dictSize;
+
+ return dictSize;
+}
+
+
+
+/*******************************
+* Decompression functions
+*******************************/
+/*
+ * This generic decompression function covers all use cases.
+ * It shall be instantiated several times, using different sets of directives.
+ * Note that it is essential that this generic function is really inlined,
+ * in order to remove useless branches during compilation optimization.
+ */
+FORCE_INLINE int LZ4_decompress_generic(
+ const char* const source,
+ char* const dest,
+ int inputSize,
+ int outputSize, /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */
+
+ int endOnInput, /* endOnOutputSize, endOnInputSize */
+ int partialDecoding, /* full, partial */
+ int targetOutputSize, /* only used if partialDecoding==partial */
+ int dict, /* noDict, withPrefix64k, usingExtDict */
+ const BYTE* const lowPrefix, /* == dest if dict == noDict */
+ const BYTE* const dictStart, /* only if dict==usingExtDict */
+ const size_t dictSize /* note : = 0 if noDict */
+ )
+{
+ /* Local Variables */
+ const BYTE* ip = (const BYTE*) source;
+ const BYTE* const iend = ip + inputSize;
+
+ BYTE* op = (BYTE*) dest;
+ BYTE* const oend = op + outputSize;
+ BYTE* cpy;
+ BYTE* oexit = op + targetOutputSize;
+ const BYTE* const lowLimit = lowPrefix - dictSize;
+
+ const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
+ const size_t dec32table[] = {4, 1, 2, 1, 4, 4, 4, 4};
+ const size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
+
+ const int safeDecode = (endOnInput==endOnInputSize);
+ const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
+
+
+ /* Special cases */
+ if ((partialDecoding) && (oexit> oend-MFLIMIT)) oexit = oend-MFLIMIT; /* targetOutputSize too high => decode everything */
+ if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1; /* Empty output buffer */
+ if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);
+
+
+ /* Main Loop */
+ while (1)
+ {
+ unsigned token;
+ size_t length;
+ const BYTE* match;
+
+ /* get literal length */
+ token = *ip++;
+ if ((length=(token>>ML_BITS)) == RUN_MASK)
+ {
+ unsigned s;
+ do
+ {
+ s = *ip++;
+ length += s;
+ }
+ while (likely((endOnInput)?ip<iend-RUN_MASK:1) && (s==255));
+ if ((safeDecode) && unlikely((size_t)(op+length)<(size_t)(op))) goto _output_error; /* overflow detection */
+ if ((safeDecode) && unlikely((size_t)(ip+length)<(size_t)(ip))) goto _output_error; /* overflow detection */
+ }
+
+ /* copy literals */
+ cpy = op+length;
+ if (((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
+ || ((!endOnInput) && (cpy>oend-COPYLENGTH)))
+ {
+ if (partialDecoding)
+ {
+ if (cpy > oend) goto _output_error; /* Error : write attempt beyond end of output buffer */
+ if ((endOnInput) && (ip+length > iend)) goto _output_error; /* Error : read attempt beyond end of input buffer */
+ }
+ else
+ {
+ if ((!endOnInput) && (cpy != oend)) goto _output_error; /* Error : block decoding must stop exactly there */
+ if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error; /* Error : input must be consumed */
+ }
+ memcpy(op, ip, length);
+ ip += length;
+ op += length;
+ break; /* Necessarily EOF, due to parsing restrictions */
+ }
+ LZ4_wildCopy(op, ip, cpy);
+ ip += length; op = cpy;
+
+ /* get offset */
+ match = cpy - LZ4_readLE16(ip); ip+=2;
+ if ((checkOffset) && (unlikely(match < lowLimit))) goto _output_error; /* Error : offset outside destination buffer */
+
+ /* get matchlength */
+ length = token & ML_MASK;
+ if (length == ML_MASK)
+ {
+ unsigned s;
+ do
+ {
+ if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
+ s = *ip++;
+ length += s;
+ } while (s==255);
+ if ((safeDecode) && unlikely((size_t)(op+length)<(size_t)op)) goto _output_error; /* overflow detection */
+ }
+ length += MINMATCH;
+
+ /* check external dictionary */
+ if ((dict==usingExtDict) && (match < lowPrefix))
+ {
+ if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error; /* doesn't respect parsing restriction */
+
+ if (length <= (size_t)(lowPrefix-match))
+ {
+ /* match can be copied as a single segment from external dictionary */
+ match = dictEnd - (lowPrefix-match);
+ memmove(op, match, length); op += length;
+ }
+ else
+ {
+ /* match encompass external dictionary and current segment */
+ size_t copySize = (size_t)(lowPrefix-match);
+ memcpy(op, dictEnd - copySize, copySize);
+ op += copySize;
+ copySize = length - copySize;
+ if (copySize > (size_t)(op-lowPrefix)) /* overlap within current segment */
+ {
+ BYTE* const endOfMatch = op + copySize;
+ const BYTE* copyFrom = lowPrefix;
+ while (op < endOfMatch) *op++ = *copyFrom++;
+ }
+ else
+ {
+ memcpy(op, lowPrefix, copySize);
+ op += copySize;
+ }
+ }
+ continue;
+ }
+
+ /* copy repeated sequence */
+ cpy = op + length;
+ if (unlikely((op-match)<8))
+ {
+ const size_t dec64 = dec64table[op-match];
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += dec32table[op-match];
+ LZ4_copy4(op+4, match);
+ op += 8; match -= dec64;
+ } else { LZ4_copy8(op, match); op+=8; match+=8; }
+
+ if (unlikely(cpy>oend-12))
+ {
+ if (cpy > oend-LASTLITERALS) goto _output_error; /* Error : last LASTLITERALS bytes must be literals */
+ if (op < oend-8)
+ {
+ LZ4_wildCopy(op, match, oend-8);
+ match += (oend-8) - op;
+ op = oend-8;
+ }
+ while (op<cpy) *op++ = *match++;
+ }
+ else
+ LZ4_wildCopy(op, match, cpy);
+ op=cpy; /* correction */
+ }
+
+ /* end of decoding */
+ if (endOnInput)
+ return (int) (((char*)op)-dest); /* Nb of output bytes decoded */
+ else
+ return (int) (((const char*)ip)-source); /* Nb of input bytes read */
+
+ /* Overflow error detected */
+_output_error:
+ return (int) (-(((const char*)ip)-source))-1;
+}
+
+
+int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0);
+}
+
+int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0);
+}
+
+int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
+{
+ return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB);
+}
+
+
+/* streaming decompression functions */
+
+typedef struct
+{
+ const BYTE* externalDict;
+ size_t extDictSize;
+ const BYTE* prefixEnd;
+ size_t prefixSize;
+} LZ4_streamDecode_t_internal;
+
+/*
+ * If you prefer dynamic allocation methods,
+ * LZ4_createStreamDecode()
+ * provides a pointer to an initialized LZ4_streamDecode_t structure.
+ */
+LZ4_streamDecode_t* LZ4_createStreamDecode(void)
+{
+ LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(1, sizeof(LZ4_streamDecode_t));
+ return lz4s;
+}
+
+int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
+{
+ FREEMEM(LZ4_stream);
+ return 0;
+}
+
+/*
+ * LZ4_setStreamDecode
+ * Use this function to indicate where to find the dictionary.
+ * This function is not necessary if previous data is still available where it was decoded.
+ * Loading a size of 0 is allowed (same effect as no dictionary).
+ * Return : 1 if OK, 0 if error
+ */
+int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
+{
+ LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
+ lz4sd->prefixSize = (size_t) dictSize;
+ lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
+ lz4sd->externalDict = NULL;
+ lz4sd->extDictSize = 0;
+ return 1;
+}
+
+/*
+*_continue() :
+ These decoding functions allow decompression of multiple blocks in "streaming" mode.
+ Previously decoded blocks must still be available at the memory position where they were decoded.
+ If it's not possible, save the relevant part of decoded data into a safe buffer,
+ and indicate where it stands using LZ4_setStreamDecode()
+*/
+int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
+{
+ LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
+ int result;
+
+ if (lz4sd->prefixEnd == (BYTE*)dest)
+ {
+ result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, full, 0,
+ usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize += result;
+ lz4sd->prefixEnd += result;
+ }
+ else
+ {
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+ result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, full, 0,
+ usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize = result;
+ lz4sd->prefixEnd = (BYTE*)dest + result;
+ }
+
+ return result;
+}
+
+int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
+{
+ LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
+ int result;
+
+ if (lz4sd->prefixEnd == (BYTE*)dest)
+ {
+ result = LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, full, 0,
+ usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize += originalSize;
+ lz4sd->prefixEnd += originalSize;
+ }
+ else
+ {
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = (BYTE*)dest - lz4sd->extDictSize;
+ result = LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, full, 0,
+ usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize = originalSize;
+ lz4sd->prefixEnd = (BYTE*)dest + originalSize;
+ }
+
+ return result;
+}
+
+
+/*
+Advanced decoding functions :
+*_usingDict() :
+ These decoding functions work the same as "_continue" ones,
+ the dictionary must be explicitly provided within parameters
+*/
+
+FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize)
+{
+ if (dictSize==0)
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0);
+ if (dictStart+dictSize == dest)
+ {
+ if (dictSize >= (int)(64 KB - 1))
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0);
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0);
+ }
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+}
+
+int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
+{
+ return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize);
+}
+
+int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
+{
+ return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, dictStart, dictSize);
+}
+
+/* debug function */
+int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+}
+
+
+/***************************************************
+* Obsolete Functions
+***************************************************/
+/* obsolete compression functions */
+int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) { return LZ4_compress_default(source, dest, inputSize, maxOutputSize); }
+int LZ4_compress(const char* source, char* dest, int inputSize) { return LZ4_compress_default(source, dest, inputSize, LZ4_compressBound(inputSize)); }
+int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); }
+int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); }
+int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, 1); }
+int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) { return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); }
+
+/*
+These function names are deprecated and should no longer be used.
+They are only provided here for compatibility with older user programs.
+- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
+- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
+*/
+int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); }
+int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); }
+
+
+/* Obsolete Streaming functions */
+
+int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; }
+
+static void LZ4_init(LZ4_stream_t_internal* lz4ds, BYTE* base)
+{
+ MEM_INIT(lz4ds, 0, LZ4_STREAMSIZE);
+ lz4ds->bufferStart = base;
+}
+
+int LZ4_resetStreamState(void* state, char* inputBuffer)
+{
+ if ((((size_t)state) & 3) != 0) return 1; /* Error : pointer is not aligned on 4-bytes boundary */
+ LZ4_init((LZ4_stream_t_internal*)state, (BYTE*)inputBuffer);
+ return 0;
+}
+
+void* LZ4_create (char* inputBuffer)
+{
+ void* lz4ds = ALLOCATOR(8, LZ4_STREAMSIZE_U64);
+ LZ4_init ((LZ4_stream_t_internal*)lz4ds, (BYTE*)inputBuffer);
+ return lz4ds;
+}
+
+char* LZ4_slideInputBuffer (void* LZ4_Data)
+{
+ LZ4_stream_t_internal* ctx = (LZ4_stream_t_internal*)LZ4_Data;
+ int dictSize = LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB);
+ return (char*)(ctx->bufferStart + dictSize);
+}
+
+/* Obsolete streaming decompression functions */
+
+int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
+}
+
+int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
+{
+ return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
+}
+
+#endif /* LZ4_COMMONDEFS_ONLY */
+
diff --git a/thirdparty/etcpak/lz4/lz4.h b/thirdparty/etcpak/lz4/lz4.h
new file mode 100644
index 0000000000..3e74002256
--- /dev/null
+++ b/thirdparty/etcpak/lz4/lz4.h
@@ -0,0 +1,360 @@
+/*
+ LZ4 - Fast LZ compression algorithm
+ Header File
+ Copyright (C) 2011-2015, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 source repository : https://github.com/Cyan4973/lz4
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+#pragma once
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * lz4.h provides block compression functions, and gives full buffer control to the programmer.
+ * If you need to generate interoperable compressed data (respecting the LZ4 frame specification),
+ * and can let the library handle its own memory, please use lz4frame.h instead.
+*/
+
+/**************************************
+* Version
+**************************************/
+#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */
+#define LZ4_VERSION_MINOR 7 /* for new (non-breaking) interface capabilities */
+#define LZ4_VERSION_RELEASE 1 /* for tweaks, bug-fixes, or development */
+#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE)
+int LZ4_versionNumber (void);
+
+/**************************************
+* Tuning parameter
+**************************************/
+/*
+ * LZ4_MEMORY_USAGE :
+ * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+ * Increasing memory usage improves compression ratio
+ * Reduced memory usage can improve speed, due to cache effect
+ * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
+ */
+#define LZ4_MEMORY_USAGE 14
+
+
+/**************************************
+* Simple Functions
+**************************************/
+
+int LZ4_compress_default(const char* source, char* dest, int sourceSize, int maxDestSize);
+int LZ4_decompress_safe (const char* source, char* dest, int compressedSize, int maxDecompressedSize);
+
+/*
+LZ4_compress_default() :
+ Compresses 'sourceSize' bytes from buffer 'source'
+ into already allocated 'dest' buffer of size 'maxDestSize'.
+ Compression is guaranteed to succeed if 'maxDestSize' >= LZ4_compressBound(sourceSize).
+ It also runs faster, so it's a recommended setting.
+ If the function cannot compress 'source' into a more limited 'dest' budget,
+ compression stops *immediately*, and the function result is zero.
+ As a consequence, 'dest' content is not valid.
+    This function never writes outside the 'dest' buffer, nor reads outside the 'source' buffer.
+    sourceSize : Max supported value is LZ4_MAX_INPUT_SIZE
+    maxDestSize : full or partial size of buffer 'dest' (which must be already allocated)
+    return : the number of bytes written into buffer 'dest' (necessarily <= maxDestSize)
+ or 0 if compression fails
+
+LZ4_decompress_safe() :
+ compressedSize : is the precise full size of the compressed block.
+ maxDecompressedSize : is the size of destination buffer, which must be already allocated.
+ return : the number of bytes decompressed into destination buffer (necessarily <= maxDecompressedSize)
+ If destination buffer is not large enough, decoding will stop and output an error code (<0).
+ If the source stream is detected malformed, the function will stop decoding and return a negative result.
+ This function is protected against buffer overflow exploits, including malicious data packets.
+ It never writes outside output buffer, nor reads outside input buffer.
+*/
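The round trip documented above can be illustrated with a minimal sketch (not part of the vendored sources; buffer handling, the include path and the assumption that the original size is known at decompression time are illustrative):

    /* Hedged usage sketch: compress a buffer, then decompress and verify it.
       Assumes srcSize > 0 and that the caller remembers the original size. */
    #include <stdlib.h>
    #include <string.h>
    #include "lz4.h"   /* include path is illustrative */

    static int lz4_roundtrip(const char* src, int srcSize)
    {
        int bound = LZ4_compressBound(srcSize);          /* worst-case compressed size */
        char* compressed = (char*)malloc(bound);
        char* restored = (char*)malloc(srcSize);
        int ok = 0;
        if (compressed && restored)
        {
            int csize = LZ4_compress_default(src, compressed, srcSize, bound);
            if (csize > 0)
            {
                int dsize = LZ4_decompress_safe(compressed, restored, csize, srcSize);
                ok = (dsize == srcSize) && (memcmp(src, restored, srcSize) == 0);
            }
        }
        free(compressed);
        free(restored);
        return ok;
    }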
+
+
+/**************************************
+* Advanced Functions
+**************************************/
+#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
+#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16)
+
+/*
+LZ4_compressBound() :
+ Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible)
+ This function is primarily useful for memory allocation purposes (destination buffer size).
+ Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example).
+    Note that LZ4_compress_default() compresses faster when dest buffer size is >= LZ4_compressBound(srcSize)
+ inputSize : max supported value is LZ4_MAX_INPUT_SIZE
+ return : maximum output size in a "worst case" scenario
+ or 0, if input size is too large ( > LZ4_MAX_INPUT_SIZE)
+*/
+int LZ4_compressBound(int inputSize);
+
+/*
+LZ4_compress_fast() :
+    Same as LZ4_compress_default(), but allows selecting an "acceleration" factor.
+    The larger the acceleration value, the faster the algorithm, but also the lower the compression ratio.
+    It's a trade-off, and it can be fine-tuned: each successive value adds roughly +3% to speed.
+    An acceleration value of "1" is the same as regular LZ4_compress_default().
+    Values <= 0 will be replaced by ACCELERATION_DEFAULT (see lz4.c), which is 1.
+*/
+int LZ4_compress_fast (const char* source, char* dest, int sourceSize, int maxDestSize, int acceleration);
+
+
+/*
+LZ4_compress_fast_extState() :
+ Same compression function, just using an externally allocated memory space to store compression state.
+ Use LZ4_sizeofState() to know how much memory must be allocated,
+    and allocate it on 8-byte boundaries (using malloc() typically).
+ Then, provide it as 'void* state' to compression function.
+*/
+int LZ4_sizeofState(void);
+int LZ4_compress_fast_extState (void* state, const char* source, char* dest, int inputSize, int maxDestSize, int acceleration);
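A short sketch of this external-state variant, under the assumption that malloc() provides the required 8-byte alignment (function and variable names are illustrative):

    /* Hedged sketch: one-shot compression using caller-owned state memory. */
    #include <stdlib.h>
    #include "lz4.h"

    static int compress_with_ext_state(const char* src, char* dst, int srcSize, int dstCapacity)
    {
        void* state = malloc(LZ4_sizeofState());   /* reusable across many calls */
        int written = 0;
        if (state)
            written = LZ4_compress_fast_extState(state, src, dst, srcSize, dstCapacity, 1);
        free(state);
        return written;                            /* 0 means the data did not fit */
    }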
+
+
+/*
+LZ4_compress_destSize() :
+    Reverses the logic by compressing as much data as possible from the 'source' buffer
+    into the already allocated buffer 'dest' of size 'targetDestSize'.
+    This function either compresses the entire 'source' content into 'dest' if it's large enough,
+    or fills the 'dest' buffer completely with as much data as possible from 'source'.
+    *sourceSizePtr : will be modified to indicate how many bytes were read from 'source' to fill 'dest'.
+ New value is necessarily <= old value.
+ return : Nb bytes written into 'dest' (necessarily <= targetDestSize)
+ or 0 if compression fails
+*/
+int LZ4_compress_destSize (const char* source, char* dest, int* sourceSizePtr, int targetDestSize);
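A sketch of the fixed-budget use case this function targets; the packet abstraction and names are assumptions of the sketch:

    /* Hedged sketch: pack as much of 'src' as fits into a fixed-size packet. */
    #include "lz4.h"

    static int fill_packet(const char* src, int srcSize,
                           char* packet, int packetSize, int* consumed)
    {
        *consumed = srcSize;   /* in: bytes available in 'src' */
        return LZ4_compress_destSize(src, packet, consumed, packetSize);
        /* out: *consumed = bytes actually taken from 'src'; return = bytes written to 'packet' */
    }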
+
+
+/*
+LZ4_decompress_fast() :
+ originalSize : is the original and therefore uncompressed size
+ return : the number of bytes read from the source buffer (in other words, the compressed size)
+ If the source stream is detected malformed, the function will stop decoding and return a negative result.
+ Destination buffer must be already allocated. Its size must be a minimum of 'originalSize' bytes.
+    note : This function fully respects memory boundaries for properly formed compressed data.
+           It is a bit faster than LZ4_decompress_safe().
+           However, it does not provide any protection against intentionally modified data streams (malicious input).
+           Use this function in trusted environments only (data to decode comes from a trusted source).
+*/
+int LZ4_decompress_fast (const char* source, char* dest, int originalSize);
+
+/*
+LZ4_decompress_safe_partial() :
+    This function decompresses a compressed block of size 'compressedSize' at position 'source'
+    into the destination buffer 'dest' of size 'maxDecompressedSize'.
+    The function tries to stop the decompression operation as soon as 'targetOutputSize' has been reached,
+    reducing decompression time.
+    return : the number of bytes decoded in the destination buffer (necessarily <= maxDecompressedSize)
+       Note : this number can be < 'targetOutputSize' if the compressed block to decode is smaller.
+       Always check how many bytes were actually decoded.
+       If the source stream is detected malformed, the function will stop decoding and return a negative result.
+       This function never writes outside of the output buffer, and never reads outside of the input buffer. It is therefore protected against malicious data packets.
+*/
+int LZ4_decompress_safe_partial (const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize);
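As a hedged illustration, partial decoding can be used to peek at only the beginning of a block; the helper below and its parameters are not part of the library:

    /* Hedged sketch: decode only roughly the first 'want' bytes of a block. */
    #include "lz4.h"

    static int peek_prefix(const char* block, int blockSize,
                           char* out, int outCapacity, int want)
    {
        /* Return value is the number of bytes actually decoded (it can differ
           from 'want'); a negative value indicates malformed input. */
        return LZ4_decompress_safe_partial(block, out, blockSize, want, outCapacity);
    }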
+
+
+/***********************************************
+* Streaming Compression Functions
+***********************************************/
+#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE-3)) + 4)
+#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(long long))
+/*
+ * LZ4_stream_t
+ * information structure to track an LZ4 stream.
+ * important : init this structure's content before first use !
+ * note : only allocate the structure directly if you are statically linking LZ4.
+ *        If you are using liblz4 as a DLL, please use the construction methods below instead.
+ */
+typedef struct { long long table[LZ4_STREAMSIZE_U64]; } LZ4_stream_t;
+
+/*
+ * LZ4_resetStream
+ * Use this function to init an allocated LZ4_stream_t structure
+ */
+void LZ4_resetStream (LZ4_stream_t* streamPtr);
+
+/*
+ * LZ4_createStream will allocate and initialize an LZ4_stream_t structure
+ * LZ4_freeStream releases its memory.
+ * In the context of a DLL (liblz4), please use these methods rather than the static struct.
+ * They are more future proof, in case of a change of LZ4_stream_t size.
+ */
+LZ4_stream_t* LZ4_createStream(void);
+int LZ4_freeStream (LZ4_stream_t* streamPtr);
+
+/*
+ * LZ4_loadDict
+ * Use this function to load a static dictionary into LZ4_stream.
+ * Any previous data will be forgotten, only 'dictionary' will remain in memory.
+ * Loading a size of 0 is allowed.
+ * Return : dictionary size, in bytes (necessarily <= 64 KB)
+ */
+int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize);
+
+/*
+ * LZ4_compress_fast_continue
+ * Compresses the buffer content 'src', using data from previously compressed blocks as a dictionary to improve compression ratio.
+ * Important : Previous data blocks are assumed to still be present and unmodified !
+ * 'dst' buffer must be already allocated.
+ * If maxDstSize >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
+ * If not, and if the compressed data cannot fit into the 'dst' buffer, compression stops, and the function returns zero.
+ */
+int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int maxDstSize, int acceleration);
+
+/*
+ * LZ4_saveDict
+ * If the previously compressed data block is not guaranteed to remain available at its memory location,
+ * save it into a safer place (char* safeBuffer).
+ * Note : you don't need to call LZ4_loadDict() afterwards;
+ *        the dictionary is immediately usable, so you can call LZ4_compress_fast_continue().
+ * Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error
+ */
+int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int dictSize);
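Putting the streaming-compression pieces above together, a hedged sketch might look as follows; the chunk size, the 4-byte size framing and the file handling are assumptions of the sketch, and the matching _continue() decoder must be used on the other side:

    /* Hedged streaming-compression sketch: each block is written with a 4-byte size prefix. */
    #include <stdio.h>
    #include <stdint.h>
    #include "lz4.h"

    enum { CHUNK = 16 * 1024 };

    static void stream_compress(FILE* in, FILE* out)
    {
        char inBuf[CHUNK];
        char outBuf[LZ4_COMPRESSBOUND(CHUNK)];
        char dictBuf[64 * 1024];
        LZ4_stream_t* stream = LZ4_createStream();
        int n;
        while ((n = (int)fread(inBuf, 1, CHUNK, in)) > 0)
        {
            int32_t csize = LZ4_compress_fast_continue(stream, inBuf, outBuf, n, (int)sizeof(outBuf), 1);
            if (csize <= 0) break;                       /* cannot happen with a bound-sized buffer */
            fwrite(&csize, sizeof(csize), 1, out);       /* sketch framing: size prefix, then data */
            fwrite(outBuf, 1, (size_t)csize, out);
            LZ4_saveDict(stream, dictBuf, (int)sizeof(dictBuf));  /* keep history before inBuf is reused */
        }
        LZ4_freeStream(stream);
    }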
+
+
+/************************************************
+* Streaming Decompression Functions
+************************************************/
+
+#define LZ4_STREAMDECODESIZE_U64 4
+#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long))
+typedef struct { unsigned long long table[LZ4_STREAMDECODESIZE_U64]; } LZ4_streamDecode_t;
+/*
+ * LZ4_streamDecode_t
+ * information structure to track an LZ4 stream.
+ * init this structure content using LZ4_setStreamDecode or memset() before first use !
+ *
+ * In the context of a DLL (liblz4) please prefer usage of construction methods below.
+ * They are more future proof, in case of a change of LZ4_streamDecode_t size in the future.
+ * LZ4_createStreamDecode will allocate and initialize an LZ4_streamDecode_t structure
+ * LZ4_freeStreamDecode releases its memory.
+ */
+LZ4_streamDecode_t* LZ4_createStreamDecode(void);
+int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream);
+
+/*
+ * LZ4_setStreamDecode
+ * Use this function to indicate where to find the dictionary.
+ * Setting a size of 0 is allowed (same effect as reset).
+ * Return : 1 if OK, 0 if error
+ */
+int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize);
+
+/*
+*_continue() :
+ These decoding functions allow decompression of multiple blocks in "streaming" mode.
+ Previously decoded blocks *must* remain available at the memory position where they were decoded (up to 64 KB)
+  In the case of ring buffers, the decoding buffer must be either :
+ - Exactly same size as encoding buffer, with same update rule (block boundaries at same positions)
+ In which case, the decoding & encoding ring buffer can have any size, including very small ones ( < 64 KB).
+ - Larger than encoding buffer, by a minimum of maxBlockSize more bytes.
+ maxBlockSize is implementation dependent. It's the maximum size you intend to compress into a single block.
+ In which case, encoding and decoding buffers do not need to be synchronized,
+ and encoding ring buffer can have any size, including small ones ( < 64 KB).
+ - _At least_ 64 KB + 8 bytes + maxBlockSize.
+ In which case, encoding and decoding buffers do not need to be synchronized,
+ and encoding ring buffer can have any size, including larger than decoding buffer.
+ Whenever these conditions are not possible, save the last 64KB of decoded data into a safe buffer,
+ and indicate where it is saved using LZ4_setStreamDecode()
+*/
+int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxDecompressedSize);
+int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize);
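A matching decompression sketch for the contract above: every block is appended to one contiguous output buffer, so previously decoded data stays where it was decoded. The 4-byte size framing mirrors the earlier compression sketch and is an assumption, not part of the LZ4 block format:

    /* Hedged streaming-decompression sketch; returns total decoded bytes, or a negative value on error. */
    #include <stdio.h>
    #include <stdint.h>
    #include "lz4.h"

    static int stream_decompress(FILE* in, char* dst, int dstCapacity)
    {
        char cBuf[LZ4_COMPRESSBOUND(16 * 1024)];
        LZ4_streamDecode_t* sd = LZ4_createStreamDecode();
        int total = 0;
        int32_t csize;
        LZ4_setStreamDecode(sd, NULL, 0);                /* start without a dictionary */
        while (fread(&csize, sizeof(csize), 1, in) == 1
            && csize > 0 && (size_t)csize <= sizeof(cBuf))
        {
            int n;
            if (fread(cBuf, 1, (size_t)csize, in) != (size_t)csize) break;
            n = LZ4_decompress_safe_continue(sd, cBuf, dst + total, csize, dstCapacity - total);
            if (n < 0) { total = n; break; }             /* malformed input */
            total += n;                                  /* decoded blocks stay in place in 'dst' */
        }
        LZ4_freeStreamDecode(sd);
        return total;
    }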
+
+
+/*
+Advanced decoding functions :
+*_usingDict() :
+  These decoding functions work the same as
+  a combination of LZ4_setStreamDecode() followed by LZ4_decompress_x_continue().
+  They are stand-alone: they neither need nor update an LZ4_streamDecode_t structure.
+*/
+int LZ4_decompress_safe_usingDict (const char* source, char* dest, int compressedSize, int maxDecompressedSize, const char* dictStart, int dictSize);
+int LZ4_decompress_fast_usingDict (const char* source, char* dest, int originalSize, const char* dictStart, int dictSize);
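A hedged sketch tying the dictionary functions together: LZ4_loadDict() on the compression side, LZ4_decompress_safe_usingDict() on the decompression side, with the same dictionary bytes passed to both; the 64 KB source-size limit only exists so a stack buffer can hold the worst case:

    /* Hedged sketch: one-shot compression/decompression against a preset dictionary.
       Assumes srcSize <= 64 KB and that 'restored' holds at least srcSize bytes. */
    #include "lz4.h"

    static int roundtrip_with_dict(const char* dict, int dictSize,
                                   const char* src, int srcSize, char* restored)
    {
        char compressed[LZ4_COMPRESSBOUND(64 * 1024)];
        LZ4_stream_t* stream = LZ4_createStream();
        int csize;
        LZ4_loadDict(stream, dict, dictSize);
        csize = LZ4_compress_fast_continue(stream, src, compressed, srcSize, (int)sizeof(compressed), 1);
        LZ4_freeStream(stream);
        if (csize <= 0) return -1;
        return LZ4_decompress_safe_usingDict(compressed, restored, csize, srcSize, dict, dictSize);
    }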
+
+
+
+/**************************************
+* Obsolete Functions
+**************************************/
+/* Deprecation warnings */
+/* Should these warning messages be a problem,
+   it is generally possible to disable them,
+   with -Wno-deprecated-declarations for gcc
+   or _CRT_SECURE_NO_WARNINGS in Visual Studio, for example.
+ You can also define LZ4_DEPRECATE_WARNING_DEFBLOCK. */
+#ifndef LZ4_DEPRECATE_WARNING_DEFBLOCK
+# define LZ4_DEPRECATE_WARNING_DEFBLOCK
+# define LZ4_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+# if (LZ4_GCC_VERSION >= 405) || defined(__clang__)
+# define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
+# elif (LZ4_GCC_VERSION >= 301)
+# define LZ4_DEPRECATED(message) __attribute__((deprecated))
+# elif defined(_MSC_VER)
+# define LZ4_DEPRECATED(message) __declspec(deprecated(message))
+# else
+# pragma message("WARNING: You need to implement LZ4_DEPRECATED for this compiler")
+# define LZ4_DEPRECATED(message)
+# endif
+#endif /* LZ4_DEPRECATE_WARNING_DEFBLOCK */
+
+/* Obsolete compression functions */
+/* These functions are planned to start generating warnings by r131 approximately */
+int LZ4_compress (const char* source, char* dest, int sourceSize);
+int LZ4_compress_limitedOutput (const char* source, char* dest, int sourceSize, int maxOutputSize);
+int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize);
+int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
+int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize);
+int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
+
+/* Obsolete decompression functions */
+/* These function names are completely deprecated and must no longer be used.
+ They are only provided here for compatibility with older programs.
+ - LZ4_uncompress is the same as LZ4_decompress_fast
+ - LZ4_uncompress_unknownOutputSize is the same as LZ4_decompress_safe
+ These function prototypes are now disabled; uncomment them only if you really need them.
+ It is highly recommended to stop using these prototypes and migrate to maintained ones */
+/* int LZ4_uncompress (const char* source, char* dest, int outputSize); */
+/* int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); */
+
+/* Obsolete streaming functions; use new streaming interface whenever possible */
+LZ4_DEPRECATED("use LZ4_createStream() instead") void* LZ4_create (char* inputBuffer);
+LZ4_DEPRECATED("use LZ4_createStream() instead") int LZ4_sizeofStreamState(void);
+LZ4_DEPRECATED("use LZ4_resetStream() instead") int LZ4_resetStreamState(void* state, char* inputBuffer);
+LZ4_DEPRECATED("use LZ4_saveDict() instead") char* LZ4_slideInputBuffer (void* state);
+
+/* Obsolete streaming decoding functions */
+LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize);
+LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize);
+
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/thirdparty/etcpak/mmap.cpp b/thirdparty/etcpak/mmap.cpp
new file mode 100644
index 0000000000..c2460ee9e4
--- /dev/null
+++ b/thirdparty/etcpak/mmap.cpp
@@ -0,0 +1,38 @@
+#include "mmap.hpp"
+
+#ifdef _WIN32
+# include <io.h>
+# include <windows.h>
+
+void* mmap( void* addr, size_t length, int prot, int flags, int fd, off_t offset )
+{
+ HANDLE hnd;
+ void* map = nullptr;
+
+ switch( prot )
+ {
+ case PROT_READ:
+ if( hnd = CreateFileMapping( HANDLE( _get_osfhandle( fd ) ), nullptr, PAGE_READONLY, 0, DWORD( length ), nullptr ) )
+ {
+ map = MapViewOfFile( hnd, FILE_MAP_READ, 0, 0, length );
+ CloseHandle( hnd );
+ }
+ break;
+ case PROT_WRITE:
+ if( hnd = CreateFileMapping( HANDLE( _get_osfhandle( fd ) ), nullptr, PAGE_READWRITE, 0, DWORD( length ), nullptr ) )
+ {
+ map = MapViewOfFile( hnd, FILE_MAP_WRITE, 0, 0, length );
+ CloseHandle( hnd );
+ }
+ break;
+ }
+
+ return map ? (char*)map + offset : (void*)-1;
+}
+
+int munmap( void* addr, size_t length )
+{
+ return UnmapViewOfFile( addr ) != 0 ? 0 : -1;
+}
+
+#endif
diff --git a/thirdparty/etcpak/mmap.hpp b/thirdparty/etcpak/mmap.hpp
new file mode 100644
index 0000000000..e4cfe7759c
--- /dev/null
+++ b/thirdparty/etcpak/mmap.hpp
@@ -0,0 +1,19 @@
+#ifndef __MMAP_HPP__
+#define __MMAP_HPP__
+
+#ifndef _WIN32
+# include <sys/mman.h>
+#else
+# include <string.h>
+# include <sys/types.h>
+
+# define PROT_READ 1
+# define PROT_WRITE 2
+# define MAP_SHARED 0
+
+void* mmap( void* addr, size_t length, int prot, int flags, int fd, off_t offset );
+int munmap( void* addr, size_t length );
+
+#endif
+
+#endif
diff --git a/thirdparty/etcpak/patches/libpng-unbundle.patch b/thirdparty/etcpak/patches/libpng-unbundle.patch
new file mode 100644
index 0000000000..e3c07412c6
--- /dev/null
+++ b/thirdparty/etcpak/patches/libpng-unbundle.patch
@@ -0,0 +1,13 @@
+diff --git a/thirdparty/etcpak/Bitmap.cpp b/thirdparty/etcpak/Bitmap.cpp
+index 6aa36f5caa..ef318318ac 100644
+--- a/thirdparty/etcpak/Bitmap.cpp
++++ b/thirdparty/etcpak/Bitmap.cpp
+@@ -3,7 +3,7 @@
+ #include <string.h>
+ #include <assert.h>
+
+-#include "libpng/png.h"
++#include <png.h>
+ #include "lz4/lz4.h"
+
+ #include "Bitmap.hpp"
diff --git a/thirdparty/etcpak/patches/llvm-c++11-narrowing-errors.patch b/thirdparty/etcpak/patches/llvm-c++11-narrowing-errors.patch
new file mode 100644
index 0000000000..ab0d1e63a2
--- /dev/null
+++ b/thirdparty/etcpak/patches/llvm-c++11-narrowing-errors.patch
@@ -0,0 +1,64 @@
+diff --git a/thirdparty/etcpak/BlockData.cpp b/thirdparty/etcpak/BlockData.cpp
+index bd738085f3..395b55246b 100644
+--- a/thirdparty/etcpak/BlockData.cpp
++++ b/thirdparty/etcpak/BlockData.cpp
+@@ -334,10 +334,10 @@ static etcpak_force_inline void DecodeT( uint64_t block, uint32_t* dst, uint32_t
+ const auto c3b = clampu8( cb1 - table59T58H[codeword] );
+
+ const uint32_t col_tab[4] = {
+- cr0 | ( cg0 << 8 ) | ( cb0 << 16 ) | 0xFF000000,
+- c2r | ( c2g << 8 ) | ( c2b << 16 ) | 0xFF000000,
+- cr1 | ( cg1 << 8 ) | ( cb1 << 16 ) | 0xFF000000,
+- c3r | ( c3g << 8 ) | ( c3b << 16 ) | 0xFF000000
++ uint32_t(cr0 | ( cg0 << 8 ) | ( cb0 << 16 ) | 0xFF000000),
++ uint32_t(c2r | ( c2g << 8 ) | ( c2b << 16 ) | 0xFF000000),
++ uint32_t(cr1 | ( cg1 << 8 ) | ( cb1 << 16 ) | 0xFF000000),
++ uint32_t(c3r | ( c3g << 8 ) | ( c3b << 16 ) | 0xFF000000)
+ };
+
+ const uint32_t indexes = ( block >> 32 ) & 0xFFFFFFFF;
+@@ -389,10 +389,10 @@ static etcpak_force_inline void DecodeTAlpha( uint64_t block, uint64_t alpha, ui
+ const auto c3b = clampu8( cb1 - table59T58H[codeword] );
+
+ const uint32_t col_tab[4] = {
+- cr0 | ( cg0 << 8 ) | ( cb0 << 16 ),
+- c2r | ( c2g << 8 ) | ( c2b << 16 ),
+- cr1 | ( cg1 << 8 ) | ( cb1 << 16 ),
+- c3r | ( c3g << 8 ) | ( c3b << 16 )
++ uint32_t(cr0 | ( cg0 << 8 ) | ( cb0 << 16 )),
++ uint32_t(c2r | ( c2g << 8 ) | ( c2b << 16 )),
++ uint32_t(cr1 | ( cg1 << 8 ) | ( cb1 << 16 )),
++ uint32_t(c3r | ( c3g << 8 ) | ( c3b << 16 ))
+ };
+
+ const uint32_t indexes = ( block >> 32 ) & 0xFFFFFFFF;
+@@ -436,10 +436,10 @@ static etcpak_force_inline void DecodeH( uint64_t block, uint32_t* dst, uint32_t
+ const auto codeword = codeword_hi | codeword_lo;
+
+ const uint32_t col_tab[] = {
+- clampu8( r0 + table59T58H[codeword] ) | ( clampu8( g0 + table59T58H[codeword] ) << 8 ) | ( clampu8( b0 + table59T58H[codeword] ) << 16 ),
+- clampu8( r0 - table59T58H[codeword] ) | ( clampu8( g0 - table59T58H[codeword] ) << 8 ) | ( clampu8( b0 - table59T58H[codeword] ) << 16 ),
+- clampu8( r1 + table59T58H[codeword] ) | ( clampu8( g1 + table59T58H[codeword] ) << 8 ) | ( clampu8( b1 + table59T58H[codeword] ) << 16 ),
+- clampu8( r1 - table59T58H[codeword] ) | ( clampu8( g1 - table59T58H[codeword] ) << 8 ) | ( clampu8( b1 - table59T58H[codeword] ) << 16 )
++ uint32_t(clampu8( r0 + table59T58H[codeword] ) | ( clampu8( g0 + table59T58H[codeword] ) << 8 ) | ( clampu8( b0 + table59T58H[codeword] ) << 16 )),
++ uint32_t(clampu8( r0 - table59T58H[codeword] ) | ( clampu8( g0 - table59T58H[codeword] ) << 8 ) | ( clampu8( b0 - table59T58H[codeword] ) << 16 )),
++ uint32_t(clampu8( r1 + table59T58H[codeword] ) | ( clampu8( g1 + table59T58H[codeword] ) << 8 ) | ( clampu8( b1 + table59T58H[codeword] ) << 16 )),
++ uint32_t(clampu8( r1 - table59T58H[codeword] ) | ( clampu8( g1 - table59T58H[codeword] ) << 8 ) | ( clampu8( b1 - table59T58H[codeword] ) << 16 ))
+ };
+
+ for( uint8_t j = 0; j < 4; j++ )
+@@ -483,10 +483,10 @@ static etcpak_force_inline void DecodeHAlpha( uint64_t block, uint64_t alpha, ui
+ const auto tbl = g_alpha[(alpha >> 48) & 0xF];
+
+ const uint32_t col_tab[] = {
+- clampu8( r0 + table59T58H[codeword] ) | ( clampu8( g0 + table59T58H[codeword] ) << 8 ) | ( clampu8( b0 + table59T58H[codeword] ) << 16 ),
+- clampu8( r0 - table59T58H[codeword] ) | ( clampu8( g0 - table59T58H[codeword] ) << 8 ) | ( clampu8( b0 - table59T58H[codeword] ) << 16 ),
+- clampu8( r1 + table59T58H[codeword] ) | ( clampu8( g1 + table59T58H[codeword] ) << 8 ) | ( clampu8( b1 + table59T58H[codeword] ) << 16 ),
+- clampu8( r1 - table59T58H[codeword] ) | ( clampu8( g1 - table59T58H[codeword] ) << 8 ) | ( clampu8( b1 - table59T58H[codeword] ) << 16 )
++ uint32_t(clampu8( r0 + table59T58H[codeword] ) | ( clampu8( g0 + table59T58H[codeword] ) << 8 ) | ( clampu8( b0 + table59T58H[codeword] ) << 16 )),
++ uint32_t(clampu8( r0 - table59T58H[codeword] ) | ( clampu8( g0 - table59T58H[codeword] ) << 8 ) | ( clampu8( b0 - table59T58H[codeword] ) << 16 )),
++ uint32_t(clampu8( r1 + table59T58H[codeword] ) | ( clampu8( g1 + table59T58H[codeword] ) << 8 ) | ( clampu8( b1 + table59T58H[codeword] ) << 16 )),
++ uint32_t(clampu8( r1 - table59T58H[codeword] ) | ( clampu8( g1 - table59T58H[codeword] ) << 8 ) | ( clampu8( b1 - table59T58H[codeword] ) << 16 ))
+ };
+
+ for( uint8_t j = 0; j < 4; j++ )
diff --git a/thirdparty/etcpak/patches/windows-mingw-fixes.patch b/thirdparty/etcpak/patches/windows-mingw-fixes.patch
new file mode 100644
index 0000000000..1da60e4a4f
--- /dev/null
+++ b/thirdparty/etcpak/patches/windows-mingw-fixes.patch
@@ -0,0 +1,63 @@
+diff --git a/thirdparty/etcpak/BlockData.cpp b/thirdparty/etcpak/BlockData.cpp
+index a2cd032c5b..bd738085f3 100644
+--- a/thirdparty/etcpak/BlockData.cpp
++++ b/thirdparty/etcpak/BlockData.cpp
+@@ -15,7 +15,7 @@
+ # include <arm_neon.h>
+ #endif
+
+-#ifdef __SSE4_1__
++#if defined __SSE4_1__ || defined __AVX2__ || defined _MSC_VER
+ # ifdef _MSC_VER
+ # include <intrin.h>
+ # include <Windows.h>
+@@ -24,12 +24,6 @@
+ # else
+ # include <x86intrin.h>
+ # endif
+-#else
+-# ifndef _MSC_VER
+-# include <byteswap.h>
+-# define _bswap(x) bswap_32(x)
+-# define _bswap64(x) bswap_64(x)
+-# endif
+ #endif
+
+ #ifndef _bswap
+diff --git a/thirdparty/etcpak/ProcessRGB.cpp b/thirdparty/etcpak/ProcessRGB.cpp
+index 220d5c55e2..9dc5a78b67 100644
+--- a/thirdparty/etcpak/ProcessRGB.cpp
++++ b/thirdparty/etcpak/ProcessRGB.cpp
+@@ -1,5 +1,6 @@
+ #include <array>
+ #include <string.h>
++#include <limits>
+
+ #ifdef __ARM_NEON
+ # include <arm_neon.h>
+@@ -21,12 +22,6 @@
+ # else
+ # include <x86intrin.h>
+ # endif
+-#else
+-# ifndef _MSC_VER
+-# include <byteswap.h>
+-# define _bswap(x) bswap_32(x)
+-# define _bswap64(x) bswap_64(x)
+-# endif
+ #endif
+
+ #ifndef _bswap
+diff --git a/thirdparty/etcpak/System.cpp b/thirdparty/etcpak/System.cpp
+index 1383d0ecd0..a09b289cb2 100644
+--- a/thirdparty/etcpak/System.cpp
++++ b/thirdparty/etcpak/System.cpp
+@@ -35,7 +35,7 @@ unsigned int System::CPUCores()
+
+ void System::SetThreadName( std::thread& thread, const char* name )
+ {
+-#ifdef _WIN32
++#ifdef _MSC_VER
+ const DWORD MS_VC_EXCEPTION=0x406D1388;
+
+ # pragma pack( push, 8 )