Diffstat (limited to 'thirdparty')
76 files changed, 28479 insertions, 2628 deletions
diff --git a/thirdparty/README.md b/thirdparty/README.md
index 756b31b995..c846094a6a 100644
--- a/thirdparty/README.md
+++ b/thirdparty/README.md
@@ -37,6 +37,18 @@
 Check the diff of enet.h, protocol.c, and host.c with the 1.3.13 tarball before the next update.
 
 
+## etc2comp
+
+- Upstream: https://github.com/google/etc2comp
+- Version: 9cd0f9c (git)
+- License: Apache
+
+Files extracted from upstream source:
+
+- all .cpp and .h files in EtcLib/
+- README.md, LICENSE, AUTHORS
+
+
 ## fonts
 
 - Upstream: ?
@@ -60,7 +72,7 @@ Files extracted from upstream source:
 ## glad
 
 - Upstream: https://github.com/Dav1dde/glad
-- Version: 0.1.13a0
+- Version: 0.1.14a0
 - License: MIT
 
 The files we package are automatically generated.
@@ -296,17 +308,6 @@ Files extracted from upstream source:
 - LICENSE.TXT
 
 
-## rg-etc1
-
-- Upstream: https://github.com/richgel999/rg-etc1
-- Version: 1.04
-- License: zlib
-
-Files extracted from upstream source:
-
-- `rg_etc1.{cpp,h}`
-
-
 ## rtaudio
 
 - Upstream: http://www.music.mcgill.ca/~gary/rtaudio/
@@ -366,3 +367,14 @@ https://github.com/godotengine/godot/commit/37f5e1dcd94611dd5b670f013abf0323e8b4
 Files extracted from upstream source:
 
 - all .c and .h files
+
+## zstd
+
+- Upstream: https://github.com/facebook/zstd
+- Version: 1.2.0
+- License: BSD-3-Clause
+
+Files extracted from upstream source:
+
+- all .c and .h under lib/
+- README.md, LICENSE, PATENTS
diff --git a/thirdparty/etc2comp/AUTHORS b/thirdparty/etc2comp/AUTHORS
new file mode 100644
index 0000000000..32daca27fe
--- /dev/null
+++ b/thirdparty/etc2comp/AUTHORS
@@ -0,0 +1,7 @@
+# This is the list of Etc2Comp authors for copyright purposes.
+#
+# This does not necessarily list everyone who has contributed code, since in
+# some cases, their employer may be the copyright holder. To see the full list
+# of contributors, see the revision history in source control.
+Google Inc.
+Blue Shift Inc.
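The library added below exposes a small C-style front end (Etc::Encode and Etc::EncodeMipmaps, declared in thirdparty/etc2comp/Etc.h) that an importer can call without touching the block-level classes. The sketch that follows is illustrative only and is not part of this commit: the compress_etc2_rgb8 helper name, the RGB8 target, the REC709 error metric, the job counts, and the assumption that the caller frees the returned bit buffer with delete[] are placeholders, not Godot's actual integration code.

// Hypothetical usage sketch (not from the commit): compress an RGBA float
// image to ETC2 RGB8 through the C-style API declared in Etc.h.
#include "Etc.h"

#include <vector>

std::vector<unsigned char> compress_etc2_rgb8(float *rgba, unsigned int width, unsigned int height)
{
    unsigned char *bits = nullptr;     // encoded payload, allocated by the encoder
    unsigned int bits_bytes = 0;       // payload size in bytes
    unsigned int ext_w = 0, ext_h = 0; // dimensions padded up to multiples of 4
    int encode_ms = 0;                 // wall-clock encoding time

    Etc::Encode(rgba, width, height,
            Etc::Image::Format::RGB8,     // ETC2 RGB8 target (assumed choice)
            Etc::ErrorMetric::REC709,     // perceptual luma/chroma weighting
            ETCCOMP_DEFAULT_EFFORT_LEVEL, // 40.0f on the 0..100 quality scale
            4, 4,                         // jobs / max jobs (placeholder thread counts)
            &bits, &bits_bytes, &ext_w, &ext_h,
            &encode_ms, /* verbose */ false);

    std::vector<unsigned char> payload(bits, bits + bits_bytes);
    delete[] bits; // assumed caller-owned; EncodeMipmaps() uses a delete[] deleter for the same buffer
    return payload;
}

EncodeMipmaps() follows the same pattern, but fills one RawImage entry (extended size, byte count and a shared_ptr to the encoding bits) per generated mip level instead of returning a single buffer.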
diff --git a/thirdparty/etc2comp/Etc.cpp b/thirdparty/etc2comp/Etc.cpp new file mode 100644 index 0000000000..a5ee706048 --- /dev/null +++ b/thirdparty/etc2comp/Etc.cpp @@ -0,0 +1,128 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "EtcConfig.h" +#include "Etc.h" +#include "EtcFilter.h" + +#include <string.h> + +namespace Etc +{ + // ---------------------------------------------------------------------------------------------------- + // C-style inteface to the encoder + // + void Encode(float *a_pafSourceRGBA, + unsigned int a_uiSourceWidth, + unsigned int a_uiSourceHeight, + Image::Format a_format, + ErrorMetric a_eErrMetric, + float a_fEffort, + unsigned int a_uiJobs, + unsigned int a_uiMaxJobs, + unsigned char **a_ppaucEncodingBits, + unsigned int *a_puiEncodingBitsBytes, + unsigned int *a_puiExtendedWidth, + unsigned int *a_puiExtendedHeight, + int *a_piEncodingTime_ms, bool a_bVerboseOutput) + { + + Image image(a_pafSourceRGBA, a_uiSourceWidth, + a_uiSourceHeight, + a_eErrMetric); + image.m_bVerboseOutput = a_bVerboseOutput; + image.Encode(a_format, a_eErrMetric, a_fEffort, a_uiJobs, a_uiMaxJobs); + + *a_ppaucEncodingBits = image.GetEncodingBits(); + *a_puiEncodingBitsBytes = image.GetEncodingBitsBytes(); + *a_puiExtendedWidth = image.GetExtendedWidth(); + *a_puiExtendedHeight = image.GetExtendedHeight(); + *a_piEncodingTime_ms = image.GetEncodingTimeMs(); + } + + void EncodeMipmaps(float *a_pafSourceRGBA, + unsigned int a_uiSourceWidth, + unsigned int a_uiSourceHeight, + Image::Format a_format, + ErrorMetric a_eErrMetric, + float a_fEffort, + unsigned int a_uiJobs, + unsigned int a_uiMaxJobs, + unsigned int a_uiMaxMipmaps, + unsigned int a_uiMipFilterFlags, + RawImage* a_pMipmapImages, + int *a_piEncodingTime_ms, + bool a_bVerboseOutput) + { + auto mipWidth = a_uiSourceWidth; + auto mipHeight = a_uiSourceHeight; + int totalEncodingTime = 0; + for(unsigned int mip = 0; mip < a_uiMaxMipmaps && mipWidth >= 1 && mipHeight >= 1; mip++) + { + float* pImageData = nullptr; + float* pMipImage = nullptr; + + if(mip == 0) + { + pImageData = a_pafSourceRGBA; + } + else + { + pMipImage = new float[mipWidth*mipHeight*4]; + if(FilterTwoPass(a_pafSourceRGBA, a_uiSourceWidth, a_uiSourceHeight, pMipImage, mipWidth, mipHeight, a_uiMipFilterFlags, Etc::FilterLanczos3) ) + { + pImageData = pMipImage; + } + } + + if ( pImageData ) + { + + Image image(pImageData, mipWidth, mipHeight, a_eErrMetric); + + image.m_bVerboseOutput = a_bVerboseOutput; + image.Encode(a_format, a_eErrMetric, a_fEffort, a_uiJobs, a_uiMaxJobs); + + a_pMipmapImages[mip].paucEncodingBits = std::shared_ptr<unsigned char>(image.GetEncodingBits(), [](unsigned char *p) { delete[] p; }); + a_pMipmapImages[mip].uiEncodingBitsBytes = image.GetEncodingBitsBytes(); + a_pMipmapImages[mip].uiExtendedWidth = image.GetExtendedWidth(); + a_pMipmapImages[mip].uiExtendedHeight = image.GetExtendedHeight(); + + totalEncodingTime += image.GetEncodingTimeMs(); + } + + if(pMipImage) 
+ { + delete[] pMipImage; + } + + if (!pImageData) + { + break; + } + + mipWidth >>= 1; + mipHeight >>= 1; + } + + *a_piEncodingTime_ms = totalEncodingTime; + } + + + // ---------------------------------------------------------------------------------------------------- + // + +} diff --git a/thirdparty/etc2comp/Etc.h b/thirdparty/etc2comp/Etc.h new file mode 100644 index 0000000000..439388d649 --- /dev/null +++ b/thirdparty/etc2comp/Etc.h @@ -0,0 +1,71 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "EtcConfig.h" +#include "EtcImage.h" +#include "EtcColor.h" +#include "EtcErrorMetric.h" +#include <memory> + +#define ETCCOMP_MIN_EFFORT_LEVEL (0.0f) +#define ETCCOMP_DEFAULT_EFFORT_LEVEL (40.0f) +#define ETCCOMP_MAX_EFFORT_LEVEL (100.0f) + +namespace Etc +{ + class Block4x4EncodingBits; + + struct RawImage + { + int uiExtendedWidth; + int uiExtendedHeight; + unsigned int uiEncodingBitsBytes; + std::shared_ptr<unsigned char> paucEncodingBits; + }; + + + + // C-style inteface to the encoder + void Encode(float *a_pafSourceRGBA, + unsigned int a_uiSourceWidth, + unsigned int a_uiSourceHeight, + Image::Format a_format, + ErrorMetric a_eErrMetric, + float a_fEffort, + unsigned int a_uiJobs, + unsigned int a_uimaxJobs, + unsigned char **a_ppaucEncodingBits, + unsigned int *a_puiEncodingBitsBytes, + unsigned int *a_puiExtendedWidth, + unsigned int *a_puiExtendedHeight, + int *a_piEncodingTime_ms, bool a_bVerboseOutput = false); + + void EncodeMipmaps(float *a_pafSourceRGBA, + unsigned int a_uiSourceWidth, + unsigned int a_uiSourceHeight, + Image::Format a_format, + ErrorMetric a_eErrMetric, + float a_fEffort, + unsigned int a_uiJobs, + unsigned int a_uiMaxJobs, + unsigned int a_uiMaxMipmaps, + unsigned int a_uiMipFilterFlags, + RawImage* a_pMipmaps, + int *a_piEncodingTime_ms, bool a_bVerboseOutput = false); + +} diff --git a/thirdparty/etc2comp/EtcBlock4x4.cpp b/thirdparty/etc2comp/EtcBlock4x4.cpp new file mode 100644 index 0000000000..3082fe60db --- /dev/null +++ b/thirdparty/etc2comp/EtcBlock4x4.cpp @@ -0,0 +1,425 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* +EtcBlock4x4.cpp + +Implements the state associated with each 4x4 block of pixels in an image + +Source images that are not a multiple of 4x4 are extended to fill the Block4x4 using pixels with an +alpha of NAN + +*/ + +#include "EtcConfig.h" +#include "EtcBlock4x4.h" + +#include "EtcBlock4x4EncodingBits.h" +#include "EtcColor.h" +#include "EtcImage.h" +#include "EtcColorFloatRGBA.h" +#include "EtcBlock4x4Encoding_RGB8.h" +#include "EtcBlock4x4Encoding_RGBA8.h" +#include "EtcBlock4x4Encoding_RGB8A1.h" +#include "EtcBlock4x4Encoding_R11.h" +#include "EtcBlock4x4Encoding_RG11.h" + +#include <stdio.h> +#include <string.h> +#include <assert.h> + +namespace Etc +{ + // ETC pixels are scanned vertically. + // this mapping is for when someone wants to scan the ETC pixels horizontally + const unsigned int Block4x4::s_auiPixelOrderHScan[PIXELS] = { 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15 }; + + // ---------------------------------------------------------------------------------------------------- + // + Block4x4::Block4x4(void) + { + m_pimageSource = nullptr; + m_uiSourceH = 0; + m_uiSourceV = 0; + + m_sourcealphamix = SourceAlphaMix::UNKNOWN; + m_boolBorderPixels = false; + m_boolPunchThroughPixels = false; + + m_pencoding = nullptr; + + m_errormetric = ErrorMetric::NUMERIC; + + } + Block4x4::~Block4x4() + { + m_pimageSource = nullptr; + if (m_pencoding) + { + delete m_pencoding; + m_pencoding = nullptr; + } + } + // ---------------------------------------------------------------------------------------------------- + // initialization prior to encoding from a source image + // [a_uiSourceH,a_uiSourceV] is the location of the block in a_pimageSource + // a_paucEncodingBits is the place to store the final encoding + // a_errormetric is used for finding the best encoding + // + void Block4x4::InitFromSource(Image *a_pimageSource, + unsigned int a_uiSourceH, unsigned int a_uiSourceV, + unsigned char *a_paucEncodingBits, + ErrorMetric a_errormetric) + { + + Block4x4(); + + m_pimageSource = a_pimageSource; + m_uiSourceH = a_uiSourceH; + m_uiSourceV = a_uiSourceV; + m_errormetric = a_errormetric; + + SetSourcePixels(); + + // set block encoder function + switch (m_pimageSource->GetFormat()) + { + case Image::Format::ETC1: + m_pencoding = new Block4x4Encoding_ETC1; + break; + + case Image::Format::RGB8: + case Image::Format::SRGB8: + m_pencoding = new Block4x4Encoding_RGB8; + break; + + case Image::Format::RGBA8: + case Image::Format::SRGBA8: + if (a_errormetric == RGBX) + { + m_pencoding = new Block4x4Encoding_RGBA8; + } + else + { + switch (m_sourcealphamix) + { + case SourceAlphaMix::OPAQUE: + m_pencoding = new Block4x4Encoding_RGBA8_Opaque; + break; + + case SourceAlphaMix::TRANSPARENT: + m_pencoding = new Block4x4Encoding_RGBA8_Transparent; + break; + + case SourceAlphaMix::TRANSLUCENT: + m_pencoding = new Block4x4Encoding_RGBA8; + break; + + default: + assert(0); + break; + } + break; + } + break; + + case Image::Format::RGB8A1: + case Image::Format::SRGB8A1: + switch (m_sourcealphamix) + { + case SourceAlphaMix::OPAQUE: + m_pencoding = new Block4x4Encoding_RGB8A1_Opaque; + break; + + case SourceAlphaMix::TRANSPARENT: + m_pencoding = new Block4x4Encoding_RGB8A1_Transparent; + break; + + case SourceAlphaMix::TRANSLUCENT: + if (m_boolPunchThroughPixels) + { + m_pencoding = new Block4x4Encoding_RGB8A1; + } + else + { + m_pencoding = new Block4x4Encoding_RGB8A1_Opaque; + } + break; + + default: + assert(0); + break; + } + break; + + case Image::Format::R11: + case 
Image::Format::SIGNED_R11: + m_pencoding = new Block4x4Encoding_R11; + break; + case Image::Format::RG11: + case Image::Format::SIGNED_RG11: + m_pencoding = new Block4x4Encoding_RG11; + break; + default: + assert(0); + break; + } + + m_pencoding->InitFromSource(this, m_afrgbaSource, + a_paucEncodingBits, a_errormetric); + + } + + // ---------------------------------------------------------------------------------------------------- + // initialization of encoding state from a prior encoding using encoding bits + // [a_uiSourceH,a_uiSourceV] is the location of the block in a_pimageSource + // a_paucEncodingBits is the place to read the prior encoding + // a_imageformat is used to determine how to interpret a_paucEncodingBits + // a_errormetric was used for the prior encoding + // + void Block4x4::InitFromEtcEncodingBits(Image::Format a_imageformat, + unsigned int a_uiSourceH, unsigned int a_uiSourceV, + unsigned char *a_paucEncodingBits, + Image *a_pimageSource, + ErrorMetric a_errormetric) + { + Block4x4(); + + m_pimageSource = a_pimageSource; + m_uiSourceH = a_uiSourceH; + m_uiSourceV = a_uiSourceV; + m_errormetric = a_errormetric; + + SetSourcePixels(); + + // set block encoder function + switch (a_imageformat) + { + case Image::Format::ETC1: + m_pencoding = new Block4x4Encoding_ETC1; + break; + + case Image::Format::RGB8: + case Image::Format::SRGB8: + m_pencoding = new Block4x4Encoding_RGB8; + break; + + case Image::Format::RGBA8: + case Image::Format::SRGBA8: + m_pencoding = new Block4x4Encoding_RGBA8; + break; + + case Image::Format::RGB8A1: + case Image::Format::SRGB8A1: + m_pencoding = new Block4x4Encoding_RGB8A1; + break; + + case Image::Format::R11: + case Image::Format::SIGNED_R11: + m_pencoding = new Block4x4Encoding_R11; + break; + case Image::Format::RG11: + case Image::Format::SIGNED_RG11: + m_pencoding = new Block4x4Encoding_RG11; + break; + default: + assert(0); + break; + } + + m_pencoding->InitFromEncodingBits(this, a_paucEncodingBits, m_afrgbaSource, + m_pimageSource->GetErrorMetric()); + + } + + // ---------------------------------------------------------------------------------------------------- + // set source pixels from m_pimageSource + // set m_alphamix + // + void Block4x4::SetSourcePixels(void) + { + + Image::Format imageformat = m_pimageSource->GetFormat(); + + // alpha census + unsigned int uiTransparentSourcePixels = 0; + unsigned int uiOpaqueSourcePixels = 0; + + // copy source to consecutive memory locations + // convert from image horizontal scan to block vertical scan + unsigned int uiPixel = 0; + for (unsigned int uiBlockPixelH = 0; uiBlockPixelH < Block4x4::COLUMNS; uiBlockPixelH++) + { + unsigned int uiSourcePixelH = m_uiSourceH + uiBlockPixelH; + + for (unsigned int uiBlockPixelV = 0; uiBlockPixelV < Block4x4::ROWS; uiBlockPixelV++) + { + unsigned int uiSourcePixelV = m_uiSourceV + uiBlockPixelV; + + ColorFloatRGBA *pfrgbaSource = m_pimageSource->GetSourcePixel(uiSourcePixelH, uiSourcePixelV); + + // if pixel extends beyond source image because of block padding + if (pfrgbaSource == nullptr) + { + m_afrgbaSource[uiPixel] = ColorFloatRGBA(0.0f, 0.0f, 0.0f, NAN); // denotes border pixel + m_boolBorderPixels = true; + uiTransparentSourcePixels++; + } + else + { + //get teh current pixel data, and store some of the attributes + //before capping values to fit the encoder type + + m_afrgbaSource[uiPixel] = (*pfrgbaSource).ClampRGBA(); + + if (m_afrgbaSource[uiPixel].fA == 1.0f || m_errormetric == RGBX) + { + m_pimageSource->m_iNumOpaquePixels++; + } + else 
if (m_afrgbaSource[uiPixel].fA == 0.0f) + { + m_pimageSource->m_iNumTransparentPixels++; + } + else if(m_afrgbaSource[uiPixel].fA > 0.0f && m_afrgbaSource[uiPixel].fA < 1.0f) + { + m_pimageSource->m_iNumTranslucentPixels++; + } + else + { + m_pimageSource->m_numOutOfRangeValues.fA++; + } + + if (m_afrgbaSource[uiPixel].fR != 0.0f) + { + m_pimageSource->m_numColorValues.fR++; + //make sure we are getting a float between 0-1 + if (m_afrgbaSource[uiPixel].fR - 1.0f > 0.0f) + { + m_pimageSource->m_numOutOfRangeValues.fR++; + } + } + + if (m_afrgbaSource[uiPixel].fG != 0.0f) + { + m_pimageSource->m_numColorValues.fG++; + if (m_afrgbaSource[uiPixel].fG - 1.0f > 0.0f) + { + m_pimageSource->m_numOutOfRangeValues.fG++; + } + } + if (m_afrgbaSource[uiPixel].fB != 0.0f) + { + m_pimageSource->m_numColorValues.fB++; + if (m_afrgbaSource[uiPixel].fB - 1.0f > 0.0f) + { + m_pimageSource->m_numOutOfRangeValues.fB++; + } + } + // for formats with no alpha, set source alpha to 1 + if (imageformat == Image::Format::ETC1 || + imageformat == Image::Format::RGB8 || + imageformat == Image::Format::SRGB8) + { + m_afrgbaSource[uiPixel].fA = 1.0f; + } + + if (imageformat == Image::Format::R11 || + imageformat == Image::Format::SIGNED_R11) + { + m_afrgbaSource[uiPixel].fA = 1.0f; + m_afrgbaSource[uiPixel].fG = 0.0f; + m_afrgbaSource[uiPixel].fB = 0.0f; + } + + if (imageformat == Image::Format::RG11 || + imageformat == Image::Format::SIGNED_RG11) + { + m_afrgbaSource[uiPixel].fA = 1.0f; + m_afrgbaSource[uiPixel].fB = 0.0f; + } + + + // for RGB8A1, set source alpha to 0.0 or 1.0 + // set punch through flag + if (imageformat == Image::Format::RGB8A1 || + imageformat == Image::Format::SRGB8A1) + { + if (m_afrgbaSource[uiPixel].fA >= 0.5f) + { + m_afrgbaSource[uiPixel].fA = 1.0f; + } + else + { + m_afrgbaSource[uiPixel].fA = 0.0f; + m_boolPunchThroughPixels = true; + } + } + + if (m_afrgbaSource[uiPixel].fA == 1.0f || m_errormetric == RGBX) + { + uiOpaqueSourcePixels++; + } + else if (m_afrgbaSource[uiPixel].fA == 0.0f) + { + uiTransparentSourcePixels++; + } + + } + + uiPixel += 1; + } + } + + if (uiOpaqueSourcePixels == PIXELS) + { + m_sourcealphamix = SourceAlphaMix::OPAQUE; + } + else if (uiTransparentSourcePixels == PIXELS) + { + m_sourcealphamix = SourceAlphaMix::TRANSPARENT; + } + else + { + m_sourcealphamix = SourceAlphaMix::TRANSLUCENT; + } + + } + + // ---------------------------------------------------------------------------------------------------- + // return a name for the encoding mode + // + const char * Block4x4::GetEncodingModeName(void) + { + + switch (m_pencoding->GetMode()) + { + case Block4x4Encoding::MODE_ETC1: + return "ETC1"; + case Block4x4Encoding::MODE_T: + return "T"; + case Block4x4Encoding::MODE_H: + return "H"; + case Block4x4Encoding::MODE_PLANAR: + return "PLANAR"; + default: + return "???"; + } + } + + // ---------------------------------------------------------------------------------------------------- + // + +} diff --git a/thirdparty/etc2comp/EtcBlock4x4.h b/thirdparty/etc2comp/EtcBlock4x4.h new file mode 100644 index 0000000000..0fd30c598d --- /dev/null +++ b/thirdparty/etc2comp/EtcBlock4x4.h @@ -0,0 +1,172 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "EtcColor.h" +#include "EtcColorFloatRGBA.h" +#include "EtcErrorMetric.h" +#include "EtcImage.h" +#include "EtcBlock4x4Encoding.h" + +namespace Etc +{ + class Block4x4EncodingBits; + + class Block4x4 + { + public: + + static const unsigned int ROWS = 4; + static const unsigned int COLUMNS = 4; + static const unsigned int PIXELS = ROWS * COLUMNS; + + // the alpha mix for a 4x4 block of pixels + enum class SourceAlphaMix + { + UNKNOWN, + // + OPAQUE, // all 1.0 + TRANSPARENT, // all 0.0 or NAN + TRANSLUCENT // not all opaque or transparent + }; + + typedef void (Block4x4::*EncoderFunctionPtr)(void); + + Block4x4(void); + ~Block4x4(); + void InitFromSource(Image *a_pimageSource, + unsigned int a_uiSourceH, + unsigned int a_uiSourceV, + unsigned char *a_paucEncodingBits, + ErrorMetric a_errormetric); + + void InitFromEtcEncodingBits(Image::Format a_imageformat, + unsigned int a_uiSourceH, + unsigned int a_uiSourceV, + unsigned char *a_paucEncodingBits, + Image *a_pimageSource, + ErrorMetric a_errormetric); + + // return true if final iteration was performed + inline void PerformEncodingIteration(float a_fEffort) + { + m_pencoding->PerformIteration(a_fEffort); + } + + inline void SetEncodingBitsFromEncoding(void) + { + m_pencoding->SetEncodingBits(); + } + + inline unsigned int GetSourceH(void) + { + return m_uiSourceH; + } + + inline unsigned int GetSourceV(void) + { + return m_uiSourceV; + } + + inline float GetError(void) + { + return m_pencoding->GetError(); + } + + static const unsigned int s_auiPixelOrderHScan[PIXELS]; + + inline ColorFloatRGBA * GetDecodedColors(void) + { + return m_pencoding->GetDecodedColors(); + } + + inline float * GetDecodedAlphas(void) + { + return m_pencoding->GetDecodedAlphas(); + } + + inline Block4x4Encoding::Mode GetEncodingMode(void) + { + return m_pencoding->GetMode(); + } + + inline bool GetFlip(void) + { + return m_pencoding->GetFlip(); + } + + inline bool IsDifferential(void) + { + return m_pencoding->IsDifferential(); + } + + inline ColorFloatRGBA * GetSource() + { + return m_afrgbaSource; + } + + inline ErrorMetric GetErrorMetric() + { + return m_errormetric; + } + + const char * GetEncodingModeName(void); + + inline Block4x4Encoding * GetEncoding(void) + { + return m_pencoding; + } + + inline SourceAlphaMix GetSourceAlphaMix(void) + { + return m_sourcealphamix; + } + + inline Image * GetImageSource(void) + { + return m_pimageSource; + } + + inline bool HasBorderPixels(void) + { + return m_boolBorderPixels; + } + + inline bool HasPunchThroughPixels(void) + { + return m_boolPunchThroughPixels; + } + + private: + + void SetSourcePixels(void); + + Image *m_pimageSource; + unsigned int m_uiSourceH; + unsigned int m_uiSourceV; + ErrorMetric m_errormetric; + ColorFloatRGBA m_afrgbaSource[PIXELS]; // vertical scan + + SourceAlphaMix m_sourcealphamix; + bool m_boolBorderPixels; // marked as rgba(NAN, NAN, NAN, NAN) + bool m_boolPunchThroughPixels; // RGB8A1 or SRGB8A1 with any pixels with alpha < 0.5 + + Block4x4Encoding *m_pencoding; + + }; + +} // namespace Etc diff --git 
a/thirdparty/etc2comp/EtcBlock4x4Encoding.cpp b/thirdparty/etc2comp/EtcBlock4x4Encoding.cpp new file mode 100644 index 0000000000..7a9e68c4cf --- /dev/null +++ b/thirdparty/etc2comp/EtcBlock4x4Encoding.cpp @@ -0,0 +1,261 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* +EtcBlock4x4Encoding.cpp + +Block4x4Encoding is the abstract base class for the different encoders. Each encoder targets a +particular file format (e.g. ETC1, RGB8, RGBA8, R11) + +*/ + +#include "EtcConfig.h" +#include "EtcBlock4x4Encoding.h" + +#include "EtcBlock4x4EncodingBits.h" +#include "EtcBlock4x4.h" + +#include <stdio.h> +#include <string.h> +#include <assert.h> + +namespace Etc +{ + // ---------------------------------------------------------------------------------------------------- + // + const float Block4x4Encoding::LUMA_WEIGHT = 3.0f; + const float Block4x4Encoding::CHROMA_BLUE_WEIGHT = 0.5f; + + // ---------------------------------------------------------------------------------------------------- + // + Block4x4Encoding::Block4x4Encoding(void) + { + + m_pblockParent = nullptr; + + m_pafrgbaSource = nullptr; + + m_boolBorderPixels = false; + + m_fError = -1.0f; + + m_mode = MODE_UNKNOWN; + + m_uiEncodingIterations = 0; + m_boolDone = false; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_afrgbaDecodedColors[uiPixel] = ColorFloatRGBA(-1.0f, -1.0f, -1.0f, -1.0f); + m_afDecodedAlphas[uiPixel] = -1.0f; + } + + } + + // ---------------------------------------------------------------------------------------------------- + // initialize the generic encoding for a 4x4 block + // a_pblockParent points to the block associated with this encoding + // a_errormetric is used to choose the best encoding + // init the decoded pixels to -1 to mark them as undefined + // init the error to -1 to mark it as undefined + // + void Block4x4Encoding::Init(Block4x4 *a_pblockParent, + ColorFloatRGBA *a_pafrgbaSource, + ErrorMetric a_errormetric) + { + + m_pblockParent = a_pblockParent; + + m_pafrgbaSource = a_pafrgbaSource; + + m_boolBorderPixels = m_pblockParent->HasBorderPixels(); + + m_fError = -1.0f; + + m_uiEncodingIterations = 0; + + m_errormetric = a_errormetric; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_afrgbaDecodedColors[uiPixel] = ColorFloatRGBA(-1.0f, -1.0f, -1.0f, -1.0f); + m_afDecodedAlphas[uiPixel] = -1.0f; + } + + } + + // ---------------------------------------------------------------------------------------------------- + // calculate the error for the block by summing the pixel errors + // + void Block4x4Encoding::CalcBlockError(void) + { + m_fError = 0.0f; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_fError += CalcPixelError(m_afrgbaDecodedColors[uiPixel], m_afDecodedAlphas[uiPixel], + m_pafrgbaSource[uiPixel]); + } + + } + + // ---------------------------------------------------------------------------------------------------- + // calculate the error between the source 
pixel and the decoded pixel + // the error amount is base on the error metric + // + float Block4x4Encoding::CalcPixelError(ColorFloatRGBA a_frgbaDecodedColor, float a_fDecodedAlpha, + ColorFloatRGBA a_frgbaSourcePixel) + { + + // if a border pixel + if (isnan(a_frgbaSourcePixel.fA)) + { + return 0.0f; + } + + if (m_errormetric == ErrorMetric::RGBA) + { + assert(a_fDecodedAlpha >= 0.0f); + + float fDRed = (a_fDecodedAlpha * a_frgbaDecodedColor.fR) - + (a_frgbaSourcePixel.fA * a_frgbaSourcePixel.fR); + float fDGreen = (a_fDecodedAlpha * a_frgbaDecodedColor.fG) - + (a_frgbaSourcePixel.fA * a_frgbaSourcePixel.fG); + float fDBlue = (a_fDecodedAlpha * a_frgbaDecodedColor.fB) - + (a_frgbaSourcePixel.fA * a_frgbaSourcePixel.fB); + + float fDAlpha = a_fDecodedAlpha - a_frgbaSourcePixel.fA; + + return fDRed*fDRed + fDGreen*fDGreen + fDBlue*fDBlue + fDAlpha*fDAlpha; + } + else if (m_errormetric == ErrorMetric::RGBX) + { + assert(a_fDecodedAlpha >= 0.0f); + + float fDRed = a_frgbaDecodedColor.fR - a_frgbaSourcePixel.fR; + float fDGreen = a_frgbaDecodedColor.fG - a_frgbaSourcePixel.fG; + float fDBlue = a_frgbaDecodedColor.fB - a_frgbaSourcePixel.fB; + float fDAlpha = a_fDecodedAlpha - a_frgbaSourcePixel.fA; + + return fDRed*fDRed + fDGreen*fDGreen + fDBlue*fDBlue + fDAlpha*fDAlpha; + } + else if (m_errormetric == ErrorMetric::REC709) + { + assert(a_fDecodedAlpha >= 0.0f); + + float fLuma1 = a_frgbaSourcePixel.fR*0.2126f + a_frgbaSourcePixel.fG*0.7152f + a_frgbaSourcePixel.fB*0.0722f; + float fChromaR1 = 0.5f * ((a_frgbaSourcePixel.fR - fLuma1) * (1.0f / (1.0f - 0.2126f))); + float fChromaB1 = 0.5f * ((a_frgbaSourcePixel.fB - fLuma1) * (1.0f / (1.0f - 0.0722f))); + + float fLuma2 = a_frgbaDecodedColor.fR*0.2126f + + a_frgbaDecodedColor.fG*0.7152f + + a_frgbaDecodedColor.fB*0.0722f; + float fChromaR2 = 0.5f * ((a_frgbaDecodedColor.fR - fLuma2) * (1.0f / (1.0f - 0.2126f))); + float fChromaB2 = 0.5f * ((a_frgbaDecodedColor.fB - fLuma2) * (1.0f / (1.0f - 0.0722f))); + + float fDeltaL = a_frgbaSourcePixel.fA * fLuma1 - a_fDecodedAlpha * fLuma2; + float fDeltaCr = a_frgbaSourcePixel.fA * fChromaR1 - a_fDecodedAlpha * fChromaR2; + float fDeltaCb = a_frgbaSourcePixel.fA * fChromaB1 - a_fDecodedAlpha * fChromaB2; + + float fDAlpha = a_fDecodedAlpha - a_frgbaSourcePixel.fA; + + // Favor Luma accuracy over Chroma, and Red over Blue + return LUMA_WEIGHT*fDeltaL*fDeltaL + + fDeltaCr*fDeltaCr + + CHROMA_BLUE_WEIGHT*fDeltaCb*fDeltaCb + + fDAlpha*fDAlpha; + #if 0 + float fDRed = a_frgbaDecodedPixel.fR - a_frgbaSourcePixel.fR; + float fDGreen = a_frgbaDecodedPixel.fG - a_frgbaSourcePixel.fG; + float fDBlue = a_frgbaDecodedPixel.fB - a_frgbaSourcePixel.fB; + return 2.0f * 3.0f * fDeltaL * fDeltaL + fDRed*fDRed + fDGreen*fDGreen + fDBlue*fDBlue; +#endif + } + else if (m_errormetric == ErrorMetric::NORMALXYZ) + { + float fDecodedX = 2.0f * a_frgbaDecodedColor.fR - 1.0f; + float fDecodedY = 2.0f * a_frgbaDecodedColor.fG - 1.0f; + float fDecodedZ = 2.0f * a_frgbaDecodedColor.fB - 1.0f; + + float fDecodedLength = sqrtf(fDecodedX*fDecodedX + fDecodedY*fDecodedY + fDecodedZ*fDecodedZ); + + if (fDecodedLength < 0.5f) + { + return 1.0f; + } + else if (fDecodedLength == 0.0f) + { + fDecodedX = 1.0f; + fDecodedY = 0.0f; + fDecodedZ = 0.0f; + } + else + { + fDecodedX /= fDecodedLength; + fDecodedY /= fDecodedLength; + fDecodedZ /= fDecodedLength; + } + + float fSourceX = 2.0f * a_frgbaSourcePixel.fR - 1.0f; + float fSourceY = 2.0f * a_frgbaSourcePixel.fG - 1.0f; + float fSourceZ = 2.0f * a_frgbaSourcePixel.fB - 1.0f; + + float 
fSourceLength = sqrtf(fSourceX*fSourceX + fSourceY*fSourceY + fSourceZ*fSourceZ); + + if (fSourceLength == 0.0f) + { + fSourceX = 1.0f; + fSourceY = 0.0f; + fSourceZ = 0.0f; + } + else + { + fSourceX /= fSourceLength; + fSourceY /= fSourceLength; + fSourceZ /= fSourceLength; + } + + float fDotProduct = fSourceX*fDecodedX + fSourceY*fDecodedY + fSourceZ*fDecodedZ; + float fNormalizedDotProduct = 1.0f - 0.5f * (fDotProduct + 1.0f); + float fDotProductError = fNormalizedDotProduct * fNormalizedDotProduct; + + float fLength2 = fDecodedX*fDecodedX + fDecodedY*fDecodedY + fDecodedZ*fDecodedZ; + float fLength2Error = fabsf(1.0f - fLength2); + + float fDeltaW = a_frgbaDecodedColor.fA - a_frgbaSourcePixel.fA; + float fErrorW = fDeltaW * fDeltaW; + + return fDotProductError + fLength2Error + fErrorW; + } + else // ErrorMetric::NUMERIC + { + assert(a_fDecodedAlpha >= 0.0f); + + float fDX = a_frgbaDecodedColor.fR - a_frgbaSourcePixel.fR; + float fDY = a_frgbaDecodedColor.fG - a_frgbaSourcePixel.fG; + float fDZ = a_frgbaDecodedColor.fB - a_frgbaSourcePixel.fB; + float fDW = a_frgbaDecodedColor.fA - a_frgbaSourcePixel.fA; + + return fDX*fDX + fDY*fDY + fDZ*fDZ + fDW*fDW; + } + + } + + // ---------------------------------------------------------------------------------------------------- + // + +} // namespace Etc + diff --git a/thirdparty/etc2comp/EtcBlock4x4Encoding.h b/thirdparty/etc2comp/EtcBlock4x4Encoding.h new file mode 100644 index 0000000000..c14c3b8616 --- /dev/null +++ b/thirdparty/etc2comp/EtcBlock4x4Encoding.h @@ -0,0 +1,148 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include "EtcColorFloatRGBA.h" + +#include "EtcErrorMetric.h" + +#include <assert.h> +#include <float.h> + +namespace Etc +{ + class Block4x4; + + // abstract base class for specific encodings + class Block4x4Encoding + { + public: + + static const unsigned int ROWS = 4; + static const unsigned int COLUMNS = 4; + static const unsigned int PIXELS = ROWS * COLUMNS; + static const float LUMA_WEIGHT; + static const float CHROMA_BLUE_WEIGHT; + + typedef enum + { + MODE_UNKNOWN, + // + MODE_ETC1, + MODE_T, + MODE_H, + MODE_PLANAR, + MODE_R11, + MODE_RG11, + // + MODES + } Mode; + + Block4x4Encoding(void); + //virtual ~Block4x4Encoding(void) =0; + virtual ~Block4x4Encoding(void) {} + virtual void InitFromSource(Block4x4 *a_pblockParent, + ColorFloatRGBA *a_pafrgbaSource, + + unsigned char *a_paucEncodingBits, ErrorMetric a_errormetric) = 0; + + virtual void InitFromEncodingBits(Block4x4 *a_pblockParent, + unsigned char *a_paucEncodingBits, + ColorFloatRGBA *a_pafrgbaSource, + + ErrorMetric a_errormetric) = 0; + + // perform an iteration of the encoding + // the first iteration must generate a complete, valid (if poor) encoding + virtual void PerformIteration(float a_fEffort) = 0; + + void CalcBlockError(void); + + inline float GetError(void) + { + assert(m_fError >= 0.0f); + + return m_fError; + } + + inline ColorFloatRGBA * GetDecodedColors(void) + { + return m_afrgbaDecodedColors; + } + + inline float * GetDecodedAlphas(void) + { + return m_afDecodedAlphas; + } + + virtual void SetEncodingBits(void) = 0; + + virtual bool GetFlip(void) = 0; + + virtual bool IsDifferential(void) = 0; + + virtual bool HasSeverelyBentDifferentialColors(void) const = 0; + + inline Mode GetMode(void) + { + return m_mode; + } + + inline bool IsDone(void) + { + return m_boolDone; + } + + inline void SetDoneIfPerfect() + { + if (GetError() == 0.0f) + { + m_boolDone = true; + } + } + + float CalcPixelError(ColorFloatRGBA a_frgbaDecodedColor, float a_fDecodedAlpha, + ColorFloatRGBA a_frgbaSourcePixel); + + protected: + + void Init(Block4x4 *a_pblockParent, + ColorFloatRGBA *a_pafrgbaSource, + + ErrorMetric a_errormetric); + + Block4x4 *m_pblockParent; + ColorFloatRGBA *m_pafrgbaSource; + + bool m_boolBorderPixels; // if block has any border pixels + + ColorFloatRGBA m_afrgbaDecodedColors[PIXELS]; // decoded RGB components, ignore Alpha + float m_afDecodedAlphas[PIXELS]; // decoded alpha component + float m_fError; // error for RGBA relative to m_pafrgbaSource + + // intermediate encoding + Mode m_mode; + + unsigned int m_uiEncodingIterations; + bool m_boolDone; // all iterations have been done + ErrorMetric m_errormetric; + + private: + + }; + +} // namespace Etc diff --git a/thirdparty/etc2comp/EtcBlock4x4EncodingBits.h b/thirdparty/etc2comp/EtcBlock4x4EncodingBits.h new file mode 100644 index 0000000000..4065700379 --- /dev/null +++ b/thirdparty/etc2comp/EtcBlock4x4EncodingBits.h @@ -0,0 +1,315 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include <assert.h> + +namespace Etc +{ + + // ################################################################################ + // Block4x4EncodingBits + // Base class for Block4x4EncodingBits_XXXX + // ################################################################################ + + class Block4x4EncodingBits + { + public: + + enum class Format + { + UNKNOWN, + // + RGB8, + RGBA8, + R11, + RG11, + RGB8A1, + // + FORMATS + }; + + static unsigned int GetBytesPerBlock(Format a_format) + { + switch (a_format) + { + case Format::RGB8: + case Format::R11: + case Format::RGB8A1: + return 8; + break; + + case Format::RGBA8: + case Format::RG11: + return 16; + break; + + default: + return 0; + break; + } + + } + + }; + + // ################################################################################ + // Block4x4EncodingBits_RGB8 + // Encoding bits for the RGB portion of ETC1, RGB8, RGB8A1 and RGBA8 + // ################################################################################ + + class Block4x4EncodingBits_RGB8 + { + public: + + static const unsigned int BYTES_PER_BLOCK = 8; + + inline Block4x4EncodingBits_RGB8(void) + { + assert(sizeof(Block4x4EncodingBits_RGB8) == BYTES_PER_BLOCK); + + for (unsigned int uiByte = 0; uiByte < BYTES_PER_BLOCK; uiByte++) + { + auc[uiByte] = 0; + } + + } + + typedef struct + { + unsigned red2 : 4; + unsigned red1 : 4; + // + unsigned green2 : 4; + unsigned green1 : 4; + // + unsigned blue2 : 4; + unsigned blue1 : 4; + // + unsigned flip : 1; + unsigned diff : 1; + unsigned cw2 : 3; + unsigned cw1 : 3; + // + unsigned int selectors; + } Individual; + + typedef struct + { + signed dred2 : 3; + unsigned red1 : 5; + // + signed dgreen2 : 3; + unsigned green1 : 5; + // + signed dblue2 : 3; + unsigned blue1 : 5; + // + unsigned flip : 1; + unsigned diff : 1; + unsigned cw2 : 3; + unsigned cw1 : 3; + // + unsigned int selectors; + } Differential; + + typedef struct + { + unsigned red1b : 2; + unsigned detect2 : 1; + unsigned red1a : 2; + unsigned detect1 : 3; + // + unsigned blue1 : 4; + unsigned green1 : 4; + // + unsigned green2 : 4; + unsigned red2 : 4; + // + unsigned db : 1; + unsigned diff : 1; + unsigned da : 2; + unsigned blue2 : 4; + // + unsigned int selectors; + } T; + + typedef struct + { + unsigned green1a : 3; + unsigned red1 : 4; + unsigned detect1 : 1; + // + unsigned blue1b : 2; + unsigned detect3 : 1; + unsigned blue1a : 1; + unsigned green1b : 1; + unsigned detect2 : 3; + // + unsigned green2a : 3; + unsigned red2 : 4; + unsigned blue1c : 1; + // + unsigned db : 1; + unsigned diff : 1; + unsigned da : 1; + unsigned blue2 : 4; + unsigned green2b : 1; + // + unsigned int selectors; + } H; + + typedef struct + { + unsigned originGreen1 : 1; + unsigned originRed : 6; + unsigned detect1 : 1; + // + unsigned originBlue1 : 1; + unsigned originGreen2 : 6; + unsigned detect2 : 1; + // + unsigned originBlue3 : 2; + unsigned detect4 : 1; + unsigned originBlue2 : 2; + unsigned detect3 : 3; + // + unsigned horizRed2 : 1; + unsigned diff : 1; + unsigned horizRed1 : 5; + unsigned originBlue4 : 1; + // + unsigned horizBlue1: 1; + unsigned horizGreen : 7; + // + unsigned vertRed1 : 3; + unsigned horizBlue2 : 5; + // + unsigned vertGreen1 : 5; + unsigned vertRed2 : 3; + // + unsigned vertBlue : 6; + unsigned vertGreen2 : 2; + } Planar; + + union + { + unsigned char auc[BYTES_PER_BLOCK]; + unsigned long int ul; + Individual individual; + Differential differential; + T t; + H h; + Planar planar; + }; + + }; + + // 
################################################################################ + // Block4x4EncodingBits_A8 + // Encoding bits for the A portion of RGBA8 + // ################################################################################ + + class Block4x4EncodingBits_A8 + { + public: + + static const unsigned int BYTES_PER_BLOCK = 8; + static const unsigned int SELECTOR_BYTES = 6; + + typedef struct + { + unsigned base : 8; + unsigned table : 4; + unsigned multiplier : 4; + unsigned selectors0 : 8; + unsigned selectors1 : 8; + unsigned selectors2 : 8; + unsigned selectors3 : 8; + unsigned selectors4 : 8; + unsigned selectors5 : 8; + } Data; + + Data data; + + }; + + // ################################################################################ + // Block4x4EncodingBits_R11 + // Encoding bits for the R portion of R11 + // ################################################################################ + + class Block4x4EncodingBits_R11 + { + public: + + static const unsigned int BYTES_PER_BLOCK = 8; + static const unsigned int SELECTOR_BYTES = 6; + + typedef struct + { + unsigned base : 8; + unsigned table : 4; + unsigned multiplier : 4; + unsigned selectors0 : 8; + unsigned selectors1 : 8; + unsigned selectors2 : 8; + unsigned selectors3 : 8; + unsigned selectors4 : 8; + unsigned selectors5 : 8; + } Data; + + Data data; + + }; + + class Block4x4EncodingBits_RG11 + { + public: + + static const unsigned int BYTES_PER_BLOCK = 16; + static const unsigned int SELECTOR_BYTES = 12; + + typedef struct + { + //Red portion + unsigned baseR : 8; + unsigned tableIndexR : 4; + unsigned multiplierR : 4; + unsigned selectorsR0 : 8; + unsigned selectorsR1 : 8; + unsigned selectorsR2 : 8; + unsigned selectorsR3 : 8; + unsigned selectorsR4 : 8; + unsigned selectorsR5 : 8; + //Green portion + unsigned baseG : 8; + unsigned tableIndexG : 4; + unsigned multiplierG : 4; + unsigned selectorsG0 : 8; + unsigned selectorsG1 : 8; + unsigned selectorsG2 : 8; + unsigned selectorsG3 : 8; + unsigned selectorsG4 : 8; + unsigned selectorsG5 : 8; + } Data; + + Data data; + + }; + +} diff --git a/thirdparty/etc2comp/EtcBlock4x4Encoding_ETC1.cpp b/thirdparty/etc2comp/EtcBlock4x4Encoding_ETC1.cpp new file mode 100644 index 0000000000..a27f74c0d5 --- /dev/null +++ b/thirdparty/etc2comp/EtcBlock4x4Encoding_ETC1.cpp @@ -0,0 +1,1281 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* +EtcBlock4x4Encoding_ETC1.cpp + +Block4x4Encoding_ETC1 is the encoder to use when targetting file format ETC1. 
This encoder is also +used for the ETC1 subset of file format RGB8, RGBA8 and RGB8A1 + +*/ + +#include "EtcConfig.h" +#include "EtcBlock4x4Encoding_ETC1.h" + +#include "EtcBlock4x4.h" +#include "EtcBlock4x4EncodingBits.h" +#include "EtcDifferentialTrys.h" + +#include <stdio.h> +#include <string.h> +#include <assert.h> +#include <float.h> +#include <limits> + +namespace Etc +{ + + // pixel processing order if the flip bit = 0 (horizontal split) + const unsigned int Block4x4Encoding_ETC1::s_auiPixelOrderFlip0[PIXELS] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; + + // pixel processing order if the flip bit = 1 (vertical split) + const unsigned int Block4x4Encoding_ETC1::s_auiPixelOrderFlip1[PIXELS] = { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 }; + + // pixel processing order for horizontal scan (ETC normally does a vertical scan) + const unsigned int Block4x4Encoding_ETC1::s_auiPixelOrderHScan[PIXELS] = { 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15 }; + + // pixel indices for different block halves + const unsigned int Block4x4Encoding_ETC1::s_auiLeftPixelMapping[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; + const unsigned int Block4x4Encoding_ETC1::s_auiRightPixelMapping[8] = { 8, 9, 10, 11, 12, 13, 14, 15 }; + const unsigned int Block4x4Encoding_ETC1::s_auiTopPixelMapping[8] = { 0, 1, 4, 5, 8, 9, 12, 13 }; + const unsigned int Block4x4Encoding_ETC1::s_auiBottomPixelMapping[8] = { 2, 3, 6, 7, 10, 11, 14, 15 }; + + // CW ranges that the ETC1 decoders use + // CW is basically a contrast for the different selector bits, since these values are offsets to the base color + // the first axis in the array is indexed by the CW in the encoding bits + // the second axis in the array is indexed by the selector bits + float Block4x4Encoding_ETC1::s_aafCwTable[CW_RANGES][SELECTORS] = + { + { 2.0f / 255.0f, 8.0f / 255.0f, -2.0f / 255.0f, -8.0f / 255.0f }, + { 5.0f / 255.0f, 17.0f / 255.0f, -5.0f / 255.0f, -17.0f / 255.0f }, + { 9.0f / 255.0f, 29.0f / 255.0f, -9.0f / 255.0f, -29.0f / 255.0f }, + { 13.0f / 255.0f, 42.0f / 255.0f, -13.0f / 255.0f, -42.0f / 255.0f }, + { 18.0f / 255.0f, 60.0f / 255.0f, -18.0f / 255.0f, -60.0f / 255.0f }, + { 24.0f / 255.0f, 80.0f / 255.0f, -24.0f / 255.0f, -80.0f / 255.0f }, + { 33.0f / 255.0f, 106.0f / 255.0f, -33.0f / 255.0f, -106.0f / 255.0f }, + { 47.0f / 255.0f, 183.0f / 255.0f, -47.0f / 255.0f, -183.0f / 255.0f } + }; + + // ---------------------------------------------------------------------------------------------------- + // + Block4x4Encoding_ETC1::Block4x4Encoding_ETC1(void) + { + m_mode = MODE_ETC1; + m_boolDiff = false; + m_boolFlip = false; + m_frgbaColor1 = ColorFloatRGBA(); + m_frgbaColor2 = ColorFloatRGBA(); + m_uiCW1 = 0; + m_uiCW2 = 0; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_auiSelectors[uiPixel] = 0; + m_afDecodedAlphas[uiPixel] = 1.0f; + } + + m_boolMostLikelyFlip = false; + + m_fError = -1.0f; + + m_fError1 = -1.0f; + m_fError2 = -1.0f; + m_boolSeverelyBentDifferentialColors = false; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_afDecodedAlphas[uiPixel] = 1.0f; + } + + } + + Block4x4Encoding_ETC1::~Block4x4Encoding_ETC1(void) {} + + // ---------------------------------------------------------------------------------------------------- + // initialization prior to encoding + // a_pblockParent points to the block associated with this encoding + // a_errormetric is used to choose the best encoding + // a_pafrgbaSource points to a 4x4 block subset of the source image + // 
a_paucEncodingBits points to the final encoding bits + // + void Block4x4Encoding_ETC1::InitFromSource(Block4x4 *a_pblockParent, + ColorFloatRGBA *a_pafrgbaSource, + unsigned char *a_paucEncodingBits, ErrorMetric a_errormetric) + { + + Block4x4Encoding::Init(a_pblockParent, a_pafrgbaSource,a_errormetric); + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_afDecodedAlphas[uiPixel] = 1.0f; + } + + m_fError = -1.0f; + + m_pencodingbitsRGB8 = (Block4x4EncodingBits_RGB8 *)(a_paucEncodingBits); + + } + + // ---------------------------------------------------------------------------------------------------- + // initialization from the encoding bits of a previous encoding + // a_pblockParent points to the block associated with this encoding + // a_errormetric is used to choose the best encoding + // a_pafrgbaSource points to a 4x4 block subset of the source image + // a_paucEncodingBits points to the final encoding bits of a previous encoding + // + void Block4x4Encoding_ETC1::InitFromEncodingBits(Block4x4 *a_pblockParent, + unsigned char *a_paucEncodingBits, + ColorFloatRGBA *a_pafrgbaSource, + ErrorMetric a_errormetric) + { + + Block4x4Encoding::Init(a_pblockParent, a_pafrgbaSource,a_errormetric); + m_fError = -1.0f; + + m_pencodingbitsRGB8 = (Block4x4EncodingBits_RGB8 *)a_paucEncodingBits; + + m_mode = MODE_ETC1; + m_boolDiff = m_pencodingbitsRGB8->individual.diff; + m_boolFlip = m_pencodingbitsRGB8->individual.flip; + if (m_boolDiff) + { + int iR2 = (int)(m_pencodingbitsRGB8->differential.red1 + m_pencodingbitsRGB8->differential.dred2); + if (iR2 < 0) + { + iR2 = 0; + } + else if (iR2 > 31) + { + iR2 = 31; + } + + int iG2 = (int)(m_pencodingbitsRGB8->differential.green1 + m_pencodingbitsRGB8->differential.dgreen2); + if (iG2 < 0) + { + iG2 = 0; + } + else if (iG2 > 31) + { + iG2 = 31; + } + + int iB2 = (int)(m_pencodingbitsRGB8->differential.blue1 + m_pencodingbitsRGB8->differential.dblue2); + if (iB2 < 0) + { + iB2 = 0; + } + else if (iB2 > 31) + { + iB2 = 31; + } + + m_frgbaColor1 = ColorFloatRGBA::ConvertFromRGB5(m_pencodingbitsRGB8->differential.red1, m_pencodingbitsRGB8->differential.green1, m_pencodingbitsRGB8->differential.blue1); + m_frgbaColor2 = ColorFloatRGBA::ConvertFromRGB5((unsigned char)iR2, (unsigned char)iG2, (unsigned char)iB2); + + } + else + { + m_frgbaColor1 = ColorFloatRGBA::ConvertFromRGB4(m_pencodingbitsRGB8->individual.red1, m_pencodingbitsRGB8->individual.green1, m_pencodingbitsRGB8->individual.blue1); + m_frgbaColor2 = ColorFloatRGBA::ConvertFromRGB4(m_pencodingbitsRGB8->individual.red2, m_pencodingbitsRGB8->individual.green2, m_pencodingbitsRGB8->individual.blue2); + } + + m_uiCW1 = m_pencodingbitsRGB8->individual.cw1; + m_uiCW2 = m_pencodingbitsRGB8->individual.cw2; + + InitFromEncodingBits_Selectors(); + + Decode(); + + CalcBlockError(); + } + + // ---------------------------------------------------------------------------------------------------- + // init the selectors from a prior encoding + // + void Block4x4Encoding_ETC1::InitFromEncodingBits_Selectors(void) + { + + unsigned char *paucSelectors = (unsigned char *)&m_pencodingbitsRGB8->individual.selectors; + + for (unsigned int iPixel = 0; iPixel < PIXELS; iPixel++) + { + unsigned int uiByteMSB = (unsigned int)(1 - (iPixel / 8)); + unsigned int uiByteLSB = (unsigned int)(3 - (iPixel / 8)); + unsigned int uiShift = (unsigned int)(iPixel & 7); + + unsigned int uiSelectorMSB = (unsigned int)((paucSelectors[uiByteMSB] >> uiShift) & 1); + unsigned int uiSelectorLSB = (unsigned 
int)((paucSelectors[uiByteLSB] >> uiShift) & 1); + + m_auiSelectors[iPixel] = (uiSelectorMSB << 1) + uiSelectorLSB; + } + + } + + // ---------------------------------------------------------------------------------------------------- + // perform a single encoding iteration + // replace the encoding if a better encoding was found + // subsequent iterations generally take longer for each iteration + // set m_boolDone if encoding is perfect or encoding is finished based on a_fEffort + // + void Block4x4Encoding_ETC1::PerformIteration(float a_fEffort) + { + assert(!m_boolDone); + + switch (m_uiEncodingIterations) + { + case 0: + PerformFirstIteration(); + break; + + case 1: + TryDifferential(m_boolMostLikelyFlip, 1, 0, 0); + break; + + case 2: + TryIndividual(m_boolMostLikelyFlip, 1); + if (a_fEffort <= 49.5f) + { + m_boolDone = true; + } + break; + + case 3: + TryDifferential(!m_boolMostLikelyFlip, 1, 0, 0); + if (a_fEffort <= 59.5f) + { + m_boolDone = true; + } + break; + + case 4: + TryIndividual(!m_boolMostLikelyFlip, 1); + if (a_fEffort <= 69.5f) + { + m_boolDone = true; + } + break; + + case 5: + TryDegenerates1(); + if (a_fEffort <= 79.5f) + { + m_boolDone = true; + } + break; + + case 6: + TryDegenerates2(); + if (a_fEffort <= 89.5f) + { + m_boolDone = true; + } + break; + + case 7: + TryDegenerates3(); + if (a_fEffort <= 99.5f) + { + m_boolDone = true; + } + break; + + case 8: + TryDegenerates4(); + m_boolDone = true; + break; + + default: + assert(0); + break; + } + + m_uiEncodingIterations++; + SetDoneIfPerfect(); + } + + // ---------------------------------------------------------------------------------------------------- + // find best initial encoding to ensure block has a valid encoding + // + void Block4x4Encoding_ETC1::PerformFirstIteration(void) + { + CalculateMostLikelyFlip(); + + m_fError = FLT_MAX; + + TryDifferential(m_boolMostLikelyFlip, 0, 0, 0); + SetDoneIfPerfect(); + if (m_boolDone) + { + return; + } + + TryIndividual(m_boolMostLikelyFlip, 0); + SetDoneIfPerfect(); + if (m_boolDone) + { + return; + } + TryDifferential(!m_boolMostLikelyFlip, 0, 0, 0); + SetDoneIfPerfect(); + if (m_boolDone) + { + return; + } + TryIndividual(!m_boolMostLikelyFlip, 0); + + } + + // ---------------------------------------------------------------------------------------------------- + // algorithm: + // create a source average color for the Left, Right, Top and Bottom halves using the 8 pixels in each half + // note: the "gray line" is the line of equal delta RGB that goes thru the average color + // for each half: + // see how close each of the 8 pixels are to the "gray line" that goes thru the source average color + // create an error value that is the sum of the distances from the gray line + // h_error is the sum of Left and Right errors + // v_error is the sum of Top and Bottom errors + // + void Block4x4Encoding_ETC1::CalculateMostLikelyFlip(void) + { + static const bool DEBUG_PRINT = false; + + CalculateSourceAverages(); + + float fLeftGrayErrorSum = 0.0f; + float fRightGrayErrorSum = 0.0f; + float fTopGrayErrorSum = 0.0f; + float fBottomGrayErrorSum = 0.0f; + + for (unsigned int uiPixel = 0; uiPixel < 8; uiPixel++) + { + ColorFloatRGBA *pfrgbaLeft = &m_pafrgbaSource[uiPixel]; + ColorFloatRGBA *pfrgbaRight = &m_pafrgbaSource[uiPixel + 8]; + ColorFloatRGBA *pfrgbaTop = &m_pafrgbaSource[s_auiTopPixelMapping[uiPixel]]; + ColorFloatRGBA *pfrgbaBottom = &m_pafrgbaSource[s_auiBottomPixelMapping[uiPixel]]; + + float fLeftGrayError = CalcGrayDistance2(*pfrgbaLeft, 
m_frgbaSourceAverageLeft); + float fRightGrayError = CalcGrayDistance2(*pfrgbaRight, m_frgbaSourceAverageRight); + float fTopGrayError = CalcGrayDistance2(*pfrgbaTop, m_frgbaSourceAverageTop); + float fBottomGrayError = CalcGrayDistance2(*pfrgbaBottom, m_frgbaSourceAverageBottom); + + fLeftGrayErrorSum += fLeftGrayError; + fRightGrayErrorSum += fRightGrayError; + fTopGrayErrorSum += fTopGrayError; + fBottomGrayErrorSum += fBottomGrayError; + } + + if (DEBUG_PRINT) + { + printf("\n%.2f %.2f\n", fLeftGrayErrorSum + fRightGrayErrorSum, fTopGrayErrorSum + fBottomGrayErrorSum); + } + + m_boolMostLikelyFlip = (fTopGrayErrorSum + fBottomGrayErrorSum) < (fLeftGrayErrorSum + fRightGrayErrorSum); + + } + + // ---------------------------------------------------------------------------------------------------- + // calculate source pixel averages for each 2x2 quadrant in a 4x4 block + // these are used to determine the averages for each of the 4 different halves (left, right, top, bottom) + // ignore pixels that have alpha == NAN (these are border pixels outside of the source image) + // weight the averages based on a pixel's alpha + // + void Block4x4Encoding_ETC1::CalculateSourceAverages(void) + { + static const bool DEBUG_PRINT = false; + + bool boolRGBX = m_pblockParent->GetImageSource()->GetErrorMetric() == ErrorMetric::RGBX; + + if (m_pblockParent->GetSourceAlphaMix() == Block4x4::SourceAlphaMix::OPAQUE || boolRGBX) + { + ColorFloatRGBA frgbaSumUL = m_pafrgbaSource[0] + m_pafrgbaSource[1] + m_pafrgbaSource[4] + m_pafrgbaSource[5]; + ColorFloatRGBA frgbaSumLL = m_pafrgbaSource[2] + m_pafrgbaSource[3] + m_pafrgbaSource[6] + m_pafrgbaSource[7]; + ColorFloatRGBA frgbaSumUR = m_pafrgbaSource[8] + m_pafrgbaSource[9] + m_pafrgbaSource[12] + m_pafrgbaSource[13]; + ColorFloatRGBA frgbaSumLR = m_pafrgbaSource[10] + m_pafrgbaSource[11] + m_pafrgbaSource[14] + m_pafrgbaSource[15]; + + m_frgbaSourceAverageLeft = (frgbaSumUL + frgbaSumLL) * 0.125f; + m_frgbaSourceAverageRight = (frgbaSumUR + frgbaSumLR) * 0.125f; + m_frgbaSourceAverageTop = (frgbaSumUL + frgbaSumUR) * 0.125f; + m_frgbaSourceAverageBottom = (frgbaSumLL + frgbaSumLR) * 0.125f; + } + else + { + float afSourceAlpha[PIXELS]; + + // treat alpha NAN as 0.0f + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + afSourceAlpha[uiPixel] = isnan(m_pafrgbaSource[uiPixel].fA) ? 
+ 0.0f : + m_pafrgbaSource[uiPixel].fA; + } + + ColorFloatRGBA afrgbaAlphaWeightedSource[PIXELS]; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + afrgbaAlphaWeightedSource[uiPixel] = m_pafrgbaSource[uiPixel] * afSourceAlpha[uiPixel]; + } + + ColorFloatRGBA frgbaSumUL = afrgbaAlphaWeightedSource[0] + + afrgbaAlphaWeightedSource[1] + + afrgbaAlphaWeightedSource[4] + + afrgbaAlphaWeightedSource[5]; + + ColorFloatRGBA frgbaSumLL = afrgbaAlphaWeightedSource[2] + + afrgbaAlphaWeightedSource[3] + + afrgbaAlphaWeightedSource[6] + + afrgbaAlphaWeightedSource[7]; + + ColorFloatRGBA frgbaSumUR = afrgbaAlphaWeightedSource[8] + + afrgbaAlphaWeightedSource[9] + + afrgbaAlphaWeightedSource[12] + + afrgbaAlphaWeightedSource[13]; + + ColorFloatRGBA frgbaSumLR = afrgbaAlphaWeightedSource[10] + + afrgbaAlphaWeightedSource[11] + + afrgbaAlphaWeightedSource[14] + + afrgbaAlphaWeightedSource[15]; + + float fWeightSumUL = afSourceAlpha[0] + + afSourceAlpha[1] + + afSourceAlpha[4] + + afSourceAlpha[5]; + + float fWeightSumLL = afSourceAlpha[2] + + afSourceAlpha[3] + + afSourceAlpha[6] + + afSourceAlpha[7]; + + float fWeightSumUR = afSourceAlpha[8] + + afSourceAlpha[9] + + afSourceAlpha[12] + + afSourceAlpha[13]; + + float fWeightSumLR = afSourceAlpha[10] + + afSourceAlpha[11] + + afSourceAlpha[14] + + afSourceAlpha[15]; + + ColorFloatRGBA frgbaSumLeft = frgbaSumUL + frgbaSumLL; + ColorFloatRGBA frgbaSumRight = frgbaSumUR + frgbaSumLR; + ColorFloatRGBA frgbaSumTop = frgbaSumUL + frgbaSumUR; + ColorFloatRGBA frgbaSumBottom = frgbaSumLL + frgbaSumLR; + + float fWeightSumLeft = fWeightSumUL + fWeightSumLL; + float fWeightSumRight = fWeightSumUR + fWeightSumLR; + float fWeightSumTop = fWeightSumUL + fWeightSumUR; + float fWeightSumBottom = fWeightSumLL + fWeightSumLR; + + // check to see if there is at least 1 pixel with non-zero alpha + // completely transparent block should not make it to this code + assert((fWeightSumLeft + fWeightSumRight) > 0.0f); + assert((fWeightSumTop + fWeightSumBottom) > 0.0f); + + if (fWeightSumLeft > 0.0f) + { + m_frgbaSourceAverageLeft = frgbaSumLeft * (1.0f/fWeightSumLeft); + } + if (fWeightSumRight > 0.0f) + { + m_frgbaSourceAverageRight = frgbaSumRight * (1.0f/fWeightSumRight); + } + if (fWeightSumTop > 0.0f) + { + m_frgbaSourceAverageTop = frgbaSumTop * (1.0f/fWeightSumTop); + } + if (fWeightSumBottom > 0.0f) + { + m_frgbaSourceAverageBottom = frgbaSumBottom * (1.0f/fWeightSumBottom); + } + + if (fWeightSumLeft == 0.0f) + { + assert(fWeightSumRight > 0.0f); + m_frgbaSourceAverageLeft = m_frgbaSourceAverageRight; + } + if (fWeightSumRight == 0.0f) + { + assert(fWeightSumLeft > 0.0f); + m_frgbaSourceAverageRight = m_frgbaSourceAverageLeft; + } + if (fWeightSumTop == 0.0f) + { + assert(fWeightSumBottom > 0.0f); + m_frgbaSourceAverageTop = m_frgbaSourceAverageBottom; + } + if (fWeightSumBottom == 0.0f) + { + assert(fWeightSumTop > 0.0f); + m_frgbaSourceAverageBottom = m_frgbaSourceAverageTop; + } + } + + + + if (DEBUG_PRINT) + { + printf("\ntarget: [%.2f,%.2f,%.2f] [%.2f,%.2f,%.2f] [%.2f,%.2f,%.2f] [%.2f,%.2f,%.2f]\n", + m_frgbaSourceAverageLeft.fR, m_frgbaSourceAverageLeft.fG, m_frgbaSourceAverageLeft.fB, + m_frgbaSourceAverageRight.fR, m_frgbaSourceAverageRight.fG, m_frgbaSourceAverageRight.fB, + m_frgbaSourceAverageTop.fR, m_frgbaSourceAverageTop.fG, m_frgbaSourceAverageTop.fB, + m_frgbaSourceAverageBottom.fR, m_frgbaSourceAverageBottom.fG, m_frgbaSourceAverageBottom.fB); + } + + } + + // 
---------------------------------------------------------------------------------------------------- + // try an ETC1 differential mode encoding + // use a_boolFlip to set the encoding F bit + // use a_uiRadius to alter basecolor components in the range[-a_uiRadius:a_uiRadius] + // use a_iGrayOffset1 and a_iGrayOffset2 to offset the basecolor to search for degenerate encodings + // replace the encoding if the encoding error is less than previous encoding + // + void Block4x4Encoding_ETC1::TryDifferential(bool a_boolFlip, unsigned int a_uiRadius, + int a_iGrayOffset1, int a_iGrayOffset2) + { + + ColorFloatRGBA frgbaColor1; + ColorFloatRGBA frgbaColor2; + + const unsigned int *pauiPixelMapping1; + const unsigned int *pauiPixelMapping2; + + if (a_boolFlip) + { + frgbaColor1 = m_frgbaSourceAverageTop; + frgbaColor2 = m_frgbaSourceAverageBottom; + + pauiPixelMapping1 = s_auiTopPixelMapping; + pauiPixelMapping2 = s_auiBottomPixelMapping; + } + else + { + frgbaColor1 = m_frgbaSourceAverageLeft; + frgbaColor2 = m_frgbaSourceAverageRight; + + pauiPixelMapping1 = s_auiLeftPixelMapping; + pauiPixelMapping2 = s_auiRightPixelMapping; + } + + DifferentialTrys trys(frgbaColor1, frgbaColor2, pauiPixelMapping1, pauiPixelMapping2, + a_uiRadius, a_iGrayOffset1, a_iGrayOffset2); + + Block4x4Encoding_ETC1 encodingTry = *this; + encodingTry.m_boolFlip = a_boolFlip; + + encodingTry.TryDifferentialHalf(&trys.m_half1); + encodingTry.TryDifferentialHalf(&trys.m_half2); + + // find best halves that are within differential range + DifferentialTrys::Try *ptryBest1 = nullptr; + DifferentialTrys::Try *ptryBest2 = nullptr; + encodingTry.m_fError = FLT_MAX; + + // see if the best of each half are in differential range + int iDRed = trys.m_half2.m_ptryBest->m_iRed - trys.m_half1.m_ptryBest->m_iRed; + int iDGreen = trys.m_half2.m_ptryBest->m_iGreen - trys.m_half1.m_ptryBest->m_iGreen; + int iDBlue = trys.m_half2.m_ptryBest->m_iBlue - trys.m_half1.m_ptryBest->m_iBlue; + if (iDRed >= -4 && iDRed <= 3 && iDGreen >= -4 && iDGreen <= 3 && iDBlue >= -4 && iDBlue <= 3) + { + ptryBest1 = trys.m_half1.m_ptryBest; + ptryBest2 = trys.m_half2.m_ptryBest; + encodingTry.m_fError = trys.m_half1.m_ptryBest->m_fError + trys.m_half2.m_ptryBest->m_fError; + } + else + { + // else, find the next best halves that are in differential range + for (DifferentialTrys::Try *ptry1 = &trys.m_half1.m_atry[0]; + ptry1 < &trys.m_half1.m_atry[trys.m_half1.m_uiTrys]; + ptry1++) + { + for (DifferentialTrys::Try *ptry2 = &trys.m_half2.m_atry[0]; + ptry2 < &trys.m_half2.m_atry[trys.m_half2.m_uiTrys]; + ptry2++) + { + iDRed = ptry2->m_iRed - ptry1->m_iRed; + bool boolValidRedDelta = iDRed <= 3 && iDRed >= -4; + iDGreen = ptry2->m_iGreen - ptry1->m_iGreen; + bool boolValidGreenDelta = iDGreen <= 3 && iDGreen >= -4; + iDBlue = ptry2->m_iBlue - ptry1->m_iBlue; + bool boolValidBlueDelta = iDBlue <= 3 && iDBlue >= -4; + + if (boolValidRedDelta && boolValidGreenDelta && boolValidBlueDelta) + { + float fError = ptry1->m_fError + ptry2->m_fError; + + if (fError < encodingTry.m_fError) + { + encodingTry.m_fError = fError; + + ptryBest1 = ptry1; + ptryBest2 = ptry2; + } + } + + } + } + assert(encodingTry.m_fError < FLT_MAX); + assert(ptryBest1 != nullptr); + assert(ptryBest2 != nullptr); + } + + if (encodingTry.m_fError < m_fError) + { + m_mode = MODE_ETC1; + m_boolDiff = true; + m_boolFlip = encodingTry.m_boolFlip; + m_frgbaColor1 = ColorFloatRGBA::ConvertFromRGB5((unsigned char)ptryBest1->m_iRed, (unsigned char)ptryBest1->m_iGreen, (unsigned char)ptryBest1->m_iBlue); + 
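// Illustrative sketch (not upstream code): the differential-mode constraint checked
// above. Each RGB555 base-color delta between the two halves must fit a signed 3-bit
// field, i.e. the range [-4, 3] (MIN_DIFFERENTIAL / MAX_DIFFERENTIAL in the header
// later in this diff). The helper name IsValidDifferential is an assumption.
static bool IsValidDifferential(int iRed1, int iGreen1, int iBlue1,
                                int iRed2, int iGreen2, int iBlue2)
{
    int iDRed = iRed2 - iRed1;
    int iDGreen = iGreen2 - iGreen1;
    int iDBlue = iBlue2 - iBlue1;

    return iDRed >= -4 && iDRed <= 3 &&
           iDGreen >= -4 && iDGreen <= 3 &&
           iDBlue >= -4 && iDBlue <= 3;
}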
m_frgbaColor2 = ColorFloatRGBA::ConvertFromRGB5((unsigned char)ptryBest2->m_iRed, (unsigned char)ptryBest2->m_iGreen, (unsigned char)ptryBest2->m_iBlue); + m_uiCW1 = ptryBest1->m_uiCW; + m_uiCW2 = ptryBest2->m_uiCW; + + for (unsigned int uiPixelOrder = 0; uiPixelOrder < PIXELS / 2; uiPixelOrder++) + { + unsigned int uiPixel1 = pauiPixelMapping1[uiPixelOrder]; + unsigned int uiPixel2 = pauiPixelMapping2[uiPixelOrder]; + + unsigned int uiSelector1 = ptryBest1->m_auiSelectors[uiPixelOrder]; + unsigned int uiSelector2 = ptryBest2->m_auiSelectors[uiPixelOrder]; + + m_auiSelectors[uiPixel1] = uiSelector1; + m_auiSelectors[uiPixel2] = ptryBest2->m_auiSelectors[uiPixelOrder]; + + float fDeltaRGB1 = s_aafCwTable[m_uiCW1][uiSelector1]; + float fDeltaRGB2 = s_aafCwTable[m_uiCW2][uiSelector2]; + + m_afrgbaDecodedColors[uiPixel1] = (m_frgbaColor1 + fDeltaRGB1).ClampRGB(); + m_afrgbaDecodedColors[uiPixel2] = (m_frgbaColor2 + fDeltaRGB2).ClampRGB(); + } + + m_fError1 = ptryBest1->m_fError; + m_fError2 = ptryBest2->m_fError; + m_boolSeverelyBentDifferentialColors = trys.m_boolSeverelyBentColors; + m_fError = m_fError1 + m_fError2; + + // sanity check + { + int iRed1 = m_frgbaColor1.IntRed(31.0f); + int iGreen1 = m_frgbaColor1.IntGreen(31.0f); + int iBlue1 = m_frgbaColor1.IntBlue(31.0f); + + int iRed2 = m_frgbaColor2.IntRed(31.0f); + int iGreen2 = m_frgbaColor2.IntGreen(31.0f); + int iBlue2 = m_frgbaColor2.IntBlue(31.0f); + + iDRed = iRed2 - iRed1; + iDGreen = iGreen2 - iGreen1; + iDBlue = iBlue2 - iBlue1; + + assert(iDRed >= -4 && iDRed < 4); + assert(iDGreen >= -4 && iDGreen < 4); + assert(iDBlue >= -4 && iDBlue < 4); + } + } + + } + + // ---------------------------------------------------------------------------------------------------- + // try an ETC1 differential mode encoding for a half of a 4x4 block + // vary the basecolor components using a radius + // + void Block4x4Encoding_ETC1::TryDifferentialHalf(DifferentialTrys::Half *a_phalf) + { + + a_phalf->m_ptryBest = nullptr; + float fBestTryError = FLT_MAX; + + a_phalf->m_uiTrys = 0; + for (int iRed = a_phalf->m_iRed - (int)a_phalf->m_uiRadius; + iRed <= a_phalf->m_iRed + (int)a_phalf->m_uiRadius; + iRed++) + { + assert(iRed >= 0 && iRed <= 31); + + for (int iGreen = a_phalf->m_iGreen - (int)a_phalf->m_uiRadius; + iGreen <= a_phalf->m_iGreen + (int)a_phalf->m_uiRadius; + iGreen++) + { + assert(iGreen >= 0 && iGreen <= 31); + + for (int iBlue = a_phalf->m_iBlue - (int)a_phalf->m_uiRadius; + iBlue <= a_phalf->m_iBlue + (int)a_phalf->m_uiRadius; + iBlue++) + { + assert(iBlue >= 0 && iBlue <= 31); + + DifferentialTrys::Try *ptry = &a_phalf->m_atry[a_phalf->m_uiTrys]; + assert(ptry < &a_phalf->m_atry[DifferentialTrys::Half::MAX_TRYS]); + + ptry->m_iRed = iRed; + ptry->m_iGreen = iGreen; + ptry->m_iBlue = iBlue; + ptry->m_fError = FLT_MAX; + ColorFloatRGBA frgbaColor = ColorFloatRGBA::ConvertFromRGB5((unsigned char)iRed, (unsigned char)iGreen, (unsigned char)iBlue); + + // try each CW + for (unsigned int uiCW = 0; uiCW < CW_RANGES; uiCW++) + { + unsigned int auiPixelSelectors[PIXELS / 2]; + ColorFloatRGBA afrgbaDecodedPixels[PIXELS / 2]; + float afPixelErrors[PIXELS / 2] = { FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, + FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX }; + + // pre-compute decoded pixels for each selector + ColorFloatRGBA afrgbaSelectors[SELECTORS]; + assert(SELECTORS == 4); + afrgbaSelectors[0] = (frgbaColor + s_aafCwTable[uiCW][0]).ClampRGB(); + afrgbaSelectors[1] = (frgbaColor + s_aafCwTable[uiCW][1]).ClampRGB(); + afrgbaSelectors[2] = (frgbaColor + 
s_aafCwTable[uiCW][2]).ClampRGB(); + afrgbaSelectors[3] = (frgbaColor + s_aafCwTable[uiCW][3]).ClampRGB(); + + for (unsigned int uiPixel = 0; uiPixel < 8; uiPixel++) + { + ColorFloatRGBA *pfrgbaSourcePixel = &m_pafrgbaSource[a_phalf->m_pauiPixelMapping[uiPixel]]; + ColorFloatRGBA frgbaDecodedPixel; + + for (unsigned int uiSelector = 0; uiSelector < SELECTORS; uiSelector++) + { + frgbaDecodedPixel = afrgbaSelectors[uiSelector]; + + float fPixelError; + + fPixelError = CalcPixelError(frgbaDecodedPixel, m_afDecodedAlphas[a_phalf->m_pauiPixelMapping[uiPixel]], + *pfrgbaSourcePixel); + + if (fPixelError < afPixelErrors[uiPixel]) + { + auiPixelSelectors[uiPixel] = uiSelector; + afrgbaDecodedPixels[uiPixel] = frgbaDecodedPixel; + afPixelErrors[uiPixel] = fPixelError; + } + + } + } + + // add up all pixel errors + float fCWError = 0.0f; + for (unsigned int uiPixel = 0; uiPixel < 8; uiPixel++) + { + fCWError += afPixelErrors[uiPixel]; + } + + // if best CW so far + if (fCWError < ptry->m_fError) + { + ptry->m_uiCW = uiCW; + for (unsigned int uiPixel = 0; uiPixel < 8; uiPixel++) + { + ptry->m_auiSelectors[uiPixel] = auiPixelSelectors[uiPixel]; + } + ptry->m_fError = fCWError; + } + + } + + if (ptry->m_fError < fBestTryError) + { + a_phalf->m_ptryBest = ptry; + fBestTryError = ptry->m_fError; + } + + assert(ptry->m_fError < FLT_MAX); + + a_phalf->m_uiTrys++; + } + } + } + + } + + // ---------------------------------------------------------------------------------------------------- + // try an ETC1 individual mode encoding + // use a_boolFlip to set the encoding F bit + // use a_uiRadius to alter basecolor components in the range[-a_uiRadius:a_uiRadius] + // replace the encoding if the encoding error is less than previous encoding + // + void Block4x4Encoding_ETC1::TryIndividual(bool a_boolFlip, unsigned int a_uiRadius) + { + + ColorFloatRGBA frgbaColor1; + ColorFloatRGBA frgbaColor2; + + const unsigned int *pauiPixelMapping1; + const unsigned int *pauiPixelMapping2; + + if (a_boolFlip) + { + frgbaColor1 = m_frgbaSourceAverageTop; + frgbaColor2 = m_frgbaSourceAverageBottom; + + pauiPixelMapping1 = s_auiTopPixelMapping; + pauiPixelMapping2 = s_auiBottomPixelMapping; + } + else + { + frgbaColor1 = m_frgbaSourceAverageLeft; + frgbaColor2 = m_frgbaSourceAverageRight; + + pauiPixelMapping1 = s_auiLeftPixelMapping; + pauiPixelMapping2 = s_auiRightPixelMapping; + } + + IndividualTrys trys(frgbaColor1, frgbaColor2, pauiPixelMapping1, pauiPixelMapping2, a_uiRadius); + + Block4x4Encoding_ETC1 encodingTry = *this; + encodingTry.m_boolFlip = a_boolFlip; + + encodingTry.TryIndividualHalf(&trys.m_half1); + encodingTry.TryIndividualHalf(&trys.m_half2); + + // use the best of each half + IndividualTrys::Try *ptryBest1 = trys.m_half1.m_ptryBest; + IndividualTrys::Try *ptryBest2 = trys.m_half2.m_ptryBest; + encodingTry.m_fError = trys.m_half1.m_ptryBest->m_fError + trys.m_half2.m_ptryBest->m_fError; + + if (encodingTry.m_fError < m_fError) + { + m_mode = MODE_ETC1; + m_boolDiff = false; + m_boolFlip = encodingTry.m_boolFlip; + m_frgbaColor1 = ColorFloatRGBA::ConvertFromRGB4((unsigned char)ptryBest1->m_iRed, (unsigned char)ptryBest1->m_iGreen, (unsigned char)ptryBest1->m_iBlue); + m_frgbaColor2 = ColorFloatRGBA::ConvertFromRGB4((unsigned char)ptryBest2->m_iRed, (unsigned char)ptryBest2->m_iGreen, (unsigned char)ptryBest2->m_iBlue); + m_uiCW1 = ptryBest1->m_uiCW; + m_uiCW2 = ptryBest2->m_uiCW; + + for (unsigned int uiPixelOrder = 0; uiPixelOrder < PIXELS / 2; uiPixelOrder++) + { + unsigned int uiPixel1 = 
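// Illustrative sketch (not upstream code): the per-pixel selector search performed in
// TryDifferentialHalf() above (and mirrored in TryIndividualHalf() below), reduced to
// one channel. afCwRow stands in for one row of s_aafCwTable; the function name and
// the single-channel simplification are assumptions for illustration only.
#include <cfloat>

static unsigned int BestSelector(float fBase, const float afCwRow[4],
                                 float fSource, float *pfError)
{
    unsigned int uiBest = 0;
    float fBestError = FLT_MAX;
    for (unsigned int uiSelector = 0; uiSelector < 4; uiSelector++)
    {
        // decode = clamp(base + codeword delta), then squared error vs. the source
        float fDecoded = fBase + afCwRow[uiSelector];
        if (fDecoded < 0.0f) fDecoded = 0.0f;
        if (fDecoded > 1.0f) fDecoded = 1.0f;
        float fDelta = fDecoded - fSource;
        float fError = fDelta * fDelta;
        if (fError < fBestError)
        {
            fBestError = fError;
            uiBest = uiSelector;
        }
    }
    *pfError = fBestError;
    return uiBest;
}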
pauiPixelMapping1[uiPixelOrder]; + unsigned int uiPixel2 = pauiPixelMapping2[uiPixelOrder]; + + unsigned int uiSelector1 = ptryBest1->m_auiSelectors[uiPixelOrder]; + unsigned int uiSelector2 = ptryBest2->m_auiSelectors[uiPixelOrder]; + + m_auiSelectors[uiPixel1] = uiSelector1; + m_auiSelectors[uiPixel2] = ptryBest2->m_auiSelectors[uiPixelOrder]; + + float fDeltaRGB1 = s_aafCwTable[m_uiCW1][uiSelector1]; + float fDeltaRGB2 = s_aafCwTable[m_uiCW2][uiSelector2]; + + m_afrgbaDecodedColors[uiPixel1] = (m_frgbaColor1 + fDeltaRGB1).ClampRGB(); + m_afrgbaDecodedColors[uiPixel2] = (m_frgbaColor2 + fDeltaRGB2).ClampRGB(); + } + + m_fError1 = ptryBest1->m_fError; + m_fError2 = ptryBest2->m_fError; + m_fError = m_fError1 + m_fError2; + } + + } + + // ---------------------------------------------------------------------------------------------------- + // try an ETC1 differential mode encoding for a half of a 4x4 block + // vary the basecolor components using a radius + // + void Block4x4Encoding_ETC1::TryIndividualHalf(IndividualTrys::Half *a_phalf) + { + + a_phalf->m_ptryBest = nullptr; + float fBestTryError = FLT_MAX; + + a_phalf->m_uiTrys = 0; + for (int iRed = a_phalf->m_iRed - (int)a_phalf->m_uiRadius; + iRed <= a_phalf->m_iRed + (int)a_phalf->m_uiRadius; + iRed++) + { + assert(iRed >= 0 && iRed <= 15); + + for (int iGreen = a_phalf->m_iGreen - (int)a_phalf->m_uiRadius; + iGreen <= a_phalf->m_iGreen + (int)a_phalf->m_uiRadius; + iGreen++) + { + assert(iGreen >= 0 && iGreen <= 15); + + for (int iBlue = a_phalf->m_iBlue - (int)a_phalf->m_uiRadius; + iBlue <= a_phalf->m_iBlue + (int)a_phalf->m_uiRadius; + iBlue++) + { + assert(iBlue >= 0 && iBlue <= 15); + + IndividualTrys::Try *ptry = &a_phalf->m_atry[a_phalf->m_uiTrys]; + assert(ptry < &a_phalf->m_atry[IndividualTrys::Half::MAX_TRYS]); + + ptry->m_iRed = iRed; + ptry->m_iGreen = iGreen; + ptry->m_iBlue = iBlue; + ptry->m_fError = FLT_MAX; + ColorFloatRGBA frgbaColor = ColorFloatRGBA::ConvertFromRGB4((unsigned char)iRed, (unsigned char)iGreen, (unsigned char)iBlue); + + // try each CW + for (unsigned int uiCW = 0; uiCW < CW_RANGES; uiCW++) + { + unsigned int auiPixelSelectors[PIXELS / 2]; + ColorFloatRGBA afrgbaDecodedPixels[PIXELS / 2]; + float afPixelErrors[PIXELS / 2] = { FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, + FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX }; + + // pre-compute decoded pixels for each selector + ColorFloatRGBA afrgbaSelectors[SELECTORS]; + assert(SELECTORS == 4); + afrgbaSelectors[0] = (frgbaColor + s_aafCwTable[uiCW][0]).ClampRGB(); + afrgbaSelectors[1] = (frgbaColor + s_aafCwTable[uiCW][1]).ClampRGB(); + afrgbaSelectors[2] = (frgbaColor + s_aafCwTable[uiCW][2]).ClampRGB(); + afrgbaSelectors[3] = (frgbaColor + s_aafCwTable[uiCW][3]).ClampRGB(); + + for (unsigned int uiPixel = 0; uiPixel < 8; uiPixel++) + { + ColorFloatRGBA *pfrgbaSourcePixel = &m_pafrgbaSource[a_phalf->m_pauiPixelMapping[uiPixel]]; + ColorFloatRGBA frgbaDecodedPixel; + + for (unsigned int uiSelector = 0; uiSelector < SELECTORS; uiSelector++) + { + frgbaDecodedPixel = afrgbaSelectors[uiSelector]; + + float fPixelError; + + fPixelError = CalcPixelError(frgbaDecodedPixel, m_afDecodedAlphas[a_phalf->m_pauiPixelMapping[uiPixel]], + *pfrgbaSourcePixel); + + if (fPixelError < afPixelErrors[uiPixel]) + { + auiPixelSelectors[uiPixel] = uiSelector; + afrgbaDecodedPixels[uiPixel] = frgbaDecodedPixel; + afPixelErrors[uiPixel] = fPixelError; + } + + } + } + + // add up all pixel errors + float fCWError = 0.0f; + for (unsigned int uiPixel = 0; uiPixel < 8; uiPixel++) + { + fCWError += 
afPixelErrors[uiPixel]; + } + + // if best CW so far + if (fCWError < ptry->m_fError) + { + ptry->m_uiCW = uiCW; + for (unsigned int uiPixel = 0; uiPixel < 8; uiPixel++) + { + ptry->m_auiSelectors[uiPixel] = auiPixelSelectors[uiPixel]; + } + ptry->m_fError = fCWError; + } + + } + + if (ptry->m_fError < fBestTryError) + { + a_phalf->m_ptryBest = ptry; + fBestTryError = ptry->m_fError; + } + + assert(ptry->m_fError < FLT_MAX); + + a_phalf->m_uiTrys++; + } + } + } + + } + + // ---------------------------------------------------------------------------------------------------- + // try version 1 of the degenerate search + // degenerate encodings use basecolor movement and a subset of the selectors to find useful encodings + // each subsequent version of the degenerate search uses more basecolor movement and is less likely to + // be successfull + // + void Block4x4Encoding_ETC1::TryDegenerates1(void) + { + + TryDifferential(m_boolMostLikelyFlip, 1, -2, 0); + TryDifferential(m_boolMostLikelyFlip, 1, 2, 0); + TryDifferential(m_boolMostLikelyFlip, 1, 0, 2); + TryDifferential(m_boolMostLikelyFlip, 1, 0, -2); + + } + + // ---------------------------------------------------------------------------------------------------- + // try version 2 of the degenerate search + // degenerate encodings use basecolor movement and a subset of the selectors to find useful encodings + // each subsequent version of the degenerate search uses more basecolor movement and is less likely to + // be successfull + // + void Block4x4Encoding_ETC1::TryDegenerates2(void) + { + + TryDifferential(!m_boolMostLikelyFlip, 1, -2, 0); + TryDifferential(!m_boolMostLikelyFlip, 1, 2, 0); + TryDifferential(!m_boolMostLikelyFlip, 1, 0, 2); + TryDifferential(!m_boolMostLikelyFlip, 1, 0, -2); + + } + + // ---------------------------------------------------------------------------------------------------- + // try version 3 of the degenerate search + // degenerate encodings use basecolor movement and a subset of the selectors to find useful encodings + // each subsequent version of the degenerate search uses more basecolor movement and is less likely to + // be successfull + // + void Block4x4Encoding_ETC1::TryDegenerates3(void) + { + + TryDifferential(m_boolMostLikelyFlip, 1, -2, -2); + TryDifferential(m_boolMostLikelyFlip, 1, -2, 2); + TryDifferential(m_boolMostLikelyFlip, 1, 2, -2); + TryDifferential(m_boolMostLikelyFlip, 1, 2, 2); + + } + + // ---------------------------------------------------------------------------------------------------- + // try version 4 of the degenerate search + // degenerate encodings use basecolor movement and a subset of the selectors to find useful encodings + // each subsequent version of the degenerate search uses more basecolor movement and is less likely to + // be successfull + // + void Block4x4Encoding_ETC1::TryDegenerates4(void) + { + + TryDifferential(m_boolMostLikelyFlip, 1, -4, 0); + TryDifferential(m_boolMostLikelyFlip, 1, 4, 0); + TryDifferential(m_boolMostLikelyFlip, 1, 0, 4); + TryDifferential(m_boolMostLikelyFlip, 1, 0, -4); + + } + + // ---------------------------------------------------------------------------------------------------- + // find the best selector for each pixel based on a particular basecolor and CW that have been previously set + // calculate the selectors for each half of the block separately + // set the block error as the sum of each half's error + // + void Block4x4Encoding_ETC1::CalculateSelectors() + { + if (m_boolFlip) + { + CalculateHalfOfTheSelectors(0, 
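// Summary sketch (not upstream code) of the degenerate-search schedule implemented by
// TryDegenerates1()..TryDegenerates4() above. Each row lists the gray offsets passed
// to TryDifferential() with radius 1; stages 1, 3 and 4 use the most likely flip,
// stage 2 the opposite flip. The table name is an assumption for illustration.
static const int s_aaiDegenerateGrayOffsets[4][4][2] =
{
    { {-2,  0}, { 2,  0}, { 0,  2}, { 0, -2} },   // TryDegenerates1, most likely flip
    { {-2,  0}, { 2,  0}, { 0,  2}, { 0, -2} },   // TryDegenerates2, opposite flip
    { {-2, -2}, {-2,  2}, { 2, -2}, { 2,  2} },   // TryDegenerates3, most likely flip
    { {-4,  0}, { 4,  0}, { 0,  4}, { 0, -4} },   // TryDegenerates4, most likely flip
};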
s_auiTopPixelMapping); + CalculateHalfOfTheSelectors(1, s_auiBottomPixelMapping); + } + else + { + CalculateHalfOfTheSelectors(0, s_auiLeftPixelMapping); + CalculateHalfOfTheSelectors(1, s_auiRightPixelMapping); + } + + m_fError = m_fError1 + m_fError2; + } + + // ---------------------------------------------------------------------------------------------------- + // choose best selectors for half of the block + // calculate the error for half of the block + // + void Block4x4Encoding_ETC1::CalculateHalfOfTheSelectors(unsigned int a_uiHalf, + const unsigned int *pauiPixelMapping) + { + static const bool DEBUG_PRINT = false; + + ColorFloatRGBA *pfrgbaColor = a_uiHalf ? &m_frgbaColor2 : &m_frgbaColor1; + unsigned int *puiCW = a_uiHalf ? &m_uiCW2 : &m_uiCW1; + + float *pfHalfError = a_uiHalf ? &m_fError2 : &m_fError1; + *pfHalfError = FLT_MAX; + + // try each CW + for (unsigned int uiCW = 0; uiCW < CW_RANGES; uiCW++) + { + if (DEBUG_PRINT) + { + printf("\ncw=%u\n", uiCW); + } + + unsigned int auiPixelSelectors[PIXELS / 2]; + ColorFloatRGBA afrgbaDecodedPixels[PIXELS / 2]; + float afPixelErrors[PIXELS / 2] = { FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX }; + + for (unsigned int uiPixel = 0; uiPixel < 8; uiPixel++) + { + if (DEBUG_PRINT) + { + printf("\tsource [%.2f,%.2f,%.2f]\n", m_pafrgbaSource[pauiPixelMapping[uiPixel]].fR, + m_pafrgbaSource[pauiPixelMapping[uiPixel]].fG, m_pafrgbaSource[pauiPixelMapping[uiPixel]].fB); + } + + ColorFloatRGBA *pfrgbaSourcePixel = &m_pafrgbaSource[pauiPixelMapping[uiPixel]]; + ColorFloatRGBA frgbaDecodedPixel; + + for (unsigned int uiSelector = 0; uiSelector < SELECTORS; uiSelector++) + { + float fDeltaRGB = s_aafCwTable[uiCW][uiSelector]; + + frgbaDecodedPixel = (*pfrgbaColor + fDeltaRGB).ClampRGB(); + + float fPixelError; + + fPixelError = CalcPixelError(frgbaDecodedPixel, m_afDecodedAlphas[pauiPixelMapping[uiPixel]], + *pfrgbaSourcePixel); + + if (DEBUG_PRINT) + { + printf("\tpixel %u, index %u [%.2f,%.2f,%.2f], error %.2f", uiPixel, uiSelector, + frgbaDecodedPixel.fR, + frgbaDecodedPixel.fG, + frgbaDecodedPixel.fB, + fPixelError); + } + + if (fPixelError < afPixelErrors[uiPixel]) + { + if (DEBUG_PRINT) + { + printf(" *"); + } + + auiPixelSelectors[uiPixel] = uiSelector; + afrgbaDecodedPixels[uiPixel] = frgbaDecodedPixel; + afPixelErrors[uiPixel] = fPixelError; + } + + if (DEBUG_PRINT) + { + printf("\n"); + } + } + } + + // add up all pixel errors + float fCWError = 0.0f; + for (unsigned int uiPixel = 0; uiPixel < 8; uiPixel++) + { + fCWError += afPixelErrors[uiPixel]; + } + if (DEBUG_PRINT) + { + printf("\terror %.2f\n", fCWError); + } + + // if best CW so far + if (fCWError < *pfHalfError) + { + *pfHalfError = fCWError; + *puiCW = uiCW; + for (unsigned int uiPixel = 0; uiPixel < 8; uiPixel++) + { + m_auiSelectors[pauiPixelMapping[uiPixel]] = auiPixelSelectors[uiPixel]; + m_afrgbaDecodedColors[pauiPixelMapping[uiPixel]] = afrgbaDecodedPixels[uiPixel]; + } + } + } + + } + + // ---------------------------------------------------------------------------------------------------- + // set the encoding bits based on encoding state + // + void Block4x4Encoding_ETC1::SetEncodingBits(void) + { + assert(m_mode == MODE_ETC1); + + if (m_boolDiff) + { + int iRed1 = m_frgbaColor1.IntRed(31.0f); + int iGreen1 = m_frgbaColor1.IntGreen(31.0f); + int iBlue1 = m_frgbaColor1.IntBlue(31.0f); + + int iRed2 = m_frgbaColor2.IntRed(31.0f); + int iGreen2 = m_frgbaColor2.IntGreen(31.0f); + int iBlue2 = m_frgbaColor2.IntBlue(31.0f); + + int iDRed2 = iRed2 
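// Annotation sketch (not upstream code): the selector packing used by
// SetEncodingBits_Selectors() below. Each pixel's 2-bit selector is split into an MSB
// plane and an LSB plane inside the 32-bit `selectors` field; the `^ 8` swaps adjacent
// bytes, presumably to match the byte order of the packed ETC1 block (the upstream
// code does not comment on it). The function name is an assumption.
static unsigned int PackEtc1Selectors(const unsigned int auiSelectors[16])
{
    unsigned int uiBits = 0;
    for (unsigned int uiPixel = 0; uiPixel < 16; uiPixel++)
    {
        unsigned int uiSelector = auiSelectors[uiPixel];
        uiBits |= (uiSelector >> 1) << (uiPixel ^ 8);         // MSB plane
        uiBits |= (uiSelector & 1) << ((16 + uiPixel) ^ 8);   // LSB plane
    }
    return uiBits;
}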
- iRed1; + int iDGreen2 = iGreen2 - iGreen1; + int iDBlue2 = iBlue2 - iBlue1; + + assert(iDRed2 >= -4 && iDRed2 < 4); + assert(iDGreen2 >= -4 && iDGreen2 < 4); + assert(iDBlue2 >= -4 && iDBlue2 < 4); + + m_pencodingbitsRGB8->differential.red1 = (unsigned int)iRed1; + m_pencodingbitsRGB8->differential.green1 = (unsigned int)iGreen1; + m_pencodingbitsRGB8->differential.blue1 = (unsigned int)iBlue1; + + m_pencodingbitsRGB8->differential.dred2 = iDRed2; + m_pencodingbitsRGB8->differential.dgreen2 = iDGreen2; + m_pencodingbitsRGB8->differential.dblue2 = iDBlue2; + } + else + { + m_pencodingbitsRGB8->individual.red1 = (unsigned int)m_frgbaColor1.IntRed(15.0f); + m_pencodingbitsRGB8->individual.green1 = (unsigned int)m_frgbaColor1.IntGreen(15.0f); + m_pencodingbitsRGB8->individual.blue1 = (unsigned int)m_frgbaColor1.IntBlue(15.0f); + + m_pencodingbitsRGB8->individual.red2 = (unsigned int)m_frgbaColor2.IntRed(15.0f); + m_pencodingbitsRGB8->individual.green2 = (unsigned int)m_frgbaColor2.IntGreen(15.0f); + m_pencodingbitsRGB8->individual.blue2 = (unsigned int)m_frgbaColor2.IntBlue(15.0f); + } + + m_pencodingbitsRGB8->individual.cw1 = m_uiCW1; + m_pencodingbitsRGB8->individual.cw2 = m_uiCW2; + + SetEncodingBits_Selectors(); + + m_pencodingbitsRGB8->individual.diff = (unsigned int)m_boolDiff; + m_pencodingbitsRGB8->individual.flip = (unsigned int)m_boolFlip; + + } + + // ---------------------------------------------------------------------------------------------------- + // set the selectors in the encoding bits + // + void Block4x4Encoding_ETC1::SetEncodingBits_Selectors(void) + { + + m_pencodingbitsRGB8->individual.selectors = 0; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + unsigned int uiSelector = m_auiSelectors[uiPixel]; + + // set index msb + m_pencodingbitsRGB8->individual.selectors |= (uiSelector >> 1) << (uiPixel ^ 8); + + // set index lsb + m_pencodingbitsRGB8->individual.selectors |= (uiSelector & 1) << ((16 + uiPixel) ^ 8); + } + + } + + // ---------------------------------------------------------------------------------------------------- + // set the decoded colors and decoded alpha based on the encoding state + // + void Block4x4Encoding_ETC1::Decode(void) + { + + const unsigned int *pauiPixelOrder = m_boolFlip ? s_auiPixelOrderFlip1 : s_auiPixelOrderFlip0; + + for (unsigned int uiPixelOrder = 0; uiPixelOrder < PIXELS; uiPixelOrder++) + { + ColorFloatRGBA *pfrgbaCenter = uiPixelOrder < 8 ? &m_frgbaColor1 : &m_frgbaColor2; + unsigned int uiCW = uiPixelOrder < 8 ? m_uiCW1 : m_uiCW2; + + unsigned int uiPixel = pauiPixelOrder[uiPixelOrder]; + + float fDelta = s_aafCwTable[uiCW][m_auiSelectors[uiPixel]]; + m_afrgbaDecodedColors[uiPixel] = (*pfrgbaCenter + fDelta).ClampRGB(); + m_afDecodedAlphas[uiPixel] = 1.0f; + } + + } + + // ---------------------------------------------------------------------------------------------------- + // + +} // namespace Etc diff --git a/thirdparty/etc2comp/EtcBlock4x4Encoding_ETC1.h b/thirdparty/etc2comp/EtcBlock4x4Encoding_ETC1.h new file mode 100644 index 0000000000..c0dc84d5d5 --- /dev/null +++ b/thirdparty/etc2comp/EtcBlock4x4Encoding_ETC1.h @@ -0,0 +1,186 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "EtcBlock4x4Encoding.h" +#include "EtcBlock4x4EncodingBits.h" +#include "EtcDifferentialTrys.h" +#include "EtcIndividualTrys.h" + +namespace Etc +{ + + // base class for Block4x4Encoding_RGB8 + class Block4x4Encoding_ETC1 : public Block4x4Encoding + { + public: + + Block4x4Encoding_ETC1(void); + virtual ~Block4x4Encoding_ETC1(void); + + virtual void InitFromSource(Block4x4 *a_pblockParent, + ColorFloatRGBA *a_pafrgbaSource, + + unsigned char *a_paucEncodingBits, + ErrorMetric a_errormetric); + + virtual void InitFromEncodingBits(Block4x4 *a_pblockParent, + unsigned char *a_paucEncodingBits, + ColorFloatRGBA *a_pafrgbaSource, + + ErrorMetric a_errormetric); + + virtual void PerformIteration(float a_fEffort); + + inline virtual bool GetFlip(void) + { + return m_boolFlip; + } + + inline virtual bool IsDifferential(void) + { + return m_boolDiff; + } + + virtual void SetEncodingBits(void); + + void Decode(void); + + inline ColorFloatRGBA GetColor1(void) const + { + return m_frgbaColor1; + } + + inline ColorFloatRGBA GetColor2(void) const + { + return m_frgbaColor2; + } + + inline const unsigned int * GetSelectors(void) const + { + return m_auiSelectors; + } + + inline unsigned int GetCW1(void) const + { + return m_uiCW1; + } + + inline unsigned int GetCW2(void) const + { + return m_uiCW2; + } + + inline bool HasSeverelyBentDifferentialColors(void) const + { + return m_boolSeverelyBentDifferentialColors; + } + + protected: + + static const unsigned int s_auiPixelOrderFlip0[PIXELS]; + static const unsigned int s_auiPixelOrderFlip1[PIXELS]; + static const unsigned int s_auiPixelOrderHScan[PIXELS]; + + static const unsigned int s_auiLeftPixelMapping[8]; + static const unsigned int s_auiRightPixelMapping[8]; + static const unsigned int s_auiTopPixelMapping[8]; + static const unsigned int s_auiBottomPixelMapping[8]; + + static const unsigned int SELECTOR_BITS = 2; + static const unsigned int SELECTORS = 1 << SELECTOR_BITS; + + static const unsigned int CW_BITS = 3; + static const unsigned int CW_RANGES = 1 << CW_BITS; + + static float s_aafCwTable[CW_RANGES][SELECTORS]; + static unsigned char s_aucDifferentialCwRange[256]; + + static const int MAX_DIFFERENTIAL = 3; + static const int MIN_DIFFERENTIAL = -4; + + void InitFromEncodingBits_Selectors(void); + + void PerformFirstIteration(void); + void CalculateMostLikelyFlip(void); + + void TryDifferential(bool a_boolFlip, unsigned int a_uiRadius, + int a_iGrayOffset1, int a_iGrayOffset2); + void TryDifferentialHalf(DifferentialTrys::Half *a_phalf); + + void TryIndividual(bool a_boolFlip, unsigned int a_uiRadius); + void TryIndividualHalf(IndividualTrys::Half *a_phalf); + + void TryDegenerates1(void); + void TryDegenerates2(void); + void TryDegenerates3(void); + void TryDegenerates4(void); + + void CalculateSelectors(); + void CalculateHalfOfTheSelectors(unsigned int a_uiHalf, + const unsigned int *pauiPixelMapping); + + // calculate the distance2 of r_frgbaPixel from r_frgbaTarget's gray line + inline float CalcGrayDistance2(ColorFloatRGBA &r_frgbaPixel, + ColorFloatRGBA &r_frgbaTarget) + { + float 
fDeltaGray = ((r_frgbaPixel.fR - r_frgbaTarget.fR) + + (r_frgbaPixel.fG - r_frgbaTarget.fG) + + (r_frgbaPixel.fB - r_frgbaTarget.fB)) / 3.0f; + + ColorFloatRGBA frgbaPointOnGrayLine = (r_frgbaTarget + fDeltaGray).ClampRGB(); + + float fDR = r_frgbaPixel.fR - frgbaPointOnGrayLine.fR; + float fDG = r_frgbaPixel.fG - frgbaPointOnGrayLine.fG; + float fDB = r_frgbaPixel.fB - frgbaPointOnGrayLine.fB; + + return (fDR*fDR) + (fDG*fDG) + (fDB*fDB); + } + + void SetEncodingBits_Selectors(void); + + // intermediate encoding + bool m_boolDiff; + bool m_boolFlip; + ColorFloatRGBA m_frgbaColor1; + ColorFloatRGBA m_frgbaColor2; + unsigned int m_uiCW1; + unsigned int m_uiCW2; + unsigned int m_auiSelectors[PIXELS]; + + // state shared between iterations + ColorFloatRGBA m_frgbaSourceAverageLeft; + ColorFloatRGBA m_frgbaSourceAverageRight; + ColorFloatRGBA m_frgbaSourceAverageTop; + ColorFloatRGBA m_frgbaSourceAverageBottom; + bool m_boolMostLikelyFlip; + + // stats + float m_fError1; // error for Etc1 half 1 + float m_fError2; // error for Etc1 half 2 + bool m_boolSeverelyBentDifferentialColors; // only valid if m_boolDiff; + + // final encoding + Block4x4EncodingBits_RGB8 *m_pencodingbitsRGB8; // or RGB8 portion of Block4x4EncodingBits_RGB8A8 + + private: + + void CalculateSourceAverages(void); + + }; + +} // namespace Etc diff --git a/thirdparty/etc2comp/EtcBlock4x4Encoding_R11.cpp b/thirdparty/etc2comp/EtcBlock4x4Encoding_R11.cpp new file mode 100644 index 0000000000..4c012fbbf1 --- /dev/null +++ b/thirdparty/etc2comp/EtcBlock4x4Encoding_R11.cpp @@ -0,0 +1,429 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* +EtcBlock4x4Encoding_R11.cpp + +Block4x4Encoding_R11 is the encoder to use when targetting file format R11 and SR11 (signed R11). 
+ +*/ + +#include "EtcConfig.h" +#include "EtcBlock4x4Encoding_R11.h" + +#include "EtcBlock4x4EncodingBits.h" +#include "EtcBlock4x4.h" + +#include <stdio.h> +#include <string.h> +#include <assert.h> +#include <float.h> +#include <limits> + +namespace Etc +{ + + // modifier values to use for R11, SR11, RG11 and SRG11 + float Block4x4Encoding_R11::s_aafModifierTable[MODIFIER_TABLE_ENTRYS][SELECTORS] + { + { -3.0f / 255.0f, -6.0f / 255.0f, -9.0f / 255.0f, -15.0f / 255.0f, 2.0f / 255.0f, 5.0f / 255.0f, 8.0f / 255.0f, 14.0f / 255.0f }, + { -3.0f / 255.0f, -7.0f / 255.0f, -10.0f / 255.0f, -13.0f / 255.0f, 2.0f / 255.0f, 6.0f / 255.0f, 9.0f / 255.0f, 12.0f / 255.0f }, + { -2.0f / 255.0f, -5.0f / 255.0f, -8.0f / 255.0f, -13.0f / 255.0f, 1.0f / 255.0f, 4.0f / 255.0f, 7.0f / 255.0f, 12.0f / 255.0f }, + { -2.0f / 255.0f, -4.0f / 255.0f, -6.0f / 255.0f, -13.0f / 255.0f, 1.0f / 255.0f, 3.0f / 255.0f, 5.0f / 255.0f, 12.0f / 255.0f }, + + { -3.0f / 255.0f, -6.0f / 255.0f, -8.0f / 255.0f, -12.0f / 255.0f, 2.0f / 255.0f, 5.0f / 255.0f, 7.0f / 255.0f, 11.0f / 255.0f }, + { -3.0f / 255.0f, -7.0f / 255.0f, -9.0f / 255.0f, -11.0f / 255.0f, 2.0f / 255.0f, 6.0f / 255.0f, 8.0f / 255.0f, 10.0f / 255.0f }, + { -4.0f / 255.0f, -7.0f / 255.0f, -8.0f / 255.0f, -11.0f / 255.0f, 3.0f / 255.0f, 6.0f / 255.0f, 7.0f / 255.0f, 10.0f / 255.0f }, + { -3.0f / 255.0f, -5.0f / 255.0f, -8.0f / 255.0f, -11.0f / 255.0f, 2.0f / 255.0f, 4.0f / 255.0f, 7.0f / 255.0f, 10.0f / 255.0f }, + + { -2.0f / 255.0f, -6.0f / 255.0f, -8.0f / 255.0f, -10.0f / 255.0f, 1.0f / 255.0f, 5.0f / 255.0f, 7.0f / 255.0f, 9.0f / 255.0f }, + { -2.0f / 255.0f, -5.0f / 255.0f, -8.0f / 255.0f, -10.0f / 255.0f, 1.0f / 255.0f, 4.0f / 255.0f, 7.0f / 255.0f, 9.0f / 255.0f }, + { -2.0f / 255.0f, -4.0f / 255.0f, -8.0f / 255.0f, -10.0f / 255.0f, 1.0f / 255.0f, 3.0f / 255.0f, 7.0f / 255.0f, 9.0f / 255.0f }, + { -2.0f / 255.0f, -5.0f / 255.0f, -7.0f / 255.0f, -10.0f / 255.0f, 1.0f / 255.0f, 4.0f / 255.0f, 6.0f / 255.0f, 9.0f / 255.0f }, + + { -3.0f / 255.0f, -4.0f / 255.0f, -7.0f / 255.0f, -10.0f / 255.0f, 2.0f / 255.0f, 3.0f / 255.0f, 6.0f / 255.0f, 9.0f / 255.0f }, + { -1.0f / 255.0f, -2.0f / 255.0f, -3.0f / 255.0f, -10.0f / 255.0f, 0.0f / 255.0f, 1.0f / 255.0f, 2.0f / 255.0f, 9.0f / 255.0f }, + { -4.0f / 255.0f, -6.0f / 255.0f, -8.0f / 255.0f, -9.0f / 255.0f, 3.0f / 255.0f, 5.0f / 255.0f, 7.0f / 255.0f, 8.0f / 255.0f }, + { -3.0f / 255.0f, -5.0f / 255.0f, -7.0f / 255.0f, -9.0f / 255.0f, 2.0f / 255.0f, 4.0f / 255.0f, 6.0f / 255.0f, 8.0f / 255.0f } + }; + + // ---------------------------------------------------------------------------------------------------- + // + Block4x4Encoding_R11::Block4x4Encoding_R11(void) + { + + m_pencodingbitsR11 = nullptr; + + } + + Block4x4Encoding_R11::~Block4x4Encoding_R11(void) {} + // ---------------------------------------------------------------------------------------------------- + // initialization prior to encoding + // a_pblockParent points to the block associated with this encoding + // a_errormetric is used to choose the best encoding + // a_pafrgbaSource points to a 4x4 block subset of the source image + // a_paucEncodingBits points to the final encoding bits + // + void Block4x4Encoding_R11::InitFromSource(Block4x4 *a_pblockParent, + ColorFloatRGBA *a_pafrgbaSource, + unsigned char *a_paucEncodingBits, ErrorMetric a_errormetric) + { + Block4x4Encoding::Init(a_pblockParent, a_pafrgbaSource,a_errormetric); + + m_pencodingbitsR11 = (Block4x4EncodingBits_R11 *)a_paucEncodingBits; + } + + // 
---------------------------------------------------------------------------------------------------- + // initialization from the encoding bits of a previous encoding + // a_pblockParent points to the block associated with this encoding + // a_errormetric is used to choose the best encoding + // a_pafrgbaSource points to a 4x4 block subset of the source image + // a_paucEncodingBits points to the final encoding bits of a previous encoding + // + void Block4x4Encoding_R11::InitFromEncodingBits(Block4x4 *a_pblockParent, + unsigned char *a_paucEncodingBits, + ColorFloatRGBA *a_pafrgbaSource, + ErrorMetric a_errormetric) + { + m_pencodingbitsR11 = (Block4x4EncodingBits_R11 *)a_paucEncodingBits; + + // init RGB portion + Block4x4Encoding_RGB8::InitFromEncodingBits(a_pblockParent, + (unsigned char *)m_pencodingbitsR11, + a_pafrgbaSource, + a_errormetric); + + // init R11 portion + { + m_mode = MODE_R11; + if (a_pblockParent->GetImageSource()->GetFormat() == Image::Format::SIGNED_R11 || a_pblockParent->GetImageSource()->GetFormat() == Image::Format::SIGNED_RG11) + { + m_fRedBase = (float)(signed char)m_pencodingbitsR11->data.base; + } + else + { + m_fRedBase = (float)(unsigned char)m_pencodingbitsR11->data.base; + } + m_fRedMultiplier = (float)m_pencodingbitsR11->data.multiplier; + m_uiRedModifierTableIndex = m_pencodingbitsR11->data.table; + + unsigned long long int ulliSelectorBits = 0; + ulliSelectorBits |= (unsigned long long int)m_pencodingbitsR11->data.selectors0 << 40; + ulliSelectorBits |= (unsigned long long int)m_pencodingbitsR11->data.selectors1 << 32; + ulliSelectorBits |= (unsigned long long int)m_pencodingbitsR11->data.selectors2 << 24; + ulliSelectorBits |= (unsigned long long int)m_pencodingbitsR11->data.selectors3 << 16; + ulliSelectorBits |= (unsigned long long int)m_pencodingbitsR11->data.selectors4 << 8; + ulliSelectorBits |= (unsigned long long int)m_pencodingbitsR11->data.selectors5; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + unsigned int uiShift = 45 - (3 * uiPixel); + m_auiRedSelectors[uiPixel] = (ulliSelectorBits >> uiShift) & (SELECTORS - 1); + } + + // decode the red channel + // calc red error + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + float fDecodedPixelData = 0.0f; + if (a_pblockParent->GetImageSource()->GetFormat() == Image::Format::R11 || a_pblockParent->GetImageSource()->GetFormat() == Image::Format::RG11) + { + fDecodedPixelData = DecodePixelRed(m_fRedBase, m_fRedMultiplier, + m_uiRedModifierTableIndex, + m_auiRedSelectors[uiPixel]); + } + else if (a_pblockParent->GetImageSource()->GetFormat() == Image::Format::SIGNED_R11 || a_pblockParent->GetImageSource()->GetFormat() == Image::Format::SIGNED_RG11) + { + fDecodedPixelData = DecodePixelRed(m_fRedBase + 128, m_fRedMultiplier, + m_uiRedModifierTableIndex, + m_auiRedSelectors[uiPixel]); + } + else + { + assert(0); + } + m_afrgbaDecodedColors[uiPixel] = ColorFloatRGBA(fDecodedPixelData, 0.0f, 0.0f, 1.0f); + } + CalcBlockError(); + } + + } + + // ---------------------------------------------------------------------------------------------------- + // perform a single encoding iteration + // replace the encoding if a better encoding was found + // subsequent iterations generally take longer for each iteration + // set m_boolDone if encoding is perfect or encoding is finished based on a_fEffort + // + void Block4x4Encoding_R11::PerformIteration(float a_fEffort) + { + assert(!m_boolDone); + m_mode = MODE_R11; + + switch (m_uiEncodingIterations) + { + case 0: + m_fError = 
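// Illustrative sketch (not upstream code): the 48-bit R11 selector field unpacked in
// InitFromEncodingBits() above and packed again in SetEncodingBits() below. Each of
// the 16 pixels stores a 3-bit selector, most significant pixel first
// (shift 45 - 3*pixel); the 48 bits are then split across the selectors0..selectors5
// bytes. Function names are assumptions for illustration.
static unsigned long long PackR11Selectors(const unsigned int auiSelectors[16])
{
    unsigned long long ulliBits = 0;
    for (unsigned int uiPixel = 0; uiPixel < 16; uiPixel++)
    {
        ulliBits |= (unsigned long long)(auiSelectors[uiPixel] & 0x7)
                    << (45 - 3 * uiPixel);
    }
    return ulliBits;
}

static unsigned int UnpackR11Selector(unsigned long long ulliBits, unsigned int uiPixel)
{
    return (unsigned int)((ulliBits >> (45 - 3 * uiPixel)) & 0x7);
}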
FLT_MAX; + m_fRedBlockError = FLT_MAX; // artificially high value + CalculateR11(8, 0.0f, 0.0f); + m_fError = m_fRedBlockError; + break; + + case 1: + CalculateR11(8, 2.0f, 1.0f); + m_fError = m_fRedBlockError; + if (a_fEffort <= 24.5f) + { + m_boolDone = true; + } + break; + + case 2: + CalculateR11(8, 12.0f, 1.0f); + m_fError = m_fRedBlockError; + if (a_fEffort <= 49.5f) + { + m_boolDone = true; + } + break; + + case 3: + CalculateR11(7, 6.0f, 1.0f); + m_fError = m_fRedBlockError; + break; + + case 4: + CalculateR11(6, 3.0f, 1.0f); + m_fError = m_fRedBlockError; + break; + + case 5: + CalculateR11(5, 1.0f, 0.0f); + m_fError = m_fRedBlockError; + m_boolDone = true; + break; + + default: + assert(0); + break; + } + + m_uiEncodingIterations++; + SetDoneIfPerfect(); + } + + // ---------------------------------------------------------------------------------------------------- + // find the best combination of base color, multiplier and selectors + // + // a_uiSelectorsUsed limits the number of selector combinations to try + // a_fBaseRadius limits the range of base colors to try + // a_fMultiplierRadius limits the range of multipliers to try + // + void Block4x4Encoding_R11::CalculateR11(unsigned int a_uiSelectorsUsed, + float a_fBaseRadius, float a_fMultiplierRadius) + { + // maps from virtual (monotonic) selector to ETC selector + static const unsigned int auiVirtualSelectorMap[8] = {3, 2, 1, 0, 4, 5, 6, 7}; + + // find min/max red + float fMinRed = 1.0f; + float fMaxRed = 0.0f; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + // ignore border pixels + float fAlpha = m_pafrgbaSource[uiPixel].fA; + if (isnan(fAlpha)) + { + continue; + } + + float fRed = m_pafrgbaSource[uiPixel].fR; + + if (fRed < fMinRed) + { + fMinRed = fRed; + } + if (fRed > fMaxRed) + { + fMaxRed = fRed; + } + } + assert(fMinRed <= fMaxRed); + + float fRedRange = (fMaxRed - fMinRed); + + // try each modifier table entry + for (unsigned int uiTableEntry = 0; uiTableEntry < MODIFIER_TABLE_ENTRYS; uiTableEntry++) + { + for (unsigned int uiMinVirtualSelector = 0; + uiMinVirtualSelector <= (8- a_uiSelectorsUsed); + uiMinVirtualSelector++) + { + unsigned int uiMaxVirtualSelector = uiMinVirtualSelector + a_uiSelectorsUsed - 1; + + unsigned int uiMinSelector = auiVirtualSelectorMap[uiMinVirtualSelector]; + unsigned int uiMaxSelector = auiVirtualSelectorMap[uiMaxVirtualSelector]; + + float fTableEntryCenter = -s_aafModifierTable[uiTableEntry][uiMinSelector]; + + float fTableEntryRange = s_aafModifierTable[uiTableEntry][uiMaxSelector] - + s_aafModifierTable[uiTableEntry][uiMinSelector]; + + float fCenterRatio = fTableEntryCenter / fTableEntryRange; + + float fCenter = fMinRed + fCenterRatio*fRedRange; + fCenter = roundf(255.0f * fCenter) / 255.0f; + + float fMinBase = fCenter - (a_fBaseRadius / 255.0f); + if (fMinBase < 0.0f) + { + fMinBase = 0.0f; + } + + float fMaxBase = fCenter + (a_fBaseRadius / 255.0f); + if (fMaxBase > 1.0f) + { + fMaxBase = 1.0f; + } + + for (float fBase = fMinBase; fBase <= fMaxBase; fBase += (0.999999f / 255.0f)) + { + float fRangeMultiplier = roundf(fRedRange / fTableEntryRange); + + float fMinMultiplier = fRangeMultiplier - a_fMultiplierRadius; + if (fMinMultiplier < 1.0f) + { + fMinMultiplier = 0.0f; + } + else if (fMinMultiplier > 15.0f) + { + fMinMultiplier = 15.0f; + } + + float fMaxMultiplier = fRangeMultiplier + a_fMultiplierRadius; + if (fMaxMultiplier < 1.0f) + { + fMaxMultiplier = 1.0f; + } + else if (fMaxMultiplier > 15.0f) + { + fMaxMultiplier = 15.0f; + } + + for (float 
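// Worked example (annotation, not upstream code) of the reconstruction performed by
// DecodePixelRed(), called in the selector loop below and defined inline in
// EtcBlock4x4Encoding_R11.h: the decoded value is
// (base*8 + 4 + 8*multiplier*modifier*255) / 2047, clamped to [0, 1], with a
// multiplier of 0 treated as 1/8. The concrete numbers here are illustrative only.
static float DecodeR11Example(void)
{
    float fBase = 100.0f;               // base in 0..255 units
    float fMultiplier = 3.0f;
    float fModifier = -9.0f / 255.0f;   // s_aafModifierTable[0][2] above

    // (100*8 + 4 + 8*3*(-9)) / 2047 = 588 / 2047, roughly 0.287
    float fPixelRed = (fBase * 8.0f + 4.0f
                       + 8.0f * fMultiplier * fModifier * 255.0f) / 2047.0f;

    if (fPixelRed < 0.0f) fPixelRed = 0.0f;
    if (fPixelRed > 1.0f) fPixelRed = 1.0f;
    return fPixelRed;
}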
fMultiplier = fMinMultiplier; fMultiplier <= fMaxMultiplier; fMultiplier += 1.0f) + { + // find best selector for each pixel + unsigned int auiBestSelectors[PIXELS]; + float afBestRedError[PIXELS]; + float afBestPixelRed[PIXELS]; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + float fBestPixelRedError = FLT_MAX; + + for (unsigned int uiSelector = 0; uiSelector < SELECTORS; uiSelector++) + { + float fPixelRed = DecodePixelRed(fBase * 255.0f, fMultiplier, uiTableEntry, uiSelector); + + ColorFloatRGBA frgba(fPixelRed, m_pafrgbaSource[uiPixel].fG,0.0f,1.0f); + + float fPixelRedError = CalcPixelError(frgba, 1.0f, m_pafrgbaSource[uiPixel]); + + if (fPixelRedError < fBestPixelRedError) + { + fBestPixelRedError = fPixelRedError; + auiBestSelectors[uiPixel] = uiSelector; + afBestRedError[uiPixel] = fBestPixelRedError; + afBestPixelRed[uiPixel] = fPixelRed; + } + } + } + float fBlockError = 0.0f; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + fBlockError += afBestRedError[uiPixel]; + } + if (fBlockError < m_fRedBlockError) + { + m_fRedBlockError = fBlockError; + + if (m_pblockParent->GetImageSource()->GetFormat() == Image::Format::R11 || m_pblockParent->GetImageSource()->GetFormat() == Image::Format::RG11) + { + m_fRedBase = 255.0f * fBase; + } + else if (m_pblockParent->GetImageSource()->GetFormat() == Image::Format::SIGNED_R11 || m_pblockParent->GetImageSource()->GetFormat() == Image::Format::SIGNED_RG11) + { + m_fRedBase = (fBase * 255) - 128; + } + else + { + assert(0); + } + m_fRedMultiplier = fMultiplier; + m_uiRedModifierTableIndex = uiTableEntry; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_auiRedSelectors[uiPixel] = auiBestSelectors[uiPixel]; + float fBestPixelRed = afBestPixelRed[uiPixel]; + m_afrgbaDecodedColors[uiPixel] = ColorFloatRGBA(fBestPixelRed, 0.0f, 0.0f, 1.0f); + m_afDecodedAlphas[uiPixel] = 1.0f; + } + } + } + } + + } + } + } + + // ---------------------------------------------------------------------------------------------------- + // set the encoding bits based on encoding state + // + void Block4x4Encoding_R11::SetEncodingBits(void) + { + if (m_pblockParent->GetImageSource()->GetFormat() == Image::Format::R11 || m_pblockParent->GetImageSource()->GetFormat() == Image::Format::RG11) + { + m_pencodingbitsR11->data.base = (unsigned char)roundf(m_fRedBase); + } + else if (m_pblockParent->GetImageSource()->GetFormat() == Image::Format::SIGNED_R11 || m_pblockParent->GetImageSource()->GetFormat() == Image::Format::SIGNED_RG11) + { + m_pencodingbitsR11->data.base = (signed char)roundf(m_fRedBase); + } + else + { + assert(0); + } + m_pencodingbitsR11->data.table = m_uiRedModifierTableIndex; + m_pencodingbitsR11->data.multiplier = (unsigned char)roundf(m_fRedMultiplier); + + unsigned long long int ulliSelectorBits = 0; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + unsigned int uiShift = 45 - (3 * uiPixel); + ulliSelectorBits |= ((unsigned long long int)m_auiRedSelectors[uiPixel]) << uiShift; + } + + m_pencodingbitsR11->data.selectors0 = ulliSelectorBits >> 40; + m_pencodingbitsR11->data.selectors1 = ulliSelectorBits >> 32; + m_pencodingbitsR11->data.selectors2 = ulliSelectorBits >> 24; + m_pencodingbitsR11->data.selectors3 = ulliSelectorBits >> 16; + m_pencodingbitsR11->data.selectors4 = ulliSelectorBits >> 8; + m_pencodingbitsR11->data.selectors5 = ulliSelectorBits; + } + + // ---------------------------------------------------------------------------------------------------- + // +} diff 
--git a/thirdparty/etc2comp/EtcBlock4x4Encoding_R11.h b/thirdparty/etc2comp/EtcBlock4x4Encoding_R11.h new file mode 100644 index 0000000000..b40c1e0036 --- /dev/null +++ b/thirdparty/etc2comp/EtcBlock4x4Encoding_R11.h @@ -0,0 +1,122 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "EtcBlock4x4Encoding_RGB8.h" + +namespace Etc +{ + class Block4x4EncodingBits_R11; + + // ################################################################################ + // Block4x4Encoding_R11 + // ################################################################################ + + class Block4x4Encoding_R11 : public Block4x4Encoding_RGB8 + { + public: + + Block4x4Encoding_R11(void); + virtual ~Block4x4Encoding_R11(void); + + virtual void InitFromSource(Block4x4 *a_pblockParent, + ColorFloatRGBA *a_pafrgbaSource, + unsigned char *a_paucEncodingBits, ErrorMetric a_errormetric); + + virtual void InitFromEncodingBits(Block4x4 *a_pblockParent, + unsigned char *a_paucEncodingBits, + ColorFloatRGBA *a_pafrgbaSource, + ErrorMetric a_errormetric); + + virtual void PerformIteration(float a_fEffort); + + virtual void SetEncodingBits(void); + + inline float GetRedBase(void) const + { + return m_fRedBase; + } + + inline float GetRedMultiplier(void) const + { + return m_fRedMultiplier; + } + + inline int GetRedTableIndex(void) const + { + return m_uiRedModifierTableIndex; + } + + inline const unsigned int * GetRedSelectors(void) const + { + return m_auiRedSelectors; + } + + protected: + + static const unsigned int MODIFIER_TABLE_ENTRYS = 16; + static const unsigned int SELECTOR_BITS = 3; + static const unsigned int SELECTORS = 1 << SELECTOR_BITS; + + static float s_aafModifierTable[MODIFIER_TABLE_ENTRYS][SELECTORS]; + + void CalculateR11(unsigned int a_uiSelectorsUsed, + float a_fBaseRadius, float a_fMultiplierRadius); + + + + + inline float DecodePixelRed(float a_fBase, float a_fMultiplier, + unsigned int a_uiTableIndex, unsigned int a_uiSelector) + { + float fMultiplier = a_fMultiplier; + if (fMultiplier <= 0.0f) + { + fMultiplier = 1.0f / 8.0f; + } + + float fPixelRed = a_fBase * 8 + 4 + + 8 * fMultiplier*s_aafModifierTable[a_uiTableIndex][a_uiSelector]*255; + fPixelRed /= 2047.0f; + + if (fPixelRed < 0.0f) + { + fPixelRed = 0.0f; + } + else if (fPixelRed > 1.0f) + { + fPixelRed = 1.0f; + } + + return fPixelRed; + } + + Block4x4EncodingBits_R11 *m_pencodingbitsR11; + + float m_fRedBase; + float m_fRedMultiplier; + float m_fRedBlockError; + unsigned int m_uiRedModifierTableIndex; + unsigned int m_auiRedSelectors[PIXELS]; + + + }; + + // ---------------------------------------------------------------------------------------------------- + // + +} // namespace Etc diff --git a/thirdparty/etc2comp/EtcBlock4x4Encoding_RG11.cpp b/thirdparty/etc2comp/EtcBlock4x4Encoding_RG11.cpp new file mode 100644 index 0000000000..417835db51 --- /dev/null +++ b/thirdparty/etc2comp/EtcBlock4x4Encoding_RG11.cpp @@ -0,0 +1,447 @@ +/* + * Copyright 2015 The 
Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* +EtcBlock4x4Encoding_RG11.cpp + +Block4x4Encoding_RG11 is the encoder to use when targetting file format RG11 and SRG11 (signed RG11). + +*/ + +#include "EtcConfig.h" +#include "EtcBlock4x4Encoding_RG11.h" + +#include "EtcBlock4x4EncodingBits.h" +#include "EtcBlock4x4.h" + +#include <stdio.h> +#include <string.h> +#include <assert.h> +#include <float.h> +#include <limits> + +namespace Etc +{ + // ---------------------------------------------------------------------------------------------------- + // + Block4x4Encoding_RG11::Block4x4Encoding_RG11(void) + { + m_pencodingbitsRG11 = nullptr; + } + + Block4x4Encoding_RG11::~Block4x4Encoding_RG11(void) {} + // ---------------------------------------------------------------------------------------------------- + // initialization prior to encoding + // a_pblockParent points to the block associated with this encoding + // a_errormetric is used to choose the best encoding + // a_pafrgbaSource points to a 4x4 block subset of the source image + // a_paucEncodingBits points to the final encoding bits + // + void Block4x4Encoding_RG11::InitFromSource(Block4x4 *a_pblockParent, + ColorFloatRGBA *a_pafrgbaSource, + unsigned char *a_paucEncodingBits, ErrorMetric a_errormetric) + { + Block4x4Encoding::Init(a_pblockParent, a_pafrgbaSource,a_errormetric); + + m_pencodingbitsRG11 = (Block4x4EncodingBits_RG11 *)a_paucEncodingBits; + } + + // ---------------------------------------------------------------------------------------------------- + // initialization from the encoding bits of a previous encoding + // a_pblockParent points to the block associated with this encoding + // a_errormetric is used to choose the best encoding + // a_pafrgbaSource points to a 4x4 block subset of the source image + // a_paucEncodingBits points to the final encoding bits of a previous encoding + // + void Block4x4Encoding_RG11::InitFromEncodingBits(Block4x4 *a_pblockParent, + unsigned char *a_paucEncodingBits, + ColorFloatRGBA *a_pafrgbaSource, + ErrorMetric a_errormetric) + { + + m_pencodingbitsRG11 = (Block4x4EncodingBits_RG11 *)a_paucEncodingBits; + + // init RGB portion + Block4x4Encoding_RGB8::InitFromEncodingBits(a_pblockParent, + (unsigned char *)m_pencodingbitsRG11, + a_pafrgbaSource, + a_errormetric); + m_fError = 0.0f; + + { + m_mode = MODE_RG11; + if (a_pblockParent->GetImageSource()->GetFormat() == Image::Format::SIGNED_RG11) + { + m_fRedBase = (float)(signed char)m_pencodingbitsRG11->data.baseR; + m_fGrnBase = (float)(signed char)m_pencodingbitsRG11->data.baseG; + } + else + { + m_fRedBase = (float)(unsigned char)m_pencodingbitsRG11->data.baseR; + m_fGrnBase = (float)(unsigned char)m_pencodingbitsRG11->data.baseG; + } + m_fRedMultiplier = (float)m_pencodingbitsRG11->data.multiplierR; + m_fGrnMultiplier = (float)m_pencodingbitsRG11->data.multiplierG; + m_uiRedModifierTableIndex = m_pencodingbitsRG11->data.tableIndexR; + m_uiGrnModifierTableIndex = 
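// Illustrative sketch (not upstream code): how the signed formats are handled in the
// base-color reads above and the DecodePixelRed() calls below. For SIGNED_R11 and
// SIGNED_RG11 the stored base is reinterpreted as a signed char in [-128, 127] and
// shifted by +128 into the 0..255 working range; unsigned formats use the byte
// directly. The helper name is an assumption for illustration.
static float WorkingBaseFromStored(unsigned char ucStoredBase, bool bSigned)
{
    return bSigned ? (float)(signed char)ucStoredBase + 128.0f
                   : (float)ucStoredBase;
}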
m_pencodingbitsRG11->data.tableIndexG; + + unsigned long long int ulliSelectorBitsR = 0; + ulliSelectorBitsR |= (unsigned long long int)m_pencodingbitsRG11->data.selectorsR0 << 40; + ulliSelectorBitsR |= (unsigned long long int)m_pencodingbitsRG11->data.selectorsR1 << 32; + ulliSelectorBitsR |= (unsigned long long int)m_pencodingbitsRG11->data.selectorsR2 << 24; + ulliSelectorBitsR |= (unsigned long long int)m_pencodingbitsRG11->data.selectorsR3 << 16; + ulliSelectorBitsR |= (unsigned long long int)m_pencodingbitsRG11->data.selectorsR4 << 8; + ulliSelectorBitsR |= (unsigned long long int)m_pencodingbitsRG11->data.selectorsR5; + + unsigned long long int ulliSelectorBitsG = 0; + ulliSelectorBitsG |= (unsigned long long int)m_pencodingbitsRG11->data.selectorsG0 << 40; + ulliSelectorBitsG |= (unsigned long long int)m_pencodingbitsRG11->data.selectorsG1 << 32; + ulliSelectorBitsG |= (unsigned long long int)m_pencodingbitsRG11->data.selectorsG2 << 24; + ulliSelectorBitsG |= (unsigned long long int)m_pencodingbitsRG11->data.selectorsG3 << 16; + ulliSelectorBitsG |= (unsigned long long int)m_pencodingbitsRG11->data.selectorsG4 << 8; + ulliSelectorBitsG |= (unsigned long long int)m_pencodingbitsRG11->data.selectorsG5; + + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + unsigned int uiShift = 45 - (3 * uiPixel); + m_auiRedSelectors[uiPixel] = (ulliSelectorBitsR >> uiShift) & (SELECTORS - 1); + m_auiGrnSelectors[uiPixel] = (ulliSelectorBitsG >> uiShift) & (SELECTORS - 1); + } + + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + float fRedDecodedData = 0.0f; + float fGrnDecodedData = 0.0f; + if (a_pblockParent->GetImageSource()->GetFormat() == Image::Format::RG11) + { + fRedDecodedData = DecodePixelRed(m_fRedBase, m_fRedMultiplier, m_uiRedModifierTableIndex, m_auiRedSelectors[uiPixel]); + fGrnDecodedData = DecodePixelRed(m_fGrnBase, m_fGrnMultiplier, m_uiGrnModifierTableIndex, m_auiGrnSelectors[uiPixel]); + } + else if (a_pblockParent->GetImageSource()->GetFormat() == Image::Format::SIGNED_RG11) + { + fRedDecodedData = DecodePixelRed(m_fRedBase + 128, m_fRedMultiplier, m_uiRedModifierTableIndex, m_auiRedSelectors[uiPixel]); + fGrnDecodedData = DecodePixelRed(m_fGrnBase + 128, m_fGrnMultiplier, m_uiGrnModifierTableIndex, m_auiGrnSelectors[uiPixel]); + } + else + { + assert(0); + } + m_afrgbaDecodedColors[uiPixel] = ColorFloatRGBA(fRedDecodedData, fGrnDecodedData, 0.0f, 1.0f); + } + + } + + CalcBlockError(); + } + + // ---------------------------------------------------------------------------------------------------- + // perform a single encoding iteration + // replace the encoding if a better encoding was found + // subsequent iterations generally take longer for each iteration + // set m_boolDone if encoding is perfect or encoding is finished based on a_fEffort + // + void Block4x4Encoding_RG11::PerformIteration(float a_fEffort) + { + assert(!m_boolDone); + + switch (m_uiEncodingIterations) + { + case 0: + m_fError = FLT_MAX; + m_fGrnBlockError = FLT_MAX; // artificially high value + m_fRedBlockError = FLT_MAX; + CalculateR11(8, 0.0f, 0.0f); + CalculateG11(8, 0.0f, 0.0f); + m_fError = (m_fGrnBlockError + m_fRedBlockError); + break; + + case 1: + CalculateR11(8, 2.0f, 1.0f); + CalculateG11(8, 2.0f, 1.0f); + m_fError = (m_fGrnBlockError + m_fRedBlockError); + if (a_fEffort <= 24.5f) + { + m_boolDone = true; + } + break; + + case 2: + CalculateR11(8, 12.0f, 1.0f); + CalculateG11(8, 12.0f, 1.0f); + m_fError = (m_fGrnBlockError + m_fRedBlockError); + if (a_fEffort 
<= 49.5f) + { + m_boolDone = true; + } + break; + + case 3: + CalculateR11(7, 6.0f, 1.0f); + CalculateG11(7, 6.0f, 1.0f); + m_fError = (m_fGrnBlockError + m_fRedBlockError); + break; + + case 4: + CalculateR11(6, 3.0f, 1.0f); + CalculateG11(6, 3.0f, 1.0f); + m_fError = (m_fGrnBlockError + m_fRedBlockError); + break; + + case 5: + CalculateR11(5, 1.0f, 0.0f); + CalculateG11(5, 1.0f, 0.0f); + m_fError = (m_fGrnBlockError + m_fRedBlockError); + m_boolDone = true; + break; + + default: + assert(0); + break; + } + + m_uiEncodingIterations++; + SetDoneIfPerfect(); + } + + // ---------------------------------------------------------------------------------------------------- + // find the best combination of base color, multiplier and selectors + // + // a_uiSelectorsUsed limits the number of selector combinations to try + // a_fBaseRadius limits the range of base colors to try + // a_fMultiplierRadius limits the range of multipliers to try + // + void Block4x4Encoding_RG11::CalculateG11(unsigned int a_uiSelectorsUsed, + float a_fBaseRadius, float a_fMultiplierRadius) + { + // maps from virtual (monotonic) selector to etc selector + static const unsigned int auiVirtualSelectorMap[8] = { 3, 2, 1, 0, 4, 5, 6, 7 }; + + // find min/max Grn + float fMinGrn = 1.0f; + float fMaxGrn = 0.0f; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + // ignore border pixels + float fAlpha = m_pafrgbaSource[uiPixel].fA; + if (isnan(fAlpha)) + { + continue; + } + + float fGrn = m_pafrgbaSource[uiPixel].fG; + + if (fGrn < fMinGrn) + { + fMinGrn = fGrn; + } + if (fGrn > fMaxGrn) + { + fMaxGrn = fGrn; + } + } + assert(fMinGrn <= fMaxGrn); + + float fGrnRange = (fMaxGrn - fMinGrn); + + // try each modifier table entry + for (unsigned int uiTableEntry = 0; uiTableEntry < MODIFIER_TABLE_ENTRYS; uiTableEntry++) + { + for (unsigned int uiMinVirtualSelector = 0; + uiMinVirtualSelector <= (8 - a_uiSelectorsUsed); + uiMinVirtualSelector++) + { + unsigned int uiMaxVirtualSelector = uiMinVirtualSelector + a_uiSelectorsUsed - 1; + + unsigned int uiMinSelector = auiVirtualSelectorMap[uiMinVirtualSelector]; + unsigned int uiMaxSelector = auiVirtualSelectorMap[uiMaxVirtualSelector]; + + float fTableEntryCenter = -s_aafModifierTable[uiTableEntry][uiMinSelector]; + + float fTableEntryRange = s_aafModifierTable[uiTableEntry][uiMaxSelector] - + s_aafModifierTable[uiTableEntry][uiMinSelector]; + + float fCenterRatio = fTableEntryCenter / fTableEntryRange; + + float fCenter = fMinGrn + fCenterRatio*fGrnRange; + fCenter = roundf(255.0f * fCenter) / 255.0f; + + float fMinBase = fCenter - (a_fBaseRadius / 255.0f); + if (fMinBase < 0.0f) + { + fMinBase = 0.0f; + } + + float fMaxBase = fCenter + (a_fBaseRadius / 255.0f); + if (fMaxBase > 1.0f) + { + fMaxBase = 1.0f; + } + + for (float fBase = fMinBase; fBase <= fMaxBase; fBase += (0.999999f / 255.0f)) + { + float fRangeMultiplier = roundf(fGrnRange / fTableEntryRange); + + float fMinMultiplier = fRangeMultiplier - a_fMultiplierRadius; + if (fMinMultiplier < 1.0f) + { + fMinMultiplier = 0.0f; + } + else if (fMinMultiplier > 15.0f) + { + fMinMultiplier = 15.0f; + } + + float fMaxMultiplier = fRangeMultiplier + a_fMultiplierRadius; + if (fMaxMultiplier < 1.0f) + { + fMaxMultiplier = 1.0f; + } + else if (fMaxMultiplier > 15.0f) + { + fMaxMultiplier = 15.0f; + } + + for (float fMultiplier = fMinMultiplier; fMultiplier <= fMaxMultiplier; fMultiplier += 1.0f) + { + // find best selector for each pixel + unsigned int auiBestSelectors[PIXELS]; + float afBestGrnError[PIXELS]; + float 
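// Illustrative sketch (not upstream code): how CalculateR11()/CalculateG11() above
// seed the search for a given modifier-table row. The channel's [min, max] range is
// aligned with the row's [minSelector, maxSelector] span to pick a starting base and
// multiplier, which are then refined within a_fBaseRadius / a_fMultiplierRadius.
// The struct and function names are assumptions for illustration.
#include <math.h>

struct R11Seed { float fBase; float fMultiplier; };

static R11Seed SeedFromTableRow(float fMinValue, float fMaxValue,
                                float fRowMin, float fRowMax)
{
    float fValueRange = fMaxValue - fMinValue;
    float fRowRange = fRowMax - fRowMin;

    // place the base so that the most negative modifier reaches fMinValue
    float fCenterRatio = -fRowMin / fRowRange;
    float fCenter = fMinValue + fCenterRatio * fValueRange;
    fCenter = roundf(255.0f * fCenter) / 255.0f;   // snap to the 8-bit base grid

    // scale the row so its span roughly covers the value range
    float fMultiplier = roundf(fValueRange / fRowRange);

    R11Seed seed = { fCenter, fMultiplier };
    return seed;
}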
afBestPixelGrn[PIXELS]; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + float fBestPixelGrnError = FLT_MAX; + + for (unsigned int uiSelector = 0; uiSelector < SELECTORS; uiSelector++) + { + //DecodePixelRed is not red channel specific + float fPixelGrn = DecodePixelRed(fBase * 255.0f, fMultiplier, uiTableEntry, uiSelector); + + ColorFloatRGBA frgba(m_pafrgbaSource[uiPixel].fR, fPixelGrn, 0.0f, 1.0f); + + float fPixelGrnError = CalcPixelError(frgba, 1.0f, m_pafrgbaSource[uiPixel]); + + if (fPixelGrnError < fBestPixelGrnError) + { + fBestPixelGrnError = fPixelGrnError; + auiBestSelectors[uiPixel] = uiSelector; + afBestGrnError[uiPixel] = fBestPixelGrnError; + afBestPixelGrn[uiPixel] = fPixelGrn; + } + } + } + float fBlockError = 0.0f; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + fBlockError += afBestGrnError[uiPixel]; + } + + if (fBlockError < m_fGrnBlockError) + { + m_fGrnBlockError = fBlockError; + + if (m_pblockParent->GetImageSource()->GetFormat() == Image::Format::RG11) + { + m_fGrnBase = 255.0f * fBase; + } + else if (m_pblockParent->GetImageSource()->GetFormat() == Image::Format::SIGNED_RG11) + { + m_fGrnBase = (fBase * 255) - 128; + } + else + { + assert(0); + } + m_fGrnMultiplier = fMultiplier; + m_uiGrnModifierTableIndex = uiTableEntry; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_auiGrnSelectors[uiPixel] = auiBestSelectors[uiPixel]; + m_afrgbaDecodedColors[uiPixel].fG = afBestPixelGrn[uiPixel]; + m_afDecodedAlphas[uiPixel] = 1.0f; + } + } + } + } + + } + } + } + + // ---------------------------------------------------------------------------------------------------- + // set the encoding bits based on encoding state + // + void Block4x4Encoding_RG11::SetEncodingBits(void) + { + unsigned long long int ulliSelectorBitsR = 0; + unsigned long long int ulliSelectorBitsG = 0; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + unsigned int uiShift = 45 - (3 * uiPixel); + ulliSelectorBitsR |= ((unsigned long long int)m_auiRedSelectors[uiPixel]) << uiShift; + ulliSelectorBitsG |= ((unsigned long long int)m_auiGrnSelectors[uiPixel]) << uiShift; + } + if (m_pblockParent->GetImageSource()->GetFormat() == Image::Format::RG11) + { + m_pencodingbitsRG11->data.baseR = (unsigned char)roundf(m_fRedBase); + } + else if (m_pblockParent->GetImageSource()->GetFormat() == Image::Format::SIGNED_RG11) + { + m_pencodingbitsRG11->data.baseR = (signed char)roundf(m_fRedBase); + } + else + { + assert(0); + } + m_pencodingbitsRG11->data.tableIndexR = m_uiRedModifierTableIndex; + m_pencodingbitsRG11->data.multiplierR = (unsigned char)roundf(m_fRedMultiplier); + + m_pencodingbitsRG11->data.selectorsR0 = ulliSelectorBitsR >> 40; + m_pencodingbitsRG11->data.selectorsR1 = ulliSelectorBitsR >> 32; + m_pencodingbitsRG11->data.selectorsR2 = ulliSelectorBitsR >> 24; + m_pencodingbitsRG11->data.selectorsR3 = ulliSelectorBitsR >> 16; + m_pencodingbitsRG11->data.selectorsR4 = ulliSelectorBitsR >> 8; + m_pencodingbitsRG11->data.selectorsR5 = ulliSelectorBitsR; + + if (m_pblockParent->GetImageSource()->GetFormat() == Image::Format::RG11) + { + m_pencodingbitsRG11->data.baseG = (unsigned char)roundf(m_fGrnBase); + } + else if (m_pblockParent->GetImageSource()->GetFormat() == Image::Format::SIGNED_RG11) + { + m_pencodingbitsRG11->data.baseG = (signed char)roundf(m_fGrnBase); + } + else + { + assert(0); + } + m_pencodingbitsRG11->data.tableIndexG = m_uiGrnModifierTableIndex; + m_pencodingbitsRG11->data.multiplierG = (unsigned 
char)roundf(m_fGrnMultiplier); + + m_pencodingbitsRG11->data.selectorsG0 = ulliSelectorBitsG >> 40; + m_pencodingbitsRG11->data.selectorsG1 = ulliSelectorBitsG >> 32; + m_pencodingbitsRG11->data.selectorsG2 = ulliSelectorBitsG >> 24; + m_pencodingbitsRG11->data.selectorsG3 = ulliSelectorBitsG >> 16; + m_pencodingbitsRG11->data.selectorsG4 = ulliSelectorBitsG >> 8; + m_pencodingbitsRG11->data.selectorsG5 = ulliSelectorBitsG; + + } + + // ---------------------------------------------------------------------------------------------------- + // +} diff --git a/thirdparty/etc2comp/EtcBlock4x4Encoding_RG11.h b/thirdparty/etc2comp/EtcBlock4x4Encoding_RG11.h new file mode 100644 index 0000000000..d4993b8c5f --- /dev/null +++ b/thirdparty/etc2comp/EtcBlock4x4Encoding_RG11.h @@ -0,0 +1,86 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "EtcBlock4x4Encoding_RGB8.h" +#include "EtcBlock4x4Encoding_R11.h" + +namespace Etc +{ + class Block4x4EncodingBits_RG11; + + // ################################################################################ + // Block4x4Encoding_RG11 + // ################################################################################ + + class Block4x4Encoding_RG11 : public Block4x4Encoding_R11 + { + float m_fGrnBase; + float m_fGrnMultiplier; + float m_fGrnBlockError; + unsigned int m_auiGrnSelectors[PIXELS]; + unsigned int m_uiGrnModifierTableIndex; + public: + + Block4x4Encoding_RG11(void); + virtual ~Block4x4Encoding_RG11(void); + + virtual void InitFromSource(Block4x4 *a_pblockParent, + ColorFloatRGBA *a_pafrgbaSource, + + unsigned char *a_paucEncodingBits, ErrorMetric a_errormetric); + + virtual void InitFromEncodingBits(Block4x4 *a_pblockParent, + unsigned char *a_paucEncodingBits, + ColorFloatRGBA *a_pafrgbaSource, + + ErrorMetric a_errormetric); + + virtual void PerformIteration(float a_fEffort); + + virtual void SetEncodingBits(void); + + Block4x4EncodingBits_RG11 *m_pencodingbitsRG11; + + void CalculateG11(unsigned int a_uiSelectorsUsed, float a_fBaseRadius, float a_fMultiplierRadius); + + inline float GetGrnBase(void) const + { + return m_fGrnBase; + } + + inline float GetGrnMultiplier(void) const + { + return m_fGrnMultiplier; + } + + inline int GetGrnTableIndex(void) const + { + return m_uiGrnModifierTableIndex; + } + + inline const unsigned int * GetGrnSelectors(void) const + { + return m_auiGrnSelectors; + } + + }; + + // ---------------------------------------------------------------------------------------------------- + // + +} // namespace Etc diff --git a/thirdparty/etc2comp/EtcBlock4x4Encoding_RGB8.cpp b/thirdparty/etc2comp/EtcBlock4x4Encoding_RGB8.cpp new file mode 100644 index 0000000000..5656556db9 --- /dev/null +++ b/thirdparty/etc2comp/EtcBlock4x4Encoding_RGB8.cpp @@ -0,0 +1,1730 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* +EtcBlock4x4Encoding_RGB8.cpp + +Block4x4Encoding_RGB8 is the encoder to use for the ETC2 extensions when targetting file format RGB8. +This encoder is also used for the ETC2 subset of file format RGBA8. + +Block4x4Encoding_ETC1 encodes the ETC1 subset of RGB8. + +*/ + +#include "EtcConfig.h" +#include "EtcBlock4x4Encoding_RGB8.h" + +#include "EtcBlock4x4EncodingBits.h" +#include "EtcBlock4x4.h" +#include "EtcMath.h" + +#include <stdio.h> +#include <string.h> +#include <assert.h> +#include <float.h> +#include <limits> + +namespace Etc +{ + float Block4x4Encoding_RGB8::s_afTHDistanceTable[TH_DISTANCES] = + { + 3.0f / 255.0f, + 6.0f / 255.0f, + 11.0f / 255.0f, + 16.0f / 255.0f, + 23.0f / 255.0f, + 32.0f / 255.0f, + 41.0f / 255.0f, + 64.0f / 255.0f + }; + + // ---------------------------------------------------------------------------------------------------- + // + Block4x4Encoding_RGB8::Block4x4Encoding_RGB8(void) + { + + m_pencodingbitsRGB8 = nullptr; + + } + + Block4x4Encoding_RGB8::~Block4x4Encoding_RGB8(void) {} + // ---------------------------------------------------------------------------------------------------- + // initialization from the encoding bits of a previous encoding + // a_pblockParent points to the block associated with this encoding + // a_errormetric is used to choose the best encoding + // a_pafrgbaSource points to a 4x4 block subset of the source image + // a_paucEncodingBits points to the final encoding bits of a previous encoding + // + void Block4x4Encoding_RGB8::InitFromEncodingBits(Block4x4 *a_pblockParent, + unsigned char *a_paucEncodingBits, + ColorFloatRGBA *a_pafrgbaSource, + ErrorMetric a_errormetric) + { + + // handle ETC1 modes + Block4x4Encoding_ETC1::InitFromEncodingBits(a_pblockParent, + a_paucEncodingBits, a_pafrgbaSource,a_errormetric); + + m_pencodingbitsRGB8 = (Block4x4EncodingBits_RGB8 *)a_paucEncodingBits; + + // detect if there is a T, H or Planar mode present + if (m_pencodingbitsRGB8->differential.diff) + { + int iRed1 = (int)m_pencodingbitsRGB8->differential.red1; + int iDRed2 = m_pencodingbitsRGB8->differential.dred2; + int iRed2 = iRed1 + iDRed2; + + int iGreen1 = (int)m_pencodingbitsRGB8->differential.green1; + int iDGreen2 = m_pencodingbitsRGB8->differential.dgreen2; + int iGreen2 = iGreen1 + iDGreen2; + + int iBlue1 = (int)m_pencodingbitsRGB8->differential.blue1; + int iDBlue2 = m_pencodingbitsRGB8->differential.dblue2; + int iBlue2 = iBlue1 + iDBlue2; + + if (iRed2 < 0 || iRed2 > 31) + { + InitFromEncodingBits_T(); + } + else if (iGreen2 < 0 || iGreen2 > 31) + { + InitFromEncodingBits_H(); + } + else if (iBlue2 < 0 || iBlue2 > 31) + { + InitFromEncodingBits_Planar(); + } + } + + } + + // ---------------------------------------------------------------------------------------------------- + // initialization from the encoding bits of a previous encoding if T mode is detected + // + void Block4x4Encoding_RGB8::InitFromEncodingBits_T(void) + { + + m_mode = MODE_T; + + unsigned char ucRed1 = (unsigned char)((m_pencodingbitsRGB8->t.red1a << 2) + + m_pencodingbitsRGB8->t.red1b); + unsigned char ucGreen1 = 
m_pencodingbitsRGB8->t.green1; + unsigned char ucBlue1 = m_pencodingbitsRGB8->t.blue1; + + unsigned char ucRed2 = m_pencodingbitsRGB8->t.red2; + unsigned char ucGreen2 = m_pencodingbitsRGB8->t.green2; + unsigned char ucBlue2 = m_pencodingbitsRGB8->t.blue2; + + m_frgbaColor1 = ColorFloatRGBA::ConvertFromRGB4(ucRed1, ucGreen1, ucBlue1); + m_frgbaColor2 = ColorFloatRGBA::ConvertFromRGB4(ucRed2, ucGreen2, ucBlue2); + + m_uiCW1 = (m_pencodingbitsRGB8->t.da << 1) + m_pencodingbitsRGB8->t.db; + + Block4x4Encoding_ETC1::InitFromEncodingBits_Selectors(); + + DecodePixels_T(); + + CalcBlockError(); + + } + + // ---------------------------------------------------------------------------------------------------- + // initialization from the encoding bits of a previous encoding if H mode is detected + // + void Block4x4Encoding_RGB8::InitFromEncodingBits_H(void) + { + + m_mode = MODE_H; + + unsigned char ucRed1 = m_pencodingbitsRGB8->h.red1; + unsigned char ucGreen1 = (unsigned char)((m_pencodingbitsRGB8->h.green1a << 1) + + m_pencodingbitsRGB8->h.green1b); + unsigned char ucBlue1 = (unsigned char)((m_pencodingbitsRGB8->h.blue1a << 3) + + (m_pencodingbitsRGB8->h.blue1b << 1) + + m_pencodingbitsRGB8->h.blue1c); + + unsigned char ucRed2 = m_pencodingbitsRGB8->h.red2; + unsigned char ucGreen2 = (unsigned char)((m_pencodingbitsRGB8->h.green2a << 1) + + m_pencodingbitsRGB8->h.green2b); + unsigned char ucBlue2 = m_pencodingbitsRGB8->h.blue2; + + m_frgbaColor1 = ColorFloatRGBA::ConvertFromRGB4(ucRed1, ucGreen1, ucBlue1); + m_frgbaColor2 = ColorFloatRGBA::ConvertFromRGB4(ucRed2, ucGreen2, ucBlue2); + + // used to determine the LSB of the CW + unsigned int uiRGB1 = (unsigned int)(((int)ucRed1 << 16) + ((int)ucGreen1 << 8) + (int)ucBlue1); + unsigned int uiRGB2 = (unsigned int)(((int)ucRed2 << 16) + ((int)ucGreen2 << 8) + (int)ucBlue2); + + m_uiCW1 = (m_pencodingbitsRGB8->h.da << 2) + (m_pencodingbitsRGB8->h.db << 1); + if (uiRGB1 >= uiRGB2) + { + m_uiCW1++; + } + + Block4x4Encoding_ETC1::InitFromEncodingBits_Selectors(); + + DecodePixels_H(); + + CalcBlockError(); + + } + + // ---------------------------------------------------------------------------------------------------- + // initialization from the encoding bits of a previous encoding if Planar mode is detected + // + void Block4x4Encoding_RGB8::InitFromEncodingBits_Planar(void) + { + + m_mode = MODE_PLANAR; + + unsigned char ucOriginRed = m_pencodingbitsRGB8->planar.originRed; + unsigned char ucOriginGreen = (unsigned char)((m_pencodingbitsRGB8->planar.originGreen1 << 6) + + m_pencodingbitsRGB8->planar.originGreen2); + unsigned char ucOriginBlue = (unsigned char)((m_pencodingbitsRGB8->planar.originBlue1 << 5) + + (m_pencodingbitsRGB8->planar.originBlue2 << 3) + + (m_pencodingbitsRGB8->planar.originBlue3 << 1) + + m_pencodingbitsRGB8->planar.originBlue4); + + unsigned char ucHorizRed = (unsigned char)((m_pencodingbitsRGB8->planar.horizRed1 << 1) + + m_pencodingbitsRGB8->planar.horizRed2); + unsigned char ucHorizGreen = m_pencodingbitsRGB8->planar.horizGreen; + unsigned char ucHorizBlue = (unsigned char)((m_pencodingbitsRGB8->planar.horizBlue1 << 5) + + m_pencodingbitsRGB8->planar.horizBlue2); + + unsigned char ucVertRed = (unsigned char)((m_pencodingbitsRGB8->planar.vertRed1 << 3) + + m_pencodingbitsRGB8->planar.vertRed2); + unsigned char ucVertGreen = (unsigned char)((m_pencodingbitsRGB8->planar.vertGreen1 << 2) + + m_pencodingbitsRGB8->planar.vertGreen2); + unsigned char ucVertBlue = m_pencodingbitsRGB8->planar.vertBlue; + + m_frgbaColor1 = 
ColorFloatRGBA::ConvertFromR6G7B6(ucOriginRed, ucOriginGreen, ucOriginBlue); + m_frgbaColor2 = ColorFloatRGBA::ConvertFromR6G7B6(ucHorizRed, ucHorizGreen, ucHorizBlue); + m_frgbaColor3 = ColorFloatRGBA::ConvertFromR6G7B6(ucVertRed, ucVertGreen, ucVertBlue); + + DecodePixels_Planar(); + + CalcBlockError(); + + } + + // ---------------------------------------------------------------------------------------------------- + // perform a single encoding iteration + // replace the encoding if a better encoding was found + // subsequent iterations generally take longer for each iteration + // set m_boolDone if encoding is perfect or encoding is finished based on a_fEffort + // + void Block4x4Encoding_RGB8::PerformIteration(float a_fEffort) + { + assert(!m_boolDone); + + switch (m_uiEncodingIterations) + { + case 0: + Block4x4Encoding_ETC1::PerformFirstIteration(); + if (m_boolDone) + { + break; + } + TryPlanar(0); + SetDoneIfPerfect(); + if (m_boolDone) + { + break; + } + TryTAndH(0); + break; + + case 1: + Block4x4Encoding_ETC1::TryDifferential(m_boolMostLikelyFlip, 1, 0, 0); + break; + + case 2: + Block4x4Encoding_ETC1::TryIndividual(m_boolMostLikelyFlip, 1); + break; + + case 3: + Block4x4Encoding_ETC1::TryDifferential(!m_boolMostLikelyFlip, 1, 0, 0); + break; + + case 4: + Block4x4Encoding_ETC1::TryIndividual(!m_boolMostLikelyFlip, 1); + break; + + case 5: + TryPlanar(1); + if (a_fEffort <= 49.5f) + { + m_boolDone = true; + } + break; + + case 6: + TryTAndH(1); + if (a_fEffort <= 59.5f) + { + m_boolDone = true; + } + break; + + case 7: + Block4x4Encoding_ETC1::TryDegenerates1(); + if (a_fEffort <= 69.5f) + { + m_boolDone = true; + } + break; + + case 8: + Block4x4Encoding_ETC1::TryDegenerates2(); + if (a_fEffort <= 79.5f) + { + m_boolDone = true; + } + break; + + case 9: + Block4x4Encoding_ETC1::TryDegenerates3(); + if (a_fEffort <= 89.5f) + { + m_boolDone = true; + } + break; + + case 10: + Block4x4Encoding_ETC1::TryDegenerates4(); + m_boolDone = true; + break; + + default: + assert(0); + break; + } + + m_uiEncodingIterations++; + + SetDoneIfPerfect(); + } + + // ---------------------------------------------------------------------------------------------------- + // try encoding in Planar mode + // save this encoding if it improves the error + // + void Block4x4Encoding_RGB8::TryPlanar(unsigned int a_uiRadius) + { + Block4x4Encoding_RGB8 encodingTry = *this; + + // init "try" + { + encodingTry.m_mode = MODE_PLANAR; + encodingTry.m_boolDiff = true; + encodingTry.m_boolFlip = false; + } + + encodingTry.CalculatePlanarCornerColors(); + + encodingTry.DecodePixels_Planar(); + + encodingTry.CalcBlockError(); + + if (a_uiRadius > 0) + { + encodingTry.TwiddlePlanar(); + } + + if (encodingTry.m_fError < m_fError) + { + m_mode = MODE_PLANAR; + m_boolDiff = true; + m_boolFlip = false; + m_frgbaColor1 = encodingTry.m_frgbaColor1; + m_frgbaColor2 = encodingTry.m_frgbaColor2; + m_frgbaColor3 = encodingTry.m_frgbaColor3; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_afrgbaDecodedColors[uiPixel] = encodingTry.m_afrgbaDecodedColors[uiPixel]; + } + + m_fError = encodingTry.m_fError; + } + + } + + // ---------------------------------------------------------------------------------------------------- + // try encoding in T mode or H mode + // save this encoding if it improves the error + // + void Block4x4Encoding_RGB8::TryTAndH(unsigned int a_uiRadius) + { + + CalculateBaseColorsForTAndH(); + + TryT(a_uiRadius); + + TryH(a_uiRadius); + + } + + // 
---------------------------------------------------------------------------------------------------- + // calculate original values for base colors + // store them in m_frgbaOriginalColor1 and m_frgbaOriginalColor2 + // + void Block4x4Encoding_RGB8::CalculateBaseColorsForTAndH(void) + { + + bool boolRGBX = m_pblockParent->GetImageSource()->GetErrorMetric() == ErrorMetric::RGBX; + + ColorFloatRGBA frgbaBlockAverage = (m_frgbaSourceAverageLeft + m_frgbaSourceAverageRight) * 0.5f; + + // find pixel farthest from average gray line + unsigned int uiFarthestPixel = 0; + float fFarthestGrayDistance2 = 0.0f; + unsigned int uiTransparentPixels = 0; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + // don't count transparent + if (m_pafrgbaSource[uiPixel].fA == 0.0f && !boolRGBX) + { + uiTransparentPixels++; + } + else + { + float fGrayDistance2 = CalcGrayDistance2(m_pafrgbaSource[uiPixel], frgbaBlockAverage); + + if (fGrayDistance2 > fFarthestGrayDistance2) + { + uiFarthestPixel = uiPixel; + fFarthestGrayDistance2 = fGrayDistance2; + } + } + } + // a transparent block should not reach this method + assert(uiTransparentPixels < PIXELS); + + // set the original base colors to: + // half way to the farthest pixel and + // the mirror color on the other side of the average + ColorFloatRGBA frgbaOffset = (m_pafrgbaSource[uiFarthestPixel] - frgbaBlockAverage) * 0.5f; + m_frgbaOriginalColor1_TAndH = (frgbaBlockAverage + frgbaOffset).QuantizeR4G4B4(); + m_frgbaOriginalColor2_TAndH = (frgbaBlockAverage - frgbaOffset).ClampRGB().QuantizeR4G4B4(); // the "other side" might be out of range + + // move base colors to find best fit + for (unsigned int uiIteration = 0; uiIteration < 10; uiIteration++) + { + // find the center of pixels closest to each color + float fPixelsCloserToColor1 = 0.0f; + ColorFloatRGBA frgbSumPixelsCloserToColor1; + float fPixelsCloserToColor2 = 0.0f; + ColorFloatRGBA frgbSumPixelsCloserToColor2; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + // don't count transparent pixels + if (m_pafrgbaSource[uiPixel].fA == 0.0f) + { + continue; + } + + float fGrayDistance2ToColor1 = CalcGrayDistance2(m_pafrgbaSource[uiPixel], m_frgbaOriginalColor1_TAndH); + float fGrayDistance2ToColor2 = CalcGrayDistance2(m_pafrgbaSource[uiPixel], m_frgbaOriginalColor2_TAndH); + + ColorFloatRGBA frgbaAlphaWeightedSource = m_pafrgbaSource[uiPixel] * m_pafrgbaSource[uiPixel].fA; + + if (fGrayDistance2ToColor1 <= fGrayDistance2ToColor2) + { + fPixelsCloserToColor1 += m_pafrgbaSource[uiPixel].fA; + frgbSumPixelsCloserToColor1 = frgbSumPixelsCloserToColor1 + frgbaAlphaWeightedSource; + } + else + { + fPixelsCloserToColor2 += m_pafrgbaSource[uiPixel].fA; + frgbSumPixelsCloserToColor2 = frgbSumPixelsCloserToColor2 + frgbaAlphaWeightedSource; + } + } + if (fPixelsCloserToColor1 == 0.0f || fPixelsCloserToColor2 == 0.0f) + { + break; + } + + ColorFloatRGBA frgbAvgColor1Pixels = (frgbSumPixelsCloserToColor1 * (1.0f / fPixelsCloserToColor1)).QuantizeR4G4B4(); + ColorFloatRGBA frgbAvgColor2Pixels = (frgbSumPixelsCloserToColor2 * (1.0f / fPixelsCloserToColor2)).QuantizeR4G4B4(); + + if (frgbAvgColor1Pixels.fR == m_frgbaOriginalColor1_TAndH.fR && + frgbAvgColor1Pixels.fG == m_frgbaOriginalColor1_TAndH.fG && + frgbAvgColor1Pixels.fB == m_frgbaOriginalColor1_TAndH.fB && + frgbAvgColor2Pixels.fR == m_frgbaOriginalColor2_TAndH.fR && + frgbAvgColor2Pixels.fG == m_frgbaOriginalColor2_TAndH.fG && + frgbAvgColor2Pixels.fB == m_frgbaOriginalColor2_TAndH.fB) + { + break; + } + + 
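			// Note: this refinement loop is essentially a two-cluster k-means (Lloyd) pass.
			// Each non-transparent pixel is assigned to whichever base color is nearer by
			// CalcGrayDistance2, the alpha-weighted centroid of each cluster is recomputed
			// and quantized to RGB 4:4:4, and the loop stops once the centroids no longer
			// move, a cluster empties, or 10 passes have run. The statements below adopt
			// the new centroids for the next pass.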
m_frgbaOriginalColor1_TAndH = frgbAvgColor1Pixels; + m_frgbaOriginalColor2_TAndH = frgbAvgColor2Pixels; + } + + } + + // ---------------------------------------------------------------------------------------------------- + // try encoding in T mode + // save this encoding if it improves the error + // + // since pixels that use base color1 don't use the distance table, color1 and color2 can be twiddled independently + // better encoding can be found if TWIDDLE_RADIUS is set to 2, but it will be much slower + // + void Block4x4Encoding_RGB8::TryT(unsigned int a_uiRadius) + { + Block4x4Encoding_RGB8 encodingTry = *this; + + // init "try" + { + encodingTry.m_mode = MODE_T; + encodingTry.m_boolDiff = true; + encodingTry.m_boolFlip = false; + encodingTry.m_fError = FLT_MAX; + } + + int iColor1Red = m_frgbaOriginalColor1_TAndH.IntRed(15.0f); + int iColor1Green = m_frgbaOriginalColor1_TAndH.IntGreen(15.0f); + int iColor1Blue = m_frgbaOriginalColor1_TAndH.IntBlue(15.0f); + + int iMinRed1 = iColor1Red - (int)a_uiRadius; + if (iMinRed1 < 0) + { + iMinRed1 = 0; + } + int iMaxRed1 = iColor1Red + (int)a_uiRadius; + if (iMaxRed1 > 15) + { + iMinRed1 = 15; + } + + int iMinGreen1 = iColor1Green - (int)a_uiRadius; + if (iMinGreen1 < 0) + { + iMinGreen1 = 0; + } + int iMaxGreen1 = iColor1Green + (int)a_uiRadius; + if (iMaxGreen1 > 15) + { + iMinGreen1 = 15; + } + + int iMinBlue1 = iColor1Blue - (int)a_uiRadius; + if (iMinBlue1 < 0) + { + iMinBlue1 = 0; + } + int iMaxBlue1 = iColor1Blue + (int)a_uiRadius; + if (iMaxBlue1 > 15) + { + iMinBlue1 = 15; + } + + int iColor2Red = m_frgbaOriginalColor2_TAndH.IntRed(15.0f); + int iColor2Green = m_frgbaOriginalColor2_TAndH.IntGreen(15.0f); + int iColor2Blue = m_frgbaOriginalColor2_TAndH.IntBlue(15.0f); + + int iMinRed2 = iColor2Red - (int)a_uiRadius; + if (iMinRed2 < 0) + { + iMinRed2 = 0; + } + int iMaxRed2 = iColor2Red + (int)a_uiRadius; + if (iMaxRed2 > 15) + { + iMinRed2 = 15; + } + + int iMinGreen2 = iColor2Green - (int)a_uiRadius; + if (iMinGreen2 < 0) + { + iMinGreen2 = 0; + } + int iMaxGreen2 = iColor2Green + (int)a_uiRadius; + if (iMaxGreen2 > 15) + { + iMinGreen2 = 15; + } + + int iMinBlue2 = iColor2Blue - (int)a_uiRadius; + if (iMinBlue2 < 0) + { + iMinBlue2 = 0; + } + int iMaxBlue2 = iColor2Blue + (int)a_uiRadius; + if (iMaxBlue2 > 15) + { + iMinBlue2 = 15; + } + + for (unsigned int uiDistance = 0; uiDistance < TH_DISTANCES; uiDistance++) + { + encodingTry.m_uiCW1 = uiDistance; + + // twiddle m_frgbaOriginalColor2_TAndH + // twiddle color2 first, since it affects 3 selectors, while color1 only affects one selector + // + for (int iRed2 = iMinRed2; iRed2 <= iMaxRed2; iRed2++) + { + for (int iGreen2 = iMinGreen2; iGreen2 <= iMaxGreen2; iGreen2++) + { + for (int iBlue2 = iMinBlue2; iBlue2 <= iMaxBlue2; iBlue2++) + { + for (unsigned int uiBaseColorSwaps = 0; uiBaseColorSwaps < 2; uiBaseColorSwaps++) + { + if (uiBaseColorSwaps == 0) + { + encodingTry.m_frgbaColor1 = m_frgbaOriginalColor1_TAndH; + encodingTry.m_frgbaColor2 = ColorFloatRGBA::ConvertFromRGB4((unsigned char)iRed2, (unsigned char)iGreen2, (unsigned char)iBlue2); + } + else + { + encodingTry.m_frgbaColor1 = ColorFloatRGBA::ConvertFromRGB4((unsigned char)iRed2, (unsigned char)iGreen2, (unsigned char)iBlue2); + encodingTry.m_frgbaColor2 = m_frgbaOriginalColor1_TAndH; + } + + encodingTry.TryT_BestSelectorCombination(); + + if (encodingTry.m_fError < m_fError) + { + m_mode = encodingTry.m_mode; + m_boolDiff = encodingTry.m_boolDiff; + m_boolFlip = encodingTry.m_boolFlip; + + m_frgbaColor1 = 
encodingTry.m_frgbaColor1; + m_frgbaColor2 = encodingTry.m_frgbaColor2; + m_uiCW1 = encodingTry.m_uiCW1; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_auiSelectors[uiPixel] = encodingTry.m_auiSelectors[uiPixel]; + m_afrgbaDecodedColors[uiPixel] = encodingTry.m_afrgbaDecodedColors[uiPixel]; + } + + m_fError = encodingTry.m_fError; + } + } + } + } + } + + // twiddle m_frgbaOriginalColor1_TAndH + for (int iRed1 = iMinRed1; iRed1 <= iMaxRed1; iRed1++) + { + for (int iGreen1 = iMinGreen1; iGreen1 <= iMaxGreen1; iGreen1++) + { + for (int iBlue1 = iMinBlue1; iBlue1 <= iMaxBlue1; iBlue1++) + { + for (unsigned int uiBaseColorSwaps = 0; uiBaseColorSwaps < 2; uiBaseColorSwaps++) + { + if (uiBaseColorSwaps == 0) + { + encodingTry.m_frgbaColor1 = ColorFloatRGBA::ConvertFromRGB4((unsigned char)iRed1, (unsigned char)iGreen1, (unsigned char)iBlue1); + encodingTry.m_frgbaColor2 = m_frgbaOriginalColor2_TAndH; + } + else + { + encodingTry.m_frgbaColor1 = m_frgbaOriginalColor2_TAndH; + encodingTry.m_frgbaColor2 = ColorFloatRGBA::ConvertFromRGB4((unsigned char)iRed1, (unsigned char)iGreen1, (unsigned char)iBlue1); + } + + encodingTry.TryT_BestSelectorCombination(); + + if (encodingTry.m_fError < m_fError) + { + m_mode = encodingTry.m_mode; + m_boolDiff = encodingTry.m_boolDiff; + m_boolFlip = encodingTry.m_boolFlip; + + m_frgbaColor1 = encodingTry.m_frgbaColor1; + m_frgbaColor2 = encodingTry.m_frgbaColor2; + m_uiCW1 = encodingTry.m_uiCW1; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_auiSelectors[uiPixel] = encodingTry.m_auiSelectors[uiPixel]; + m_afrgbaDecodedColors[uiPixel] = encodingTry.m_afrgbaDecodedColors[uiPixel]; + } + + m_fError = encodingTry.m_fError; + } + } + } + } + } + + } + + } + + // ---------------------------------------------------------------------------------------------------- + // find best selector combination for TryT + // called on an encodingTry + // + void Block4x4Encoding_RGB8::TryT_BestSelectorCombination(void) + { + + float fDistance = s_afTHDistanceTable[m_uiCW1]; + + unsigned int auiBestPixelSelectors[PIXELS]; + float afBestPixelErrors[PIXELS] = { FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, + FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX }; + ColorFloatRGBA afrgbaBestDecodedPixels[PIXELS]; + ColorFloatRGBA afrgbaDecodedPixel[SELECTORS]; + + assert(SELECTORS == 4); + afrgbaDecodedPixel[0] = m_frgbaColor1; + afrgbaDecodedPixel[1] = (m_frgbaColor2 + fDistance).ClampRGB(); + afrgbaDecodedPixel[2] = m_frgbaColor2; + afrgbaDecodedPixel[3] = (m_frgbaColor2 - fDistance).ClampRGB(); + + // try each selector + for (unsigned int uiSelector = 0; uiSelector < SELECTORS; uiSelector++) + { + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + + float fPixelError = CalcPixelError(afrgbaDecodedPixel[uiSelector], m_afDecodedAlphas[uiPixel], + m_pafrgbaSource[uiPixel]); + + if (fPixelError < afBestPixelErrors[uiPixel]) + { + afBestPixelErrors[uiPixel] = fPixelError; + auiBestPixelSelectors[uiPixel] = uiSelector; + afrgbaBestDecodedPixels[uiPixel] = afrgbaDecodedPixel[uiSelector]; + } + } + } + + + // add up all of the pixel errors + float fBlockError = 0.0f; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + fBlockError += afBestPixelErrors[uiPixel]; + } + + if (fBlockError < m_fError) + { + m_fError = fBlockError; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_auiSelectors[uiPixel] = auiBestPixelSelectors[uiPixel]; + 
m_afrgbaDecodedColors[uiPixel] = afrgbaBestDecodedPixels[uiPixel]; + } + } + + } + + // ---------------------------------------------------------------------------------------------------- + // try encoding in T mode + // save this encoding if it improves the error + // + // since all pixels use the distance table, color1 and color2 can NOT be twiddled independently + // TWIDDLE_RADIUS of 2 is WAY too slow + // + void Block4x4Encoding_RGB8::TryH(unsigned int a_uiRadius) + { + Block4x4Encoding_RGB8 encodingTry = *this; + + // init "try" + { + encodingTry.m_mode = MODE_H; + encodingTry.m_boolDiff = true; + encodingTry.m_boolFlip = false; + encodingTry.m_fError = FLT_MAX; + } + + int iColor1Red = m_frgbaOriginalColor1_TAndH.IntRed(15.0f); + int iColor1Green = m_frgbaOriginalColor1_TAndH.IntGreen(15.0f); + int iColor1Blue = m_frgbaOriginalColor1_TAndH.IntBlue(15.0f); + + int iMinRed1 = iColor1Red - (int)a_uiRadius; + if (iMinRed1 < 0) + { + iMinRed1 = 0; + } + int iMaxRed1 = iColor1Red + (int)a_uiRadius; + if (iMaxRed1 > 15) + { + iMinRed1 = 15; + } + + int iMinGreen1 = iColor1Green - (int)a_uiRadius; + if (iMinGreen1 < 0) + { + iMinGreen1 = 0; + } + int iMaxGreen1 = iColor1Green + (int)a_uiRadius; + if (iMaxGreen1 > 15) + { + iMinGreen1 = 15; + } + + int iMinBlue1 = iColor1Blue - (int)a_uiRadius; + if (iMinBlue1 < 0) + { + iMinBlue1 = 0; + } + int iMaxBlue1 = iColor1Blue + (int)a_uiRadius; + if (iMaxBlue1 > 15) + { + iMinBlue1 = 15; + } + + int iColor2Red = m_frgbaOriginalColor2_TAndH.IntRed(15.0f); + int iColor2Green = m_frgbaOriginalColor2_TAndH.IntGreen(15.0f); + int iColor2Blue = m_frgbaOriginalColor2_TAndH.IntBlue(15.0f); + + int iMinRed2 = iColor2Red - (int)a_uiRadius; + if (iMinRed2 < 0) + { + iMinRed2 = 0; + } + int iMaxRed2 = iColor2Red + (int)a_uiRadius; + if (iMaxRed2 > 15) + { + iMinRed2 = 15; + } + + int iMinGreen2 = iColor2Green - (int)a_uiRadius; + if (iMinGreen2 < 0) + { + iMinGreen2 = 0; + } + int iMaxGreen2 = iColor2Green + (int)a_uiRadius; + if (iMaxGreen2 > 15) + { + iMinGreen2 = 15; + } + + int iMinBlue2 = iColor2Blue - (int)a_uiRadius; + if (iMinBlue2 < 0) + { + iMinBlue2 = 0; + } + int iMaxBlue2 = iColor2Blue + (int)a_uiRadius; + if (iMaxBlue2 > 15) + { + iMinBlue2 = 15; + } + + for (unsigned int uiDistance = 0; uiDistance < TH_DISTANCES; uiDistance++) + { + encodingTry.m_uiCW1 = uiDistance; + + // twiddle m_frgbaOriginalColor1_TAndH + for (int iRed1 = iMinRed1; iRed1 <= iMaxRed1; iRed1++) + { + for (int iGreen1 = iMinGreen1; iGreen1 <= iMaxGreen1; iGreen1++) + { + for (int iBlue1 = iMinBlue1; iBlue1 <= iMaxBlue1; iBlue1++) + { + encodingTry.m_frgbaColor1 = ColorFloatRGBA::ConvertFromRGB4((unsigned char)iRed1, (unsigned char)iGreen1, (unsigned char)iBlue1); + encodingTry.m_frgbaColor2 = m_frgbaOriginalColor2_TAndH; + + // if color1 == color2, H encoding issues can pop up, so abort + if (iRed1 == iColor2Red && iGreen1 == iColor2Green && iBlue1 == iColor2Blue) + { + continue; + } + + encodingTry.TryH_BestSelectorCombination(); + + if (encodingTry.m_fError < m_fError) + { + m_mode = encodingTry.m_mode; + m_boolDiff = encodingTry.m_boolDiff; + m_boolFlip = encodingTry.m_boolFlip; + + m_frgbaColor1 = encodingTry.m_frgbaColor1; + m_frgbaColor2 = encodingTry.m_frgbaColor2; + m_uiCW1 = encodingTry.m_uiCW1; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_auiSelectors[uiPixel] = encodingTry.m_auiSelectors[uiPixel]; + m_afrgbaDecodedColors[uiPixel] = encodingTry.m_afrgbaDecodedColors[uiPixel]; + } + + m_fError = encodingTry.m_fError; + } + } + } + } + + // 
twiddle m_frgbaOriginalColor2_TAndH + for (int iRed2 = iMinRed2; iRed2 <= iMaxRed2; iRed2++) + { + for (int iGreen2 = iMinGreen2; iGreen2 <= iMaxGreen2; iGreen2++) + { + for (int iBlue2 = iMinBlue2; iBlue2 <= iMaxBlue2; iBlue2++) + { + encodingTry.m_frgbaColor1 = m_frgbaOriginalColor1_TAndH; + encodingTry.m_frgbaColor2 = ColorFloatRGBA::ConvertFromRGB4((unsigned char)iRed2, (unsigned char)iGreen2, (unsigned char)iBlue2); + + // if color1 == color2, H encoding issues can pop up, so abort + if (iRed2 == iColor1Red && iGreen2 == iColor1Green && iBlue2 == iColor1Blue) + { + continue; + } + + encodingTry.TryH_BestSelectorCombination(); + + if (encodingTry.m_fError < m_fError) + { + m_mode = encodingTry.m_mode; + m_boolDiff = encodingTry.m_boolDiff; + m_boolFlip = encodingTry.m_boolFlip; + + m_frgbaColor1 = encodingTry.m_frgbaColor1; + m_frgbaColor2 = encodingTry.m_frgbaColor2; + m_uiCW1 = encodingTry.m_uiCW1; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_auiSelectors[uiPixel] = encodingTry.m_auiSelectors[uiPixel]; + m_afrgbaDecodedColors[uiPixel] = encodingTry.m_afrgbaDecodedColors[uiPixel]; + } + + m_fError = encodingTry.m_fError; + } + } + } + } + + } + + } + + // ---------------------------------------------------------------------------------------------------- + // find best selector combination for TryH + // called on an encodingTry + // + void Block4x4Encoding_RGB8::TryH_BestSelectorCombination(void) + { + + float fDistance = s_afTHDistanceTable[m_uiCW1]; + + unsigned int auiBestPixelSelectors[PIXELS]; + float afBestPixelErrors[PIXELS] = { FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, + FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX }; + ColorFloatRGBA afrgbaBestDecodedPixels[PIXELS]; + ColorFloatRGBA afrgbaDecodedPixel[SELECTORS]; + + assert(SELECTORS == 4); + afrgbaDecodedPixel[0] = (m_frgbaColor1 + fDistance).ClampRGB(); + afrgbaDecodedPixel[1] = (m_frgbaColor1 - fDistance).ClampRGB(); + afrgbaDecodedPixel[2] = (m_frgbaColor2 + fDistance).ClampRGB(); + afrgbaDecodedPixel[3] = (m_frgbaColor2 - fDistance).ClampRGB(); + + // try each selector + for (unsigned int uiSelector = 0; uiSelector < SELECTORS; uiSelector++) + { + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + + float fPixelError = CalcPixelError(afrgbaDecodedPixel[uiSelector], m_afDecodedAlphas[uiPixel], + m_pafrgbaSource[uiPixel]); + + if (fPixelError < afBestPixelErrors[uiPixel]) + { + afBestPixelErrors[uiPixel] = fPixelError; + auiBestPixelSelectors[uiPixel] = uiSelector; + afrgbaBestDecodedPixels[uiPixel] = afrgbaDecodedPixel[uiSelector]; + } + } + } + + + // add up all of the pixel errors + float fBlockError = 0.0f; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + fBlockError += afBestPixelErrors[uiPixel]; + } + + if (fBlockError < m_fError) + { + m_fError = fBlockError; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_auiSelectors[uiPixel] = auiBestPixelSelectors[uiPixel]; + m_afrgbaDecodedColors[uiPixel] = afrgbaBestDecodedPixels[uiPixel]; + } + } + + } + + // ---------------------------------------------------------------------------------------------------- + // use linear regression to find the best fit for colors along the edges of the 4x4 block + // + void Block4x4Encoding_RGB8::CalculatePlanarCornerColors(void) + { + ColorFloatRGBA afrgbaRegression[MAX_PLANAR_REGRESSION_SIZE]; + ColorFloatRGBA frgbaSlope; + ColorFloatRGBA frgbaOffset; + + // top edge + afrgbaRegression[0] = 
m_pafrgbaSource[0]; + afrgbaRegression[1] = m_pafrgbaSource[4]; + afrgbaRegression[2] = m_pafrgbaSource[8]; + afrgbaRegression[3] = m_pafrgbaSource[12]; + ColorRegression(afrgbaRegression, 4, &frgbaSlope, &frgbaOffset); + m_frgbaColor1 = frgbaOffset; + m_frgbaColor2 = (frgbaSlope * 4.0f) + frgbaOffset; + + // left edge + afrgbaRegression[0] = m_pafrgbaSource[0]; + afrgbaRegression[1] = m_pafrgbaSource[1]; + afrgbaRegression[2] = m_pafrgbaSource[2]; + afrgbaRegression[3] = m_pafrgbaSource[3]; + ColorRegression(afrgbaRegression, 4, &frgbaSlope, &frgbaOffset); + m_frgbaColor1 = (m_frgbaColor1 + frgbaOffset) * 0.5f; // average with top edge + m_frgbaColor3 = (frgbaSlope * 4.0f) + frgbaOffset; + + // right edge + afrgbaRegression[0] = m_pafrgbaSource[12]; + afrgbaRegression[1] = m_pafrgbaSource[13]; + afrgbaRegression[2] = m_pafrgbaSource[14]; + afrgbaRegression[3] = m_pafrgbaSource[15]; + ColorRegression(afrgbaRegression, 4, &frgbaSlope, &frgbaOffset); + m_frgbaColor2 = (m_frgbaColor2 + frgbaOffset) * 0.5f; // average with top edge + + // bottom edge + afrgbaRegression[0] = m_pafrgbaSource[3]; + afrgbaRegression[1] = m_pafrgbaSource[7]; + afrgbaRegression[2] = m_pafrgbaSource[11]; + afrgbaRegression[3] = m_pafrgbaSource[15]; + ColorRegression(afrgbaRegression, 4, &frgbaSlope, &frgbaOffset); + m_frgbaColor3 = (m_frgbaColor3 + frgbaOffset) * 0.5f; // average with left edge + + // quantize corner colors to 6/7/6 + m_frgbaColor1 = m_frgbaColor1.QuantizeR6G7B6(); + m_frgbaColor2 = m_frgbaColor2.QuantizeR6G7B6(); + m_frgbaColor3 = m_frgbaColor3.QuantizeR6G7B6(); + + } + + // ---------------------------------------------------------------------------------------------------- + // try different corner colors by slightly changing R, G and B independently + // + // R, G and B decoding and errors are independent, so R, G and B twiddles can be independent + // + // return true if improvement + // + bool Block4x4Encoding_RGB8::TwiddlePlanar(void) + { + bool boolImprovement = false; + + while (TwiddlePlanarR()) + { + boolImprovement = true; + } + + while (TwiddlePlanarG()) + { + boolImprovement = true; + } + + while (TwiddlePlanarB()) + { + boolImprovement = true; + } + + return boolImprovement; + } + + // ---------------------------------------------------------------------------------------------------- + // try different corner colors by slightly changing R + // + bool Block4x4Encoding_RGB8::TwiddlePlanarR() + { + bool boolImprovement = false; + + Block4x4Encoding_RGB8 encodingTry = *this; + + // init "try" + { + encodingTry.m_mode = MODE_PLANAR; + encodingTry.m_boolDiff = true; + encodingTry.m_boolFlip = false; + } + + int iOriginRed = encodingTry.m_frgbaColor1.IntRed(63.0f); + int iHorizRed = encodingTry.m_frgbaColor2.IntRed(63.0f); + int iVertRed = encodingTry.m_frgbaColor3.IntRed(63.0f); + + for (int iTryOriginRed = iOriginRed - 1; iTryOriginRed <= iOriginRed + 1; iTryOriginRed++) + { + // check for out of range + if (iTryOriginRed < 0 || iTryOriginRed > 63) + { + continue; + } + + encodingTry.m_frgbaColor1.fR = ((iTryOriginRed << 2) + (iTryOriginRed >> 4)) / 255.0f; + + for (int iTryHorizRed = iHorizRed - 1; iTryHorizRed <= iHorizRed + 1; iTryHorizRed++) + { + // check for out of range + if (iTryHorizRed < 0 || iTryHorizRed > 63) + { + continue; + } + + encodingTry.m_frgbaColor2.fR = ((iTryHorizRed << 2) + (iTryHorizRed >> 4)) / 255.0f; + + for (int iTryVertRed = iVertRed - 1; iTryVertRed <= iVertRed + 1; iTryVertRed++) + { + // check for out of range + if (iTryVertRed < 0 || iTryVertRed > 63) + { + 
continue; + } + + // don't bother with null twiddle + if (iTryOriginRed == iOriginRed && iTryHorizRed == iHorizRed && iTryVertRed == iVertRed) + { + continue; + } + + encodingTry.m_frgbaColor3.fR = ((iTryVertRed << 2) + (iTryVertRed >> 4)) / 255.0f; + + encodingTry.DecodePixels_Planar(); + + encodingTry.CalcBlockError(); + + if (encodingTry.m_fError < m_fError) + { + m_mode = MODE_PLANAR; + m_boolDiff = true; + m_boolFlip = false; + m_frgbaColor1 = encodingTry.m_frgbaColor1; + m_frgbaColor2 = encodingTry.m_frgbaColor2; + m_frgbaColor3 = encodingTry.m_frgbaColor3; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_afrgbaDecodedColors[uiPixel] = encodingTry.m_afrgbaDecodedColors[uiPixel]; + } + + m_fError = encodingTry.m_fError; + + boolImprovement = true; + } + } + } + } + + return boolImprovement; + } + + // ---------------------------------------------------------------------------------------------------- + // try different corner colors by slightly changing G + // + bool Block4x4Encoding_RGB8::TwiddlePlanarG() + { + bool boolImprovement = false; + + Block4x4Encoding_RGB8 encodingTry = *this; + + // init "try" + { + encodingTry.m_mode = MODE_PLANAR; + encodingTry.m_boolDiff = true; + encodingTry.m_boolFlip = false; + } + + int iOriginGreen = encodingTry.m_frgbaColor1.IntGreen(127.0f); + int iHorizGreen = encodingTry.m_frgbaColor2.IntGreen(127.0f); + int iVertGreen = encodingTry.m_frgbaColor3.IntGreen(127.0f); + + for (int iTryOriginGreen = iOriginGreen - 1; iTryOriginGreen <= iOriginGreen + 1; iTryOriginGreen++) + { + // check for out of range + if (iTryOriginGreen < 0 || iTryOriginGreen > 127) + { + continue; + } + + encodingTry.m_frgbaColor1.fG = ((iTryOriginGreen << 1) + (iTryOriginGreen >> 6)) / 255.0f; + + for (int iTryHorizGreen = iHorizGreen - 1; iTryHorizGreen <= iHorizGreen + 1; iTryHorizGreen++) + { + // check for out of range + if (iTryHorizGreen < 0 || iTryHorizGreen > 127) + { + continue; + } + + encodingTry.m_frgbaColor2.fG = ((iTryHorizGreen << 1) + (iTryHorizGreen >> 6)) / 255.0f; + + for (int iTryVertGreen = iVertGreen - 1; iTryVertGreen <= iVertGreen + 1; iTryVertGreen++) + { + // check for out of range + if (iTryVertGreen < 0 || iTryVertGreen > 127) + { + continue; + } + + // don't bother with null twiddle + if (iTryOriginGreen == iOriginGreen && + iTryHorizGreen == iHorizGreen && + iTryVertGreen == iVertGreen) + { + continue; + } + + encodingTry.m_frgbaColor3.fG = ((iTryVertGreen << 1) + (iTryVertGreen >> 6)) / 255.0f; + + encodingTry.DecodePixels_Planar(); + + encodingTry.CalcBlockError(); + + if (encodingTry.m_fError < m_fError) + { + m_mode = MODE_PLANAR; + m_boolDiff = true; + m_boolFlip = false; + m_frgbaColor1 = encodingTry.m_frgbaColor1; + m_frgbaColor2 = encodingTry.m_frgbaColor2; + m_frgbaColor3 = encodingTry.m_frgbaColor3; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_afrgbaDecodedColors[uiPixel] = encodingTry.m_afrgbaDecodedColors[uiPixel]; + } + + m_fError = encodingTry.m_fError; + + boolImprovement = true; + } + } + } + } + + return boolImprovement; + } + + // ---------------------------------------------------------------------------------------------------- + // try different corner colors by slightly changing B + // + bool Block4x4Encoding_RGB8::TwiddlePlanarB() + { + bool boolImprovement = false; + + Block4x4Encoding_RGB8 encodingTry = *this; + + // init "try" + { + encodingTry.m_mode = MODE_PLANAR; + encodingTry.m_boolDiff = true; + encodingTry.m_boolFlip = false; + } + + int iOriginBlue = 
encodingTry.m_frgbaColor1.IntBlue(63.0f); + int iHorizBlue = encodingTry.m_frgbaColor2.IntBlue(63.0f); + int iVertBlue = encodingTry.m_frgbaColor3.IntBlue(63.0f); + + for (int iTryOriginBlue = iOriginBlue - 1; iTryOriginBlue <= iOriginBlue + 1; iTryOriginBlue++) + { + // check for out of range + if (iTryOriginBlue < 0 || iTryOriginBlue > 63) + { + continue; + } + + encodingTry.m_frgbaColor1.fB = ((iTryOriginBlue << 2) + (iTryOriginBlue >> 4)) / 255.0f; + + for (int iTryHorizBlue = iHorizBlue - 1; iTryHorizBlue <= iHorizBlue + 1; iTryHorizBlue++) + { + // check for out of range + if (iTryHorizBlue < 0 || iTryHorizBlue > 63) + { + continue; + } + + encodingTry.m_frgbaColor2.fB = ((iTryHorizBlue << 2) + (iTryHorizBlue >> 4)) / 255.0f; + + for (int iTryVertBlue = iVertBlue - 1; iTryVertBlue <= iVertBlue + 1; iTryVertBlue++) + { + // check for out of range + if (iTryVertBlue < 0 || iTryVertBlue > 63) + { + continue; + } + + // don't bother with null twiddle + if (iTryOriginBlue == iOriginBlue && iTryHorizBlue == iHorizBlue && iTryVertBlue == iVertBlue) + { + continue; + } + + encodingTry.m_frgbaColor3.fB = ((iTryVertBlue << 2) + (iTryVertBlue >> 4)) / 255.0f; + + encodingTry.DecodePixels_Planar(); + + encodingTry.CalcBlockError(); + + if (encodingTry.m_fError < m_fError) + { + m_mode = MODE_PLANAR; + m_boolDiff = true; + m_boolFlip = false; + m_frgbaColor1 = encodingTry.m_frgbaColor1; + m_frgbaColor2 = encodingTry.m_frgbaColor2; + m_frgbaColor3 = encodingTry.m_frgbaColor3; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_afrgbaDecodedColors[uiPixel] = encodingTry.m_afrgbaDecodedColors[uiPixel]; + } + + m_fError = encodingTry.m_fError; + + boolImprovement = true; + } + } + } + } + + return boolImprovement; + } + + // ---------------------------------------------------------------------------------------------------- + // set the encoding bits based on encoding state + // + void Block4x4Encoding_RGB8::SetEncodingBits(void) + { + + switch (m_mode) + { + case MODE_ETC1: + Block4x4Encoding_ETC1::SetEncodingBits(); + break; + + case MODE_T: + SetEncodingBits_T(); + break; + + case MODE_H: + SetEncodingBits_H(); + break; + + case MODE_PLANAR: + SetEncodingBits_Planar(); + break; + + default: + assert(false); + } + + } + + // ---------------------------------------------------------------------------------------------------- + // set the encoding bits based on encoding state for T mode + // + void Block4x4Encoding_RGB8::SetEncodingBits_T(void) + { + static const bool SANITY_CHECK = true; + + assert(m_mode == MODE_T); + assert(m_boolDiff == true); + + unsigned int uiRed1 = (unsigned int)m_frgbaColor1.IntRed(15.0f); + unsigned int uiGreen1 = (unsigned int)m_frgbaColor1.IntGreen(15.0f); + unsigned int uiBlue1 = (unsigned int)m_frgbaColor1.IntBlue(15.0f); + + unsigned int uiRed2 = (unsigned int)m_frgbaColor2.IntRed(15.0f); + unsigned int uiGreen2 = (unsigned int)m_frgbaColor2.IntGreen(15.0f); + unsigned int uiBlue2 = (unsigned int)m_frgbaColor2.IntBlue(15.0f); + + m_pencodingbitsRGB8->t.red1a = uiRed1 >> 2; + m_pencodingbitsRGB8->t.red1b = uiRed1; + m_pencodingbitsRGB8->t.green1 = uiGreen1; + m_pencodingbitsRGB8->t.blue1 = uiBlue1; + + m_pencodingbitsRGB8->t.red2 = uiRed2; + m_pencodingbitsRGB8->t.green2 = uiGreen2; + m_pencodingbitsRGB8->t.blue2 = uiBlue2; + + m_pencodingbitsRGB8->t.da = m_uiCW1 >> 1; + m_pencodingbitsRGB8->t.db = m_uiCW1; + + m_pencodingbitsRGB8->t.diff = 1; + + Block4x4Encoding_ETC1::SetEncodingBits_Selectors(); + + // create an invalid R differential to trigger T 
mode + m_pencodingbitsRGB8->t.detect1 = 0; + m_pencodingbitsRGB8->t.detect2 = 0; + int iRed2 = (int)m_pencodingbitsRGB8->differential.red1 + (int)m_pencodingbitsRGB8->differential.dred2; + if (iRed2 >= 4) + { + m_pencodingbitsRGB8->t.detect1 = 7; + m_pencodingbitsRGB8->t.detect2 = 0; + } + else + { + m_pencodingbitsRGB8->t.detect1 = 0; + m_pencodingbitsRGB8->t.detect2 = 1; + } + + if (SANITY_CHECK) + { + iRed2 = (int)m_pencodingbitsRGB8->differential.red1 + (int)m_pencodingbitsRGB8->differential.dred2; + + // make sure red overflows + assert(iRed2 < 0 || iRed2 > 31); + } + + } + + // ---------------------------------------------------------------------------------------------------- + // set the encoding bits based on encoding state for H mode + // + // colors and selectors may need to swap in order to generate lsb of distance index + // + void Block4x4Encoding_RGB8::SetEncodingBits_H(void) + { + static const bool SANITY_CHECK = true; + + assert(m_mode == MODE_H); + assert(m_boolDiff == true); + + unsigned int uiRed1 = (unsigned int)m_frgbaColor1.IntRed(15.0f); + unsigned int uiGreen1 = (unsigned int)m_frgbaColor1.IntGreen(15.0f); + unsigned int uiBlue1 = (unsigned int)m_frgbaColor1.IntBlue(15.0f); + + unsigned int uiRed2 = (unsigned int)m_frgbaColor2.IntRed(15.0f); + unsigned int uiGreen2 = (unsigned int)m_frgbaColor2.IntGreen(15.0f); + unsigned int uiBlue2 = (unsigned int)m_frgbaColor2.IntBlue(15.0f); + + unsigned int uiColor1 = (uiRed1 << 16) + (uiGreen1 << 8) + uiBlue1; + unsigned int uiColor2 = (uiRed2 << 16) + (uiGreen2 << 8) + uiBlue2; + + bool boolOddDistance = m_uiCW1 & 1; + bool boolSwapColors = (uiColor1 < uiColor2) ^ !boolOddDistance; + + if (boolSwapColors) + { + m_pencodingbitsRGB8->h.red1 = uiRed2; + m_pencodingbitsRGB8->h.green1a = uiGreen2 >> 1; + m_pencodingbitsRGB8->h.green1b = uiGreen2; + m_pencodingbitsRGB8->h.blue1a = uiBlue2 >> 3; + m_pencodingbitsRGB8->h.blue1b = uiBlue2 >> 1; + m_pencodingbitsRGB8->h.blue1c = uiBlue2; + + m_pencodingbitsRGB8->h.red2 = uiRed1; + m_pencodingbitsRGB8->h.green2a = uiGreen1 >> 1; + m_pencodingbitsRGB8->h.green2b = uiGreen1; + m_pencodingbitsRGB8->h.blue2 = uiBlue1; + + m_pencodingbitsRGB8->h.da = m_uiCW1 >> 2; + m_pencodingbitsRGB8->h.db = m_uiCW1 >> 1; + } + else + { + m_pencodingbitsRGB8->h.red1 = uiRed1; + m_pencodingbitsRGB8->h.green1a = uiGreen1 >> 1; + m_pencodingbitsRGB8->h.green1b = uiGreen1; + m_pencodingbitsRGB8->h.blue1a = uiBlue1 >> 3; + m_pencodingbitsRGB8->h.blue1b = uiBlue1 >> 1; + m_pencodingbitsRGB8->h.blue1c = uiBlue1; + + m_pencodingbitsRGB8->h.red2 = uiRed2; + m_pencodingbitsRGB8->h.green2a = uiGreen2 >> 1; + m_pencodingbitsRGB8->h.green2b = uiGreen2; + m_pencodingbitsRGB8->h.blue2 = uiBlue2; + + m_pencodingbitsRGB8->h.da = m_uiCW1 >> 2; + m_pencodingbitsRGB8->h.db = m_uiCW1 >> 1; + } + + m_pencodingbitsRGB8->h.diff = 1; + + Block4x4Encoding_ETC1::SetEncodingBits_Selectors(); + + if (boolSwapColors) + { + m_pencodingbitsRGB8->h.selectors ^= 0x0000FFFF; + } + + // create an invalid R differential to trigger T mode + m_pencodingbitsRGB8->h.detect1 = 0; + m_pencodingbitsRGB8->h.detect2 = 0; + m_pencodingbitsRGB8->h.detect3 = 0; + int iRed2 = (int)m_pencodingbitsRGB8->differential.red1 + (int)m_pencodingbitsRGB8->differential.dred2; + int iGreen2 = (int)m_pencodingbitsRGB8->differential.green1 + (int)m_pencodingbitsRGB8->differential.dgreen2; + if (iRed2 < 0 || iRed2 > 31) + { + m_pencodingbitsRGB8->h.detect1 = 1; + } + if (iGreen2 >= 4) + { + m_pencodingbitsRGB8->h.detect2 = 7; + m_pencodingbitsRGB8->h.detect3 = 0; + } 
+ else + { + m_pencodingbitsRGB8->h.detect2 = 0; + m_pencodingbitsRGB8->h.detect3 = 1; + } + + if (SANITY_CHECK) + { + iRed2 = (int)m_pencodingbitsRGB8->differential.red1 + (int)m_pencodingbitsRGB8->differential.dred2; + iGreen2 = (int)m_pencodingbitsRGB8->differential.green1 + (int)m_pencodingbitsRGB8->differential.dgreen2; + + // make sure red doesn't overflow and green does + assert(iRed2 >= 0 && iRed2 <= 31); + assert(iGreen2 < 0 || iGreen2 > 31); + } + + } + + // ---------------------------------------------------------------------------------------------------- + // set the encoding bits based on encoding state for Planar mode + // + void Block4x4Encoding_RGB8::SetEncodingBits_Planar(void) + { + static const bool SANITY_CHECK = true; + + assert(m_mode == MODE_PLANAR); + assert(m_boolDiff == true); + + unsigned int uiOriginRed = (unsigned int)m_frgbaColor1.IntRed(63.0f); + unsigned int uiOriginGreen = (unsigned int)m_frgbaColor1.IntGreen(127.0f); + unsigned int uiOriginBlue = (unsigned int)m_frgbaColor1.IntBlue(63.0f); + + unsigned int uiHorizRed = (unsigned int)m_frgbaColor2.IntRed(63.0f); + unsigned int uiHorizGreen = (unsigned int)m_frgbaColor2.IntGreen(127.0f); + unsigned int uiHorizBlue = (unsigned int)m_frgbaColor2.IntBlue(63.0f); + + unsigned int uiVertRed = (unsigned int)m_frgbaColor3.IntRed(63.0f); + unsigned int uiVertGreen = (unsigned int)m_frgbaColor3.IntGreen(127.0f); + unsigned int uiVertBlue = (unsigned int)m_frgbaColor3.IntBlue(63.0f); + + m_pencodingbitsRGB8->planar.originRed = uiOriginRed; + m_pencodingbitsRGB8->planar.originGreen1 = uiOriginGreen >> 6; + m_pencodingbitsRGB8->planar.originGreen2 = uiOriginGreen; + m_pencodingbitsRGB8->planar.originBlue1 = uiOriginBlue >> 5; + m_pencodingbitsRGB8->planar.originBlue2 = uiOriginBlue >> 3; + m_pencodingbitsRGB8->planar.originBlue3 = uiOriginBlue >> 1; + m_pencodingbitsRGB8->planar.originBlue4 = uiOriginBlue; + + m_pencodingbitsRGB8->planar.horizRed1 = uiHorizRed >> 1; + m_pencodingbitsRGB8->planar.horizRed2 = uiHorizRed; + m_pencodingbitsRGB8->planar.horizGreen = uiHorizGreen; + m_pencodingbitsRGB8->planar.horizBlue1 = uiHorizBlue >> 5; + m_pencodingbitsRGB8->planar.horizBlue2 = uiHorizBlue; + + m_pencodingbitsRGB8->planar.vertRed1 = uiVertRed >> 3; + m_pencodingbitsRGB8->planar.vertRed2 = uiVertRed; + m_pencodingbitsRGB8->planar.vertGreen1 = uiVertGreen >> 2; + m_pencodingbitsRGB8->planar.vertGreen2 = uiVertGreen; + m_pencodingbitsRGB8->planar.vertBlue = uiVertBlue; + + m_pencodingbitsRGB8->planar.diff = 1; + + // create valid RG differentials and an invalid B differential to trigger planar mode + m_pencodingbitsRGB8->planar.detect1 = 0; + m_pencodingbitsRGB8->planar.detect2 = 0; + m_pencodingbitsRGB8->planar.detect3 = 0; + m_pencodingbitsRGB8->planar.detect4 = 0; + int iRed2 = (int)m_pencodingbitsRGB8->differential.red1 + (int)m_pencodingbitsRGB8->differential.dred2; + int iGreen2 = (int)m_pencodingbitsRGB8->differential.green1 + (int)m_pencodingbitsRGB8->differential.dgreen2; + int iBlue2 = (int)m_pencodingbitsRGB8->differential.blue1 + (int)m_pencodingbitsRGB8->differential.dblue2; + if (iRed2 < 0 || iRed2 > 31) + { + m_pencodingbitsRGB8->planar.detect1 = 1; + } + if (iGreen2 < 0 || iGreen2 > 31) + { + m_pencodingbitsRGB8->planar.detect2 = 1; + } + if (iBlue2 >= 4) + { + m_pencodingbitsRGB8->planar.detect3 = 7; + m_pencodingbitsRGB8->planar.detect4 = 0; + } + else + { + m_pencodingbitsRGB8->planar.detect3 = 0; + m_pencodingbitsRGB8->planar.detect4 = 1; + } + + if (SANITY_CHECK) + { + iRed2 = 
(int)m_pencodingbitsRGB8->differential.red1 + (int)m_pencodingbitsRGB8->differential.dred2; + iGreen2 = (int)m_pencodingbitsRGB8->differential.green1 + (int)m_pencodingbitsRGB8->differential.dgreen2; + iBlue2 = (int)m_pencodingbitsRGB8->differential.blue1 + (int)m_pencodingbitsRGB8->differential.dblue2; + + // make sure red and green don't overflow and blue does + assert(iRed2 >= 0 && iRed2 <= 31); + assert(iGreen2 >= 0 && iGreen2 <= 31); + assert(iBlue2 < 0 || iBlue2 > 31); + } + + } + + // ---------------------------------------------------------------------------------------------------- + // set the decoded colors and decoded alpha based on the encoding state for T mode + // + void Block4x4Encoding_RGB8::DecodePixels_T(void) + { + + float fDistance = s_afTHDistanceTable[m_uiCW1]; + ColorFloatRGBA frgbaDistance(fDistance, fDistance, fDistance, 0.0f); + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + switch (m_auiSelectors[uiPixel]) + { + case 0: + m_afrgbaDecodedColors[uiPixel] = m_frgbaColor1; + break; + + case 1: + m_afrgbaDecodedColors[uiPixel] = (m_frgbaColor2 + frgbaDistance).ClampRGB(); + break; + + case 2: + m_afrgbaDecodedColors[uiPixel] = m_frgbaColor2; + break; + + case 3: + m_afrgbaDecodedColors[uiPixel] = (m_frgbaColor2 - frgbaDistance).ClampRGB(); + break; + } + + } + + } + + // ---------------------------------------------------------------------------------------------------- + // set the decoded colors and decoded alpha based on the encoding state for H mode + // + void Block4x4Encoding_RGB8::DecodePixels_H(void) + { + + float fDistance = s_afTHDistanceTable[m_uiCW1]; + ColorFloatRGBA frgbaDistance(fDistance, fDistance, fDistance, 0.0f); + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + switch (m_auiSelectors[uiPixel]) + { + case 0: + m_afrgbaDecodedColors[uiPixel] = (m_frgbaColor1 + frgbaDistance).ClampRGB(); + break; + + case 1: + m_afrgbaDecodedColors[uiPixel] = (m_frgbaColor1 - frgbaDistance).ClampRGB(); + break; + + case 2: + m_afrgbaDecodedColors[uiPixel] = (m_frgbaColor2 + frgbaDistance).ClampRGB(); + break; + + case 3: + m_afrgbaDecodedColors[uiPixel] = (m_frgbaColor2 - frgbaDistance).ClampRGB(); + break; + } + + } + + } + + // ---------------------------------------------------------------------------------------------------- + // set the decoded colors and decoded alpha based on the encoding state for Planar mode + // + void Block4x4Encoding_RGB8::DecodePixels_Planar(void) + { + + int iRO = (int)roundf(m_frgbaColor1.fR * 255.0f); + int iGO = (int)roundf(m_frgbaColor1.fG * 255.0f); + int iBO = (int)roundf(m_frgbaColor1.fB * 255.0f); + + int iRH = (int)roundf(m_frgbaColor2.fR * 255.0f); + int iGH = (int)roundf(m_frgbaColor2.fG * 255.0f); + int iBH = (int)roundf(m_frgbaColor2.fB * 255.0f); + + int iRV = (int)roundf(m_frgbaColor3.fR * 255.0f); + int iGV = (int)roundf(m_frgbaColor3.fG * 255.0f); + int iBV = (int)roundf(m_frgbaColor3.fB * 255.0f); + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + int iX = (int)(uiPixel >> 2); + int iY = (int)(uiPixel & 3); + + int iR = (iX*(iRH - iRO) + iY*(iRV - iRO) + 4*iRO + 2) >> 2; + int iG = (iX*(iGH - iGO) + iY*(iGV - iGO) + 4*iGO + 2) >> 2; + int iB = (iX*(iBH - iBO) + iY*(iBV - iBO) + 4*iBO + 2) >> 2; + + ColorFloatRGBA frgba; + frgba.fR = (float)iR / 255.0f; + frgba.fG = (float)iG / 255.0f; + frgba.fB = (float)iB / 255.0f; + frgba.fA = 1.0f; + + m_afrgbaDecodedColors[uiPixel] = frgba.ClampRGB(); + } + + } + + // 
---------------------------------------------------------------------------------------------------- + // perform a linear regression for the a_uiPixels in a_pafrgbaPixels[] + // + // output the closest color line using a_pfrgbaSlope and a_pfrgbaOffset + // + void Block4x4Encoding_RGB8::ColorRegression(ColorFloatRGBA *a_pafrgbaPixels, unsigned int a_uiPixels, + ColorFloatRGBA *a_pfrgbaSlope, ColorFloatRGBA *a_pfrgbaOffset) + { + typedef struct + { + float f[4]; + } Float4; + + Float4 *paf4Pixels = (Float4 *)(a_pafrgbaPixels); + Float4 *pf4Slope = (Float4 *)(a_pfrgbaSlope); + Float4 *pf4Offset = (Float4 *)(a_pfrgbaOffset); + + float afX[MAX_PLANAR_REGRESSION_SIZE]; + float afY[MAX_PLANAR_REGRESSION_SIZE]; + + // handle r, g and b separately. don't bother with a + for (unsigned int uiComponent = 0; uiComponent < 3; uiComponent++) + { + for (unsigned int uiPixel = 0; uiPixel < a_uiPixels; uiPixel++) + { + afX[uiPixel] = (float)uiPixel; + afY[uiPixel] = paf4Pixels[uiPixel].f[uiComponent]; + + } + Etc::Regression(afX, afY, a_uiPixels, + &(pf4Slope->f[uiComponent]), &(pf4Offset->f[uiComponent])); + } + + } + + // ---------------------------------------------------------------------------------------------------- + // +} diff --git a/thirdparty/etc2comp/EtcBlock4x4Encoding_RGB8.h b/thirdparty/etc2comp/EtcBlock4x4Encoding_RGB8.h new file mode 100644 index 0000000000..03754d5e3b --- /dev/null +++ b/thirdparty/etc2comp/EtcBlock4x4Encoding_RGB8.h @@ -0,0 +1,96 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include "EtcBlock4x4Encoding_ETC1.h" + +namespace Etc +{ + + class Block4x4Encoding_RGB8 : public Block4x4Encoding_ETC1 + { + public: + + Block4x4Encoding_RGB8(void); + virtual ~Block4x4Encoding_RGB8(void); + + virtual void InitFromEncodingBits(Block4x4 *a_pblockParent, + unsigned char *a_paucEncodingBits, + ColorFloatRGBA *a_pafrgbaSource, + + ErrorMetric a_errormetric); + + virtual void PerformIteration(float a_fEffort); + + virtual void SetEncodingBits(void); + + inline ColorFloatRGBA GetColor3(void) const + { + return m_frgbaColor3; + } + + protected: + + static const unsigned int PLANAR_CORNER_COLORS = 3; + static const unsigned int MAX_PLANAR_REGRESSION_SIZE = 4; + static const unsigned int TH_DISTANCES = 8; + + static float s_afTHDistanceTable[TH_DISTANCES]; + + void TryPlanar(unsigned int a_uiRadius); + void TryTAndH(unsigned int a_uiRadius); + + void InitFromEncodingBits_Planar(void); + + ColorFloatRGBA m_frgbaColor3; // used for planar + + void SetEncodingBits_T(void); + void SetEncodingBits_H(void); + void SetEncodingBits_Planar(void); + + // state shared between iterations + ColorFloatRGBA m_frgbaOriginalColor1_TAndH; + ColorFloatRGBA m_frgbaOriginalColor2_TAndH; + + void CalculateBaseColorsForTAndH(void); + void TryT(unsigned int a_uiRadius); + void TryT_BestSelectorCombination(void); + void TryH(unsigned int a_uiRadius); + void TryH_BestSelectorCombination(void); + + private: + + void InitFromEncodingBits_T(void); + void InitFromEncodingBits_H(void); + + void CalculatePlanarCornerColors(void); + + void ColorRegression(ColorFloatRGBA *a_pafrgbaPixels, unsigned int a_uiPixels, + ColorFloatRGBA *a_pfrgbaSlope, ColorFloatRGBA *a_pfrgbaOffset); + + bool TwiddlePlanar(void); + bool TwiddlePlanarR(); + bool TwiddlePlanarG(); + bool TwiddlePlanarB(); + + void DecodePixels_T(void); + void DecodePixels_H(void); + void DecodePixels_Planar(void); + + }; + +} // namespace Etc diff --git a/thirdparty/etc2comp/EtcBlock4x4Encoding_RGB8A1.cpp b/thirdparty/etc2comp/EtcBlock4x4Encoding_RGB8A1.cpp new file mode 100644 index 0000000000..ba2b42fb05 --- /dev/null +++ b/thirdparty/etc2comp/EtcBlock4x4Encoding_RGB8A1.cpp @@ -0,0 +1,1819 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* +EtcBlock4x4Encoding_RGB8A1.cpp contains: + Block4x4Encoding_RGB8A1 + Block4x4Encoding_RGB8A1_Opaque + Block4x4Encoding_RGB8A1_Transparent + +These encoders are used when targetting file format RGB8A1. 
+ +Block4x4Encoding_RGB8A1_Opaque is used when all pixels in the 4x4 block are opaque +Block4x4Encoding_RGB8A1_Transparent is used when all pixels in the 4x4 block are transparent +Block4x4Encoding_RGB8A1 is used when there is a mixture of alphas in the 4x4 block + +*/ + +#include "EtcConfig.h" +#include "EtcBlock4x4Encoding_RGB8A1.h" + +#include "EtcBlock4x4.h" +#include "EtcBlock4x4EncodingBits.h" +#include "EtcBlock4x4Encoding_RGB8.h" + +#include <stdio.h> +#include <string.h> +#include <assert.h> + +namespace Etc +{ + + // #################################################################################################### + // Block4x4Encoding_RGB8A1 + // #################################################################################################### + + float Block4x4Encoding_RGB8A1::s_aafCwOpaqueUnsetTable[CW_RANGES][SELECTORS] = + { + { 0.0f / 255.0f, 8.0f / 255.0f, 0.0f / 255.0f, -8.0f / 255.0f }, + { 0.0f / 255.0f, 17.0f / 255.0f, 0.0f / 255.0f, -17.0f / 255.0f }, + { 0.0f / 255.0f, 29.0f / 255.0f, 0.0f / 255.0f, -29.0f / 255.0f }, + { 0.0f / 255.0f, 42.0f / 255.0f, 0.0f / 255.0f, -42.0f / 255.0f }, + { 0.0f / 255.0f, 60.0f / 255.0f, 0.0f / 255.0f, -60.0f / 255.0f }, + { 0.0f / 255.0f, 80.0f / 255.0f, 0.0f / 255.0f, -80.0f / 255.0f }, + { 0.0f / 255.0f, 106.0f / 255.0f, 0.0f / 255.0f, -106.0f / 255.0f }, + { 0.0f / 255.0f, 183.0f / 255.0f, 0.0f / 255.0f, -183.0f / 255.0f } + }; + + // ---------------------------------------------------------------------------------------------------- + // + Block4x4Encoding_RGB8A1::Block4x4Encoding_RGB8A1(void) + { + m_pencodingbitsRGB8 = nullptr; + m_boolOpaque = false; + m_boolTransparent = false; + m_boolPunchThroughPixels = true; + + } + Block4x4Encoding_RGB8A1::~Block4x4Encoding_RGB8A1(void) {} + // ---------------------------------------------------------------------------------------------------- + // initialization prior to encoding + // a_pblockParent points to the block associated with this encoding + // a_errormetric is used to choose the best encoding + // a_pafrgbaSource points to a 4x4 block subset of the source image + // a_paucEncodingBits points to the final encoding bits + // + void Block4x4Encoding_RGB8A1::InitFromSource(Block4x4 *a_pblockParent, + ColorFloatRGBA *a_pafrgbaSource, + unsigned char *a_paucEncodingBits, + ErrorMetric a_errormetric) + { + + Block4x4Encoding_RGB8::InitFromSource(a_pblockParent, + a_pafrgbaSource, + a_paucEncodingBits, + a_errormetric); + + m_boolOpaque = a_pblockParent->GetSourceAlphaMix() == Block4x4::SourceAlphaMix::OPAQUE; + m_boolTransparent = a_pblockParent->GetSourceAlphaMix() == Block4x4::SourceAlphaMix::TRANSPARENT; + m_boolPunchThroughPixels = a_pblockParent->HasPunchThroughPixels(); + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + if (m_pafrgbaSource[uiPixel].fA >= 0.5f) + { + m_afDecodedAlphas[uiPixel] = 1.0f; + } + else + { + m_afDecodedAlphas[uiPixel] = 0.0f; + } + } + + } + + // ---------------------------------------------------------------------------------------------------- + // initialization from the encoding bits of a previous encoding + // a_pblockParent points to the block associated with this encoding + // a_errormetric is used to choose the best encoding + // a_pafrgbaSource points to a 4x4 block subset of the source image + // a_paucEncodingBits points to the final encoding bits of a previous encoding + // + void Block4x4Encoding_RGB8A1::InitFromEncodingBits(Block4x4 *a_pblockParent, + unsigned char *a_paucEncodingBits, + ColorFloatRGBA 
*a_pafrgbaSource, + ErrorMetric a_errormetric) + { + + + InitFromEncodingBits_ETC1(a_pblockParent, + a_paucEncodingBits, + a_pafrgbaSource, + a_errormetric); + + m_pencodingbitsRGB8 = (Block4x4EncodingBits_RGB8 *)a_paucEncodingBits; + + // detect if there is a T, H or Planar mode present + int iRed1 = m_pencodingbitsRGB8->differential.red1; + int iDRed2 = m_pencodingbitsRGB8->differential.dred2; + int iRed2 = iRed1 + iDRed2; + + int iGreen1 = m_pencodingbitsRGB8->differential.green1; + int iDGreen2 = m_pencodingbitsRGB8->differential.dgreen2; + int iGreen2 = iGreen1 + iDGreen2; + + int iBlue1 = m_pencodingbitsRGB8->differential.blue1; + int iDBlue2 = m_pencodingbitsRGB8->differential.dblue2; + int iBlue2 = iBlue1 + iDBlue2; + + if (iRed2 < 0 || iRed2 > 31) + { + InitFromEncodingBits_T(); + } + else if (iGreen2 < 0 || iGreen2 > 31) + { + InitFromEncodingBits_H(); + } + else if (iBlue2 < 0 || iBlue2 > 31) + { + Block4x4Encoding_RGB8::InitFromEncodingBits_Planar(); + } + + } + + // ---------------------------------------------------------------------------------------------------- + // initialization from the encoding bits of a previous encoding assuming the encoding is an ETC1 mode. + // if it isn't an ETC1 mode, this will be overwritten later + // + void Block4x4Encoding_RGB8A1::InitFromEncodingBits_ETC1(Block4x4 *a_pblockParent, + unsigned char *a_paucEncodingBits, + ColorFloatRGBA *a_pafrgbaSource, + ErrorMetric a_errormetric) + { + Block4x4Encoding::Init(a_pblockParent, a_pafrgbaSource, + a_errormetric); + + m_pencodingbitsRGB8 = (Block4x4EncodingBits_RGB8 *)a_paucEncodingBits; + + m_mode = MODE_ETC1; + m_boolDiff = true; + m_boolFlip = m_pencodingbitsRGB8->differential.flip; + m_boolOpaque = m_pencodingbitsRGB8->differential.diff; + + int iR2 = m_pencodingbitsRGB8->differential.red1 + m_pencodingbitsRGB8->differential.dred2; + if (iR2 < 0) + { + iR2 = 0; + } + else if (iR2 > 31) + { + iR2 = 31; + } + + int iG2 = m_pencodingbitsRGB8->differential.green1 + m_pencodingbitsRGB8->differential.dgreen2; + if (iG2 < 0) + { + iG2 = 0; + } + else if (iG2 > 31) + { + iG2 = 31; + } + + int iB2 = m_pencodingbitsRGB8->differential.blue1 + m_pencodingbitsRGB8->differential.dblue2; + if (iB2 < 0) + { + iB2 = 0; + } + else if (iB2 > 31) + { + iB2 = 31; + } + + m_frgbaColor1 = ColorFloatRGBA::ConvertFromRGB5(m_pencodingbitsRGB8->differential.red1, m_pencodingbitsRGB8->differential.green1, m_pencodingbitsRGB8->differential.blue1); + m_frgbaColor2 = ColorFloatRGBA::ConvertFromRGB5((unsigned char)iR2, (unsigned char)iG2, (unsigned char)iB2); + + m_uiCW1 = m_pencodingbitsRGB8->differential.cw1; + m_uiCW2 = m_pencodingbitsRGB8->differential.cw2; + + Block4x4Encoding_ETC1::InitFromEncodingBits_Selectors(); + + Decode_ETC1(); + + CalcBlockError(); + + } + + // ---------------------------------------------------------------------------------------------------- + // initialization from the encoding bits of a previous encoding if T mode is detected + // + void Block4x4Encoding_RGB8A1::InitFromEncodingBits_T(void) + { + m_mode = MODE_T; + + unsigned char ucRed1 = (unsigned char)((m_pencodingbitsRGB8->t.red1a << 2) + + m_pencodingbitsRGB8->t.red1b); + unsigned char ucGreen1 = m_pencodingbitsRGB8->t.green1; + unsigned char ucBlue1 = m_pencodingbitsRGB8->t.blue1; + + unsigned char ucRed2 = m_pencodingbitsRGB8->t.red2; + unsigned char ucGreen2 = m_pencodingbitsRGB8->t.green2; + unsigned char ucBlue2 = m_pencodingbitsRGB8->t.blue2; + + m_frgbaColor1 = ColorFloatRGBA::ConvertFromRGB4(ucRed1, ucGreen1, ucBlue1); + 
m_frgbaColor2 = ColorFloatRGBA::ConvertFromRGB4(ucRed2, ucGreen2, ucBlue2); + + m_uiCW1 = (m_pencodingbitsRGB8->t.da << 1) + m_pencodingbitsRGB8->t.db; + + Block4x4Encoding_ETC1::InitFromEncodingBits_Selectors(); + + DecodePixels_T(); + + CalcBlockError(); + } + + // ---------------------------------------------------------------------------------------------------- + // initialization from the encoding bits of a previous encoding if H mode is detected + // + void Block4x4Encoding_RGB8A1::InitFromEncodingBits_H(void) + { + m_mode = MODE_H; + + unsigned char ucRed1 = m_pencodingbitsRGB8->h.red1; + unsigned char ucGreen1 = (unsigned char)((m_pencodingbitsRGB8->h.green1a << 1) + + m_pencodingbitsRGB8->h.green1b); + unsigned char ucBlue1 = (unsigned char)((m_pencodingbitsRGB8->h.blue1a << 3) + + (m_pencodingbitsRGB8->h.blue1b << 1) + + m_pencodingbitsRGB8->h.blue1c); + + unsigned char ucRed2 = m_pencodingbitsRGB8->h.red2; + unsigned char ucGreen2 = (unsigned char)((m_pencodingbitsRGB8->h.green2a << 1) + + m_pencodingbitsRGB8->h.green2b); + unsigned char ucBlue2 = m_pencodingbitsRGB8->h.blue2; + + m_frgbaColor1 = ColorFloatRGBA::ConvertFromRGB4(ucRed1, ucGreen1, ucBlue1); + m_frgbaColor2 = ColorFloatRGBA::ConvertFromRGB4(ucRed2, ucGreen2, ucBlue2); + + // used to determine the LSB of the CW + unsigned int uiRGB1 = (unsigned int)(((int)ucRed1 << 16) + ((int)ucGreen1 << 8) + (int)ucBlue1); + unsigned int uiRGB2 = (unsigned int)(((int)ucRed2 << 16) + ((int)ucGreen2 << 8) + (int)ucBlue2); + + m_uiCW1 = (m_pencodingbitsRGB8->h.da << 2) + (m_pencodingbitsRGB8->h.db << 1); + if (uiRGB1 >= uiRGB2) + { + m_uiCW1++; + } + + Block4x4Encoding_ETC1::InitFromEncodingBits_Selectors(); + + DecodePixels_H(); + + CalcBlockError(); + } + + // ---------------------------------------------------------------------------------------------------- + // for ETC1 modes, set the decoded colors and decoded alpha based on the encoding state + // + void Block4x4Encoding_RGB8A1::Decode_ETC1(void) + { + + const unsigned int *pauiPixelOrder = m_boolFlip ? s_auiPixelOrderFlip1 : s_auiPixelOrderFlip0; + + for (unsigned int uiPixelOrder = 0; uiPixelOrder < PIXELS; uiPixelOrder++) + { + ColorFloatRGBA *pfrgbaCenter = uiPixelOrder < 8 ? &m_frgbaColor1 : &m_frgbaColor2; + unsigned int uiCW = uiPixelOrder < 8 ? 
m_uiCW1 : m_uiCW2; + + unsigned int uiPixel = pauiPixelOrder[uiPixelOrder]; + + float fDelta; + if (m_boolOpaque) + fDelta = Block4x4Encoding_ETC1::s_aafCwTable[uiCW][m_auiSelectors[uiPixel]]; + else + fDelta = s_aafCwOpaqueUnsetTable[uiCW][m_auiSelectors[uiPixel]]; + + if (m_boolOpaque == false && m_auiSelectors[uiPixel] == TRANSPARENT_SELECTOR) + { + m_afrgbaDecodedColors[uiPixel] = ColorFloatRGBA(); + m_afDecodedAlphas[uiPixel] = 0.0f; + } + else + { + m_afrgbaDecodedColors[uiPixel] = (*pfrgbaCenter + fDelta).ClampRGB(); + m_afDecodedAlphas[uiPixel] = 1.0f; + } + } + + } + + // ---------------------------------------------------------------------------------------------------- + // for T mode, set the decoded colors and decoded alpha based on the encoding state + // + void Block4x4Encoding_RGB8A1::DecodePixels_T(void) + { + + float fDistance = s_afTHDistanceTable[m_uiCW1]; + ColorFloatRGBA frgbaDistance(fDistance, fDistance, fDistance, 0.0f); + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + switch (m_auiSelectors[uiPixel]) + { + case 0: + m_afrgbaDecodedColors[uiPixel] = m_frgbaColor1; + m_afDecodedAlphas[uiPixel] = 1.0f; + break; + + case 1: + m_afrgbaDecodedColors[uiPixel] = (m_frgbaColor2 + frgbaDistance).ClampRGB(); + m_afDecodedAlphas[uiPixel] = 1.0f; + break; + + case 2: + if (m_boolOpaque == false) + { + m_afrgbaDecodedColors[uiPixel] = ColorFloatRGBA(); + m_afDecodedAlphas[uiPixel] = 0.0f; + } + else + { + m_afrgbaDecodedColors[uiPixel] = m_frgbaColor2; + m_afDecodedAlphas[uiPixel] = 1.0f; + } + break; + + case 3: + m_afrgbaDecodedColors[uiPixel] = (m_frgbaColor2 - frgbaDistance).ClampRGB(); + m_afDecodedAlphas[uiPixel] = 1.0f; + break; + } + + } + + } + + // ---------------------------------------------------------------------------------------------------- + // for H mode, set the decoded colors and decoded alpha based on the encoding state + // + void Block4x4Encoding_RGB8A1::DecodePixels_H(void) + { + + float fDistance = s_afTHDistanceTable[m_uiCW1]; + ColorFloatRGBA frgbaDistance(fDistance, fDistance, fDistance, 0.0f); + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + switch (m_auiSelectors[uiPixel]) + { + case 0: + m_afrgbaDecodedColors[uiPixel] = (m_frgbaColor1 + frgbaDistance).ClampRGB(); + m_afDecodedAlphas[uiPixel] = 1.0f; + break; + + case 1: + m_afrgbaDecodedColors[uiPixel] = (m_frgbaColor1 - frgbaDistance).ClampRGB(); + m_afDecodedAlphas[uiPixel] = 1.0f; + break; + + case 2: + if (m_boolOpaque == false) + { + m_afrgbaDecodedColors[uiPixel] = ColorFloatRGBA(); + m_afDecodedAlphas[uiPixel] = 0.0f; + } + else + { + m_afrgbaDecodedColors[uiPixel] = (m_frgbaColor2 + frgbaDistance).ClampRGB(); + m_afDecodedAlphas[uiPixel] = 1.0f; + } + break; + + case 3: + m_afrgbaDecodedColors[uiPixel] = (m_frgbaColor2 - frgbaDistance).ClampRGB(); + m_afDecodedAlphas[uiPixel] = 1.0f; + break; + } + + } + + } + + + // ---------------------------------------------------------------------------------------------------- + // perform a single encoding iteration + // replace the encoding if a better encoding was found + // subsequent iterations generally take longer for each iteration + // set m_boolDone if encoding is perfect or encoding is finished based on a_fEffort + // + // RGB8A1 can't use individual mode + // RGB8A1 with transparent pixels can't use planar mode + // + void Block4x4Encoding_RGB8A1::PerformIteration(float a_fEffort) + { + assert(!m_boolOpaque); + assert(!m_boolTransparent); + assert(!m_boolDone); + + switch (m_uiEncodingIterations) 
+ { + case 0: + PerformFirstIteration(); + break; + + case 1: + TryDifferential(m_boolMostLikelyFlip, 1, 0, 0); + break; + + case 2: + TryDifferential(!m_boolMostLikelyFlip, 1, 0, 0); + if (a_fEffort <= 39.5f) + { + m_boolDone = true; + } + break; + + case 3: + Block4x4Encoding_RGB8::CalculateBaseColorsForTAndH(); + TryT(1); + TryH(1); + if (a_fEffort <= 49.5f) + { + m_boolDone = true; + } + break; + + case 4: + TryDegenerates1(); + if (a_fEffort <= 59.5f) + { + m_boolDone = true; + } + break; + + case 5: + TryDegenerates2(); + if (a_fEffort <= 69.5f) + { + m_boolDone = true; + } + break; + + case 6: + TryDegenerates3(); + if (a_fEffort <= 79.5f) + { + m_boolDone = true; + } + break; + + case 7: + TryDegenerates4(); + m_boolDone = true; + break; + + default: + assert(0); + break; + } + + m_uiEncodingIterations++; + + SetDoneIfPerfect(); + + } + + // ---------------------------------------------------------------------------------------------------- + // find best initial encoding to ensure block has a valid encoding + // + void Block4x4Encoding_RGB8A1::PerformFirstIteration(void) + { + Block4x4Encoding_ETC1::CalculateMostLikelyFlip(); + + m_fError = FLT_MAX; + + TryDifferential(m_boolMostLikelyFlip, 0, 0, 0); + SetDoneIfPerfect(); + if (m_boolDone) + { + return; + } + TryDifferential(!m_boolMostLikelyFlip, 0, 0, 0); + SetDoneIfPerfect(); + + } + + // ---------------------------------------------------------------------------------------------------- + // mostly copied from ETC1 + // differences: + // Block4x4Encoding_RGB8A1 encodingTry = *this; + // + void Block4x4Encoding_RGB8A1::TryDifferential(bool a_boolFlip, unsigned int a_uiRadius, + int a_iGrayOffset1, int a_iGrayOffset2) + { + + ColorFloatRGBA frgbaColor1; + ColorFloatRGBA frgbaColor2; + + const unsigned int *pauiPixelMapping1; + const unsigned int *pauiPixelMapping2; + + if (a_boolFlip) + { + frgbaColor1 = m_frgbaSourceAverageTop; + frgbaColor2 = m_frgbaSourceAverageBottom; + + pauiPixelMapping1 = s_auiTopPixelMapping; + pauiPixelMapping2 = s_auiBottomPixelMapping; + } + else + { + frgbaColor1 = m_frgbaSourceAverageLeft; + frgbaColor2 = m_frgbaSourceAverageRight; + + pauiPixelMapping1 = s_auiLeftPixelMapping; + pauiPixelMapping2 = s_auiRightPixelMapping; + } + + DifferentialTrys trys(frgbaColor1, frgbaColor2, pauiPixelMapping1, pauiPixelMapping2, + a_uiRadius, a_iGrayOffset1, a_iGrayOffset2); + + Block4x4Encoding_RGB8A1 encodingTry = *this; + encodingTry.m_boolFlip = a_boolFlip; + + encodingTry.TryDifferentialHalf(&trys.m_half1); + encodingTry.TryDifferentialHalf(&trys.m_half2); + + // find best halves that are within differential range + DifferentialTrys::Try *ptryBest1 = nullptr; + DifferentialTrys::Try *ptryBest2 = nullptr; + encodingTry.m_fError = FLT_MAX; + + // see if the best of each half are in differential range + int iDRed = trys.m_half2.m_ptryBest->m_iRed - trys.m_half1.m_ptryBest->m_iRed; + int iDGreen = trys.m_half2.m_ptryBest->m_iGreen - trys.m_half1.m_ptryBest->m_iGreen; + int iDBlue = trys.m_half2.m_ptryBest->m_iBlue - trys.m_half1.m_ptryBest->m_iBlue; + if (iDRed >= -4 && iDRed <= 3 && iDGreen >= -4 && iDGreen <= 3 && iDBlue >= -4 && iDBlue <= 3) + { + ptryBest1 = trys.m_half1.m_ptryBest; + ptryBest2 = trys.m_half2.m_ptryBest; + encodingTry.m_fError = trys.m_half1.m_ptryBest->m_fError + trys.m_half2.m_ptryBest->m_fError; + } + else + { + // else, find the next best halves that are in differential range + for (DifferentialTrys::Try *ptry1 = &trys.m_half1.m_atry[0]; + ptry1 < 
&trys.m_half1.m_atry[trys.m_half1.m_uiTrys]; + ptry1++) + { + for (DifferentialTrys::Try *ptry2 = &trys.m_half2.m_atry[0]; + ptry2 < &trys.m_half2.m_atry[trys.m_half2.m_uiTrys]; + ptry2++) + { + iDRed = ptry2->m_iRed - ptry1->m_iRed; + bool boolValidRedDelta = iDRed <= 3 && iDRed >= -4; + iDGreen = ptry2->m_iGreen - ptry1->m_iGreen; + bool boolValidGreenDelta = iDGreen <= 3 && iDGreen >= -4; + iDBlue = ptry2->m_iBlue - ptry1->m_iBlue; + bool boolValidBlueDelta = iDBlue <= 3 && iDBlue >= -4; + + if (boolValidRedDelta && boolValidGreenDelta && boolValidBlueDelta) + { + float fError = ptry1->m_fError + ptry2->m_fError; + + if (fError < encodingTry.m_fError) + { + encodingTry.m_fError = fError; + + ptryBest1 = ptry1; + ptryBest2 = ptry2; + } + } + + } + } + assert(encodingTry.m_fError < FLT_MAX); + assert(ptryBest1 != nullptr); + assert(ptryBest2 != nullptr); + } + + if (encodingTry.m_fError < m_fError) + { + m_mode = MODE_ETC1; + m_boolDiff = true; + m_boolFlip = encodingTry.m_boolFlip; + m_frgbaColor1 = ColorFloatRGBA::ConvertFromRGB5((unsigned char)ptryBest1->m_iRed, (unsigned char)ptryBest1->m_iGreen, (unsigned char)ptryBest1->m_iBlue); + m_frgbaColor2 = ColorFloatRGBA::ConvertFromRGB5((unsigned char)ptryBest2->m_iRed, (unsigned char)ptryBest2->m_iGreen, (unsigned char)ptryBest2->m_iBlue); + m_uiCW1 = ptryBest1->m_uiCW; + m_uiCW2 = ptryBest2->m_uiCW; + + m_fError = 0.0f; + for (unsigned int uiPixelOrder = 0; uiPixelOrder < PIXELS / 2; uiPixelOrder++) + { + unsigned int uiPixel1 = pauiPixelMapping1[uiPixelOrder]; + unsigned int uiPixel2 = pauiPixelMapping2[uiPixelOrder]; + + unsigned int uiSelector1 = ptryBest1->m_auiSelectors[uiPixelOrder]; + unsigned int uiSelector2 = ptryBest2->m_auiSelectors[uiPixelOrder]; + + m_auiSelectors[uiPixel1] = uiSelector1; + m_auiSelectors[uiPixel2] = ptryBest2->m_auiSelectors[uiPixelOrder]; + + if (uiSelector1 == TRANSPARENT_SELECTOR) + { + m_afrgbaDecodedColors[uiPixel1] = ColorFloatRGBA(); + m_afDecodedAlphas[uiPixel1] = 0.0f; + } + else + { + float fDeltaRGB1 = s_aafCwOpaqueUnsetTable[m_uiCW1][uiSelector1]; + m_afrgbaDecodedColors[uiPixel1] = (m_frgbaColor1 + fDeltaRGB1).ClampRGB(); + m_afDecodedAlphas[uiPixel1] = 1.0f; + } + + if (uiSelector2 == TRANSPARENT_SELECTOR) + { + m_afrgbaDecodedColors[uiPixel2] = ColorFloatRGBA(); + m_afDecodedAlphas[uiPixel2] = 0.0f; + } + else + { + float fDeltaRGB2 = s_aafCwOpaqueUnsetTable[m_uiCW2][uiSelector2]; + m_afrgbaDecodedColors[uiPixel2] = (m_frgbaColor2 + fDeltaRGB2).ClampRGB(); + m_afDecodedAlphas[uiPixel2] = 1.0f; + } + + float fDeltaA1 = m_afDecodedAlphas[uiPixel1] - m_pafrgbaSource[uiPixel1].fA; + m_fError += fDeltaA1 * fDeltaA1; + float fDeltaA2 = m_afDecodedAlphas[uiPixel2] - m_pafrgbaSource[uiPixel2].fA; + m_fError += fDeltaA2 * fDeltaA2; + } + + m_fError1 = ptryBest1->m_fError; + m_fError2 = ptryBest2->m_fError; + m_boolSeverelyBentDifferentialColors = trys.m_boolSeverelyBentColors; + m_fError = m_fError1 + m_fError2; + + // sanity check + { + int iRed1 = m_frgbaColor1.IntRed(31.0f); + int iGreen1 = m_frgbaColor1.IntGreen(31.0f); + int iBlue1 = m_frgbaColor1.IntBlue(31.0f); + + int iRed2 = m_frgbaColor2.IntRed(31.0f); + int iGreen2 = m_frgbaColor2.IntGreen(31.0f); + int iBlue2 = m_frgbaColor2.IntBlue(31.0f); + + iDRed = iRed2 - iRed1; + iDGreen = iGreen2 - iGreen1; + iDBlue = iBlue2 - iBlue1; + + assert(iDRed >= -4 && iDRed < 4); + assert(iDGreen >= -4 && iDGreen < 4); + assert(iDBlue >= -4 && iDBlue < 4); + } + } + + } + + // 
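----------------------------------------------------------------------------------------------------
// Illustrative note (a sketch, not part of the upstream etc2comp sources): the repeated
// "delta >= -4 && delta <= 3" tests in TryDifferential() above reflect the signed 3-bit
// dred2/dgreen2/dblue2 fields of the differential encoding, e.g. (hypothetical helper):
//
//     inline bool FitsSigned3Bit(int iDelta) { return iDelta >= -4 && iDelta <= 3; }
//
// A pair of half-block base colors is only accepted when all three component deltas pass this
// test; otherwise the loops above search for the best pair of trys that stays in range.
//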
---------------------------------------------------------------------------------------------------- + // mostly copied from ETC1 + // differences: + // uses s_aafCwOpaqueUnsetTable + // color for selector set to 0,0,0,0 + // + void Block4x4Encoding_RGB8A1::TryDifferentialHalf(DifferentialTrys::Half *a_phalf) + { + + a_phalf->m_ptryBest = nullptr; + float fBestTryError = FLT_MAX; + + a_phalf->m_uiTrys = 0; + for (int iRed = a_phalf->m_iRed - (int)a_phalf->m_uiRadius; + iRed <= a_phalf->m_iRed + (int)a_phalf->m_uiRadius; + iRed++) + { + assert(iRed >= 0 && iRed <= 31); + + for (int iGreen = a_phalf->m_iGreen - (int)a_phalf->m_uiRadius; + iGreen <= a_phalf->m_iGreen + (int)a_phalf->m_uiRadius; + iGreen++) + { + assert(iGreen >= 0 && iGreen <= 31); + + for (int iBlue = a_phalf->m_iBlue - (int)a_phalf->m_uiRadius; + iBlue <= a_phalf->m_iBlue + (int)a_phalf->m_uiRadius; + iBlue++) + { + assert(iBlue >= 0 && iBlue <= 31); + + DifferentialTrys::Try *ptry = &a_phalf->m_atry[a_phalf->m_uiTrys]; + assert(ptry < &a_phalf->m_atry[DifferentialTrys::Half::MAX_TRYS]); + + ptry->m_iRed = iRed; + ptry->m_iGreen = iGreen; + ptry->m_iBlue = iBlue; + ptry->m_fError = FLT_MAX; + ColorFloatRGBA frgbaColor = ColorFloatRGBA::ConvertFromRGB5((unsigned char)iRed, (unsigned char)iGreen, (unsigned char)iBlue); + + // try each CW + for (unsigned int uiCW = 0; uiCW < CW_RANGES; uiCW++) + { + unsigned int auiPixelSelectors[PIXELS / 2]; + ColorFloatRGBA afrgbaDecodedColors[PIXELS / 2]; + float afPixelErrors[PIXELS / 2] = { FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, + FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX }; + + // pre-compute decoded pixels for each selector + ColorFloatRGBA afrgbaSelectors[SELECTORS]; + assert(SELECTORS == 4); + afrgbaSelectors[0] = (frgbaColor + s_aafCwOpaqueUnsetTable[uiCW][0]).ClampRGB(); + afrgbaSelectors[1] = (frgbaColor + s_aafCwOpaqueUnsetTable[uiCW][1]).ClampRGB(); + afrgbaSelectors[2] = ColorFloatRGBA(); + afrgbaSelectors[3] = (frgbaColor + s_aafCwOpaqueUnsetTable[uiCW][3]).ClampRGB(); + + for (unsigned int uiPixel = 0; uiPixel < 8; uiPixel++) + { + ColorFloatRGBA *pfrgbaSourcePixel = &m_pafrgbaSource[a_phalf->m_pauiPixelMapping[uiPixel]]; + ColorFloatRGBA frgbaDecodedPixel; + + for (unsigned int uiSelector = 0; uiSelector < SELECTORS; uiSelector++) + { + if (pfrgbaSourcePixel->fA < 0.5f) + { + uiSelector = TRANSPARENT_SELECTOR; + } + else if (uiSelector == TRANSPARENT_SELECTOR) + { + continue; + } + + frgbaDecodedPixel = afrgbaSelectors[uiSelector]; + + float fPixelError; + + fPixelError = CalcPixelError(frgbaDecodedPixel, m_afDecodedAlphas[a_phalf->m_pauiPixelMapping[uiPixel]], + *pfrgbaSourcePixel); + + if (fPixelError < afPixelErrors[uiPixel]) + { + auiPixelSelectors[uiPixel] = uiSelector; + afrgbaDecodedColors[uiPixel] = frgbaDecodedPixel; + afPixelErrors[uiPixel] = fPixelError; + } + + if (uiSelector == TRANSPARENT_SELECTOR) + { + break; + } + } + } + + // add up all pixel errors + float fCWError = 0.0f; + for (unsigned int uiPixel = 0; uiPixel < 8; uiPixel++) + { + fCWError += afPixelErrors[uiPixel]; + } + + // if best CW so far + if (fCWError < ptry->m_fError) + { + ptry->m_uiCW = uiCW; + for (unsigned int uiPixel = 0; uiPixel < 8; uiPixel++) + { + ptry->m_auiSelectors[uiPixel] = auiPixelSelectors[uiPixel]; + } + ptry->m_fError = fCWError; + } + + } + + if (ptry->m_fError < fBestTryError) + { + a_phalf->m_ptryBest = ptry; + fBestTryError = ptry->m_fError; + } + + assert(ptry->m_fError < FLT_MAX); + + a_phalf->m_uiTrys++; + } + } + } + + } + + // 
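----------------------------------------------------------------------------------------------------
// Illustrative note (a sketch, not part of the upstream etc2comp sources): in the punch-through
// trys above, a source pixel with alpha < 0.5 is forced to TRANSPARENT_SELECTOR (index 2), which
// decodes to black with alpha 0, while opaque pixels skip that selector. The rows of
// s_aafCwOpaqueUnsetTable zero the modifiers for selectors 0 and 2, matching the decode side:
//
//     float fDelta = m_boolOpaque ? Block4x4Encoding_ETC1::s_aafCwTable[uiCW][uiSelector]
//                                 : s_aafCwOpaqueUnsetTable[uiCW][uiSelector];
//
// as used in Decode_ETC1() earlier in this file.
//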
---------------------------------------------------------------------------------------------------- + // try encoding in T mode + // save this encoding if it improves the error + // + // since pixels that use base color1 don't use the distance table, color1 and color2 can be twiddled independently + // better encoding can be found if TWIDDLE_RADIUS is set to 2, but it will be much slower + // + void Block4x4Encoding_RGB8A1::TryT(unsigned int a_uiRadius) + { + Block4x4Encoding_RGB8A1 encodingTry = *this; + + // init "try" + { + encodingTry.m_mode = MODE_T; + encodingTry.m_boolDiff = true; + encodingTry.m_boolFlip = false; + encodingTry.m_fError = FLT_MAX; + } + + int iColor1Red = m_frgbaOriginalColor1_TAndH.IntRed(15.0f); + int iColor1Green = m_frgbaOriginalColor1_TAndH.IntGreen(15.0f); + int iColor1Blue = m_frgbaOriginalColor1_TAndH.IntBlue(15.0f); + + int iMinRed1 = iColor1Red - (int)a_uiRadius; + if (iMinRed1 < 0) + { + iMinRed1 = 0; + } + int iMaxRed1 = iColor1Red + (int)a_uiRadius; + if (iMaxRed1 > 15) + { + iMinRed1 = 15; + } + + int iMinGreen1 = iColor1Green - (int)a_uiRadius; + if (iMinGreen1 < 0) + { + iMinGreen1 = 0; + } + int iMaxGreen1 = iColor1Green + (int)a_uiRadius; + if (iMaxGreen1 > 15) + { + iMinGreen1 = 15; + } + + int iMinBlue1 = iColor1Blue - (int)a_uiRadius; + if (iMinBlue1 < 0) + { + iMinBlue1 = 0; + } + int iMaxBlue1 = iColor1Blue + (int)a_uiRadius; + if (iMaxBlue1 > 15) + { + iMinBlue1 = 15; + } + + int iColor2Red = m_frgbaOriginalColor2_TAndH.IntRed(15.0f); + int iColor2Green = m_frgbaOriginalColor2_TAndH.IntGreen(15.0f); + int iColor2Blue = m_frgbaOriginalColor2_TAndH.IntBlue(15.0f); + + int iMinRed2 = iColor2Red - (int)a_uiRadius; + if (iMinRed2 < 0) + { + iMinRed2 = 0; + } + int iMaxRed2 = iColor2Red + (int)a_uiRadius; + if (iMaxRed2 > 15) + { + iMinRed2 = 15; + } + + int iMinGreen2 = iColor2Green - (int)a_uiRadius; + if (iMinGreen2 < 0) + { + iMinGreen2 = 0; + } + int iMaxGreen2 = iColor2Green + (int)a_uiRadius; + if (iMaxGreen2 > 15) + { + iMinGreen2 = 15; + } + + int iMinBlue2 = iColor2Blue - (int)a_uiRadius; + if (iMinBlue2 < 0) + { + iMinBlue2 = 0; + } + int iMaxBlue2 = iColor2Blue + (int)a_uiRadius; + if (iMaxBlue2 > 15) + { + iMinBlue2 = 15; + } + + for (unsigned int uiDistance = 0; uiDistance < TH_DISTANCES; uiDistance++) + { + encodingTry.m_uiCW1 = uiDistance; + + // twiddle m_frgbaOriginalColor2_TAndH + // twiddle color2 first, since it affects 3 selectors, while color1 only affects one selector + // + for (int iRed2 = iMinRed2; iRed2 <= iMaxRed2; iRed2++) + { + for (int iGreen2 = iMinGreen2; iGreen2 <= iMaxGreen2; iGreen2++) + { + for (int iBlue2 = iMinBlue2; iBlue2 <= iMaxBlue2; iBlue2++) + { + for (unsigned int uiBaseColorSwaps = 0; uiBaseColorSwaps < 2; uiBaseColorSwaps++) + { + if (uiBaseColorSwaps == 0) + { + encodingTry.m_frgbaColor1 = m_frgbaOriginalColor1_TAndH; + encodingTry.m_frgbaColor2 = ColorFloatRGBA::ConvertFromRGB4((unsigned char)iRed2, (unsigned char)iGreen2, (unsigned char)iBlue2); + } + else + { + encodingTry.m_frgbaColor1 = ColorFloatRGBA::ConvertFromRGB4((unsigned char)iRed2, (unsigned char)iGreen2, (unsigned char)iBlue2); + encodingTry.m_frgbaColor2 = m_frgbaOriginalColor1_TAndH; + } + + encodingTry.TryT_BestSelectorCombination(); + + if (encodingTry.m_fError < m_fError) + { + m_mode = encodingTry.m_mode; + m_boolDiff = encodingTry.m_boolDiff; + m_boolFlip = encodingTry.m_boolFlip; + + m_frgbaColor1 = encodingTry.m_frgbaColor1; + m_frgbaColor2 = encodingTry.m_frgbaColor2; + m_uiCW1 = encodingTry.m_uiCW1; + + for (unsigned int 
uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_auiSelectors[uiPixel] = encodingTry.m_auiSelectors[uiPixel]; + m_afrgbaDecodedColors[uiPixel] = encodingTry.m_afrgbaDecodedColors[uiPixel]; + } + + m_fError = encodingTry.m_fError; + } + } + } + } + } + + // twiddle m_frgbaOriginalColor1_TAndH + for (int iRed1 = iMinRed1; iRed1 <= iMaxRed1; iRed1++) + { + for (int iGreen1 = iMinGreen1; iGreen1 <= iMaxGreen1; iGreen1++) + { + for (int iBlue1 = iMinBlue1; iBlue1 <= iMaxBlue1; iBlue1++) + { + for (unsigned int uiBaseColorSwaps = 0; uiBaseColorSwaps < 2; uiBaseColorSwaps++) + { + if (uiBaseColorSwaps == 0) + { + encodingTry.m_frgbaColor1 = ColorFloatRGBA::ConvertFromRGB4((unsigned char)iRed1, (unsigned char)iGreen1, (unsigned char)iBlue1); + encodingTry.m_frgbaColor2 = m_frgbaOriginalColor2_TAndH; + } + else + { + encodingTry.m_frgbaColor1 = m_frgbaOriginalColor2_TAndH; + encodingTry.m_frgbaColor2 = ColorFloatRGBA::ConvertFromRGB4((unsigned char)iRed1, (unsigned char)iGreen1, (unsigned char)iBlue1); + } + + encodingTry.TryT_BestSelectorCombination(); + + if (encodingTry.m_fError < m_fError) + { + m_mode = encodingTry.m_mode; + m_boolDiff = encodingTry.m_boolDiff; + m_boolFlip = encodingTry.m_boolFlip; + + m_frgbaColor1 = encodingTry.m_frgbaColor1; + m_frgbaColor2 = encodingTry.m_frgbaColor2; + m_uiCW1 = encodingTry.m_uiCW1; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_auiSelectors[uiPixel] = encodingTry.m_auiSelectors[uiPixel]; + m_afrgbaDecodedColors[uiPixel] = encodingTry.m_afrgbaDecodedColors[uiPixel]; + } + + m_fError = encodingTry.m_fError; + } + } + } + } + } + + } + + } + + // ---------------------------------------------------------------------------------------------------- + // find best selector combination for TryT + // called on an encodingTry + // + void Block4x4Encoding_RGB8A1::TryT_BestSelectorCombination(void) + { + + float fDistance = s_afTHDistanceTable[m_uiCW1]; + + unsigned int auiBestPixelSelectors[PIXELS]; + float afBestPixelErrors[PIXELS] = { FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, + FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX }; + ColorFloatRGBA afrgbaBestDecodedPixels[PIXELS]; + ColorFloatRGBA afrgbaDecodedPixel[SELECTORS]; + + assert(SELECTORS == 4); + afrgbaDecodedPixel[0] = m_frgbaColor1; + afrgbaDecodedPixel[1] = (m_frgbaColor2 + fDistance).ClampRGB(); + afrgbaDecodedPixel[2] = ColorFloatRGBA(); + afrgbaDecodedPixel[3] = (m_frgbaColor2 - fDistance).ClampRGB(); + + // try each selector + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + unsigned int uiMinSelector = 0; + unsigned int uiMaxSelector = SELECTORS - 1; + + if (m_pafrgbaSource[uiPixel].fA < 0.5f) + { + uiMinSelector = 2; + uiMaxSelector = 2; + } + + for (unsigned int uiSelector = uiMinSelector; uiSelector <= uiMaxSelector; uiSelector++) + { + float fPixelError = CalcPixelError(afrgbaDecodedPixel[uiSelector], m_afDecodedAlphas[uiPixel], + m_pafrgbaSource[uiPixel]); + + if (fPixelError < afBestPixelErrors[uiPixel]) + { + afBestPixelErrors[uiPixel] = fPixelError; + auiBestPixelSelectors[uiPixel] = uiSelector; + afrgbaBestDecodedPixels[uiPixel] = afrgbaDecodedPixel[uiSelector]; + } + } + } + + + // add up all of the pixel errors + float fBlockError = 0.0f; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + fBlockError += afBestPixelErrors[uiPixel]; + } + + if (fBlockError < m_fError) + { + m_fError = fBlockError; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + 
m_auiSelectors[uiPixel] = auiBestPixelSelectors[uiPixel]; + m_afrgbaDecodedColors[uiPixel] = afrgbaBestDecodedPixels[uiPixel]; + } + } + + } + + // ---------------------------------------------------------------------------------------------------- + // try encoding in H mode + // save this encoding if it improves the error + // + // since all pixels use the distance table, color1 and color2 can NOT be twiddled independently + // TWIDDLE_RADIUS of 2 is WAY too slow + // + void Block4x4Encoding_RGB8A1::TryH(unsigned int a_uiRadius) + { + Block4x4Encoding_RGB8A1 encodingTry = *this; + + // init "try" + { + encodingTry.m_mode = MODE_H; + encodingTry.m_boolDiff = true; + encodingTry.m_boolFlip = false; + encodingTry.m_fError = FLT_MAX; + } + + int iColor1Red = m_frgbaOriginalColor1_TAndH.IntRed(15.0f); + int iColor1Green = m_frgbaOriginalColor1_TAndH.IntGreen(15.0f); + int iColor1Blue = m_frgbaOriginalColor1_TAndH.IntBlue(15.0f); + + int iMinRed1 = iColor1Red - (int)a_uiRadius; + if (iMinRed1 < 0) + { + iMinRed1 = 0; + } + int iMaxRed1 = iColor1Red + (int)a_uiRadius; + if (iMaxRed1 > 15) + { + iMinRed1 = 15; + } + + int iMinGreen1 = iColor1Green - (int)a_uiRadius; + if (iMinGreen1 < 0) + { + iMinGreen1 = 0; + } + int iMaxGreen1 = iColor1Green + (int)a_uiRadius; + if (iMaxGreen1 > 15) + { + iMinGreen1 = 15; + } + + int iMinBlue1 = iColor1Blue - (int)a_uiRadius; + if (iMinBlue1 < 0) + { + iMinBlue1 = 0; + } + int iMaxBlue1 = iColor1Blue + (int)a_uiRadius; + if (iMaxBlue1 > 15) + { + iMinBlue1 = 15; + } + + int iColor2Red = m_frgbaOriginalColor2_TAndH.IntRed(15.0f); + int iColor2Green = m_frgbaOriginalColor2_TAndH.IntGreen(15.0f); + int iColor2Blue = m_frgbaOriginalColor2_TAndH.IntBlue(15.0f); + + int iMinRed2 = iColor2Red - (int)a_uiRadius; + if (iMinRed2 < 0) + { + iMinRed2 = 0; + } + int iMaxRed2 = iColor2Red + (int)a_uiRadius; + if (iMaxRed2 > 15) + { + iMinRed2 = 15; + } + + int iMinGreen2 = iColor2Green - (int)a_uiRadius; + if (iMinGreen2 < 0) + { + iMinGreen2 = 0; + } + int iMaxGreen2 = iColor2Green + (int)a_uiRadius; + if (iMaxGreen2 > 15) + { + iMinGreen2 = 15; + } + + int iMinBlue2 = iColor2Blue - (int)a_uiRadius; + if (iMinBlue2 < 0) + { + iMinBlue2 = 0; + } + int iMaxBlue2 = iColor2Blue + (int)a_uiRadius; + if (iMaxBlue2 > 15) + { + iMinBlue2 = 15; + } + + for (unsigned int uiDistance = 0; uiDistance < TH_DISTANCES; uiDistance++) + { + encodingTry.m_uiCW1 = uiDistance; + + // twiddle m_frgbaOriginalColor1_TAndH + for (int iRed1 = iMinRed1; iRed1 <= iMaxRed1; iRed1++) + { + for (int iGreen1 = iMinGreen1; iGreen1 <= iMaxGreen1; iGreen1++) + { + for (int iBlue1 = iMinBlue1; iBlue1 <= iMaxBlue1; iBlue1++) + { + encodingTry.m_frgbaColor1 = ColorFloatRGBA::ConvertFromRGB4((unsigned char)iRed1, (unsigned char)iGreen1, (unsigned char)iBlue1); + encodingTry.m_frgbaColor2 = m_frgbaOriginalColor2_TAndH; + + // if color1 == color2, H encoding issues can pop up, so abort + if (iRed1 == iColor2Red && iGreen1 == iColor2Green && iBlue1 == iColor2Blue) + { + continue; + } + + encodingTry.TryH_BestSelectorCombination(); + + if (encodingTry.m_fError < m_fError) + { + m_mode = encodingTry.m_mode; + m_boolDiff = encodingTry.m_boolDiff; + m_boolFlip = encodingTry.m_boolFlip; + + m_frgbaColor1 = encodingTry.m_frgbaColor1; + m_frgbaColor2 = encodingTry.m_frgbaColor2; + m_uiCW1 = encodingTry.m_uiCW1; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_auiSelectors[uiPixel] = encodingTry.m_auiSelectors[uiPixel]; + m_afrgbaDecodedColors[uiPixel] = encodingTry.m_afrgbaDecodedColors[uiPixel]; 
+ } + + m_fError = encodingTry.m_fError; + } + } + } + } + + // twiddle m_frgbaOriginalColor2_TAndH + for (int iRed2 = iMinRed2; iRed2 <= iMaxRed2; iRed2++) + { + for (int iGreen2 = iMinGreen2; iGreen2 <= iMaxGreen2; iGreen2++) + { + for (int iBlue2 = iMinBlue2; iBlue2 <= iMaxBlue2; iBlue2++) + { + encodingTry.m_frgbaColor1 = m_frgbaOriginalColor1_TAndH; + encodingTry.m_frgbaColor2 = ColorFloatRGBA::ConvertFromRGB4((unsigned char)iRed2, (unsigned char)iGreen2, (unsigned char)iBlue2); + + // if color1 == color2, H encoding issues can pop up, so abort + if (iRed2 == iColor1Red && iGreen2 == iColor1Green && iBlue2 == iColor1Blue) + { + continue; + } + + encodingTry.TryH_BestSelectorCombination(); + + if (encodingTry.m_fError < m_fError) + { + m_mode = encodingTry.m_mode; + m_boolDiff = encodingTry.m_boolDiff; + m_boolFlip = encodingTry.m_boolFlip; + + m_frgbaColor1 = encodingTry.m_frgbaColor1; + m_frgbaColor2 = encodingTry.m_frgbaColor2; + m_uiCW1 = encodingTry.m_uiCW1; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_auiSelectors[uiPixel] = encodingTry.m_auiSelectors[uiPixel]; + m_afrgbaDecodedColors[uiPixel] = encodingTry.m_afrgbaDecodedColors[uiPixel]; + } + + m_fError = encodingTry.m_fError; + } + } + } + } + + } + + } + + // ---------------------------------------------------------------------------------------------------- + // find best selector combination for TryH + // called on an encodingTry + // + void Block4x4Encoding_RGB8A1::TryH_BestSelectorCombination(void) + { + + // abort if colors and CW will pose an encoding problem + { + unsigned int uiRed1 = (unsigned int)m_frgbaColor1.IntRed(255.0f); + unsigned int uiGreen1 = (unsigned int)m_frgbaColor1.IntGreen(255.0f); + unsigned int uiBlue1 = (unsigned int)m_frgbaColor1.IntBlue(255.0f); + unsigned int uiColorValue1 = (uiRed1 << 16) + (uiGreen1 << 8) + uiBlue1; + + unsigned int uiRed2 = (unsigned int)m_frgbaColor2.IntRed(255.0f); + unsigned int uiGreen2 = (unsigned int)m_frgbaColor2.IntGreen(255.0f); + unsigned int uiBlue2 = (unsigned int)m_frgbaColor2.IntBlue(255.0f); + unsigned int uiColorValue2 = (uiRed2 << 16) + (uiGreen2 << 8) + uiBlue2; + + unsigned int uiCWLsb = m_uiCW1 & 1; + + if ((uiColorValue1 >= (uiColorValue2 & uiCWLsb)) == 0 || + (uiColorValue1 < (uiColorValue2 & uiCWLsb)) == 1) + { + return; + } + } + + float fDistance = s_afTHDistanceTable[m_uiCW1]; + + unsigned int auiBestPixelSelectors[PIXELS]; + float afBestPixelErrors[PIXELS] = { FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, + FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX }; + ColorFloatRGBA afrgbaBestDecodedPixels[PIXELS]; + ColorFloatRGBA afrgbaDecodedPixel[SELECTORS]; + + assert(SELECTORS == 4); + afrgbaDecodedPixel[0] = (m_frgbaColor1 + fDistance).ClampRGB(); + afrgbaDecodedPixel[1] = (m_frgbaColor1 - fDistance).ClampRGB(); + afrgbaDecodedPixel[2] = ColorFloatRGBA();; + afrgbaDecodedPixel[3] = (m_frgbaColor2 - fDistance).ClampRGB(); + + + // try each selector + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + unsigned int uiMinSelector = 0; + unsigned int uiMaxSelector = SELECTORS - 1; + + if (m_pafrgbaSource[uiPixel].fA < 0.5f) + { + uiMinSelector = 2; + uiMaxSelector = 2; + } + + for (unsigned int uiSelector = uiMinSelector; uiSelector <= uiMaxSelector; uiSelector++) + { + float fPixelError = CalcPixelError(afrgbaDecodedPixel[uiSelector], m_afDecodedAlphas[uiPixel], + m_pafrgbaSource[uiPixel]); + + if (fPixelError < afBestPixelErrors[uiPixel]) + { + 
afBestPixelErrors[uiPixel] = fPixelError; + auiBestPixelSelectors[uiPixel] = uiSelector; + afrgbaBestDecodedPixels[uiPixel] = afrgbaDecodedPixel[uiSelector]; + } + } + + + // add up all of the pixel errors + float fBlockError = 0.0f; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + fBlockError += afBestPixelErrors[uiPixel]; + } + + if (fBlockError < m_fError) + { + m_fError = fBlockError; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_auiSelectors[uiPixel] = auiBestPixelSelectors[uiPixel]; + m_afrgbaDecodedColors[uiPixel] = afrgbaBestDecodedPixels[uiPixel]; + } + } + + } + + // ---------------------------------------------------------------------------------------------------- + // try version 1 of the degenerate search + // degenerate encodings use basecolor movement and a subset of the selectors to find useful encodings + // each subsequent version of the degenerate search uses more basecolor movement and is less likely to + // be successful + // + void Block4x4Encoding_RGB8A1::TryDegenerates1(void) + { + + TryDifferential(m_boolMostLikelyFlip, 1, -2, 0); + TryDifferential(m_boolMostLikelyFlip, 1, 2, 0); + TryDifferential(m_boolMostLikelyFlip, 1, 0, 2); + TryDifferential(m_boolMostLikelyFlip, 1, 0, -2); + + } + + // ---------------------------------------------------------------------------------------------------- + // try version 2 of the degenerate search + // degenerate encodings use basecolor movement and a subset of the selectors to find useful encodings + // each subsequent version of the degenerate search uses more basecolor movement and is less likely to + // be successful + // + void Block4x4Encoding_RGB8A1::TryDegenerates2(void) + { + + TryDifferential(!m_boolMostLikelyFlip, 1, -2, 0); + TryDifferential(!m_boolMostLikelyFlip, 1, 2, 0); + TryDifferential(!m_boolMostLikelyFlip, 1, 0, 2); + TryDifferential(!m_boolMostLikelyFlip, 1, 0, -2); + + } + + // ---------------------------------------------------------------------------------------------------- + // try version 3 of the degenerate search + // degenerate encodings use basecolor movement and a subset of the selectors to find useful encodings + // each subsequent version of the degenerate search uses more basecolor movement and is less likely to + // be successful + // + void Block4x4Encoding_RGB8A1::TryDegenerates3(void) + { + + TryDifferential(m_boolMostLikelyFlip, 1, -2, -2); + TryDifferential(m_boolMostLikelyFlip, 1, -2, 2); + TryDifferential(m_boolMostLikelyFlip, 1, 2, -2); + TryDifferential(m_boolMostLikelyFlip, 1, 2, 2); + + } + + // ---------------------------------------------------------------------------------------------------- + // try version 4 of the degenerate search + // degenerate encodings use basecolor movement and a subset of the selectors to find useful encodings + // each subsequent version of the degenerate search uses more basecolor movement and is less likely to + // be successful + // + void Block4x4Encoding_RGB8A1::TryDegenerates4(void) + { + + TryDifferential(m_boolMostLikelyFlip, 1, -4, 0); + TryDifferential(m_boolMostLikelyFlip, 1, 4, 0); + TryDifferential(m_boolMostLikelyFlip, 1, 0, 4); + TryDifferential(m_boolMostLikelyFlip, 1, 0, -4); + + } + + // ---------------------------------------------------------------------------------------------------- + // set the encoding bits based on encoding state + // + void Block4x4Encoding_RGB8A1::SetEncodingBits(void) + { + switch (m_mode) + { + case MODE_ETC1: + SetEncodingBits_ETC1(); + break; + + case
MODE_T: + SetEncodingBits_T(); + break; + + case MODE_H: + SetEncodingBits_H(); + break; + + case MODE_PLANAR: + Block4x4Encoding_RGB8::SetEncodingBits_Planar(); + break; + + default: + assert(false); + } + } + + // ---------------------------------------------------------------------------------------------------- + // set the encoding bits based on encoding state if ETC1 mode + // + void Block4x4Encoding_RGB8A1::SetEncodingBits_ETC1(void) + { + + // there is no individual mode in RGB8A1 + assert(m_boolDiff); + + int iRed1 = m_frgbaColor1.IntRed(31.0f); + int iGreen1 = m_frgbaColor1.IntGreen(31.0f); + int iBlue1 = m_frgbaColor1.IntBlue(31.0f); + + int iRed2 = m_frgbaColor2.IntRed(31.0f); + int iGreen2 = m_frgbaColor2.IntGreen(31.0f); + int iBlue2 = m_frgbaColor2.IntBlue(31.0f); + + int iDRed2 = iRed2 - iRed1; + int iDGreen2 = iGreen2 - iGreen1; + int iDBlue2 = iBlue2 - iBlue1; + + assert(iDRed2 >= -4 && iDRed2 < 4); + assert(iDGreen2 >= -4 && iDGreen2 < 4); + assert(iDBlue2 >= -4 && iDBlue2 < 4); + + m_pencodingbitsRGB8->differential.red1 = iRed1; + m_pencodingbitsRGB8->differential.green1 = iGreen1; + m_pencodingbitsRGB8->differential.blue1 = iBlue1; + + m_pencodingbitsRGB8->differential.dred2 = iDRed2; + m_pencodingbitsRGB8->differential.dgreen2 = iDGreen2; + m_pencodingbitsRGB8->differential.dblue2 = iDBlue2; + + m_pencodingbitsRGB8->individual.cw1 = m_uiCW1; + m_pencodingbitsRGB8->individual.cw2 = m_uiCW2; + + SetEncodingBits_Selectors(); + + // in RGB8A1 encoding bits, opaque replaces differential + m_pencodingbitsRGB8->differential.diff = !m_boolPunchThroughPixels; + + m_pencodingbitsRGB8->individual.flip = m_boolFlip; + + } + + // ---------------------------------------------------------------------------------------------------- + // set the encoding bits based on encoding state if T mode + // + void Block4x4Encoding_RGB8A1::SetEncodingBits_T(void) + { + static const bool SANITY_CHECK = true; + + assert(m_mode == MODE_T); + assert(m_boolDiff == true); + + unsigned int uiRed1 = (unsigned int)m_frgbaColor1.IntRed(15.0f); + unsigned int uiGreen1 = (unsigned int)m_frgbaColor1.IntGreen(15.0f); + unsigned int uiBlue1 = (unsigned int)m_frgbaColor1.IntBlue(15.0f); + + unsigned int uiRed2 = (unsigned int)m_frgbaColor2.IntRed(15.0f); + unsigned int uiGreen2 = (unsigned int)m_frgbaColor2.IntGreen(15.0f); + unsigned int uiBlue2 = (unsigned int)m_frgbaColor2.IntBlue(15.0f); + + m_pencodingbitsRGB8->t.red1a = uiRed1 >> 2; + m_pencodingbitsRGB8->t.red1b = uiRed1; + m_pencodingbitsRGB8->t.green1 = uiGreen1; + m_pencodingbitsRGB8->t.blue1 = uiBlue1; + + m_pencodingbitsRGB8->t.red2 = uiRed2; + m_pencodingbitsRGB8->t.green2 = uiGreen2; + m_pencodingbitsRGB8->t.blue2 = uiBlue2; + + m_pencodingbitsRGB8->t.da = m_uiCW1 >> 1; + m_pencodingbitsRGB8->t.db = m_uiCW1; + + // in RGB8A1 encoding bits, opaque replaces differential + m_pencodingbitsRGB8->differential.diff = !m_boolPunchThroughPixels; + + Block4x4Encoding_ETC1::SetEncodingBits_Selectors(); + + // create an invalid R differential to trigger T mode + m_pencodingbitsRGB8->t.detect1 = 0; + m_pencodingbitsRGB8->t.detect2 = 0; + int iRed2 = (int)m_pencodingbitsRGB8->differential.red1 + (int)m_pencodingbitsRGB8->differential.dred2; + if (iRed2 >= 4) + { + m_pencodingbitsRGB8->t.detect1 = 7; + m_pencodingbitsRGB8->t.detect2 = 0; + } + else + { + m_pencodingbitsRGB8->t.detect1 = 0; + m_pencodingbitsRGB8->t.detect2 = 1; + } + + if (SANITY_CHECK) + { + iRed2 = (int)m_pencodingbitsRGB8->differential.red1 + (int)m_pencodingbitsRGB8->differential.dred2; + + 
// make sure red overflows + assert(iRed2 < 0 || iRed2 > 31); + } + + } + + // ---------------------------------------------------------------------------------------------------- + // set the encoding bits based on encoding state if H mode + // + // colors and selectors may need to swap in order to generate lsb of distance index + // + void Block4x4Encoding_RGB8A1::SetEncodingBits_H(void) + { + static const bool SANITY_CHECK = true; + + assert(m_mode == MODE_H); + assert(m_boolDiff == true); + + unsigned int uiRed1 = (unsigned int)m_frgbaColor1.IntRed(15.0f); + unsigned int uiGreen1 = (unsigned int)m_frgbaColor1.IntGreen(15.0f); + unsigned int uiBlue1 = (unsigned int)m_frgbaColor1.IntBlue(15.0f); + + unsigned int uiRed2 = (unsigned int)m_frgbaColor2.IntRed(15.0f); + unsigned int uiGreen2 = (unsigned int)m_frgbaColor2.IntGreen(15.0f); + unsigned int uiBlue2 = (unsigned int)m_frgbaColor2.IntBlue(15.0f); + + unsigned int uiColor1 = (uiRed1 << 16) + (uiGreen1 << 8) + uiBlue1; + unsigned int uiColor2 = (uiRed2 << 16) + (uiGreen2 << 8) + uiBlue2; + + bool boolOddDistance = m_uiCW1 & 1; + bool boolSwapColors = (uiColor1 < uiColor2) ^ !boolOddDistance; + + if (boolSwapColors) + { + m_pencodingbitsRGB8->h.red1 = uiRed2; + m_pencodingbitsRGB8->h.green1a = uiGreen2 >> 1; + m_pencodingbitsRGB8->h.green1b = uiGreen2; + m_pencodingbitsRGB8->h.blue1a = uiBlue2 >> 3; + m_pencodingbitsRGB8->h.blue1b = uiBlue2 >> 1; + m_pencodingbitsRGB8->h.blue1c = uiBlue2; + + m_pencodingbitsRGB8->h.red2 = uiRed1; + m_pencodingbitsRGB8->h.green2a = uiGreen1 >> 1; + m_pencodingbitsRGB8->h.green2b = uiGreen1; + m_pencodingbitsRGB8->h.blue2 = uiBlue1; + + m_pencodingbitsRGB8->h.da = m_uiCW1 >> 2; + m_pencodingbitsRGB8->h.db = m_uiCW1 >> 1; + } + else + { + m_pencodingbitsRGB8->h.red1 = uiRed1; + m_pencodingbitsRGB8->h.green1a = uiGreen1 >> 1; + m_pencodingbitsRGB8->h.green1b = uiGreen1; + m_pencodingbitsRGB8->h.blue1a = uiBlue1 >> 3; + m_pencodingbitsRGB8->h.blue1b = uiBlue1 >> 1; + m_pencodingbitsRGB8->h.blue1c = uiBlue1; + + m_pencodingbitsRGB8->h.red2 = uiRed2; + m_pencodingbitsRGB8->h.green2a = uiGreen2 >> 1; + m_pencodingbitsRGB8->h.green2b = uiGreen2; + m_pencodingbitsRGB8->h.blue2 = uiBlue2; + + m_pencodingbitsRGB8->h.da = m_uiCW1 >> 2; + m_pencodingbitsRGB8->h.db = m_uiCW1 >> 1; + } + + // in RGB8A1 encoding bits, opaque replaces differential + m_pencodingbitsRGB8->differential.diff = !m_boolPunchThroughPixels; + + Block4x4Encoding_ETC1::SetEncodingBits_Selectors(); + + if (boolSwapColors) + { + m_pencodingbitsRGB8->h.selectors ^= 0x0000FFFF; + } + + // create an invalid G differential to trigger H mode + m_pencodingbitsRGB8->h.detect1 = 0; + m_pencodingbitsRGB8->h.detect2 = 0; + m_pencodingbitsRGB8->h.detect3 = 0; + int iRed2 = (int)m_pencodingbitsRGB8->differential.red1 + (int)m_pencodingbitsRGB8->differential.dred2; + int iGreen2 = (int)m_pencodingbitsRGB8->differential.green1 + (int)m_pencodingbitsRGB8->differential.dgreen2; + if (iRed2 < 0 || iRed2 > 31) + { + m_pencodingbitsRGB8->h.detect1 = 1; + } + if (iGreen2 >= 4) + { + m_pencodingbitsRGB8->h.detect2 = 7; + m_pencodingbitsRGB8->h.detect3 = 0; + } + else + { + m_pencodingbitsRGB8->h.detect2 = 0; + m_pencodingbitsRGB8->h.detect3 = 1; + } + + if (SANITY_CHECK) + { + iRed2 = (int)m_pencodingbitsRGB8->differential.red1 + (int)m_pencodingbitsRGB8->differential.dred2; + iGreen2 = (int)m_pencodingbitsRGB8->differential.green1 + (int)m_pencodingbitsRGB8->differential.dgreen2; + + // make sure red doesn't overflow and green does + assert(iRed2 >= 0 && iRed2 <= 31);
+ assert(iGreen2 < 0 || iGreen2 > 31); + } + + } + + // #################################################################################################### + // Block4x4Encoding_RGB8A1_Opaque + // #################################################################################################### + + // ---------------------------------------------------------------------------------------------------- + // perform a single encoding iteration + // replace the encoding if a better encoding was found + // subsequent iterations generally take longer for each iteration + // set m_boolDone if encoding is perfect or encoding is finished based on a_fEffort + // + void Block4x4Encoding_RGB8A1_Opaque::PerformIteration(float a_fEffort) + { + assert(!m_boolPunchThroughPixels); + assert(!m_boolTransparent); + assert(!m_boolDone); + + switch (m_uiEncodingIterations) + { + case 0: + PerformFirstIteration(); + break; + + case 1: + Block4x4Encoding_ETC1::TryDifferential(m_boolMostLikelyFlip, 1, 0, 0); + break; + + case 2: + Block4x4Encoding_ETC1::TryDifferential(!m_boolMostLikelyFlip, 1, 0, 0); + break; + + case 3: + Block4x4Encoding_RGB8::TryPlanar(1); + break; + + case 4: + Block4x4Encoding_RGB8::TryTAndH(1); + if (a_fEffort <= 49.5f) + { + m_boolDone = true; + } + break; + + case 5: + Block4x4Encoding_ETC1::TryDegenerates1(); + if (a_fEffort <= 59.5f) + { + m_boolDone = true; + } + break; + + case 6: + Block4x4Encoding_ETC1::TryDegenerates2(); + if (a_fEffort <= 69.5f) + { + m_boolDone = true; + } + break; + + case 7: + Block4x4Encoding_ETC1::TryDegenerates3(); + if (a_fEffort <= 79.5f) + { + m_boolDone = true; + } + break; + + case 8: + Block4x4Encoding_ETC1::TryDegenerates4(); + m_boolDone = true; + break; + + default: + assert(0); + break; + } + + m_uiEncodingIterations++; + SetDoneIfPerfect(); + } + + // ---------------------------------------------------------------------------------------------------- + // find best initial encoding to ensure block has a valid encoding + // + void Block4x4Encoding_RGB8A1_Opaque::PerformFirstIteration(void) + { + + // set decoded alphas + // calculate alpha error + m_fError = 0.0f; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_afDecodedAlphas[uiPixel] = 1.0f; + + float fDeltaA = 1.0f - m_pafrgbaSource[uiPixel].fA; + m_fError += fDeltaA * fDeltaA; + } + + CalculateMostLikelyFlip(); + + m_fError = FLT_MAX; + + Block4x4Encoding_ETC1::TryDifferential(m_boolMostLikelyFlip, 0, 0, 0); + SetDoneIfPerfect(); + if (m_boolDone) + { + return; + } + Block4x4Encoding_ETC1::TryDifferential(!m_boolMostLikelyFlip, 0, 0, 0); + SetDoneIfPerfect(); + if (m_boolDone) + { + return; + } + Block4x4Encoding_RGB8::TryPlanar(0); + SetDoneIfPerfect(); + if (m_boolDone) + { + return; + } + Block4x4Encoding_RGB8::TryTAndH(0); + SetDoneIfPerfect(); + } + + // #################################################################################################### + // Block4x4Encoding_RGB8A1_Transparent + // #################################################################################################### + + // ---------------------------------------------------------------------------------------------------- + // perform a single encoding iteration + // replace the encoding if a better encoding was found + // subsequent iterations generally take longer for each iteration + // set m_boolDone if encoding is perfect or encoding is finished based on a_fEffort + // + void Block4x4Encoding_RGB8A1_Transparent::PerformIteration(float ) + { + assert(!m_boolOpaque); + 
assert(m_boolTransparent); + assert(!m_boolDone); + assert(m_uiEncodingIterations == 0); + + m_mode = MODE_ETC1; + m_boolDiff = true; + m_boolFlip = false; + + m_uiCW1 = 0; + m_uiCW2 = 0; + + m_frgbaColor1 = ColorFloatRGBA(); + m_frgbaColor2 = ColorFloatRGBA(); + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_auiSelectors[uiPixel] = TRANSPARENT_SELECTOR; + + m_afrgbaDecodedColors[uiPixel] = ColorFloatRGBA(); + m_afDecodedAlphas[uiPixel] = 0.0f; + } + + CalcBlockError(); + + m_boolDone = true; + m_uiEncodingIterations++; + + } + + // ---------------------------------------------------------------------------------------------------- + // +} diff --git a/thirdparty/etc2comp/EtcBlock4x4Encoding_RGB8A1.h b/thirdparty/etc2comp/EtcBlock4x4Encoding_RGB8A1.h new file mode 100644 index 0000000000..ff26e462f8 --- /dev/null +++ b/thirdparty/etc2comp/EtcBlock4x4Encoding_RGB8A1.h @@ -0,0 +1,129 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "EtcBlock4x4Encoding_RGB8.h" +#include "EtcErrorMetric.h" +#include "EtcBlock4x4EncodingBits.h" + +namespace Etc +{ + + // ################################################################################ + // Block4x4Encoding_RGB8A1 + // RGB8A1 if not completely opaque or transparent + // ################################################################################ + + class Block4x4Encoding_RGB8A1 : public Block4x4Encoding_RGB8 + { + public: + + static const unsigned int TRANSPARENT_SELECTOR = 2; + + Block4x4Encoding_RGB8A1(void); + virtual ~Block4x4Encoding_RGB8A1(void); + + virtual void InitFromSource(Block4x4 *a_pblockParent, + ColorFloatRGBA *a_pafrgbaSource, + unsigned char *a_paucEncodingBits, + ErrorMetric a_errormetric); + + virtual void InitFromEncodingBits(Block4x4 *a_pblockParent, + unsigned char *a_paucEncodingBits, + ColorFloatRGBA *a_pafrgbaSource, + ErrorMetric a_errormetric); + + virtual void PerformIteration(float a_fEffort); + + virtual void SetEncodingBits(void); + + void InitFromEncodingBits_ETC1(Block4x4 *a_pblockParent, + unsigned char *a_paucEncodingBits, + ColorFloatRGBA *a_pafrgbaSource, + ErrorMetric a_errormetric); + + void InitFromEncodingBits_T(void); + void InitFromEncodingBits_H(void); + + void PerformFirstIteration(void); + + void Decode_ETC1(void); + void DecodePixels_T(void); + void DecodePixels_H(void); + void SetEncodingBits_ETC1(void); + void SetEncodingBits_T(void); + void SetEncodingBits_H(void); + + protected: + + bool m_boolOpaque; // all source pixels have alpha >= 0.5 + bool m_boolTransparent; // all source pixels have alpha < 0.5 + bool m_boolPunchThroughPixels; // some source pixels have alpha < 0.5 + + static float s_aafCwOpaqueUnsetTable[CW_RANGES][SELECTORS]; + + private: + + void TryDifferential(bool a_boolFlip, unsigned int a_uiRadius, + int a_iGrayOffset1, int a_iGrayOffset2); + void TryDifferentialHalf(DifferentialTrys::Half *a_phalf); + + void TryT(unsigned int a_uiRadius); + void 
TryT_BestSelectorCombination(void); + void TryH(unsigned int a_uiRadius); + void TryH_BestSelectorCombination(void); + + void TryDegenerates1(void); + void TryDegenerates2(void); + void TryDegenerates3(void); + void TryDegenerates4(void); + + }; + + // ################################################################################ + // Block4x4Encoding_RGB8A1_Opaque + // RGB8A1 if all pixels have alpha==1 + // ################################################################################ + + class Block4x4Encoding_RGB8A1_Opaque : public Block4x4Encoding_RGB8A1 + { + public: + + virtual void PerformIteration(float a_fEffort); + + void PerformFirstIteration(void); + + private: + + }; + + // ################################################################################ + // Block4x4Encoding_RGB8A1_Transparent + // RGB8A1 if all pixels have alpha==0 + // ################################################################################ + + class Block4x4Encoding_RGB8A1_Transparent : public Block4x4Encoding_RGB8A1 + { + public: + + virtual void PerformIteration(float a_fEffort); + + private: + + }; + +} // namespace Etc diff --git a/thirdparty/etc2comp/EtcBlock4x4Encoding_RGBA8.cpp b/thirdparty/etc2comp/EtcBlock4x4Encoding_RGBA8.cpp new file mode 100644 index 0000000000..600c7ab405 --- /dev/null +++ b/thirdparty/etc2comp/EtcBlock4x4Encoding_RGBA8.cpp @@ -0,0 +1,474 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* +EtcBlock4x4Encoding_RGBA8.cpp contains: + Block4x4Encoding_RGBA8 + Block4x4Encoding_RGBA8_Opaque + Block4x4Encoding_RGBA8_Transparent + +These encoders are used when targetting file format RGBA8. 
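+In the encoding bits, the A8 (alpha) block precedes the RGB8 block: the alpha half is encoded
+here, and the color half is delegated to the Block4x4Encoding_RGB8 base class.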
+ +Block4x4Encoding_RGBA8_Opaque is used when all pixels in the 4x4 block are opaque +Block4x4Encoding_RGBA8_Transparent is used when all pixels in the 4x4 block are transparent +Block4x4Encoding_RGBA8 is used when there is a mixture of alphas in the 4x4 block + +*/ + +#include "EtcConfig.h" +#include "EtcBlock4x4Encoding_RGBA8.h" + +#include "EtcBlock4x4EncodingBits.h" +#include "EtcBlock4x4.h" + +#include <stdio.h> +#include <string.h> +#include <assert.h> +#include <float.h> +#include <limits> + +namespace Etc +{ + + // #################################################################################################### + // Block4x4Encoding_RGBA8 + // #################################################################################################### + + float Block4x4Encoding_RGBA8::s_aafModifierTable[MODIFIER_TABLE_ENTRYS][ALPHA_SELECTORS] + { + { -3.0f / 255.0f, -6.0f / 255.0f, -9.0f / 255.0f, -15.0f / 255.0f, 2.0f / 255.0f, 5.0f / 255.0f, 8.0f / 255.0f, 14.0f / 255.0f }, + { -3.0f / 255.0f, -7.0f / 255.0f, -10.0f / 255.0f, -13.0f / 255.0f, 2.0f / 255.0f, 6.0f / 255.0f, 9.0f / 255.0f, 12.0f / 255.0f }, + { -2.0f / 255.0f, -5.0f / 255.0f, -8.0f / 255.0f, -13.0f / 255.0f, 1.0f / 255.0f, 4.0f / 255.0f, 7.0f / 255.0f, 12.0f / 255.0f }, + { -2.0f / 255.0f, -4.0f / 255.0f, -6.0f / 255.0f, -13.0f / 255.0f, 1.0f / 255.0f, 3.0f / 255.0f, 5.0f / 255.0f, 12.0f / 255.0f }, + + { -3.0f / 255.0f, -6.0f / 255.0f, -8.0f / 255.0f, -12.0f / 255.0f, 2.0f / 255.0f, 5.0f / 255.0f, 7.0f / 255.0f, 11.0f / 255.0f }, + { -3.0f / 255.0f, -7.0f / 255.0f, -9.0f / 255.0f, -11.0f / 255.0f, 2.0f / 255.0f, 6.0f / 255.0f, 8.0f / 255.0f, 10.0f / 255.0f }, + { -4.0f / 255.0f, -7.0f / 255.0f, -8.0f / 255.0f, -11.0f / 255.0f, 3.0f / 255.0f, 6.0f / 255.0f, 7.0f / 255.0f, 10.0f / 255.0f }, + { -3.0f / 255.0f, -5.0f / 255.0f, -8.0f / 255.0f, -11.0f / 255.0f, 2.0f / 255.0f, 4.0f / 255.0f, 7.0f / 255.0f, 10.0f / 255.0f }, + + { -2.0f / 255.0f, -6.0f / 255.0f, -8.0f / 255.0f, -10.0f / 255.0f, 1.0f / 255.0f, 5.0f / 255.0f, 7.0f / 255.0f, 9.0f / 255.0f }, + { -2.0f / 255.0f, -5.0f / 255.0f, -8.0f / 255.0f, -10.0f / 255.0f, 1.0f / 255.0f, 4.0f / 255.0f, 7.0f / 255.0f, 9.0f / 255.0f }, + { -2.0f / 255.0f, -4.0f / 255.0f, -8.0f / 255.0f, -10.0f / 255.0f, 1.0f / 255.0f, 3.0f / 255.0f, 7.0f / 255.0f, 9.0f / 255.0f }, + { -2.0f / 255.0f, -5.0f / 255.0f, -7.0f / 255.0f, -10.0f / 255.0f, 1.0f / 255.0f, 4.0f / 255.0f, 6.0f / 255.0f, 9.0f / 255.0f }, + + { -3.0f / 255.0f, -4.0f / 255.0f, -7.0f / 255.0f, -10.0f / 255.0f, 2.0f / 255.0f, 3.0f / 255.0f, 6.0f / 255.0f, 9.0f / 255.0f }, + { -1.0f / 255.0f, -2.0f / 255.0f, -3.0f / 255.0f, -10.0f / 255.0f, 0.0f / 255.0f, 1.0f / 255.0f, 2.0f / 255.0f, 9.0f / 255.0f }, + { -4.0f / 255.0f, -6.0f / 255.0f, -8.0f / 255.0f, -9.0f / 255.0f, 3.0f / 255.0f, 5.0f / 255.0f, 7.0f / 255.0f, 8.0f / 255.0f }, + { -3.0f / 255.0f, -5.0f / 255.0f, -7.0f / 255.0f, -9.0f / 255.0f, 2.0f / 255.0f, 4.0f / 255.0f, 6.0f / 255.0f, 8.0f / 255.0f } + }; + + // ---------------------------------------------------------------------------------------------------- + // + Block4x4Encoding_RGBA8::Block4x4Encoding_RGBA8(void) + { + + m_pencodingbitsA8 = nullptr; + + } + Block4x4Encoding_RGBA8::~Block4x4Encoding_RGBA8(void) {} + // ---------------------------------------------------------------------------------------------------- + // initialization prior to encoding + // a_pblockParent points to the block associated with this encoding + // a_errormetric is used to choose the best encoding + // a_pafrgbaSource points to a 4x4 
block subset of the source image + // a_paucEncodingBits points to the final encoding bits + // + void Block4x4Encoding_RGBA8::InitFromSource(Block4x4 *a_pblockParent, + ColorFloatRGBA *a_pafrgbaSource, + unsigned char *a_paucEncodingBits, ErrorMetric a_errormetric) + { + Block4x4Encoding::Init(a_pblockParent, a_pafrgbaSource,a_errormetric); + + m_pencodingbitsA8 = (Block4x4EncodingBits_A8 *)a_paucEncodingBits; + m_pencodingbitsRGB8 = (Block4x4EncodingBits_RGB8 *)(a_paucEncodingBits + sizeof(Block4x4EncodingBits_A8)); + + } + + // ---------------------------------------------------------------------------------------------------- + // initialization from the encoding bits of a previous encoding + // a_pblockParent points to the block associated with this encoding + // a_errormetric is used to choose the best encoding + // a_pafrgbaSource points to a 4x4 block subset of the source image + // a_paucEncodingBits points to the final encoding bits of a previous encoding + // + void Block4x4Encoding_RGBA8::InitFromEncodingBits(Block4x4 *a_pblockParent, + unsigned char *a_paucEncodingBits, + ColorFloatRGBA *a_pafrgbaSource, + ErrorMetric a_errormetric) + { + + m_pencodingbitsA8 = (Block4x4EncodingBits_A8 *)a_paucEncodingBits; + m_pencodingbitsRGB8 = (Block4x4EncodingBits_RGB8 *)(a_paucEncodingBits + sizeof(Block4x4EncodingBits_A8)); + + // init RGB portion + Block4x4Encoding_RGB8::InitFromEncodingBits(a_pblockParent, + (unsigned char *) m_pencodingbitsRGB8, + a_pafrgbaSource, + a_errormetric); + + // init A8 portion + // has to be done after InitFromEncodingBits() + { + m_fBase = m_pencodingbitsA8->data.base / 255.0f; + m_fMultiplier = (float)m_pencodingbitsA8->data.multiplier; + m_uiModifierTableIndex = m_pencodingbitsA8->data.table; + + unsigned long long int ulliSelectorBits = 0; + ulliSelectorBits |= (unsigned long long int)m_pencodingbitsA8->data.selectors0 << 40; + ulliSelectorBits |= (unsigned long long int)m_pencodingbitsA8->data.selectors1 << 32; + ulliSelectorBits |= (unsigned long long int)m_pencodingbitsA8->data.selectors2 << 24; + ulliSelectorBits |= (unsigned long long int)m_pencodingbitsA8->data.selectors3 << 16; + ulliSelectorBits |= (unsigned long long int)m_pencodingbitsA8->data.selectors4 << 8; + ulliSelectorBits |= (unsigned long long int)m_pencodingbitsA8->data.selectors5; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + unsigned int uiShift = 45 - (3 * uiPixel); + m_auiAlphaSelectors[uiPixel] = (ulliSelectorBits >> uiShift) & (ALPHA_SELECTORS - 1); + } + + // decode the alphas + // calc alpha error + m_fError = 0.0f; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_afDecodedAlphas[uiPixel] = DecodePixelAlpha(m_fBase, m_fMultiplier, + m_uiModifierTableIndex, + m_auiAlphaSelectors[uiPixel]); + + float fDeltaAlpha = m_afDecodedAlphas[uiPixel] - m_pafrgbaSource[uiPixel].fA; + m_fError += fDeltaAlpha * fDeltaAlpha; + } + } + + // redo error calc to include alpha + CalcBlockError(); + + } + + // ---------------------------------------------------------------------------------------------------- + // perform a single encoding iteration + // replace the encoding if a better encoding was found + // subsequent iterations generally take longer for each iteration + // set m_boolDone if encoding is perfect or encoding is finished based on a_fEffort + // + // similar to Block4x4Encoding_RGB8_Base::Encode_RGB8(), but with alpha added + // + void Block4x4Encoding_RGBA8::PerformIteration(float a_fEffort) + { + assert(!m_boolDone); + + if 
(m_uiEncodingIterations == 0) + { + if (a_fEffort < 24.9f) + { + CalculateA8(0.0f); + } + else if (a_fEffort < 49.9f) + { + CalculateA8(1.0f); + } + else + { + CalculateA8(2.0f); + } + } + + Block4x4Encoding_RGB8::PerformIteration(a_fEffort); + + } + + // ---------------------------------------------------------------------------------------------------- + // find the best combination of base alpga, multiplier and selectors + // + // a_fRadius limits the range of base alpha to try + // + void Block4x4Encoding_RGBA8::CalculateA8(float a_fRadius) + { + + // find min/max alpha + float fMinAlpha = 1.0f; + float fMaxAlpha = 0.0f; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + float fAlpha = m_pafrgbaSource[uiPixel].fA; + + // ignore border pixels + if (isnan(fAlpha)) + { + continue; + } + + if (fAlpha < fMinAlpha) + { + fMinAlpha = fAlpha; + } + if (fAlpha > fMaxAlpha) + { + fMaxAlpha = fAlpha; + } + } + assert(fMinAlpha <= fMaxAlpha); + + float fAlphaRange = fMaxAlpha - fMinAlpha; + + // try each modifier table entry + m_fError = FLT_MAX; // artificially high value + for (unsigned int uiTableEntry = 0; uiTableEntry < MODIFIER_TABLE_ENTRYS; uiTableEntry++) + { + static const unsigned int MIN_VALUE_SELECTOR = 3; + static const unsigned int MAX_VALUE_SELECTOR = 7; + + float fTableEntryCenter = -s_aafModifierTable[uiTableEntry][MIN_VALUE_SELECTOR]; + + float fTableEntryRange = s_aafModifierTable[uiTableEntry][MAX_VALUE_SELECTOR] - + s_aafModifierTable[uiTableEntry][MIN_VALUE_SELECTOR]; + + float fCenterRatio = fTableEntryCenter / fTableEntryRange; + + float fCenter = fMinAlpha + fCenterRatio*fAlphaRange; + fCenter = roundf(255.0f * fCenter) / 255.0f; + + float fMinBase = fCenter - (a_fRadius / 255.0f); + if (fMinBase < 0.0f) + { + fMinBase = 0.0f; + } + + float fMaxBase = fCenter + (a_fRadius / 255.0f); + if (fMaxBase > 1.0f) + { + fMaxBase = 1.0f; + } + + for (float fBase = fMinBase; fBase <= fMaxBase; fBase += (0.999999f / 255.0f)) + { + + float fRangeMultiplier = roundf(fAlphaRange / fTableEntryRange); + + float fMinMultiplier = fRangeMultiplier - a_fRadius; + if (fMinMultiplier < 1.0f) + { + fMinMultiplier = 1.0f; + } + else if (fMinMultiplier > 15.0f) + { + fMinMultiplier = 15.0f; + } + + float fMaxMultiplier = fRangeMultiplier + a_fRadius; + if (fMaxMultiplier < 1.0f) + { + fMaxMultiplier = 1.0f; + } + else if (fMaxMultiplier > 15.0f) + { + fMaxMultiplier = 15.0f; + } + + for (float fMultiplier = fMinMultiplier; fMultiplier <= fMaxMultiplier; fMultiplier += 1.0f) + { + // find best selector for each pixel + unsigned int auiBestSelectors[PIXELS]; + float afBestAlphaError[PIXELS]; + float afBestDecodedAlphas[PIXELS]; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + float fBestPixelAlphaError = FLT_MAX; + for (unsigned int uiSelector = 0; uiSelector < ALPHA_SELECTORS; uiSelector++) + { + float fDecodedAlpha = DecodePixelAlpha(fBase, fMultiplier, uiTableEntry, uiSelector); + + // border pixels (NAN) should have zero error + float fPixelDeltaAlpha = isnan(m_pafrgbaSource[uiPixel].fA) ? 
+ 0.0f : + fDecodedAlpha - m_pafrgbaSource[uiPixel].fA; + + float fPixelAlphaError = fPixelDeltaAlpha * fPixelDeltaAlpha; + + if (fPixelAlphaError < fBestPixelAlphaError) + { + fBestPixelAlphaError = fPixelAlphaError; + auiBestSelectors[uiPixel] = uiSelector; + afBestAlphaError[uiPixel] = fBestPixelAlphaError; + afBestDecodedAlphas[uiPixel] = fDecodedAlpha; + } + } + } + + float fBlockError = 0.0f; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + fBlockError += afBestAlphaError[uiPixel]; + } + + if (fBlockError < m_fError) + { + m_fError = fBlockError; + + m_fBase = fBase; + m_fMultiplier = fMultiplier; + m_uiModifierTableIndex = uiTableEntry; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_auiAlphaSelectors[uiPixel] = auiBestSelectors[uiPixel]; + m_afDecodedAlphas[uiPixel] = afBestDecodedAlphas[uiPixel]; + } + } + } + } + + } + + } + + // ---------------------------------------------------------------------------------------------------- + // set the encoding bits based on encoding state + // + void Block4x4Encoding_RGBA8::SetEncodingBits(void) + { + + // set the RGB8 portion + Block4x4Encoding_RGB8::SetEncodingBits(); + + // set the A8 portion + { + m_pencodingbitsA8->data.base = (unsigned char)roundf(255.0f * m_fBase); + m_pencodingbitsA8->data.table = m_uiModifierTableIndex; + m_pencodingbitsA8->data.multiplier = (unsigned char)roundf(m_fMultiplier); + + unsigned long long int ulliSelectorBits = 0; + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + unsigned int uiShift = 45 - (3 * uiPixel); + ulliSelectorBits |= ((unsigned long long int)m_auiAlphaSelectors[uiPixel]) << uiShift; + } + + m_pencodingbitsA8->data.selectors0 = ulliSelectorBits >> 40; + m_pencodingbitsA8->data.selectors1 = ulliSelectorBits >> 32; + m_pencodingbitsA8->data.selectors2 = ulliSelectorBits >> 24; + m_pencodingbitsA8->data.selectors3 = ulliSelectorBits >> 16; + m_pencodingbitsA8->data.selectors4 = ulliSelectorBits >> 8; + m_pencodingbitsA8->data.selectors5 = ulliSelectorBits; + } + + } + + // #################################################################################################### + // Block4x4Encoding_RGBA8_Opaque + // #################################################################################################### + + // ---------------------------------------------------------------------------------------------------- + // perform a single encoding iteration + // replace the encoding if a better encoding was found + // subsequent iterations generally take longer for each iteration + // set m_boolDone if encoding is perfect or encoding is finished based on a_fEffort + // + void Block4x4Encoding_RGBA8_Opaque::PerformIteration(float a_fEffort) + { + assert(!m_boolDone); + + if (m_uiEncodingIterations == 0) + { + m_fError = 0.0f; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_afDecodedAlphas[uiPixel] = 1.0f; + } + } + + Block4x4Encoding_RGB8::PerformIteration(a_fEffort); + + } + + // ---------------------------------------------------------------------------------------------------- + // set the encoding bits based on encoding state + // + void Block4x4Encoding_RGBA8_Opaque::SetEncodingBits(void) + { + + // set the RGB8 portion + Block4x4Encoding_RGB8::SetEncodingBits(); + + // set the A8 portion + m_pencodingbitsA8->data.base = 255; + m_pencodingbitsA8->data.table = 15; + m_pencodingbitsA8->data.multiplier = 15; + m_pencodingbitsA8->data.selectors0 = 0xFF; + m_pencodingbitsA8->data.selectors1 = 0xFF; + 
m_pencodingbitsA8->data.selectors2 = 0xFF; + m_pencodingbitsA8->data.selectors3 = 0xFF; + m_pencodingbitsA8->data.selectors4 = 0xFF; + m_pencodingbitsA8->data.selectors5 = 0xFF; + + } + + // #################################################################################################### + // Block4x4Encoding_RGBA8_Transparent + // #################################################################################################### + + // ---------------------------------------------------------------------------------------------------- + // perform a single encoding iteration + // replace the encoding if a better encoding was found + // subsequent iterations generally take longer for each iteration + // set m_boolDone if encoding is perfect or encoding is finished based on a_fEffort + // + void Block4x4Encoding_RGBA8_Transparent::PerformIteration(float ) + { + assert(!m_boolDone); + assert(m_uiEncodingIterations == 0); + + m_mode = MODE_ETC1; + m_boolDiff = true; + m_boolFlip = false; + + for (unsigned int uiPixel = 0; uiPixel < PIXELS; uiPixel++) + { + m_afrgbaDecodedColors[uiPixel] = ColorFloatRGBA(); + m_afDecodedAlphas[uiPixel] = 0.0f; + } + + m_fError = 0.0f; + + m_boolDone = true; + m_uiEncodingIterations++; + + } + + // ---------------------------------------------------------------------------------------------------- + // set the encoding bits based on encoding state + // + void Block4x4Encoding_RGBA8_Transparent::SetEncodingBits(void) + { + + Block4x4Encoding_RGB8::SetEncodingBits(); + + // set the A8 portion + m_pencodingbitsA8->data.base = 0; + m_pencodingbitsA8->data.table = 0; + m_pencodingbitsA8->data.multiplier = 1; + m_pencodingbitsA8->data.selectors0 = 0; + m_pencodingbitsA8->data.selectors1 = 0; + m_pencodingbitsA8->data.selectors2 = 0; + m_pencodingbitsA8->data.selectors3 = 0; + m_pencodingbitsA8->data.selectors4 = 0; + m_pencodingbitsA8->data.selectors5 = 0; + + } + + // ---------------------------------------------------------------------------------------------------- + // +} diff --git a/thirdparty/etc2comp/EtcBlock4x4Encoding_RGBA8.h b/thirdparty/etc2comp/EtcBlock4x4Encoding_RGBA8.h new file mode 100644 index 0000000000..5765d36b90 --- /dev/null +++ b/thirdparty/etc2comp/EtcBlock4x4Encoding_RGBA8.h @@ -0,0 +1,121 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include "EtcBlock4x4Encoding_RGB8.h" + +namespace Etc +{ + class Block4x4EncodingBits_A8; + + // ################################################################################ + // Block4x4Encoding_RGBA8 + // RGBA8 if not completely opaque or transparent + // ################################################################################ + + class Block4x4Encoding_RGBA8 : public Block4x4Encoding_RGB8 + { + public: + + Block4x4Encoding_RGBA8(void); + virtual ~Block4x4Encoding_RGBA8(void); + + virtual void InitFromSource(Block4x4 *a_pblockParent, + ColorFloatRGBA *a_pafrgbaSource, + unsigned char *a_paucEncodingBits, ErrorMetric a_errormetric); + + virtual void InitFromEncodingBits(Block4x4 *a_pblockParent, + unsigned char *a_paucEncodingBits, + ColorFloatRGBA *a_pafrgbaSource, + ErrorMetric a_errormetric); + + virtual void PerformIteration(float a_fEffort); + + virtual void SetEncodingBits(void); + + protected: + + static const unsigned int MODIFIER_TABLE_ENTRYS = 16; + static const unsigned int ALPHA_SELECTOR_BITS = 3; + static const unsigned int ALPHA_SELECTORS = 1 << ALPHA_SELECTOR_BITS; + + static float s_aafModifierTable[MODIFIER_TABLE_ENTRYS][ALPHA_SELECTORS]; + + void CalculateA8(float a_fRadius); + + Block4x4EncodingBits_A8 *m_pencodingbitsA8; // A8 portion of Block4x4EncodingBits_RGBA8 + + float m_fBase; + float m_fMultiplier; + unsigned int m_uiModifierTableIndex; + unsigned int m_auiAlphaSelectors[PIXELS]; + + private: + + inline float DecodePixelAlpha(float a_fBase, float a_fMultiplier, + unsigned int a_uiTableIndex, unsigned int a_uiSelector) + { + float fPixelAlpha = a_fBase + + a_fMultiplier*s_aafModifierTable[a_uiTableIndex][a_uiSelector]; + if (fPixelAlpha < 0.0f) + { + fPixelAlpha = 0.0f; + } + else if (fPixelAlpha > 1.0f) + { + fPixelAlpha = 1.0f; + } + + return fPixelAlpha; + } + + }; + + // ################################################################################ + // Block4x4Encoding_RGBA8_Opaque + // RGBA8 if all pixels have alpha==1 + // ################################################################################ + + class Block4x4Encoding_RGBA8_Opaque : public Block4x4Encoding_RGBA8 + { + public: + + virtual void PerformIteration(float a_fEffort); + + virtual void SetEncodingBits(void); + + }; + + // ################################################################################ + // Block4x4Encoding_RGBA8_Transparent + // RGBA8 if all pixels have alpha==0 + // ################################################################################ + + class Block4x4Encoding_RGBA8_Transparent : public Block4x4Encoding_RGBA8 + { + public: + + virtual void PerformIteration(float a_fEffort); + + virtual void SetEncodingBits(void); + + }; + + // ---------------------------------------------------------------------------------------------------- + // + +} // namespace Etc diff --git a/thirdparty/etc2comp/EtcColor.h b/thirdparty/etc2comp/EtcColor.h new file mode 100644 index 0000000000..7ceae05b65 --- /dev/null +++ b/thirdparty/etc2comp/EtcColor.h @@ -0,0 +1,64 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include <math.h> + +namespace Etc +{ + + inline float LogToLinear(float a_fLog) + { + static const float ALPHA = 0.055f; + static const float ONE_PLUS_ALPHA = 1.0f + ALPHA; + + if (a_fLog <= 0.04045f) + { + return a_fLog / 12.92f; + } + else + { + return powf((a_fLog + ALPHA) / ONE_PLUS_ALPHA, 2.4f); + } + } + + inline float LinearToLog(float &a_fLinear) + { + static const float ALPHA = 0.055f; + static const float ONE_PLUS_ALPHA = 1.0f + ALPHA; + + if (a_fLinear <= 0.0031308f) + { + return 12.92f * a_fLinear; + } + else + { + return ONE_PLUS_ALPHA * powf(a_fLinear, (1.0f/2.4f)) - ALPHA; + } + } + + class ColorR8G8B8A8 + { + public: + + unsigned char ucR; + unsigned char ucG; + unsigned char ucB; + unsigned char ucA; + + }; +} diff --git a/thirdparty/etc2comp/EtcColorFloatRGBA.h b/thirdparty/etc2comp/EtcColorFloatRGBA.h new file mode 100644 index 0000000000..f2ca2c1f71 --- /dev/null +++ b/thirdparty/etc2comp/EtcColorFloatRGBA.h @@ -0,0 +1,321 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include "EtcConfig.h" +#include "EtcColor.h" + +#include <math.h> + +namespace Etc +{ + + class ColorFloatRGBA + { + public: + + ColorFloatRGBA(void) + { + fR = fG = fB = fA = 0.0f; + } + + ColorFloatRGBA(float a_fR, float a_fG, float a_fB, float a_fA) + { + fR = a_fR; + fG = a_fG; + fB = a_fB; + fA = a_fA; + } + + inline ColorFloatRGBA operator+(ColorFloatRGBA& a_rfrgba) + { + ColorFloatRGBA frgba; + frgba.fR = fR + a_rfrgba.fR; + frgba.fG = fG + a_rfrgba.fG; + frgba.fB = fB + a_rfrgba.fB; + frgba.fA = fA + a_rfrgba.fA; + return frgba; + } + + inline ColorFloatRGBA operator+(float a_f) + { + ColorFloatRGBA frgba; + frgba.fR = fR + a_f; + frgba.fG = fG + a_f; + frgba.fB = fB + a_f; + frgba.fA = fA; + return frgba; + } + + inline ColorFloatRGBA operator-(float a_f) + { + ColorFloatRGBA frgba; + frgba.fR = fR - a_f; + frgba.fG = fG - a_f; + frgba.fB = fB - a_f; + frgba.fA = fA; + return frgba; + } + + inline ColorFloatRGBA operator-(ColorFloatRGBA& a_rfrgba) + { + ColorFloatRGBA frgba; + frgba.fR = fR - a_rfrgba.fR; + frgba.fG = fG - a_rfrgba.fG; + frgba.fB = fB - a_rfrgba.fB; + frgba.fA = fA - a_rfrgba.fA; + return frgba; + } + + inline ColorFloatRGBA operator*(float a_f) + { + ColorFloatRGBA frgba; + frgba.fR = fR * a_f; + frgba.fG = fG * a_f; + frgba.fB = fB * a_f; + frgba.fA = fA; + + return frgba; + } + + inline ColorFloatRGBA ScaleRGB(float a_f) + { + ColorFloatRGBA frgba; + frgba.fR = a_f * fR; + frgba.fG = a_f * fG; + frgba.fB = a_f * fB; + frgba.fA = fA; + + return frgba; + } + + inline ColorFloatRGBA RoundRGB(void) + { + ColorFloatRGBA frgba; + frgba.fR = roundf(fR); + frgba.fG = roundf(fG); + frgba.fB = roundf(fB); + + return frgba; + } + + inline ColorFloatRGBA ToLinear() + { + ColorFloatRGBA frgbaLinear; + frgbaLinear.fR = LogToLinear(fR); + frgbaLinear.fG = LogToLinear(fG); + frgbaLinear.fB = LogToLinear(fB); + frgbaLinear.fA = fA; + + return frgbaLinear; + } + + inline ColorFloatRGBA ToLog(void) + { + ColorFloatRGBA frgbaLog; + frgbaLog.fR = LinearToLog(fR); + frgbaLog.fG = LinearToLog(fG); + frgbaLog.fB = LinearToLog(fB); + frgbaLog.fA = fA; + + return frgbaLog; + } + + inline static ColorFloatRGBA ConvertFromRGBA8(unsigned char a_ucR, + unsigned char a_ucG, unsigned char a_ucB, unsigned char a_ucA) + { + ColorFloatRGBA frgba; + + frgba.fR = (float)a_ucR / 255.0f; + frgba.fG = (float)a_ucG / 255.0f; + frgba.fB = (float)a_ucB / 255.0f; + frgba.fA = (float)a_ucA / 255.0f; + + return frgba; + } + + inline static ColorFloatRGBA ConvertFromRGB4(unsigned char a_ucR4, + unsigned char a_ucG4, + unsigned char a_ucB4) + { + ColorFloatRGBA frgba; + + unsigned char ucR8 = (unsigned char)((a_ucR4 << 4) + a_ucR4); + unsigned char ucG8 = (unsigned char)((a_ucG4 << 4) + a_ucG4); + unsigned char ucB8 = (unsigned char)((a_ucB4 << 4) + a_ucB4); + + frgba.fR = (float)ucR8 / 255.0f; + frgba.fG = (float)ucG8 / 255.0f; + frgba.fB = (float)ucB8 / 255.0f; + frgba.fA = 1.0f; + + return frgba; + } + + inline static ColorFloatRGBA ConvertFromRGB5(unsigned char a_ucR5, + unsigned char a_ucG5, + unsigned char a_ucB5) + { + ColorFloatRGBA frgba; + + unsigned char ucR8 = (unsigned char)((a_ucR5 << 3) + (a_ucR5 >> 2)); + unsigned char ucG8 = (unsigned char)((a_ucG5 << 3) + (a_ucG5 >> 2)); + unsigned char ucB8 = (unsigned char)((a_ucB5 << 3) + (a_ucB5 >> 2)); + + frgba.fR = (float)ucR8 / 255.0f; + frgba.fG = (float)ucG8 / 255.0f; + frgba.fB = (float)ucB8 / 255.0f; + frgba.fA = 1.0f; + + return frgba; + } + + inline static ColorFloatRGBA ConvertFromR6G7B6(unsigned char a_ucR6, + 
unsigned char a_ucG7, + unsigned char a_ucB6) + { + ColorFloatRGBA frgba; + + unsigned char ucR8 = (unsigned char)((a_ucR6 << 2) + (a_ucR6 >> 4)); + unsigned char ucG8 = (unsigned char)((a_ucG7 << 1) + (a_ucG7 >> 6)); + unsigned char ucB8 = (unsigned char)((a_ucB6 << 2) + (a_ucB6 >> 4)); + + frgba.fR = (float)ucR8 / 255.0f; + frgba.fG = (float)ucG8 / 255.0f; + frgba.fB = (float)ucB8 / 255.0f; + frgba.fA = 1.0f; + + return frgba; + } + + // quantize to 4 bits, expand to 8 bits + inline ColorFloatRGBA QuantizeR4G4B4(void) const + { + ColorFloatRGBA frgba = *this; + + // quantize to 4 bits + frgba = frgba.ClampRGB().ScaleRGB(15.0f).RoundRGB(); + unsigned int uiR4 = (unsigned int)frgba.fR; + unsigned int uiG4 = (unsigned int)frgba.fG; + unsigned int uiB4 = (unsigned int)frgba.fB; + + // expand to 8 bits + frgba.fR = (float) ((uiR4 << 4) + uiR4); + frgba.fG = (float) ((uiG4 << 4) + uiG4); + frgba.fB = (float) ((uiB4 << 4) + uiB4); + + frgba = frgba.ScaleRGB(1.0f/255.0f); + + return frgba; + } + + // quantize to 5 bits, expand to 8 bits + inline ColorFloatRGBA QuantizeR5G5B5(void) const + { + ColorFloatRGBA frgba = *this; + + // quantize to 5 bits + frgba = frgba.ClampRGB().ScaleRGB(31.0f).RoundRGB(); + unsigned int uiR5 = (unsigned int)frgba.fR; + unsigned int uiG5 = (unsigned int)frgba.fG; + unsigned int uiB5 = (unsigned int)frgba.fB; + + // expand to 8 bits + frgba.fR = (float)((uiR5 << 3) + (uiR5 >> 2)); + frgba.fG = (float)((uiG5 << 3) + (uiG5 >> 2)); + frgba.fB = (float)((uiB5 << 3) + (uiB5 >> 2)); + + frgba = frgba.ScaleRGB(1.0f / 255.0f); + + return frgba; + } + + // quantize to 6/7/6 bits, expand to 8 bits + inline ColorFloatRGBA QuantizeR6G7B6(void) const + { + ColorFloatRGBA frgba = *this; + + // quantize to 6/7/6 bits + ColorFloatRGBA frgba6 = frgba.ClampRGB().ScaleRGB(63.0f).RoundRGB(); + ColorFloatRGBA frgba7 = frgba.ClampRGB().ScaleRGB(127.0f).RoundRGB(); + unsigned int uiR6 = (unsigned int)frgba6.fR; + unsigned int uiG7 = (unsigned int)frgba7.fG; + unsigned int uiB6 = (unsigned int)frgba6.fB; + + // expand to 8 bits + frgba.fR = (float)((uiR6 << 2) + (uiR6 >> 4)); + frgba.fG = (float)((uiG7 << 1) + (uiG7 >> 6)); + frgba.fB = (float)((uiB6 << 2) + (uiB6 >> 4)); + + frgba = frgba.ScaleRGB(1.0f / 255.0f); + + return frgba; + } + + inline ColorFloatRGBA ClampRGB(void) + { + ColorFloatRGBA frgba = *this; + if (frgba.fR < 0.0f) { frgba.fR = 0.0f; } + if (frgba.fR > 1.0f) { frgba.fR = 1.0f; } + if (frgba.fG < 0.0f) { frgba.fG = 0.0f; } + if (frgba.fG > 1.0f) { frgba.fG = 1.0f; } + if (frgba.fB < 0.0f) { frgba.fB = 0.0f; } + if (frgba.fB > 1.0f) { frgba.fB = 1.0f; } + + return frgba; + } + + inline ColorFloatRGBA ClampRGBA(void) + { + ColorFloatRGBA frgba = *this; + if (frgba.fR < 0.0f) { frgba.fR = 0.0f; } + if (frgba.fR > 1.0f) { frgba.fR = 1.0f; } + if (frgba.fG < 0.0f) { frgba.fG = 0.0f; } + if (frgba.fG > 1.0f) { frgba.fG = 1.0f; } + if (frgba.fB < 0.0f) { frgba.fB = 0.0f; } + if (frgba.fB > 1.0f) { frgba.fB = 1.0f; } + if (frgba.fA < 0.0f) { frgba.fA = 0.0f; } + if (frgba.fA > 1.0f) { frgba.fA = 1.0f; } + + return frgba; + } + + inline int IntRed(float a_fScale) + { + return (int)roundf(fR * a_fScale); + } + + inline int IntGreen(float a_fScale) + { + return (int)roundf(fG * a_fScale); + } + + inline int IntBlue(float a_fScale) + { + return (int)roundf(fB * a_fScale); + } + + inline int IntAlpha(float a_fScale) + { + return (int)roundf(fA * a_fScale); + } + + float fR, fG, fB, fA; + }; + +} + diff --git a/thirdparty/etc2comp/EtcConfig.h b/thirdparty/etc2comp/EtcConfig.h new file 
mode 100644 index 0000000000..3bfe1d99a8 --- /dev/null +++ b/thirdparty/etc2comp/EtcConfig.h @@ -0,0 +1,67 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#ifdef _WIN32 +#define ETC_WINDOWS (1) +#else +#define ETC_WINDOWS (0) +#endif + +#if __APPLE__ +#define ETC_OSX (1) +#else +#define ETC_OSX (0) +#endif + +#if __unix__ +#define ETC_UNIX (1) +#else +#define ETC_UNIX (0) +#endif + + +// short names for common types +#include <stdint.h> +typedef int8_t i8; +typedef int16_t i16; +typedef int32_t i32; +typedef int64_t i64; + +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +typedef float f32; +typedef double f64; + +// Keep asserts enabled in release builds during development +#undef NDEBUG + +// 0=disable. stb_image can be used if you need to compress +//other image formats like jpg +#define USE_STB_IMAGE_LOAD 0 + +#if ETC_WINDOWS +#include <sdkddkver.h> +#define _CRT_SECURE_NO_WARNINGS (1) +#include <tchar.h> +#endif + +#include <stdio.h> + diff --git a/thirdparty/etc2comp/EtcDifferentialTrys.cpp b/thirdparty/etc2comp/EtcDifferentialTrys.cpp new file mode 100644 index 0000000000..ef4cd103d9 --- /dev/null +++ b/thirdparty/etc2comp/EtcDifferentialTrys.cpp @@ -0,0 +1,173 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* +EtcDifferentialTrys.cpp + +Gathers the results of the various encoding trys for both halves of a 4x4 block for Differential mode + +*/ + +#include "EtcConfig.h" +#include "EtcDifferentialTrys.h" + +#include <assert.h> + +namespace Etc +{ + + // ---------------------------------------------------------------------------------------------------- + // construct a list of trys (encoding attempts) + // + // a_frgbaColor1 is the basecolor for the first half + // a_frgbaColor2 is the basecolor for the second half + // a_pauiPixelMapping1 is the pixel order for the first half + // a_pauiPixelMapping2 is the pixel order for the second half + // a_uiRadius is the amount to vary the base colors + // + DifferentialTrys::DifferentialTrys(ColorFloatRGBA a_frgbaColor1, ColorFloatRGBA a_frgbaColor2, + const unsigned int *a_pauiPixelMapping1, + const unsigned int *a_pauiPixelMapping2, + unsigned int a_uiRadius, + int a_iGrayOffset1, int a_iGrayOffset2) + { + assert(a_uiRadius <= MAX_RADIUS); + + m_boolSeverelyBentColors = false; + + ColorFloatRGBA frgbaQuantizedColor1 = a_frgbaColor1.QuantizeR5G5B5(); + ColorFloatRGBA frgbaQuantizedColor2 = a_frgbaColor2.QuantizeR5G5B5(); + + // quantize base colors + // ensure that trys with a_uiRadius don't overflow + int iRed1 = MoveAwayFromEdge(frgbaQuantizedColor1.IntRed(31.0f)+a_iGrayOffset1, a_uiRadius); + int iGreen1 = MoveAwayFromEdge(frgbaQuantizedColor1.IntGreen(31.0f) + a_iGrayOffset1, a_uiRadius); + int iBlue1 = MoveAwayFromEdge(frgbaQuantizedColor1.IntBlue(31.0f) + a_iGrayOffset1, a_uiRadius); + int iRed2 = MoveAwayFromEdge(frgbaQuantizedColor2.IntRed(31.0f) + a_iGrayOffset2, a_uiRadius); + int iGreen2 = MoveAwayFromEdge(frgbaQuantizedColor2.IntGreen(31.0f) + a_iGrayOffset2, a_uiRadius); + int iBlue2 = MoveAwayFromEdge(frgbaQuantizedColor2.IntBlue(31.0f) + a_iGrayOffset2, a_uiRadius); + + int iDeltaRed = iRed2 - iRed1; + int iDeltaGreen = iGreen2 - iGreen1; + int iDeltaBlue = iBlue2 - iBlue1; + + // make sure components are within range + { + if (iDeltaRed > 3) + { + if (iDeltaRed > 7) + { + m_boolSeverelyBentColors = true; + } + + iRed1 += (iDeltaRed - 3) / 2; + iRed2 = iRed1 + 3; + iDeltaRed = 3; + } + else if (iDeltaRed < -4) + { + if (iDeltaRed < -8) + { + m_boolSeverelyBentColors = true; + } + + iRed1 += (iDeltaRed + 4) / 2; + iRed2 = iRed1 - 4; + iDeltaRed = -4; + } + assert(iRed1 >= (signed)(0 + a_uiRadius) && iRed1 <= (signed)(31 - a_uiRadius)); + assert(iRed2 >= (signed)(0 + a_uiRadius) && iRed2 <= (signed)(31 - a_uiRadius)); + assert(iDeltaRed >= -4 && iDeltaRed <= 3); + + if (iDeltaGreen > 3) + { + if (iDeltaGreen > 7) + { + m_boolSeverelyBentColors = true; + } + + iGreen1 += (iDeltaGreen - 3) / 2; + iGreen2 = iGreen1 + 3; + iDeltaGreen = 3; + } + else if (iDeltaGreen < -4) + { + if (iDeltaGreen < -8) + { + m_boolSeverelyBentColors = true; + } + + iGreen1 += (iDeltaGreen + 4) / 2; + iGreen2 = iGreen1 - 4; + iDeltaGreen = -4; + } + assert(iGreen1 >= (signed)(0 + a_uiRadius) && iGreen1 <= (signed)(31 - a_uiRadius)); + assert(iGreen2 >= (signed)(0 + a_uiRadius) && iGreen2 <= (signed)(31 - a_uiRadius)); + assert(iDeltaGreen >= -4 && iDeltaGreen <= 3); + + if (iDeltaBlue > 3) + { + if (iDeltaBlue > 7) + { + m_boolSeverelyBentColors = true; + } + + iBlue1 += (iDeltaBlue - 3) / 2; + iBlue2 = iBlue1 + 3; + iDeltaBlue = 3; + } + else if (iDeltaBlue < -4) + { + if (iDeltaBlue < -8) + { + m_boolSeverelyBentColors = true; + } + + iBlue1 += (iDeltaBlue + 4) / 2; + iBlue2 = iBlue1 - 4; + iDeltaBlue = -4; + } + assert(iBlue1 >= (signed)(0+a_uiRadius) 
&& iBlue1 <= (signed)(31 - a_uiRadius)); + assert(iBlue2 >= (signed)(0 + a_uiRadius) && iBlue2 <= (signed)(31 - a_uiRadius)); + assert(iDeltaBlue >= -4 && iDeltaBlue <= 3); + } + + m_half1.Init(iRed1, iGreen1, iBlue1, a_pauiPixelMapping1, a_uiRadius); + m_half2.Init(iRed2, iGreen2, iBlue2, a_pauiPixelMapping2, a_uiRadius); + + } + + // ---------------------------------------------------------------------------------------------------- + // + void DifferentialTrys::Half::Init(int a_iRed, int a_iGreen, int a_iBlue, + const unsigned int *a_pauiPixelMapping, unsigned int a_uiRadius) + { + + m_iRed = a_iRed; + m_iGreen = a_iGreen; + m_iBlue = a_iBlue; + + m_pauiPixelMapping = a_pauiPixelMapping; + m_uiRadius = a_uiRadius; + + m_uiTrys = 0; + + } + + // ---------------------------------------------------------------------------------------------------- + // + +} // namespace Etc diff --git a/thirdparty/etc2comp/EtcDifferentialTrys.h b/thirdparty/etc2comp/EtcDifferentialTrys.h new file mode 100644 index 0000000000..71860908ff --- /dev/null +++ b/thirdparty/etc2comp/EtcDifferentialTrys.h @@ -0,0 +1,97 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "EtcColorFloatRGBA.h" + +namespace Etc +{ + + class DifferentialTrys + { + public: + + static const unsigned int MAX_RADIUS = 2; + + DifferentialTrys(ColorFloatRGBA a_frgbaColor1, + ColorFloatRGBA a_frgbaColor2, + const unsigned int *a_pauiPixelMapping1, + const unsigned int *a_pauiPixelMapping2, + unsigned int a_uiRadius, + int a_iGrayOffset1, int a_iGrayOffset2); + + inline static int MoveAwayFromEdge(int a_i, int a_iDistance) + { + if (a_i < (0+ a_iDistance)) + { + return (0 + a_iDistance); + } + else if (a_i > (31- a_iDistance)) + { + return (31 - a_iDistance); + } + + return a_i; + } + + class Try + { + public : + static const unsigned int SELECTORS = 8; // per half + + int m_iRed; + int m_iGreen; + int m_iBlue; + unsigned int m_uiCW; + unsigned int m_auiSelectors[SELECTORS]; + float m_fError; + }; + + class Half + { + public: + + static const unsigned int MAX_TRYS = 125; + + void Init(int a_iRed, int a_iGreen, int a_iBlue, + const unsigned int *a_pauiPixelMapping, + unsigned int a_uiRadius); + + // center of trys + int m_iRed; + int m_iGreen; + int m_iBlue; + + const unsigned int *m_pauiPixelMapping; + unsigned int m_uiRadius; + + unsigned int m_uiTrys; + Try m_atry[MAX_TRYS]; + + Try *m_ptryBest; + }; + + Half m_half1; + Half m_half2; + + bool m_boolSeverelyBentColors; + }; + + // ---------------------------------------------------------------------------------------------------- + // + +} // namespace Etc diff --git a/thirdparty/etc2comp/EtcErrorMetric.h b/thirdparty/etc2comp/EtcErrorMetric.h new file mode 100644 index 0000000000..df4dcab4fb --- /dev/null +++ b/thirdparty/etc2comp/EtcErrorMetric.h @@ -0,0 +1,54 @@ +/* + * Copyright 2015 The Etc2Comp Authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +namespace Etc +{ + + enum ErrorMetric + { + RGBA, + RGBX, + REC709, + NUMERIC, + NORMALXYZ, + // + ERROR_METRICS, + // + BT709 = REC709 + }; + + inline const char *ErrorMetricToString(ErrorMetric errorMetric) + { + switch (errorMetric) + { + case RGBA: + return "RGBA"; + case RGBX: + return "RGBX"; + case REC709: + return "REC709"; + case NUMERIC: + return "NUMERIC"; + case NORMALXYZ: + return "NORMALXYZ"; + case ERROR_METRICS: + default: + return "UNKNOWN"; + } + } +} // namespace Etc diff --git a/thirdparty/etc2comp/EtcFile.cpp b/thirdparty/etc2comp/EtcFile.cpp new file mode 100644 index 0000000000..831a3aac45 --- /dev/null +++ b/thirdparty/etc2comp/EtcFile.cpp @@ -0,0 +1,390 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifdef _WIN32 +#define _CRT_SECURE_NO_WARNINGS (1) +#endif + +#include "EtcConfig.h" + + +#include "EtcFile.h" + +#include "EtcFileHeader.h" +#include "EtcColor.h" +#include "Etc.h" +#include "EtcBlock4x4EncodingBits.h" + +#include <stdio.h> +#include <string.h> +#include <assert.h> +#include <stdlib.h> + +using namespace Etc; + +// ---------------------------------------------------------------------------------------------------- +// +File::File(const char *a_pstrFilename, Format a_fileformat, Image::Format a_imageformat, + unsigned char *a_paucEncodingBits, unsigned int a_uiEncodingBitsBytes, + unsigned int a_uiSourceWidth, unsigned int a_uiSourceHeight, + unsigned int a_uiExtendedWidth, unsigned int a_uiExtendedHeight) +{ + if (a_pstrFilename == nullptr) + { + m_pstrFilename = const_cast<char *>(""); + } + else + { + m_pstrFilename = new char[strlen(a_pstrFilename) + 1]; + strcpy(m_pstrFilename, a_pstrFilename); + } + + m_fileformat = a_fileformat; + if (m_fileformat == Format::INFER_FROM_FILE_EXTENSION) + { + // ***** TODO: add this later ***** + m_fileformat = Format::KTX; + } + + m_imageformat = a_imageformat; + + m_uiNumMipmaps = 1; + m_pMipmapImages = new RawImage[m_uiNumMipmaps]; + m_pMipmapImages[0].paucEncodingBits = std::shared_ptr<unsigned char>(a_paucEncodingBits, [](unsigned char *p) { delete[] p; } ); + m_pMipmapImages[0].uiEncodingBitsBytes = a_uiEncodingBitsBytes; + m_pMipmapImages[0].uiExtendedWidth = a_uiExtendedWidth; + m_pMipmapImages[0].uiExtendedHeight = a_uiExtendedHeight; + + m_uiSourceWidth = a_uiSourceWidth; + m_uiSourceHeight = a_uiSourceHeight; + + switch (m_fileformat) + { + case Format::PKM: + m_pheader = new FileHeader_Pkm(this); + break; + + case Format::KTX: + m_pheader = new FileHeader_Ktx(this); + break; + + default: + assert(0); + break; + } + +} + +// ---------------------------------------------------------------------------------------------------- +// +File::File(const char *a_pstrFilename, Format a_fileformat, Image::Format a_imageformat, + unsigned int a_uiNumMipmaps, RawImage *a_pMipmapImages, + unsigned int a_uiSourceWidth, unsigned int a_uiSourceHeight) +{ + if (a_pstrFilename == nullptr) + { + m_pstrFilename = const_cast<char *>(""); + } + else + { + m_pstrFilename = new char[strlen(a_pstrFilename) + 1]; + strcpy(m_pstrFilename, a_pstrFilename); + } + + m_fileformat = a_fileformat; + if (m_fileformat == Format::INFER_FROM_FILE_EXTENSION) + { + // ***** TODO: add this later ***** + m_fileformat = Format::KTX; + } + + m_imageformat = a_imageformat; + + m_uiNumMipmaps = a_uiNumMipmaps; + m_pMipmapImages = new RawImage[m_uiNumMipmaps]; + + for(unsigned int mip = 0; mip < m_uiNumMipmaps; mip++) + { + m_pMipmapImages[mip] = a_pMipmapImages[mip]; + } + + m_uiSourceWidth = a_uiSourceWidth; + m_uiSourceHeight = a_uiSourceHeight; + + switch (m_fileformat) + { + case Format::PKM: + m_pheader = new FileHeader_Pkm(this); + break; + + case Format::KTX: + m_pheader = new FileHeader_Ktx(this); + break; + + default: + assert(0); + break; + } + +} + +// ---------------------------------------------------------------------------------------------------- +// +File::File(const char *a_pstrFilename, Format a_fileformat) +{ + if (a_pstrFilename == nullptr) + { + return; + } + else + { + m_pstrFilename = new char[strlen(a_pstrFilename) + 1]; + strcpy(m_pstrFilename, a_pstrFilename); + } + + m_fileformat = a_fileformat; + if (m_fileformat == Format::INFER_FROM_FILE_EXTENSION) + { + // ***** TODO: add this later ***** + m_fileformat = Format::KTX; + } + + 
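+	// read path (KTX): read the file header, skip any key/value data, then read the u32 image
+	// size followed by that many bytes of encoding bits; the image format is inferred from the
+	// KTX GL internal/base internal format fields below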
FILE *pfile = fopen(m_pstrFilename, "rb"); + if (pfile == nullptr) + { + printf("ERROR: Couldn't open %s", m_pstrFilename); + exit(1); + } + fseek(pfile, 0, SEEK_END); + unsigned int fileSize = ftell(pfile); + fseek(pfile, 0, SEEK_SET); + size_t szResult; + + m_pheader = new FileHeader_Ktx(this); + szResult = fread( ((FileHeader_Ktx*)m_pheader)->GetData(), 1, sizeof(FileHeader_Ktx::Data), pfile); + assert(szResult > 0); + + m_uiNumMipmaps = 1; + m_pMipmapImages = new RawImage[m_uiNumMipmaps]; + + if (((FileHeader_Ktx*)m_pheader)->GetData()->m_u32BytesOfKeyValueData > 0) + fseek(pfile, ((FileHeader_Ktx*)m_pheader)->GetData()->m_u32BytesOfKeyValueData, SEEK_CUR); + szResult = fread(&m_pMipmapImages->uiEncodingBitsBytes, 1, sizeof(unsigned int), pfile); + assert(szResult > 0); + + m_pMipmapImages->paucEncodingBits = std::shared_ptr<unsigned char>(new unsigned char[m_pMipmapImages->uiEncodingBitsBytes], [](unsigned char *p) { delete[] p; } ); + assert(ftell(pfile) + m_pMipmapImages->uiEncodingBitsBytes <= fileSize); + szResult = fread(m_pMipmapImages->paucEncodingBits.get(), 1, m_pMipmapImages->uiEncodingBitsBytes, pfile); + assert(szResult == m_pMipmapImages->uiEncodingBitsBytes); + + uint32_t uiInternalFormat = ((FileHeader_Ktx*)m_pheader)->GetData()->m_u32GlInternalFormat; + uint32_t uiBaseInternalFormat = ((FileHeader_Ktx*)m_pheader)->GetData()->m_u32GlBaseInternalFormat; + + if (uiInternalFormat == (uint32_t)FileHeader_Ktx::InternalFormat::ETC1_RGB8 && uiBaseInternalFormat == (uint32_t)FileHeader_Ktx::BaseInternalFormat::ETC1_RGB8) + { + m_imageformat = Image::Format::ETC1; + } + else if (uiInternalFormat == (uint32_t)FileHeader_Ktx::InternalFormat::ETC2_RGB8 && uiBaseInternalFormat == (uint32_t)FileHeader_Ktx::BaseInternalFormat::ETC2_RGB8) + { + m_imageformat = Image::Format::RGB8; + } + else if (uiInternalFormat == (uint32_t)FileHeader_Ktx::InternalFormat::ETC2_RGB8A1 && uiBaseInternalFormat == (uint32_t)FileHeader_Ktx::BaseInternalFormat::ETC2_RGB8A1) + { + m_imageformat = Image::Format::RGB8A1; + } + else if (uiInternalFormat == (uint32_t)FileHeader_Ktx::InternalFormat::ETC2_RGBA8 && uiBaseInternalFormat == (uint32_t)FileHeader_Ktx::BaseInternalFormat::ETC2_RGBA8) + { + m_imageformat = Image::Format::RGBA8; + } + else if (uiInternalFormat == (uint32_t)FileHeader_Ktx::InternalFormat::ETC2_R11 && uiBaseInternalFormat == (uint32_t)FileHeader_Ktx::BaseInternalFormat::ETC2_R11) + { + m_imageformat = Image::Format::R11; + } + else if (uiInternalFormat == (uint32_t)FileHeader_Ktx::InternalFormat::ETC2_SIGNED_R11 && uiBaseInternalFormat == (uint32_t)FileHeader_Ktx::BaseInternalFormat::ETC2_R11) + { + m_imageformat = Image::Format::SIGNED_R11; + } + else if (uiInternalFormat == (uint32_t)FileHeader_Ktx::InternalFormat::ETC2_RG11 && uiBaseInternalFormat == (uint32_t)FileHeader_Ktx::BaseInternalFormat::ETC2_RG11) + { + m_imageformat = Image::Format::RG11; + } + else if (uiInternalFormat == (uint32_t)FileHeader_Ktx::InternalFormat::ETC2_SIGNED_RG11 && uiBaseInternalFormat == (uint32_t)FileHeader_Ktx::BaseInternalFormat::ETC2_RG11) + { + m_imageformat = Image::Format::SIGNED_RG11; + } + else + { + m_imageformat = Image::Format::UNKNOWN; + } + + m_uiSourceWidth = ((FileHeader_Ktx*)m_pheader)->GetData()->m_u32PixelWidth; + m_uiSourceHeight = ((FileHeader_Ktx*)m_pheader)->GetData()->m_u32PixelHeight; + m_pMipmapImages->uiExtendedWidth = Image::CalcExtendedDimension((unsigned short)m_uiSourceWidth); + m_pMipmapImages->uiExtendedHeight = Image::CalcExtendedDimension((unsigned short)m_uiSourceHeight); 
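+	// sanity check: the payload just read must supply exactly one encoding-bits block
+	// for every 4x4 pixel block of the extended (padded) image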
+ + unsigned int uiBlocks = m_pMipmapImages->uiExtendedWidth * m_pMipmapImages->uiExtendedHeight / 16; + Block4x4EncodingBits::Format encodingbitsformat = Image::DetermineEncodingBitsFormat(m_imageformat); + unsigned int expectedbytes = uiBlocks * Block4x4EncodingBits::GetBytesPerBlock(encodingbitsformat); + assert(expectedbytes == m_pMipmapImages->uiEncodingBitsBytes); + + fclose(pfile); +} + +File::~File() +{ + if (m_pMipmapImages != nullptr) + { + delete [] m_pMipmapImages; + } + + if(m_pstrFilename != nullptr) + { + delete[] m_pstrFilename; + m_pstrFilename = nullptr; + } + if (m_pheader != nullptr) + { + delete m_pheader; + m_pheader = nullptr; + } +} + +void File::UseSingleBlock(int a_iPixelX, int a_iPixelY) +{ + if (a_iPixelX <= -1 || a_iPixelY <= -1) + return; + if (a_iPixelX >(int) m_uiSourceWidth) + { + //if we are using a ktx thats the size of a single block or less + //then make sure we use the 4x4 image as the single block + if (m_uiSourceWidth <= 4) + { + a_iPixelX = 0; + } + else + { + printf("blockAtHV: H coordinate out of range, capped to image width\n"); + a_iPixelX = m_uiSourceWidth - 1; + } + } + if (a_iPixelY >(int) m_uiSourceHeight) + { + //if we are using a ktx thats the size of a single block or less + //then make sure we use the 4x4 image as the single block + if (m_uiSourceHeight <= 4) + { + a_iPixelY= 0; + } + else + { + printf("blockAtHV: V coordinate out of range, capped to image height\n"); + a_iPixelY = m_uiSourceHeight - 1; + } + } + + unsigned int origWidth = m_uiSourceWidth; + unsigned int origHeight = m_uiSourceHeight; + + m_uiSourceWidth = 4; + m_uiSourceHeight = 4; + + Block4x4EncodingBits::Format encodingbitsformat = Image::DetermineEncodingBitsFormat(m_imageformat); + unsigned int uiEncodingBitsBytesPerBlock = Block4x4EncodingBits::GetBytesPerBlock(encodingbitsformat); + + int numMipmaps = 1; + RawImage* pMipmapImages = new RawImage[numMipmaps]; + pMipmapImages[0].uiExtendedWidth = Image::CalcExtendedDimension((unsigned short)m_uiSourceWidth); + pMipmapImages[0].uiExtendedHeight = Image::CalcExtendedDimension((unsigned short)m_uiSourceHeight); + pMipmapImages[0].uiEncodingBitsBytes = 0; + pMipmapImages[0].paucEncodingBits = std::shared_ptr<unsigned char>(new unsigned char[uiEncodingBitsBytesPerBlock], [](unsigned char *p) { delete[] p; }); + + //block position in pixels + // remove the bottom 2 bits to get the block coordinates + unsigned int iBlockPosX = (a_iPixelX & 0xFFFFFFFC); + unsigned int iBlockPosY = (a_iPixelY & 0xFFFFFFFC); + + int numXBlocks = (origWidth / 4); + int numYBlocks = (origHeight / 4); + + + // block location + //int iBlockX = (a_iPixelX % 4) == 0 ? a_iPixelX / 4.0f : (a_iPixelX / 4) + 1; + //int iBlockY = (a_iPixelY % 4) == 0 ? 
a_iPixelY / 4.0f : (a_iPixelY / 4) + 1; + //m_paucEncodingBits += ((iBlockY * numXBlocks) + iBlockX) * uiEncodingBitsBytesPerBlock; + + + unsigned int num = numXBlocks*numYBlocks; + unsigned int uiH = 0, uiV = 0; + unsigned char* pEncodingBits = m_pMipmapImages[0].paucEncodingBits.get(); + for (unsigned int uiBlock = 0; uiBlock < num; uiBlock++) + { + if (uiH == iBlockPosX && uiV == iBlockPosY) + { + memcpy(pMipmapImages[0].paucEncodingBits.get(),pEncodingBits, uiEncodingBitsBytesPerBlock); + break; + } + pEncodingBits += uiEncodingBitsBytesPerBlock; + uiH += 4; + + if (uiH >= origWidth) + { + uiH = 0; + uiV += 4; + } + } + + delete [] m_pMipmapImages; + m_pMipmapImages = pMipmapImages; +} +// ---------------------------------------------------------------------------------------------------- +// +void File::Write() +{ + + FILE *pfile = fopen(m_pstrFilename, "wb"); + if (pfile == nullptr) + { + printf("Error: couldn't open Etc file (%s)\n", m_pstrFilename); + exit(1); + } + + m_pheader->Write(pfile); + + for(unsigned int mip = 0; mip < m_uiNumMipmaps; mip++) + { + if(m_fileformat == Format::KTX) + { + // Write u32 image size + uint32_t u32ImageSize = m_pMipmapImages[mip].uiEncodingBitsBytes; + uint32_t szBytesWritten = fwrite(&u32ImageSize, 1, sizeof(u32ImageSize), pfile); + assert(szBytesWritten == sizeof(u32ImageSize)); + } + + unsigned int iResult = (int)fwrite(m_pMipmapImages[mip].paucEncodingBits.get(), 1, m_pMipmapImages[mip].uiEncodingBitsBytes, pfile); + if (iResult != m_pMipmapImages[mip].uiEncodingBitsBytes) + { + printf("Error: couldn't write Etc file (%s)\n", m_pstrFilename); + exit(1); + } + } + + fclose(pfile); + +} + +// ---------------------------------------------------------------------------------------------------- +// + diff --git a/thirdparty/etc2comp/EtcFile.h b/thirdparty/etc2comp/EtcFile.h new file mode 100644 index 0000000000..69bf3b2d3a --- /dev/null +++ b/thirdparty/etc2comp/EtcFile.h @@ -0,0 +1,136 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include "EtcColorFloatRGBA.h" +#include "EtcImage.h" +#include "Etc.h" + +namespace Etc +{ + class FileHeader; + class SourceImage; + + class File + { + public: + + enum class Format + { + INFER_FROM_FILE_EXTENSION, + PKM, + KTX, + }; + + File(const char *a_pstrFilename, Format a_fileformat, Image::Format a_imageformat, + unsigned char *a_paucEncodingBits, unsigned int a_uiEncodingBitsBytes, + unsigned int a_uiSourceWidth, unsigned int a_uiSourceHeight, + unsigned int a_uiExtendedWidth, unsigned int a_uiExtendedHeight); + + File(const char *a_pstrFilename, Format a_fileformat, Image::Format a_imageformat, + unsigned int a_uiNumMipmaps, RawImage *pMipmapImages, + unsigned int a_uiSourceWidth, unsigned int a_uiSourceHeight ); + + File(const char *a_pstrFilename, Format a_fileformat); + ~File(); + const char *GetFilename(void) { return m_pstrFilename; } + + void Read(const char *a_pstrFilename); + void Write(void); + + inline unsigned int GetSourceWidth(void) + { + return m_uiSourceWidth; + } + + inline unsigned int GetSourceHeight(void) + { + return m_uiSourceHeight; + } + + inline unsigned int GetExtendedWidth(unsigned int mipmapIndex = 0) + { + if (mipmapIndex < m_uiNumMipmaps) + { + return m_pMipmapImages[mipmapIndex].uiExtendedWidth; + } + else + { + return 0; + } + } + + inline unsigned int GetExtendedHeight(unsigned int mipmapIndex = 0) + { + if (mipmapIndex < m_uiNumMipmaps) + { + return m_pMipmapImages[mipmapIndex].uiExtendedHeight; + } + else + { + return 0; + } + } + + inline Image::Format GetImageFormat() + { + return m_imageformat; + } + + inline unsigned int GetEncodingBitsBytes(unsigned int mipmapIndex = 0) + { + if (mipmapIndex < m_uiNumMipmaps) + { + return m_pMipmapImages[mipmapIndex].uiEncodingBitsBytes; + } + else + { + return 0; + } + } + + inline unsigned char* GetEncodingBits(unsigned int mipmapIndex = 0) + { + if( mipmapIndex < m_uiNumMipmaps) + { + return m_pMipmapImages[mipmapIndex].paucEncodingBits.get(); + } + else + { + return nullptr; + } + } + + inline unsigned int GetNumMipmaps() + { + return m_uiNumMipmaps; + } + + void UseSingleBlock(int a_iPixelX = -1, int a_iPixelY = -1); + private: + + char *m_pstrFilename; // includes directory path and file extension + Format m_fileformat; + Image::Format m_imageformat; + FileHeader *m_pheader; + unsigned int m_uiNumMipmaps; + RawImage* m_pMipmapImages; + unsigned int m_uiSourceWidth; + unsigned int m_uiSourceHeight; + }; + +} diff --git a/thirdparty/etc2comp/EtcFileHeader.cpp b/thirdparty/etc2comp/EtcFileHeader.cpp new file mode 100644 index 0000000000..f02fcab011 --- /dev/null +++ b/thirdparty/etc2comp/EtcFileHeader.cpp @@ -0,0 +1,185 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "EtcFileHeader.h" + +#include "EtcBlock4x4EncodingBits.h" + +#include <assert.h> + +namespace Etc +{ + + // ---------------------------------------------------------------------------------------------------- + // + FileHeader_Pkm::FileHeader_Pkm(File *a_pfile) + { + m_pfile = a_pfile; + + static const char s_acMagicNumberData[4] = { 'P', 'K', 'M', ' ' }; + static const char s_acVersionData[2] = { '1', '0' }; + + for (unsigned int ui = 0; ui < sizeof(s_acMagicNumberData); ui++) + { + m_data.m_acMagicNumber[ui] = s_acMagicNumberData[ui]; + } + + for (unsigned int ui = 0; ui < sizeof(s_acVersionData); ui++) + { + m_data.m_acVersion[ui] = s_acVersionData[ui]; + } + + m_data.m_ucDataType_msb = 0; // ETC1_RGB_NO_MIPMAPS + m_data.m_ucDataType_lsb = 0; + + m_data.m_ucOriginalWidth_msb = (unsigned char)(m_pfile->GetSourceWidth() >> 8); + m_data.m_ucOriginalWidth_lsb = m_pfile->GetSourceWidth() & 0xFF; + m_data.m_ucOriginalHeight_msb = (unsigned char)(m_pfile->GetSourceHeight() >> 8); + m_data.m_ucOriginalHeight_lsb = m_pfile->GetSourceHeight() & 0xFF; + + m_data.m_ucExtendedWidth_msb = (unsigned char)(m_pfile->GetExtendedWidth() >> 8); + m_data.m_ucExtendedWidth_lsb = m_pfile->GetExtendedWidth() & 0xFF; + m_data.m_ucExtendedHeight_msb = (unsigned char)(m_pfile->GetExtendedHeight() >> 8); + m_data.m_ucExtendedHeight_lsb = m_pfile->GetExtendedHeight() & 0xFF; + + } + + // ---------------------------------------------------------------------------------------------------- + // + void FileHeader_Pkm::Write(FILE *a_pfile) + { + + fwrite(&m_data, sizeof(Data), 1, a_pfile); + + } + + // ---------------------------------------------------------------------------------------------------- + // + FileHeader_Ktx::FileHeader_Ktx(File *a_pfile) + { + m_pfile = a_pfile; + + static const uint8_t s_au8Itentfier[12] = + { + 0xAB, 0x4B, 0x54, 0x58, // first four bytes of Byte[12] identifier + 0x20, 0x31, 0x31, 0xBB, // next four bytes of Byte[12] identifier + 0x0D, 0x0A, 0x1A, 0x0A // final four bytes of Byte[12] identifier + }; + + for (unsigned int ui = 0; ui < sizeof(s_au8Itentfier); ui++) + { + m_data.m_au8Identifier[ui] = s_au8Itentfier[ui]; + } + + m_data.m_u32Endianness = 0x04030201; + m_data.m_u32GlType = 0; + m_data.m_u32GlTypeSize = 1; + m_data.m_u32GlFormat = 0; + + switch (m_pfile->GetImageFormat()) + { + case Image::Format::RGB8: + case Image::Format::SRGB8: + m_data.m_u32GlInternalFormat = (unsigned int)InternalFormat::ETC2_RGB8; + m_data.m_u32GlBaseInternalFormat = (unsigned int)BaseInternalFormat::ETC2_RGB8; + break; + + case Image::Format::RGBA8: + case Image::Format::SRGBA8: + m_data.m_u32GlInternalFormat = (unsigned int)InternalFormat::ETC2_RGBA8; + m_data.m_u32GlBaseInternalFormat = (unsigned int)BaseInternalFormat::ETC2_RGBA8; + break; + + case Image::Format::RGB8A1: + case Image::Format::SRGB8A1: + m_data.m_u32GlInternalFormat = (unsigned int)InternalFormat::ETC2_RGB8A1; + m_data.m_u32GlBaseInternalFormat = (unsigned int)BaseInternalFormat::ETC2_RGB8A1; + break; + + case Image::Format::R11: + m_data.m_u32GlInternalFormat = (unsigned int)InternalFormat::ETC2_R11; + m_data.m_u32GlBaseInternalFormat = (unsigned int)BaseInternalFormat::ETC2_R11; + break; + + case Image::Format::SIGNED_R11: + m_data.m_u32GlInternalFormat = (unsigned int)InternalFormat::ETC2_SIGNED_R11; + m_data.m_u32GlBaseInternalFormat = (unsigned int)BaseInternalFormat::ETC2_R11; + break; + + case Image::Format::RG11: + m_data.m_u32GlInternalFormat = (unsigned int)InternalFormat::ETC2_RG11; + 
m_data.m_u32GlBaseInternalFormat = (unsigned int)BaseInternalFormat::ETC2_RG11; + break; + + case Image::Format::SIGNED_RG11: + m_data.m_u32GlInternalFormat = (unsigned int)InternalFormat::ETC2_SIGNED_RG11; + m_data.m_u32GlBaseInternalFormat = (unsigned int)BaseInternalFormat::ETC2_RG11; + break; + + default: + m_data.m_u32GlInternalFormat = (unsigned int)InternalFormat::ETC1_RGB8; + m_data.m_u32GlBaseInternalFormat = (unsigned int)BaseInternalFormat::ETC1_RGB8; + break; + } + + m_data.m_u32PixelWidth = 0; + m_data.m_u32PixelHeight = 0; + m_data.m_u32PixelDepth = 0; + m_data.m_u32NumberOfArrayElements = 0; + m_data.m_u32NumberOfFaces = 0; + m_data.m_u32BytesOfKeyValueData = 0; + + m_pkeyvaluepair = nullptr; + + m_u32Images = 0; + m_u32KeyValuePairs = 0; + + m_data.m_u32PixelWidth = m_pfile->GetSourceWidth(); + m_data.m_u32PixelHeight = m_pfile->GetSourceHeight(); + m_data.m_u32PixelDepth = 0; + m_data.m_u32NumberOfArrayElements = 0; + m_data.m_u32NumberOfFaces = 1; + m_data.m_u32NumberOfMipmapLevels = m_pfile->GetNumMipmaps(); + + } + + // ---------------------------------------------------------------------------------------------------- + // + void FileHeader_Ktx::Write(FILE *a_pfile) + { + size_t szBytesWritten; + + // Write header + szBytesWritten = fwrite(&m_data, 1, sizeof(Data), a_pfile); + assert(szBytesWritten == sizeof(Data)); + + // Write KeyAndValuePairs + if (m_u32KeyValuePairs) + { + fwrite(m_pkeyvaluepair, m_pkeyvaluepair->u32KeyAndValueByteSize, 1, a_pfile); + } + } + + // ---------------------------------------------------------------------------------------------------- + // + FileHeader_Ktx::Data *FileHeader_Ktx::GetData() + { + return &m_data; + } + + // ---------------------------------------------------------------------------------------------------- + // +} // namespace Etc diff --git a/thirdparty/etc2comp/EtcFileHeader.h b/thirdparty/etc2comp/EtcFileHeader.h new file mode 100644 index 0000000000..55a9cb5d9d --- /dev/null +++ b/thirdparty/etc2comp/EtcFileHeader.h @@ -0,0 +1,146 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "EtcFile.h" +#include <stdio.h> +#include <inttypes.h> + +namespace Etc +{ + + class Image; + + class FileHeader + { + public: + + virtual void Write(FILE *a_pfile) = 0; + File GetFile(); + virtual ~FileHeader(void) {} + protected: + + File *m_pfile; + }; + + // ---------------------------------------------------------------------------------------------------- + // + class FileHeader_Pkm : public FileHeader + { + public: + + FileHeader_Pkm(File *a_pfile); + + virtual void Write(FILE *a_pfile); + virtual ~FileHeader_Pkm(void) {} + private: + + typedef struct + { + char m_acMagicNumber[4]; + char m_acVersion[2]; + unsigned char m_ucDataType_msb; // e.g. 
ETC1_RGB_NO_MIPMAPS + unsigned char m_ucDataType_lsb; + unsigned char m_ucExtendedWidth_msb; // padded to 4x4 blocks + unsigned char m_ucExtendedWidth_lsb; + unsigned char m_ucExtendedHeight_msb; // padded to 4x4 blocks + unsigned char m_ucExtendedHeight_lsb; + unsigned char m_ucOriginalWidth_msb; + unsigned char m_ucOriginalWidth_lsb; + unsigned char m_ucOriginalHeight_msb; + unsigned char m_ucOriginalHeight_lsb; + } Data; + + Data m_data; + }; + + // ---------------------------------------------------------------------------------------------------- + // + class FileHeader_Ktx : public FileHeader + { + public: + + typedef struct + { + uint32_t u32KeyAndValueByteSize; + } KeyValuePair; + + typedef struct + { + uint8_t m_au8Identifier[12]; + uint32_t m_u32Endianness; + uint32_t m_u32GlType; + uint32_t m_u32GlTypeSize; + uint32_t m_u32GlFormat; + uint32_t m_u32GlInternalFormat; + uint32_t m_u32GlBaseInternalFormat; + uint32_t m_u32PixelWidth; + uint32_t m_u32PixelHeight; + uint32_t m_u32PixelDepth; + uint32_t m_u32NumberOfArrayElements; + uint32_t m_u32NumberOfFaces; + uint32_t m_u32NumberOfMipmapLevels; + uint32_t m_u32BytesOfKeyValueData; + } Data; + + enum class InternalFormat + { + ETC1_RGB8 = 0x8D64, + ETC1_ALPHA8 = ETC1_RGB8, + // + ETC2_R11 = 0x9270, + ETC2_SIGNED_R11 = 0x9271, + ETC2_RG11 = 0x9272, + ETC2_SIGNED_RG11 = 0x9273, + ETC2_RGB8 = 0x9274, + ETC2_SRGB8 = 0x9275, + ETC2_RGB8A1 = 0x9276, + ETC2_SRGB8_PUNCHTHROUGH_ALPHA1 = 0x9277, + ETC2_RGBA8 = 0x9278 + }; + + enum class BaseInternalFormat + { + ETC2_R11 = 0x1903, + ETC2_RG11 = 0x8227, + ETC1_RGB8 = 0x1907, + ETC1_ALPHA8 = ETC1_RGB8, + // + ETC2_RGB8 = 0x1907, + ETC2_RGB8A1 = 0x1908, + ETC2_RGBA8 = 0x1908, + }; + + FileHeader_Ktx(File *a_pfile); + + virtual void Write(FILE *a_pfile); + virtual ~FileHeader_Ktx(void) {} + + void AddKeyAndValue(KeyValuePair *a_pkeyvaluepair); + + Data* GetData(); + + private: + + Data m_data; + KeyValuePair *m_pkeyvaluepair; + + uint32_t m_u32Images; + uint32_t m_u32KeyValuePairs; + }; + +} // namespace Etc diff --git a/thirdparty/etc2comp/EtcFilter.cpp b/thirdparty/etc2comp/EtcFilter.cpp new file mode 100644 index 0000000000..bc899a533e --- /dev/null +++ b/thirdparty/etc2comp/EtcFilter.cpp @@ -0,0 +1,401 @@ +#include <stdlib.h> +#include <math.h> +#include "EtcFilter.h" + + +namespace Etc +{ + +static const double PiConst = 3.14159265358979323846; + +inline double sinc(double x) +{ + if ( x == 0.0 ) + { + return 1.0; + } + + return sin(PiConst * x) / (PiConst * x); +} + +//inline float sincf( float x ) +//{ +// x *= F_PI; +// if (x < 0.01f && x > -0.01f) +// { +// return 1.0f + x*x*(-1.0f/6.0f + x*x*1.0f/120.0f); +// } +// +// return sinf(x)/x; +//} +// +//double bessel0(double x) +//{ +// const double EPSILON_RATIO = 1E-16; +// double xh, sum, pow, ds; +// int k; +// +// xh = 0.5 * x; +// sum = 1.0; +// pow = 1.0; +// k = 0; +// ds = 1.0; +// while (ds > sum * EPSILON_RATIO) +// { +// ++k; +// pow = pow * (xh / k); +// ds = pow * pow; +// sum = sum + ds; +// } +// +// return sum; +//} + +//**-------------------------------------------------------------------------- +//** Name: kaiser(double alpha, double half_width, double x) +//** Returns: +//** Description: Alpha controls shape of filter. We are using 4. 
+//**-------------------------------------------------------------------------- +//inline double kaiser(double alpha, double half_width, double x) +//{ +// double ratio = (x / half_width); +// return bessel0(alpha * sqrt(1 - ratio * ratio)) / bessel0(alpha); +//} +// +//float Filter_Lanczos4Sinc(float x) +//{ +// if (x <= -4.0f || x >= 4.0f) // half-width of 4 +// { +// return 0.0; +// } +// +// return sinc(0.875f * x) * sinc(0.25f * x); +//} +// +//double Filter_Kaiser4( double t ) +//{ +// return kaiser( 4.0, 3.0, t); +//} +// +//double Filter_KaiserOptimal( double t ) +//{ +// return kaiser( 8.93, 3.0f, t); +//} + +double FilterLanczos3( double t ) +{ + if ( t <= -3.0 || t >= 3.0 ) + { + return 0.0; + } + + return sinc( t ) * sinc( t / 3.0 ); +} + +double FilterBox( double t ) +{ + return ( t > -0.5 && t < 0.5) ? 1.0 : 0.0; +} + +double FilterLinear( double t ) +{ + if (t < 0.0) t = -t; + + return (t < 1.0) ? (1.0 - t) : 0.0; +} + + +//**-------------------------------------------------------------------------- +//** Name: CalcContributions( int srcSize, +//** int destSize, +//** double filterSize, +//** bool wrap, +//** double (*FilterProc)(double), +//** FilterWeights contrib[] ) +//** Returns: void +//** Description: +//**-------------------------------------------------------------------------- +void CalcContributions( int srcSize, int destSize, double filterSize, bool wrap, double (*FilterProc)(double), FilterWeights contrib[] ) +{ + double scale; + double filterScale; + double center; + double totalWeight; + double weight; + int iRight; + int iLeft; + int iDest; + + scale = (double)destSize / srcSize; + if ( scale < 1.0 ) + { + filterSize = filterSize / scale; + filterScale = scale; + } + else + { + filterScale = 1.0; + } + + if ( filterSize > (double)MaxFilterSize ) + { + filterSize = (double)MaxFilterSize; + } + + for ( iDest = 0; iDest < destSize; ++iDest ) + { + center = (double)iDest / scale; + + iLeft = (int)ceil(center - filterSize); + iRight = (int)floor(center + filterSize); + + if ( !wrap ) + { + if ( iLeft < 0 ) + { + iLeft = 0; + } + + if ( iRight >= srcSize ) + { + iRight = srcSize - 1; + } + } + + int numWeights = iRight - iLeft + 1; + + contrib[iDest].first = iLeft; + contrib[iDest].numWeights = numWeights; + + totalWeight = 0; + double t = ((double)iLeft - center) * filterScale; + for (int i = 0; i < numWeights; i++) + { + weight = (*FilterProc)(t) * filterScale; + totalWeight += weight; + contrib[iDest].weight[i] = weight; + t += filterScale; + } + + //**-------------------------------------------------------- + //** Normalize weights by dividing by the sum of the weights + //**-------------------------------------------------------- + if ( totalWeight > 0.0 ) + { + for ( int i = 0; i < numWeights; i++) + { + contrib[iDest].weight[i] /= totalWeight; + } + } + } +} + +//**------------------------------------------------------------------------- +//** Name: Filter_TwoPass( RGBCOLOR *pSrcImage, +//** int srcWidth, int srcHeight, +//** RGBCOLOR *pDestImage, +//** int destWidth, int destHeight, +//** double (*FilterProc)(double) ) +//** Returns: 0 on failure and 1 on success +//** Description: Filters a 2d image with a two pass filter by averaging the +//** weighted contributions of the pixels within the filter region. The +//** contributions are determined by a weighting function parameter. 
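//** The two passes are separable: the horizontal pass filters each source row into a
//** destWidth x srcHeight temporary image, and the vertical pass then filters that
//** temporary image into the destination. For every destination pixel,
//** CalcContributions() samples FilterProc at t = (iSrc - center) * filterScale and
//** normalizes the weights so they sum to 1. When minifying (scale < 1.0) the kernel
//** support widens by 1/scale, so a 2:1 downscale with FilterLanczos3 evaluates the
//** kernel over +/-6 source pixels, with each raw weight scaled by 0.5 before
//** normalization.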
+//**------------------------------------------------------------------------- +int FilterTwoPass( RGBCOLOR *pSrcImage, int srcWidth, int srcHeight, + RGBCOLOR *pDestImage, int destWidth, int destHeight, unsigned int wrapFlags, double (*FilterProc)(double) ) +{ + FilterWeights *contrib; + RGBCOLOR *pPixel; + RGBCOLOR *pSrcPixel; + RGBCOLOR *pTempImage; + int iRow; + int iCol; + int iSrcCol; + int iSrcRow; + int iWeight; + double dRed; + double dGreen; + double dBlue; + double dAlpha; + double filterSize = 3.0; + + int maxDim = (srcWidth>srcHeight)?srcWidth:srcHeight; + contrib = (FilterWeights*)malloc(maxDim * sizeof(FilterWeights)); + + //**------------------------------------------------------------------------ + //** Need to create a temporary image to stuff the horizontally scaled image + //**------------------------------------------------------------------------ + pTempImage = (RGBCOLOR *)malloc( destWidth * srcHeight * sizeof(RGBCOLOR) ); + if ( pTempImage == NULL ) + { + return 0; + } + + //**------------------------------------------------------- + //** Horizontally filter the image into the temporary image + //**------------------------------------------------------- + bool bWrapHorizontal = !!(wrapFlags&FILTER_WRAP_X); + CalcContributions( srcWidth, destWidth, filterSize, bWrapHorizontal, FilterProc, contrib ); + for ( iRow = 0; iRow < srcHeight; iRow++ ) + { + for ( iCol = 0; iCol < destWidth; iCol++ ) + { + dRed = 0; + dGreen = 0; + dBlue = 0; + dAlpha = 0; + + for ( iWeight = 0; iWeight < contrib[iCol].numWeights; iWeight++ ) + { + iSrcCol = iWeight + contrib[iCol].first; + if (bWrapHorizontal) + { + iSrcCol = (iSrcCol < 0) ? (srcWidth + iSrcCol) : (iSrcCol >= srcWidth) ? (iSrcCol - srcWidth) : iSrcCol; + } + pSrcPixel = pSrcImage + (iRow * srcWidth) + iSrcCol; + dRed += contrib[iCol].weight[iWeight] * pSrcPixel->rgba[0]; + dGreen += contrib[iCol].weight[iWeight] * pSrcPixel->rgba[1]; + dBlue += contrib[iCol].weight[iWeight] * pSrcPixel->rgba[2]; + dAlpha += contrib[iCol].weight[iWeight] * pSrcPixel->rgba[3]; + } + + pPixel = pTempImage + (iRow * destWidth) + iCol; + pPixel->rgba[0] = static_cast<unsigned char>(std::max(0.0, std::min(255.0, dRed))); + pPixel->rgba[1] = static_cast<unsigned char>(std::max(0.0, std::min(255.0, dGreen))); + pPixel->rgba[2] = static_cast<unsigned char>(std::max(0.0, std::min(255.0, dBlue))); + pPixel->rgba[3] = static_cast<unsigned char>(std::max(0.0, std::min(255.0, dAlpha))); + } + } + + //**------------------------------------------------------- + //** Vertically filter the image into the destination image + //**------------------------------------------------------- + bool bWrapVertical = !!(wrapFlags&FILTER_WRAP_Y); + CalcContributions(srcHeight, destHeight, filterSize, bWrapVertical, FilterProc, contrib); + for ( iCol = 0; iCol < destWidth; iCol++ ) + { + for ( iRow = 0; iRow < destHeight; iRow++ ) + { + dRed = 0; + dGreen = 0; + dBlue = 0; + dAlpha = 0; + + for ( iWeight = 0; iWeight < contrib[iRow].numWeights; iWeight++ ) + { + iSrcRow = iWeight + contrib[iRow].first; + if (bWrapVertical) + { + iSrcRow = (iSrcRow < 0) ? (srcHeight + iSrcRow) : (iSrcRow >= srcHeight) ? 
(iSrcRow - srcHeight) : iSrcRow; + } + pSrcPixel = pTempImage + (iSrcRow * destWidth) + iCol; + dRed += contrib[iRow].weight[iWeight] * pSrcPixel->rgba[0]; + dGreen += contrib[iRow].weight[iWeight] * pSrcPixel->rgba[1]; + dBlue += contrib[iRow].weight[iWeight] * pSrcPixel->rgba[2]; + dAlpha += contrib[iRow].weight[iWeight] * pSrcPixel->rgba[3]; + } + + pPixel = pDestImage + (iRow * destWidth) + iCol; + pPixel->rgba[0] = (unsigned char)(std::max( 0.0, std::min( 255.0, dRed))); + pPixel->rgba[1] = (unsigned char)(std::max( 0.0, std::min( 255.0, dGreen))); + pPixel->rgba[2] = (unsigned char)(std::max( 0.0, std::min( 255.0, dBlue))); + pPixel->rgba[3] = (unsigned char)(std::max( 0.0, std::min( 255.0, dAlpha))); + } + } + + free( pTempImage ); + free( contrib ); + + return 1; +} + +//**------------------------------------------------------------------------- +//** Name: FilterResample(RGBCOLOR *pSrcImage, int srcWidth, int srcHeight, +//** RGBCOLOR *pDstImage, int dstWidth, int dstHeight) +//** Returns: 1 +//** Description: This function runs a 2d box filter over the srouce image +//** to produce the destination image. +//**------------------------------------------------------------------------- +void FilterResample( RGBCOLOR *pSrcImage, int srcWidth, int srcHeight, + RGBCOLOR *pDstImage, int dstWidth, int dstHeight ) +{ + int iRow; + int iCol; + int iSampleRow; + int iSampleCol; + int iFirstSampleRow; + int iFirstSampleCol; + int iLastSampleRow; + int iLastSampleCol; + int red; + int green; + int blue; + int alpha; + int samples; + float xScale; + float yScale; + + RGBCOLOR *pSrcPixel; + RGBCOLOR *pDstPixel; + + xScale = (float)srcWidth / dstWidth; + yScale = (float)srcHeight / dstHeight; + + for ( iRow = 0; iRow < dstHeight; iRow++ ) + { + for ( iCol = 0; iCol < dstWidth; iCol++ ) + { + iFirstSampleRow = (int)(iRow * yScale); + iLastSampleRow = (int)ceil(iFirstSampleRow + yScale - 1); + if ( iLastSampleRow >= srcHeight ) + { + iLastSampleRow = srcHeight - 1; + } + + iFirstSampleCol = (int)(iCol * xScale); + iLastSampleCol = (int)ceil(iFirstSampleCol + xScale - 1); + if ( iLastSampleCol >= srcWidth ) + { + iLastSampleCol = srcWidth - 1; + } + + samples = 0; + red = 0; + green = 0; + blue = 0; + alpha = 0; + for ( iSampleRow = iFirstSampleRow; iSampleRow <= iLastSampleRow; iSampleRow++ ) + { + for ( iSampleCol = iFirstSampleCol; iSampleCol <= iLastSampleCol; iSampleCol++ ) + { + pSrcPixel = pSrcImage + iSampleRow * srcWidth + iSampleCol; + red += pSrcPixel->rgba[0]; + green += pSrcPixel->rgba[1]; + blue += pSrcPixel->rgba[2]; + alpha += pSrcPixel->rgba[3]; + + samples++; + } + } + + pDstPixel = pDstImage + iRow * dstWidth + iCol; + if ( samples > 0 ) + { + pDstPixel->rgba[0] = static_cast<uint8_t>(red / samples); + pDstPixel->rgba[1] = static_cast<uint8_t>(green / samples); + pDstPixel->rgba[2] = static_cast<uint8_t>(blue / samples); + pDstPixel->rgba[3] = static_cast<uint8_t>(alpha / samples); + } + else + { + pDstPixel->rgba[0] = static_cast<uint8_t>(red); + pDstPixel->rgba[1] = static_cast<uint8_t>(green); + pDstPixel->rgba[2] = static_cast<uint8_t>(blue); + pDstPixel->rgba[3] = static_cast<uint8_t>(alpha); + } + } + } +} + + +}
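// Illustrative usage sketch (not part of the upstream Etc2Comp sources; HalveImage
// and its parameter names are hypothetical): shrink an 8-bit RGBA image to half
// size with the Lanczos3 kernel, wrapping horizontally for a tiling texture.
#include "EtcFilter.h"
#include <vector>

std::vector<Etc::RGBCOLOR> HalveImage(Etc::RGBCOLOR *pSrc, int srcW, int srcH)
{
	int dstW = srcW / 2;
	int dstH = srcH / 2;
	std::vector<Etc::RGBCOLOR> dst(static_cast<size_t>(dstW) * dstH);

	// FilterTwoPass() returns 0 only when it fails to allocate its temporary image.
	if (!Etc::FilterTwoPass(pSrc, srcW, srcH, dst.data(), dstW, dstH,
	                        Etc::FILTER_WRAP_X, Etc::FilterLanczos3))
	{
		dst.clear();
	}
	return dst;
}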
\ No newline at end of file diff --git a/thirdparty/etc2comp/EtcFilter.h b/thirdparty/etc2comp/EtcFilter.h new file mode 100644 index 0000000000..fcf125c6df --- /dev/null +++ b/thirdparty/etc2comp/EtcFilter.h @@ -0,0 +1,244 @@ +#pragma once +#include <stdint.h> +#include <algorithm> + +namespace Etc +{ + +enum FilterEnums +{ + MaxFilterSize = 32 +}; + +enum WrapFlags +{ + FILTER_WRAP_NONE = 0, + FILTER_WRAP_X = 0x1, + FILTER_WRAP_Y = 0x2 +}; + +typedef struct tagFilterWeights +{ + int first; + int numWeights; + double weight[MaxFilterSize * 2 + 1]; +} FilterWeights; + +typedef struct tagRGBCOLOR +{ + union + { + uint32_t ulColor; + uint8_t rgba[4]; + }; +} RGBCOLOR; + + +double FilterBox( double t ); +double FilterLinear( double t ); +double FilterLanczos3( double t ); + +int FilterTwoPass( RGBCOLOR *pSrcImage, int srcWidth, int srcHeight, + RGBCOLOR *pDestImage, int destWidth, int destHeight, unsigned int wrapFlags, double (*FilterProc)(double) ); +void FilterResample( RGBCOLOR *pSrcImage, int srcWidth, int srcHeight, + RGBCOLOR *pDstImage, int dstWidth, int dstHeight ); + + +void CalcContributions(int srcSize, int destSize, double filterSize, bool wrap, double(*FilterProc)(double), FilterWeights contrib[]); + +template <typename T> +void FilterResample(T *pSrcImage, int srcWidth, int srcHeight, T *pDstImage, int dstWidth, int dstHeight) +{ + float xScale; + float yScale; + + T *pSrcPixel; + T *pDstPixel; + + xScale = (float)srcWidth / dstWidth; + yScale = (float)srcHeight / dstHeight; + + for (int iRow = 0; iRow < dstHeight; iRow++) + { + for (int iCol = 0; iCol < dstWidth; iCol++) + { + int samples; + int iFirstSampleRow; + int iFirstSampleCol; + int iLastSampleRow; + int iLastSampleCol; + float red; + float green; + float blue; + float alpha; + + iFirstSampleRow = (int)(iRow * yScale); + iLastSampleRow = (int)ceil(iFirstSampleRow + yScale - 1); + if (iLastSampleRow >= srcHeight) + { + iLastSampleRow = srcHeight - 1; + } + + iFirstSampleCol = (int)(iCol * xScale); + iLastSampleCol = (int)ceil(iFirstSampleCol + xScale - 1); + if (iLastSampleCol >= srcWidth) + { + iLastSampleCol = srcWidth - 1; + } + + samples = 0; + red = 0.f; + green = 0.f; + blue = 0.f; + alpha = 0.f; + for (int iSampleRow = iFirstSampleRow; iSampleRow <= iLastSampleRow; iSampleRow++) + { + for (int iSampleCol = iFirstSampleCol; iSampleCol <= iLastSampleCol; iSampleCol++) + { + pSrcPixel = pSrcImage + (iSampleRow * srcWidth + iSampleCol) * 4; + red += static_cast<float>(pSrcPixel[0]); + green += static_cast<float>(pSrcPixel[1]); + blue += static_cast<float>(pSrcPixel[2]); + alpha += static_cast<float>(pSrcPixel[3]); + + samples++; + } + } + + pDstPixel = pDstImage + (iRow * dstWidth + iCol) * 4; + if (samples > 0) + { + pDstPixel[0] = static_cast<T>(red / samples); + pDstPixel[1] = static_cast<T>(green / samples); + pDstPixel[2] = static_cast<T>(blue / samples); + pDstPixel[3] = static_cast<T>(alpha / samples); + } + else + { + pDstPixel[0] = static_cast<T>(red); + pDstPixel[1] = static_cast<T>(green); + pDstPixel[2] = static_cast<T>(blue); + pDstPixel[3] = static_cast<T>(alpha); + } + } + } + +} + +//**------------------------------------------------------------------------- +//** Name: Filter_TwoPass( RGBCOLOR *pSrcImage, +//** int srcWidth, int srcHeight, +//** RGBCOLOR *pDestImage, +//** int destWidth, int destHeight, +//** double (*FilterProc)(double) ) +//** Returns: 0 on failure and 1 on success +//** Description: Filters a 2d image with a two pass filter by averaging the +//** weighted contributions of the 
pixels within the filter region. The +//** contributions are determined by a weighting function parameter. +//**------------------------------------------------------------------------- +template <typename T> +int FilterTwoPass(T *pSrcImage, int srcWidth, int srcHeight, + T *pDestImage, int destWidth, int destHeight, unsigned int wrapFlags, double(*FilterProc)(double)) +{ + const int numComponents = 4; + FilterWeights *contrib; + T *pPixel; + T *pTempImage; + double dRed; + double dGreen; + double dBlue; + double dAlpha; + double filterSize = 3.0; + + int maxDim = (srcWidth>srcHeight) ? srcWidth : srcHeight; + contrib = new FilterWeights[maxDim]; + + //**------------------------------------------------------------------------ + //** Need to create a temporary image to stuff the horizontally scaled image + //**------------------------------------------------------------------------ + pTempImage = new T[destWidth * srcHeight * numComponents]; + if (pTempImage == NULL) + { + return 0; + } + + //**------------------------------------------------------- + //** Horizontally filter the image into the temporary image + //**------------------------------------------------------- + bool bWrapHorizontal = !!(wrapFlags&FILTER_WRAP_X); + CalcContributions(srcWidth, destWidth, filterSize, bWrapHorizontal, FilterProc, contrib); + for (int iRow = 0; iRow < srcHeight; iRow++) + { + for (int iCol = 0; iCol < destWidth; iCol++) + { + dRed = 0; + dGreen = 0; + dBlue = 0; + dAlpha = 0; + + for (int iWeight = 0; iWeight < contrib[iCol].numWeights; iWeight++) + { + int iSrcCol = iWeight + contrib[iCol].first; + if(bWrapHorizontal) + { + iSrcCol = (iSrcCol < 0)?(srcWidth+iSrcCol):(iSrcCol >= srcWidth)?(iSrcCol-srcWidth):iSrcCol; + } + T* pSrcPixel = pSrcImage + ((iRow * srcWidth) + iSrcCol)*numComponents; + dRed += contrib[iCol].weight[iWeight] * pSrcPixel[0]; + dGreen += contrib[iCol].weight[iWeight] * pSrcPixel[1]; + dBlue += contrib[iCol].weight[iWeight] * pSrcPixel[2]; + dAlpha += contrib[iCol].weight[iWeight] * pSrcPixel[3]; + } + + pPixel = pTempImage + ((iRow * destWidth) + iCol)*numComponents; + pPixel[0] = static_cast<T>(std::max(0.0, std::min(255.0, dRed))); + pPixel[1] = static_cast<T>(std::max(0.0, std::min(255.0, dGreen))); + pPixel[2] = static_cast<T>(std::max(0.0, std::min(255.0, dBlue))); + pPixel[3] = static_cast<T>(std::max(0.0, std::min(255.0, dAlpha))); + } + } + + //**------------------------------------------------------- + //** Vertically filter the image into the destination image + //**------------------------------------------------------- + bool bWrapVertical = !!(wrapFlags&FILTER_WRAP_Y); + CalcContributions(srcHeight, destHeight, filterSize, bWrapVertical, FilterProc, contrib); + for (int iCol = 0; iCol < destWidth; iCol++) + { + for (int iRow = 0; iRow < destHeight; iRow++) + { + dRed = 0; + dGreen = 0; + dBlue = 0; + dAlpha = 0; + + for (int iWeight = 0; iWeight < contrib[iRow].numWeights; iWeight++) + { + int iSrcRow = iWeight + contrib[iRow].first; + if (bWrapVertical) + { + iSrcRow = (iSrcRow < 0) ? (srcHeight + iSrcRow) : (iSrcRow >= srcHeight) ? 
(iSrcRow - srcHeight) : iSrcRow; + } + T* pSrcPixel = pTempImage + ((iSrcRow * destWidth) + iCol)*numComponents; + dRed += contrib[iRow].weight[iWeight] * pSrcPixel[0]; + dGreen += contrib[iRow].weight[iWeight] * pSrcPixel[1]; + dBlue += contrib[iRow].weight[iWeight] * pSrcPixel[2]; + dAlpha += contrib[iRow].weight[iWeight] * pSrcPixel[3]; + } + + pPixel = pDestImage + ((iRow * destWidth) + iCol)*numComponents; + pPixel[0] = static_cast<T>(std::max(0.0, std::min(255.0, dRed))); + pPixel[1] = static_cast<T>(std::max(0.0, std::min(255.0, dGreen))); + pPixel[2] = static_cast<T>(std::max(0.0, std::min(255.0, dBlue))); + pPixel[3] = static_cast<T>(std::max(0.0, std::min(255.0, dAlpha))); + } + } + + delete[] pTempImage; + delete[] contrib; + + return 1; +} + + +}
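// Illustrative sketch (not part of the upstream sources; MakeMip and its parameters
// are hypothetical): the templated overload above works on interleaved 4-component
// pixels, e.g. the float RGBA data the encoder consumes, stepping four components
// per pixel and clamping each filtered value to [0, 255] (for [0, 1] float data only
// the clamp at 0 can take effect).
#include "EtcFilter.h"
#include <vector>

std::vector<float> MakeMip(float *pSrcRgba, int srcW, int srcH,
                           int dstW, int dstH, unsigned int wrapFlags)
{
	std::vector<float> mip(static_cast<size_t>(dstW) * dstH * 4);
	Etc::FilterTwoPass(pSrcRgba, srcW, srcH, mip.data(), dstW, dstH,
	                   wrapFlags, Etc::FilterLanczos3);
	return mip;
}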
\ No newline at end of file diff --git a/thirdparty/etc2comp/EtcImage.cpp b/thirdparty/etc2comp/EtcImage.cpp new file mode 100644 index 0000000000..7a1058844d --- /dev/null +++ b/thirdparty/etc2comp/EtcImage.cpp @@ -0,0 +1,685 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* +EtcImage.cpp + +Image is an array of 4x4 blocks that represent the encoding of the source image + +*/ + +#include "EtcConfig.h" + +#include <stdlib.h> + +#include "EtcImage.h" + +#include "Etc.h" +#include "EtcBlock4x4.h" +#include "EtcBlock4x4EncodingBits.h" +#include "EtcSortedBlockList.h" + +#if ETC_WINDOWS +#include <windows.h> +#endif +#include <ctime> +#include <chrono> +#include <future> +#include <stdio.h> +#include <string.h> +#include <assert.h> + +// fix conflict with Block4x4::AlphaMix +#ifdef OPAQUE +#undef OPAQUE +#endif +#ifdef TRANSPARENT +#undef TRANSPARENT +#endif + +namespace Etc +{ + + // ---------------------------------------------------------------------------------------------------- + // + Image::Image(void) + { + m_encodingStatus = EncodingStatus::SUCCESS; + m_warningsToCapture = EncodingStatus::SUCCESS; + m_pafrgbaSource = nullptr; + + m_pablock = nullptr; + + m_encodingbitsformat = Block4x4EncodingBits::Format::UNKNOWN; + m_uiEncodingBitsBytes = 0; + m_paucEncodingBits = nullptr; + + m_format = Format::UNKNOWN; + m_iNumOpaquePixels = 0; + m_iNumTranslucentPixels = 0; + m_iNumTransparentPixels = 0; + } + + // ---------------------------------------------------------------------------------------------------- + // constructor using source image + // used to set state before Encode() is called + // + Image::Image(float *a_pafSourceRGBA, unsigned int a_uiSourceWidth, + unsigned int a_uiSourceHeight, + ErrorMetric a_errormetric) + { + m_encodingStatus = EncodingStatus::SUCCESS; + m_warningsToCapture = EncodingStatus::SUCCESS; + m_pafrgbaSource = (ColorFloatRGBA *) a_pafSourceRGBA; + m_uiSourceWidth = a_uiSourceWidth; + m_uiSourceHeight = a_uiSourceHeight; + + m_uiExtendedWidth = CalcExtendedDimension((unsigned short)m_uiSourceWidth); + m_uiExtendedHeight = CalcExtendedDimension((unsigned short)m_uiSourceHeight); + + m_uiBlockColumns = m_uiExtendedWidth >> 2; + m_uiBlockRows = m_uiExtendedHeight >> 2; + + m_pablock = new Block4x4[GetNumberOfBlocks()]; + assert(m_pablock); + + m_format = Format::UNKNOWN; + + m_encodingbitsformat = Block4x4EncodingBits::Format::UNKNOWN; + m_uiEncodingBitsBytes = 0; + m_paucEncodingBits = nullptr; + + m_errormetric = a_errormetric; + m_fEffort = 0.0f; + + m_iEncodeTime_ms = -1; + + m_iNumOpaquePixels = 0; + m_iNumTranslucentPixels = 0; + m_iNumTransparentPixels = 0; + m_bVerboseOutput = false; + + } + + // ---------------------------------------------------------------------------------------------------- + // constructor using encoding bits + // recreates encoding state using a previously encoded image + // + Image::Image(Format a_format, + unsigned int a_uiSourceWidth, unsigned int a_uiSourceHeight, 
+ unsigned char *a_paucEncidingBits, unsigned int a_uiEncodingBitsBytes, + Image *a_pimageSource, ErrorMetric a_errormetric) + { + m_encodingStatus = EncodingStatus::SUCCESS; + m_pafrgbaSource = nullptr; + m_uiSourceWidth = a_uiSourceWidth; + m_uiSourceHeight = a_uiSourceHeight; + + m_uiExtendedWidth = CalcExtendedDimension((unsigned short)m_uiSourceWidth); + m_uiExtendedHeight = CalcExtendedDimension((unsigned short)m_uiSourceHeight); + + m_uiBlockColumns = m_uiExtendedWidth >> 2; + m_uiBlockRows = m_uiExtendedHeight >> 2; + + unsigned int uiBlocks = GetNumberOfBlocks(); + + m_pablock = new Block4x4[uiBlocks]; + assert(m_pablock); + + m_format = a_format; + + m_iNumOpaquePixels = 0; + m_iNumTranslucentPixels = 0; + m_iNumTransparentPixels = 0; + + m_encodingbitsformat = DetermineEncodingBitsFormat(m_format); + if (m_encodingbitsformat == Block4x4EncodingBits::Format::UNKNOWN) + { + AddToEncodingStatus(ERROR_UNKNOWN_FORMAT); + return; + } + m_uiEncodingBitsBytes = a_uiEncodingBitsBytes; + m_paucEncodingBits = a_paucEncidingBits; + + m_errormetric = a_errormetric; + m_fEffort = 0.0f; + m_bVerboseOutput = false; + m_iEncodeTime_ms = -1; + + unsigned char *paucEncodingBits = m_paucEncodingBits; + unsigned int uiEncodingBitsBytesPerBlock = Block4x4EncodingBits::GetBytesPerBlock(m_encodingbitsformat); + + unsigned int uiH = 0; + unsigned int uiV = 0; + for (unsigned int uiBlock = 0; uiBlock < uiBlocks; uiBlock++) + { + m_pablock[uiBlock].InitFromEtcEncodingBits(a_format, uiH, uiV, paucEncodingBits, + a_pimageSource, a_errormetric); + paucEncodingBits += uiEncodingBitsBytesPerBlock; + uiH += 4; + if (uiH >= m_uiSourceWidth) + { + uiH = 0; + uiV += 4; + } + } + + } + + // ---------------------------------------------------------------------------------------------------- + // + Image::~Image(void) + { + if (m_pablock != nullptr) + { + delete[] m_pablock; + m_pablock = nullptr; + } + + /*if (m_paucEncodingBits != nullptr) + { + delete[] m_paucEncodingBits; + m_paucEncodingBits = nullptr; + }*/ + } + + // ---------------------------------------------------------------------------------------------------- + // encode an image + // create a set of encoding bits that conforms to a_format + // find best fit using a_errormetric + // explore a range of possible encodings based on a_fEffort (range = [0:100]) + // speed up process using a_uiJobs as the number of process threads (a_uiJobs must not excede a_uiMaxJobs) + // + Image::EncodingStatus Image::Encode(Format a_format, ErrorMetric a_errormetric, float a_fEffort, unsigned int a_uiJobs, unsigned int a_uiMaxJobs) + { + + auto start = std::chrono::steady_clock::now(); + + m_encodingStatus = EncodingStatus::SUCCESS; + + m_format = a_format; + m_errormetric = a_errormetric; + m_fEffort = a_fEffort; + + if (m_errormetric < 0 || m_errormetric > ERROR_METRICS) + { + AddToEncodingStatus(ERROR_UNKNOWN_ERROR_METRIC); + return m_encodingStatus; + } + + if (m_fEffort < ETCCOMP_MIN_EFFORT_LEVEL) + { + AddToEncodingStatus(WARNING_EFFORT_OUT_OF_RANGE); + m_fEffort = ETCCOMP_MIN_EFFORT_LEVEL; + } + else if (m_fEffort > ETCCOMP_MAX_EFFORT_LEVEL) + { + AddToEncodingStatus(WARNING_EFFORT_OUT_OF_RANGE); + m_fEffort = ETCCOMP_MAX_EFFORT_LEVEL; + } + if (a_uiJobs < 1) + { + a_uiJobs = 1; + AddToEncodingStatus(WARNING_JOBS_OUT_OF_RANGE); + } + else if (a_uiJobs > a_uiMaxJobs) + { + a_uiJobs = a_uiMaxJobs; + AddToEncodingStatus(WARNING_JOBS_OUT_OF_RANGE); + } + + m_encodingbitsformat = DetermineEncodingBitsFormat(m_format); + + if (m_encodingbitsformat == 
Block4x4EncodingBits::Format::UNKNOWN) + { + AddToEncodingStatus(ERROR_UNKNOWN_FORMAT); + return m_encodingStatus; + } + + assert(m_paucEncodingBits == nullptr); + m_uiEncodingBitsBytes = GetNumberOfBlocks() * Block4x4EncodingBits::GetBytesPerBlock(m_encodingbitsformat); + m_paucEncodingBits = new unsigned char[m_uiEncodingBitsBytes]; + + InitBlocksAndBlockSorter(); + + + std::future<void> *handle = new std::future<void>[a_uiMaxJobs]; + + unsigned int uiNumThreadsNeeded = 0; + unsigned int uiUnfinishedBlocks = GetNumberOfBlocks(); + + uiNumThreadsNeeded = (uiUnfinishedBlocks < a_uiJobs) ? uiUnfinishedBlocks : a_uiJobs; + + for (int i = 0; i < (int)uiNumThreadsNeeded - 1; i++) + { + handle[i] = async(std::launch::async, &Image::RunFirstPass, this, i, uiNumThreadsNeeded); + } + + RunFirstPass(uiNumThreadsNeeded - 1, uiNumThreadsNeeded); + + for (int i = 0; i < (int)uiNumThreadsNeeded - 1; i++) + { + handle[i].get(); + } + + // perform effort-based encoding + if (m_fEffort > ETCCOMP_MIN_EFFORT_LEVEL) + { + unsigned int uiFinishedBlocks = 0; + unsigned int uiTotalEffortBlocks = static_cast<unsigned int>(roundf(0.01f * m_fEffort * GetNumberOfBlocks())); + + if (m_bVerboseOutput) + { + printf("effortblocks = %d\n", uiTotalEffortBlocks); + } + unsigned int uiPass = 0; + while (1) + { + if (m_bVerboseOutput) + { + uiPass++; + printf("pass %u\n", uiPass); + } + m_psortedblocklist->Sort(); + uiUnfinishedBlocks = m_psortedblocklist->GetNumberOfSortedBlocks(); + uiFinishedBlocks = GetNumberOfBlocks() - uiUnfinishedBlocks; + if (m_bVerboseOutput) + { + printf(" %u unfinished blocks\n", uiUnfinishedBlocks); + // m_psortedblocklist->Print(); + } + + + + //stop enocding when we did enough to satify the effort percentage + if (uiFinishedBlocks >= uiTotalEffortBlocks) + { + if (m_bVerboseOutput) + { + printf("Finished %d Blocks out of %d\n", uiFinishedBlocks, uiTotalEffortBlocks); + } + break; + } + + unsigned int uiIteratedBlocks = 0; + unsigned int blocksToIterateThisPass = (uiTotalEffortBlocks - uiFinishedBlocks); + uiNumThreadsNeeded = (uiUnfinishedBlocks < a_uiJobs) ? 
uiUnfinishedBlocks : a_uiJobs; + + if (uiNumThreadsNeeded <= 1) + { + //since we already how many blocks each thread will process + //cap the thread limit to do the proper amount of work, and not more + uiIteratedBlocks = IterateThroughWorstBlocks(blocksToIterateThisPass, 0, 1); + } + else + { + //we have a lot of work to do, so lets multi thread it + std::future<unsigned int> *handleToBlockEncoders = new std::future<unsigned int>[uiNumThreadsNeeded-1]; + + for (int i = 0; i < (int)uiNumThreadsNeeded - 1; i++) + { + handleToBlockEncoders[i] = async(std::launch::async, &Image::IterateThroughWorstBlocks, this, blocksToIterateThisPass, i, uiNumThreadsNeeded); + } + uiIteratedBlocks = IterateThroughWorstBlocks(blocksToIterateThisPass, uiNumThreadsNeeded - 1, uiNumThreadsNeeded); + + for (int i = 0; i < (int)uiNumThreadsNeeded - 1; i++) + { + uiIteratedBlocks += handleToBlockEncoders[i].get(); + } + + delete[] handleToBlockEncoders; + } + + if (m_bVerboseOutput) + { + printf(" %u iterated blocks\n", uiIteratedBlocks); + } + } + } + + // generate Etc2-compatible bit-format 4x4 blocks + for (int i = 0; i < (int)a_uiJobs - 1; i++) + { + handle[i] = async(std::launch::async, &Image::SetEncodingBits, this, i, a_uiJobs); + } + SetEncodingBits(a_uiJobs - 1, a_uiJobs); + + for (int i = 0; i < (int)a_uiJobs - 1; i++) + { + handle[i].get(); + } + + auto end = std::chrono::steady_clock::now(); + std::chrono::milliseconds elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end - start); + m_iEncodeTime_ms = (int)elapsed.count(); + + delete[] handle; + delete m_psortedblocklist; + return m_encodingStatus; + } + + // ---------------------------------------------------------------------------------------------------- + // iterate the encoding thru the blocks with the worst error + // stop when a_uiMaxBlocks blocks have been iterated + // split the blocks between the process threads using a_uiMultithreadingOffset and a_uiMultithreadingStride + // + unsigned int Image::IterateThroughWorstBlocks(unsigned int a_uiMaxBlocks, + unsigned int a_uiMultithreadingOffset, + unsigned int a_uiMultithreadingStride) + { + assert(a_uiMultithreadingStride > 0); + unsigned int uiIteratedBlocks = a_uiMultithreadingOffset; + + SortedBlockList::Link *plink = m_psortedblocklist->GetLinkToFirstBlock(); + for (plink = plink->Advance(a_uiMultithreadingOffset); + plink != nullptr; + plink = plink->Advance(a_uiMultithreadingStride) ) + { + if (uiIteratedBlocks >= a_uiMaxBlocks) + { + break; + } + + plink->GetBlock()->PerformEncodingIteration(m_fEffort); + + uiIteratedBlocks += a_uiMultithreadingStride; + } + + return uiIteratedBlocks; + } + + // ---------------------------------------------------------------------------------------------------- + // determine which warnings to check for during Encode() based on encoding format + // + void Image::FindEncodingWarningTypesForCurFormat() + { + TrackEncodingWarning(WARNING_ALL_TRANSPARENT_PIXELS); + TrackEncodingWarning(WARNING_SOME_RGBA_NOT_0_TO_1); + switch (m_format) + { + case Image::Format::ETC1: + case Image::Format::RGB8: + case Image::Format::SRGB8: + TrackEncodingWarning(WARNING_SOME_NON_OPAQUE_PIXELS); + TrackEncodingWarning(WARNING_SOME_TRANSLUCENT_PIXELS); + break; + + case Image::Format::RGB8A1: + case Image::Format::SRGB8A1: + TrackEncodingWarning(WARNING_SOME_TRANSLUCENT_PIXELS); + TrackEncodingWarning(WARNING_ALL_OPAQUE_PIXELS); + break; + case Image::Format::RGBA8: + case Image::Format::SRGBA8: + TrackEncodingWarning(WARNING_ALL_OPAQUE_PIXELS); + break; + + case 
Image::Format::R11: + case Image::Format::SIGNED_R11: + TrackEncodingWarning(WARNING_SOME_NON_OPAQUE_PIXELS); + TrackEncodingWarning(WARNING_SOME_TRANSLUCENT_PIXELS); + TrackEncodingWarning(WARNING_SOME_GREEN_VALUES_ARE_NOT_ZERO); + TrackEncodingWarning(WARNING_SOME_BLUE_VALUES_ARE_NOT_ZERO); + break; + + case Image::Format::RG11: + case Image::Format::SIGNED_RG11: + TrackEncodingWarning(WARNING_SOME_NON_OPAQUE_PIXELS); + TrackEncodingWarning(WARNING_SOME_TRANSLUCENT_PIXELS); + TrackEncodingWarning(WARNING_SOME_BLUE_VALUES_ARE_NOT_ZERO); + break; + case Image::Format::FORMATS: + case Image::Format::UNKNOWN: + default: + assert(0); + break; + } + } + + // ---------------------------------------------------------------------------------------------------- + // examine source pixels to check for warnings + // + void Image::FindAndSetEncodingWarnings() + { + int numPixels = (m_uiBlockRows * 4) * (m_uiBlockColumns * 4); + if (m_iNumOpaquePixels == numPixels) + { + AddToEncodingStatusIfSignfigant(Image::EncodingStatus::WARNING_ALL_OPAQUE_PIXELS); + } + if (m_iNumOpaquePixels < numPixels) + { + AddToEncodingStatusIfSignfigant(Image::EncodingStatus::WARNING_SOME_NON_OPAQUE_PIXELS); + } + if (m_iNumTranslucentPixels > 0) + { + AddToEncodingStatusIfSignfigant(Image::EncodingStatus::WARNING_SOME_TRANSLUCENT_PIXELS); + } + if (m_iNumTransparentPixels == numPixels) + { + AddToEncodingStatusIfSignfigant(Image::EncodingStatus::WARNING_ALL_TRANSPARENT_PIXELS); + } + if (m_numColorValues.fB > 0.0f) + { + AddToEncodingStatusIfSignfigant(Image::EncodingStatus::WARNING_SOME_BLUE_VALUES_ARE_NOT_ZERO); + } + if (m_numColorValues.fG > 0.0f) + { + AddToEncodingStatusIfSignfigant(Image::EncodingStatus::WARNING_SOME_GREEN_VALUES_ARE_NOT_ZERO); + } + + if (m_numOutOfRangeValues.fR > 0.0f || m_numOutOfRangeValues.fG > 0.0f) + { + AddToEncodingStatusIfSignfigant(Image::EncodingStatus::WARNING_SOME_RGBA_NOT_0_TO_1); + } + if (m_numOutOfRangeValues.fB > 0.0f || m_numOutOfRangeValues.fA > 0.0f) + { + AddToEncodingStatusIfSignfigant(Image::EncodingStatus::WARNING_SOME_RGBA_NOT_0_TO_1); + } + } + + // ---------------------------------------------------------------------------------------------------- + // return a string name for a given image format + // + const char * Image::EncodingFormatToString(Image::Format a_format) + { + switch (a_format) + { + case Image::Format::ETC1: + return "ETC1"; + case Image::Format::RGB8: + return "RGB8"; + case Image::Format::SRGB8: + return "SRGB8"; + + case Image::Format::RGB8A1: + return "RGB8A1"; + case Image::Format::SRGB8A1: + return "SRGB8A1"; + case Image::Format::RGBA8: + return "RGBA8"; + case Image::Format::SRGBA8: + return "SRGBA8"; + + case Image::Format::R11: + return "R11"; + case Image::Format::SIGNED_R11: + return "SIGNED_R11"; + + case Image::Format::RG11: + return "RG11"; + case Image::Format::SIGNED_RG11: + return "SIGNED_RG11"; + case Image::Format::FORMATS: + case Image::Format::UNKNOWN: + default: + return "UNKNOWN"; + } + } + + // ---------------------------------------------------------------------------------------------------- + // return a string name for the image's format + // + const char * Image::EncodingFormatToString(void) + { + return EncodingFormatToString(m_format); + } + + // ---------------------------------------------------------------------------------------------------- + // init image blocks prior to encoding + // init block sorter for subsequent sortings + // check for encoding warnings + // + void Image::InitBlocksAndBlockSorter(void) + { + + 
FindEncodingWarningTypesForCurFormat(); + + // init each block + Block4x4 *pblock = m_pablock; + unsigned char *paucEncodingBits = m_paucEncodingBits; + for (unsigned int uiBlockRow = 0; uiBlockRow < m_uiBlockRows; uiBlockRow++) + { + unsigned int uiBlockV = uiBlockRow * 4; + + for (unsigned int uiBlockColumn = 0; uiBlockColumn < m_uiBlockColumns; uiBlockColumn++) + { + unsigned int uiBlockH = uiBlockColumn * 4; + + pblock->InitFromSource(this, uiBlockH, uiBlockV, paucEncodingBits, m_errormetric); + + paucEncodingBits += Block4x4EncodingBits::GetBytesPerBlock(m_encodingbitsformat); + + pblock++; + } + } + + FindAndSetEncodingWarnings(); + + // init block sorter + { + m_psortedblocklist = new SortedBlockList(GetNumberOfBlocks(), 100); + + for (unsigned int uiBlock = 0; uiBlock < GetNumberOfBlocks(); uiBlock++) + { + pblock = &m_pablock[uiBlock]; + m_psortedblocklist->AddBlock(pblock); + } + } + + } + + // ---------------------------------------------------------------------------------------------------- + // run the first pass of the encoder + // the encoder generally finds a reasonable, fast encoding + // this is run on all blocks regardless of effort to ensure that all blocks have a valid encoding + // + void Image::RunFirstPass(unsigned int a_uiMultithreadingOffset, unsigned int a_uiMultithreadingStride) + { + assert(a_uiMultithreadingStride > 0); + + for (unsigned int uiBlock = a_uiMultithreadingOffset; + uiBlock < GetNumberOfBlocks(); + uiBlock += a_uiMultithreadingStride) + { + Block4x4 *pblock = &m_pablock[uiBlock]; + pblock->PerformEncodingIteration(m_fEffort); + } + } + + // ---------------------------------------------------------------------------------------------------- + // set the encoding bits (for the output file) based on the best encoding for each block + // + void Image::SetEncodingBits(unsigned int a_uiMultithreadingOffset, + unsigned int a_uiMultithreadingStride) + { + assert(a_uiMultithreadingStride > 0); + + for (unsigned int uiBlock = a_uiMultithreadingOffset; + uiBlock < GetNumberOfBlocks(); + uiBlock += a_uiMultithreadingStride) + { + Block4x4 *pblock = &m_pablock[uiBlock]; + pblock->SetEncodingBitsFromEncoding(); + } + + } + + // ---------------------------------------------------------------------------------------------------- + // return the image error + // image error is the sum of all block errors + // + float Image::GetError(void) + { + float fError = 0.0f; + + for (unsigned int uiBlock = 0; uiBlock < GetNumberOfBlocks(); uiBlock++) + { + Block4x4 *pblock = &m_pablock[uiBlock]; + fError += pblock->GetError(); + } + + return fError; + } + + // ---------------------------------------------------------------------------------------------------- + // determine the encoding bits format based on the encoding format + // the encoding bits format is a family of bit encodings that are shared across various encoding formats + // + Block4x4EncodingBits::Format Image::DetermineEncodingBitsFormat(Format a_format) + { + Block4x4EncodingBits::Format encodingbitsformat; + + // determine encoding bits format from image format + switch (a_format) + { + case Format::ETC1: + case Format::RGB8: + case Format::SRGB8: + encodingbitsformat = Block4x4EncodingBits::Format::RGB8; + break; + + case Format::RGBA8: + case Format::SRGBA8: + encodingbitsformat = Block4x4EncodingBits::Format::RGBA8; + break; + + case Format::R11: + case Format::SIGNED_R11: + encodingbitsformat = Block4x4EncodingBits::Format::R11; + break; + + case Format::RG11: + case Format::SIGNED_RG11: + 
encodingbitsformat = Block4x4EncodingBits::Format::RG11; + break; + + case Format::RGB8A1: + case Format::SRGB8A1: + encodingbitsformat = Block4x4EncodingBits::Format::RGB8A1; + break; + + default: + encodingbitsformat = Block4x4EncodingBits::Format::UNKNOWN; + break; + } + + return encodingbitsformat; + } + + // ---------------------------------------------------------------------------------------------------- + // + +} // namespace Etc diff --git a/thirdparty/etc2comp/EtcImage.h b/thirdparty/etc2comp/EtcImage.h new file mode 100644 index 0000000000..bd807ac32e --- /dev/null +++ b/thirdparty/etc2comp/EtcImage.h @@ -0,0 +1,249 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +//#include "Etc.h" +#include "EtcColorFloatRGBA.h" +#include "EtcBlock4x4EncodingBits.h" +#include "EtcErrorMetric.h" + + +namespace Etc +{ + class Block4x4; + class EncoderSpec; + class SortedBlockList; + + class Image + { + public: + + //the differnt warning and errors that can come up during encoding + enum EncodingStatus + { + SUCCESS = 0, + // + WARNING_THRESHOLD = 1 << 0, + // + WARNING_EFFORT_OUT_OF_RANGE = 1 << 1, + WARNING_JOBS_OUT_OF_RANGE = 1 << 2, + WARNING_SOME_NON_OPAQUE_PIXELS = 1 << 3,//just for opaque formats, etc1, rgb8, r11, rg11 + WARNING_ALL_OPAQUE_PIXELS = 1 << 4, + WARNING_ALL_TRANSPARENT_PIXELS = 1 << 5, + WARNING_SOME_TRANSLUCENT_PIXELS = 1 << 6,//just for rgb8A1 + WARNING_SOME_RGBA_NOT_0_TO_1 = 1 << 7, + WARNING_SOME_BLUE_VALUES_ARE_NOT_ZERO = 1 << 8, + WARNING_SOME_GREEN_VALUES_ARE_NOT_ZERO = 1 << 9, + // + ERROR_THRESHOLD = 1 << 16, + // + ERROR_UNKNOWN_FORMAT = 1 << 17, + ERROR_UNKNOWN_ERROR_METRIC = 1 << 18, + ERROR_ZERO_WIDTH_OR_HEIGHT = 1 << 19, + // + }; + + enum class Format + { + UNKNOWN, + // + ETC1, + // + // ETC2 formats + RGB8, + SRGB8, + RGBA8, + SRGBA8, + R11, + SIGNED_R11, + RG11, + SIGNED_RG11, + RGB8A1, + SRGB8A1, + // + FORMATS, + // + DEFAULT = SRGB8 + }; + + // constructor using source image + Image(float *a_pafSourceRGBA, unsigned int a_uiSourceWidth, + unsigned int a_uiSourceHeight, + ErrorMetric a_errormetric); + + // constructor using encoding bits + Image(Format a_format, + unsigned int a_uiSourceWidth, unsigned int a_uiSourceHeight, + unsigned char *a_paucEncidingBits, unsigned int a_uiEncodingBitsBytes, + Image *a_pimageSource, + ErrorMetric a_errormetric); + + ~Image(void); + + EncodingStatus Encode(Format a_format, ErrorMetric a_errormetric, float a_fEffort, + unsigned int a_uiJobs, unsigned int a_uiMaxJobs); + + inline void AddToEncodingStatus(EncodingStatus a_encStatus) + { + m_encodingStatus = (EncodingStatus)((unsigned int)m_encodingStatus | (unsigned int)a_encStatus); + } + + inline unsigned int GetSourceWidth(void) + { + return m_uiSourceWidth; + } + + inline unsigned int GetSourceHeight(void) + { + return m_uiSourceHeight; + } + + inline unsigned int GetExtendedWidth(void) + { + return m_uiExtendedWidth; + } + + inline unsigned int GetExtendedHeight(void) + { + return 
m_uiExtendedHeight; + } + + inline unsigned int GetNumberOfBlocks() + { + return m_uiBlockColumns * m_uiBlockRows; + } + + inline Block4x4 * GetBlocks() + { + return m_pablock; + } + + inline unsigned char * GetEncodingBits(void) + { + return m_paucEncodingBits; + } + + inline unsigned int GetEncodingBitsBytes(void) + { + return m_uiEncodingBitsBytes; + } + + inline int GetEncodingTimeMs(void) + { + return m_iEncodeTime_ms; + } + + float GetError(void); + + inline ColorFloatRGBA * GetSourcePixel(unsigned int a_uiH, unsigned int a_uiV) + { + if (a_uiH >= m_uiSourceWidth || a_uiV >= m_uiSourceHeight) + { + return nullptr; + } + + return &m_pafrgbaSource[a_uiV*m_uiSourceWidth + a_uiH]; + } + + inline Format GetFormat(void) + { + return m_format; + } + + static Block4x4EncodingBits::Format DetermineEncodingBitsFormat(Format a_format); + + inline static unsigned short CalcExtendedDimension(unsigned short a_ushOriginalDimension) + { + return (unsigned short)((a_ushOriginalDimension + 3) & ~3); + } + + inline ErrorMetric GetErrorMetric(void) + { + return m_errormetric; + } + + static const char * EncodingFormatToString(Image::Format a_format); + const char * EncodingFormatToString(void); + //used to get basic information about the image data + int m_iNumOpaquePixels; + int m_iNumTranslucentPixels; + int m_iNumTransparentPixels; + + ColorFloatRGBA m_numColorValues; + ColorFloatRGBA m_numOutOfRangeValues; + + bool m_bVerboseOutput; + private: + //add a warning or error to check for while encoding + inline void TrackEncodingWarning(EncodingStatus a_encStatus) + { + m_warningsToCapture = (EncodingStatus)((unsigned int)m_warningsToCapture | (unsigned int)a_encStatus); + } + + //report the warning if it is something we care about for this encoding + inline void AddToEncodingStatusIfSignfigant(EncodingStatus a_encStatus) + { + if ((EncodingStatus)((unsigned int)m_warningsToCapture & (unsigned int)a_encStatus) == a_encStatus) + { + AddToEncodingStatus(a_encStatus); + } + } + + Image(void); + void FindEncodingWarningTypesForCurFormat(); + void FindAndSetEncodingWarnings(); + + void InitBlocksAndBlockSorter(void); + + void RunFirstPass(unsigned int a_uiMultithreadingOffset, + unsigned int a_uiMultithreadingStride); + + void SetEncodingBits(unsigned int a_uiMultithreadingOffset, + unsigned int a_uiMultithreadingStride); + + unsigned int IterateThroughWorstBlocks(unsigned int a_uiMaxBlocks, + unsigned int a_uiMultithreadingOffset, + unsigned int a_uiMultithreadingStride); + + // inputs + ColorFloatRGBA *m_pafrgbaSource; + unsigned int m_uiSourceWidth; + unsigned int m_uiSourceHeight; + unsigned int m_uiExtendedWidth; + unsigned int m_uiExtendedHeight; + unsigned int m_uiBlockColumns; + unsigned int m_uiBlockRows; + // intermediate data + Block4x4 *m_pablock; + // encoding + Format m_format; + Block4x4EncodingBits::Format m_encodingbitsformat; + unsigned int m_uiEncodingBitsBytes; // for entire image + unsigned char *m_paucEncodingBits; + ErrorMetric m_errormetric; + float m_fEffort; + // stats + int m_iEncodeTime_ms; + + SortedBlockList *m_psortedblocklist; + //this will hold any warning or errors that happen during encoding + EncodingStatus m_encodingStatus; + //these will be the warnings we are tracking + EncodingStatus m_warningsToCapture; + }; + +} // namespace Etc diff --git a/thirdparty/etc2comp/EtcIndividualTrys.cpp b/thirdparty/etc2comp/EtcIndividualTrys.cpp new file mode 100644 index 0000000000..56ff4c65ec --- /dev/null +++ b/thirdparty/etc2comp/EtcIndividualTrys.cpp @@ -0,0 +1,85 @@ +/* + * 
Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* +EtcIndividualTrys.cpp + +Gathers the results of the various encoding trys for both halves of a 4x4 block for Individual mode + +*/ + +#include "EtcConfig.h" +#include "EtcIndividualTrys.h" + +#include <assert.h> + +namespace Etc +{ + + // ---------------------------------------------------------------------------------------------------- + // construct a list of trys (encoding attempts) + // + // a_frgbaColor1 is the basecolor for the first half + // a_frgbaColor2 is the basecolor for the second half + // a_pauiPixelMapping1 is the pixel order for the first half + // a_pauiPixelMapping2 is the pixel order for the second half + // a_uiRadius is the amount to vary the base colors + // + IndividualTrys::IndividualTrys(ColorFloatRGBA a_frgbaColor1, ColorFloatRGBA a_frgbaColor2, + const unsigned int *a_pauiPixelMapping1, + const unsigned int *a_pauiPixelMapping2, + unsigned int a_uiRadius) + { + assert(a_uiRadius <= MAX_RADIUS); + + ColorFloatRGBA frgbaQuantizedColor1 = a_frgbaColor1.QuantizeR4G4B4(); + ColorFloatRGBA frgbaQuantizedColor2 = a_frgbaColor2.QuantizeR4G4B4(); + + // quantize base colors + // ensure that trys with a_uiRadius don't overflow + int iRed1 = MoveAwayFromEdge(frgbaQuantizedColor1.IntRed(15.0f), a_uiRadius); + int iGreen1 = MoveAwayFromEdge(frgbaQuantizedColor1.IntGreen(15.0f), a_uiRadius); + int iBlue1 = MoveAwayFromEdge(frgbaQuantizedColor1.IntBlue(15.0f), a_uiRadius); + int iRed2 = MoveAwayFromEdge(frgbaQuantizedColor2.IntRed(15.0f), a_uiRadius); + int iGreen2 = MoveAwayFromEdge(frgbaQuantizedColor2.IntGreen(15.0f), a_uiRadius); + int iBlue2 = MoveAwayFromEdge(frgbaQuantizedColor2.IntBlue(15.0f), a_uiRadius); + + m_half1.Init(iRed1, iGreen1, iBlue1, a_pauiPixelMapping1, a_uiRadius); + m_half2.Init(iRed2, iGreen2, iBlue2, a_pauiPixelMapping2, a_uiRadius); + + } + + // ---------------------------------------------------------------------------------------------------- + // + void IndividualTrys::Half::Init(int a_iRed, int a_iGreen, int a_iBlue, + const unsigned int *a_pauiPixelMapping, unsigned int a_uiRadius) + { + + m_iRed = a_iRed; + m_iGreen = a_iGreen; + m_iBlue = a_iBlue; + + m_pauiPixelMapping = a_pauiPixelMapping; + m_uiRadius = a_uiRadius; + + m_uiTrys = 0; + + } + + // ---------------------------------------------------------------------------------------------------- + // + +} // namespace Etc diff --git a/thirdparty/etc2comp/EtcIndividualTrys.h b/thirdparty/etc2comp/EtcIndividualTrys.h new file mode 100644 index 0000000000..5fb12fbcf4 --- /dev/null +++ b/thirdparty/etc2comp/EtcIndividualTrys.h @@ -0,0 +1,95 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "EtcColorFloatRGBA.h" + +namespace Etc +{ + + class IndividualTrys + { + public: + + static const unsigned int MAX_RADIUS = 1; + + IndividualTrys(ColorFloatRGBA a_frgbaColor1, + ColorFloatRGBA a_frgbaColor2, + const unsigned int *a_pauiPixelMapping1, + const unsigned int *a_pauiPixelMapping2, + unsigned int a_uiRadius); + + inline static int MoveAwayFromEdge(int a_i, int a_iDistance) + { + if (a_i < (0+ a_iDistance)) + { + return (0 + a_iDistance); + } + else if (a_i > (15- a_iDistance)) + { + return (15 - a_iDistance); + } + + return a_i; + } + + class Try + { + public : + static const unsigned int SELECTORS = 8; // per half + + int m_iRed; + int m_iGreen; + int m_iBlue; + unsigned int m_uiCW; + unsigned int m_auiSelectors[SELECTORS]; + float m_fError; + }; + + class Half + { + public: + + static const unsigned int MAX_TRYS = 27; + + void Init(int a_iRed, int a_iGreen, int a_iBlue, + const unsigned int *a_pauiPixelMapping, + unsigned int a_uiRadius); + + // center of trys + int m_iRed; + int m_iGreen; + int m_iBlue; + + const unsigned int *m_pauiPixelMapping; + unsigned int m_uiRadius; + + unsigned int m_uiTrys; + Try m_atry[MAX_TRYS]; + + Try *m_ptryBest; + }; + + Half m_half1; + Half m_half2; + + }; + + // ---------------------------------------------------------------------------------------------------- + // + +} // namespace Etc diff --git a/thirdparty/etc2comp/EtcMath.cpp b/thirdparty/etc2comp/EtcMath.cpp new file mode 100644 index 0000000000..096d5f7ab9 --- /dev/null +++ b/thirdparty/etc2comp/EtcMath.cpp @@ -0,0 +1,64 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "EtcConfig.h" +#include "EtcMath.h" + +namespace Etc +{ + + // ---------------------------------------------------------------------------------------------------- + // calculate the line that best fits the set of XY points contained in a_afX[] and a_afY[] + // use a_fSlope and a_fOffset to define that line + // + bool Regression(float a_afX[], float a_afY[], unsigned int a_Points, + float *a_fSlope, float *a_fOffset) + { + float fPoints = (float)a_Points; + + float fSumX = 0.0f; + float fSumY = 0.0f; + float fSumXY = 0.0f; + float fSumX2 = 0.0f; + + for (unsigned int uiPoint = 0; uiPoint < a_Points; uiPoint++) + { + fSumX += a_afX[uiPoint]; + fSumY += a_afY[uiPoint]; + fSumXY += a_afX[uiPoint] * a_afY[uiPoint]; + fSumX2 += a_afX[uiPoint] * a_afX[uiPoint]; + } + + float fDivisor = fPoints*fSumX2 - fSumX*fSumX; + + // if vertical line + if (fDivisor == 0.0f) + { + *a_fSlope = 0.0f; + *a_fOffset = 0.0f; + return true; + } + + *a_fSlope = (fPoints*fSumXY - fSumX*fSumY) / fDivisor; + *a_fOffset = (fSumY - (*a_fSlope)*fSumX) / fPoints; + + return false; + } + + // ---------------------------------------------------------------------------------------------------- + // + +} // namespace Etc diff --git a/thirdparty/etc2comp/EtcMath.h b/thirdparty/etc2comp/EtcMath.h new file mode 100644 index 0000000000..c58c9a91bc --- /dev/null +++ b/thirdparty/etc2comp/EtcMath.h @@ -0,0 +1,40 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include <math.h> + +namespace Etc +{ + + // ---------------------------------------------------------------------------------------------------- + // return true if vertical line + bool Regression(float a_afX[], float a_afY[], unsigned int a_Points, + float *a_fSlope, float *a_fOffset); + + inline float ConvertMSEToPSNR(float a_fMSE) + { + if (a_fMSE == 0.0f) + { + return INFINITY; + } + + return 10.0f * log10f(1.0f / a_fMSE); + } + + +} diff --git a/thirdparty/etc2comp/EtcSortedBlockList.cpp b/thirdparty/etc2comp/EtcSortedBlockList.cpp new file mode 100644 index 0000000000..bfa6b7b3fa --- /dev/null +++ b/thirdparty/etc2comp/EtcSortedBlockList.cpp @@ -0,0 +1,228 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* +EtcSortedBlockList.cpp + +SortedBlockList is a list of 4x4 blocks that can be used by the "effort" system to prioritize +the encoding of the 4x4 blocks. 
+ +The sorting is done with buckets, where each bucket is an indication of how much error each 4x4 block has + +*/ + +#include "EtcConfig.h" +#include "EtcSortedBlockList.h" + +#include "EtcBlock4x4.h" + +#include <stdio.h> +#include <string.h> +#include <assert.h> + +namespace Etc +{ + + // ---------------------------------------------------------------------------------------------------- + // construct an empty list + // + // allocate enough memory to add all of the image's 4x4 blocks later + // allocate enough buckets to sort the blocks + // + SortedBlockList::SortedBlockList(unsigned int a_uiImageBlocks, unsigned int a_uiBuckets) + { + m_uiImageBlocks = a_uiImageBlocks; + m_iBuckets = (int)a_uiBuckets; + + m_uiAddedBlocks = 0; + m_uiSortedBlocks = 0; + m_palinkPool = new Link[m_uiImageBlocks]; + m_pabucket = new Bucket[m_iBuckets]; + m_fMaxError = 0.0f; + + InitBuckets(); + + } + + // ---------------------------------------------------------------------------------------------------- + // + SortedBlockList::~SortedBlockList(void) + { + delete[] m_palinkPool; + delete[] m_pabucket; + } + + // ---------------------------------------------------------------------------------------------------- + // add a 4x4 block to the list + // the 4x4 block will be sorted later + // + void SortedBlockList::AddBlock(Block4x4 *a_pblock) + { + assert(m_uiAddedBlocks < m_uiImageBlocks); + Link *plink = &m_palinkPool[m_uiAddedBlocks++]; + plink->Init(a_pblock); + } + + // ---------------------------------------------------------------------------------------------------- + // sort all of the 4x4 blocks that have been added to the list + // + // first, determine the maximum error, then assign an error range to each bucket + // next, determine which bucket each 4x4 block belongs to based on the 4x4 block's error + // add the 4x4 block to the appropriate bucket + // lastly, walk thru the buckets and add each bucket to a sorted linked list + // + // the resultant sorting is an approximate sorting from most to least error + // + void SortedBlockList::Sort(void) + { + assert(m_uiAddedBlocks == m_uiImageBlocks); + InitBuckets(); + + // find max block error + m_fMaxError = -1.0f; + + for (unsigned int uiLink = 0; uiLink < m_uiAddedBlocks; uiLink++) + { + Link *plinkBlock = &m_palinkPool[uiLink]; + + float fBlockError = plinkBlock->GetBlock()->GetError(); + if (fBlockError > m_fMaxError) + { + m_fMaxError = fBlockError; + } + } + // prevent divide by zero or divide by negative + if (m_fMaxError <= 0.0f) + { + m_fMaxError = 1.0f; + } + //used for debugging + //int numDone = 0; + // put all of the blocks with unfinished encodings into the appropriate bucket + m_uiSortedBlocks = 0; + for (unsigned int uiLink = 0; uiLink < m_uiAddedBlocks; uiLink++) + { + Link *plinkBlock = &m_palinkPool[uiLink]; + + // if the encoding is done, don't add it to the list + if (plinkBlock->GetBlock()->GetEncoding()->IsDone()) + { + //numDone++; + continue; + } + + // calculate the appropriate sort bucket + float fBlockError = plinkBlock->GetBlock()->GetError(); + int iBucket = (int) floorf(m_iBuckets * fBlockError / m_fMaxError); + // clamp to bucket index + iBucket = iBucket < 0 ? 0 : iBucket >= m_iBuckets ? 
m_iBuckets - 1 : iBucket; + + // add block to bucket + { + Bucket *pbucket = &m_pabucket[iBucket]; + if (pbucket->plinkLast) + { + pbucket->plinkLast->SetNext(plinkBlock); + pbucket->plinkLast = plinkBlock; + } + else + { + pbucket->plinkFirst = pbucket->plinkLast = plinkBlock; + } + plinkBlock->SetNext(nullptr); + } + + m_uiSortedBlocks++; + + if (0) + { + printf("%u: e=%.3f\n", uiLink, fBlockError); + Print(); + printf("\n\n\n"); + } + } + //printf("num blocks already done: %d\n",numDone); + //link the blocks together across buckets + m_plinkFirst = nullptr; + m_plinkLast = nullptr; + for (int iBucket = m_iBuckets - 1; iBucket >= 0; iBucket--) + { + Bucket *pbucket = &m_pabucket[iBucket]; + + if (pbucket->plinkFirst) + { + if (m_plinkFirst == nullptr) + { + m_plinkFirst = pbucket->plinkFirst; + } + else + { + assert(pbucket->plinkLast->GetNext() == nullptr); + m_plinkLast->SetNext(pbucket->plinkFirst); + } + + m_plinkLast = pbucket->plinkLast; + } + } + + + } + + // ---------------------------------------------------------------------------------------------------- + // clear all of the buckets. normally done in preparation for a sort + // + void SortedBlockList::InitBuckets(void) + { + for (int iBucket = 0; iBucket < m_iBuckets; iBucket++) + { + Bucket *pbucket = &m_pabucket[iBucket]; + + pbucket->plinkFirst = 0; + pbucket->plinkLast = 0; + } + } + + // ---------------------------------------------------------------------------------------------------- + // print out the list of sorted 4x4 blocks + // normally used for debugging + // + void SortedBlockList::Print(void) + { + for (int iBucket = m_iBuckets-1; iBucket >= 0; iBucket--) + { + Bucket *pbucket = &m_pabucket[iBucket]; + + unsigned int uiBlocks = 0; + for (Link *plink = pbucket->plinkFirst; plink != nullptr; plink = plink->GetNext() ) + { + uiBlocks++; + + if (plink == pbucket->plinkLast) + { + break; + } + } + + float fBucketError = m_fMaxError * iBucket / m_iBuckets; + float fBucketRMS = sqrtf(fBucketError / (4.0f*16.0f) ); + printf("%3d: e=%.3f rms=%.6f %u\n", iBucket, fBucketError, fBucketRMS, uiBlocks); + } + } + + // ---------------------------------------------------------------------------------------------------- + // + +} // namespace Etc diff --git a/thirdparty/etc2comp/EtcSortedBlockList.h b/thirdparty/etc2comp/EtcSortedBlockList.h new file mode 100644 index 0000000000..960e8adc34 --- /dev/null +++ b/thirdparty/etc2comp/EtcSortedBlockList.h @@ -0,0 +1,124 @@ +/* + * Copyright 2015 The Etc2Comp Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +namespace Etc +{ + class Block4x4; + + class SortedBlockList + { + public: + + class Link + { + public: + + inline void Init(Block4x4 *a_pblock) + { + m_pblock = a_pblock; + m_plinkNext = nullptr; + } + + inline Block4x4 * GetBlock(void) + { + return m_pblock; + } + + inline void SetNext(Link *a_plinkNext) + { + m_plinkNext = a_plinkNext; + } + + inline Link * GetNext(void) + { + return m_plinkNext; + } + + inline Link * Advance(unsigned int a_uiSteps = 1) + { + Link *plink = this; + + for (unsigned int uiStep = 0; uiStep < a_uiSteps; uiStep++) + { + if (plink == nullptr) + { + break; + } + + plink = plink->m_plinkNext; + } + + return plink; + } + + private: + + Block4x4 *m_pblock; + Link *m_plinkNext; + }; + + SortedBlockList(unsigned int a_uiImageBlocks, unsigned int a_uiBuckets); + ~SortedBlockList(void); + + void AddBlock(Block4x4 *a_pblock); + + void Sort(void); + + inline Link * GetLinkToFirstBlock(void) + { + return m_plinkFirst; + } + + inline unsigned int GetNumberOfAddedBlocks(void) + { + return m_uiAddedBlocks; + } + + inline unsigned int GetNumberOfSortedBlocks(void) + { + return m_uiSortedBlocks; + } + + void Print(void); + + private: + + void InitBuckets(void); + + class Bucket + { + public: + Link *plinkFirst; + Link *plinkLast; + }; + + unsigned int m_uiImageBlocks; + int m_iBuckets; + + unsigned int m_uiAddedBlocks; + unsigned int m_uiSortedBlocks; + Link *m_palinkPool; + Bucket *m_pabucket; + float m_fMaxError; + + Link *m_plinkFirst; + Link *m_plinkLast; + + }; + +} // namespace Etc diff --git a/thirdparty/etc2comp/LICENSE b/thirdparty/etc2comp/LICENSE new file mode 100644 index 0000000000..75b52484ea --- /dev/null +++ b/thirdparty/etc2comp/LICENSE @@ -0,0 +1,202 @@ +
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/thirdparty/etc2comp/README.md b/thirdparty/etc2comp/README.md new file mode 100644 index 0000000000..1c70ae9f4e --- /dev/null +++ b/thirdparty/etc2comp/README.md @@ -0,0 +1,197 @@ +# Etc2Comp - Texture to ETC2 compressor
+
+Etc2Comp is a command line tool that converts textures (e.g. bitmaps)
+into the [ETC2](https://en.wikipedia.org/wiki/Ericsson_Texture_Compression)
+format. The tool is built with a focus on encoding performance
+to reduce the amount of time required to compile asset-heavy applications, as
+well as to reduce overall application size.
+
+This repo provides source code that can be compiled into a binary. The
+binary can then be used to convert textures to the ETC2 format.
+
+Important: This is not an official Google product. It is an experimental
+library published as-is. Please see the CONTRIBUTORS.md file for information
+about questions or issues.
+
+## Setup
+This project uses [CMake](https://cmake.org/) to generate platform-specific
+build files:
+ - Linux: make files
+ - OS X: Xcode workspace files
+ - Microsoft Windows: Visual Studio solution files
+ - Note: CMake supports other formats, but this doc only provides steps for
+ one of each platform for brevity.
+
+Refer to each platform's setup section to set up your environment and build
+an Etc2Comp binary. Then skip to the usage section of this page for examples
+of how to use the library.
+
+### Setup for OS X
+ build tested on this config:
+ OS X 10.9.5 i7 16GB RAM
+ Xcode 5.1.1
+ cmake 3.2.3
+
+Start by downloading and installing the following components if they are not
+already installed on your development machine.
+ - *Xcode* version 5.1.1, or greater
+ - [CMake](https://cmake.org/download/) version 3.2.3, or greater
+
+To build the Etc2Comp binary:
+ 1. Open a *Terminal* window and navigate to the project directory.
+ 1. Run `mkdir build_xcode`
+ 1. Run `cd build_xcode`
+ 1. Run `cmake -G Xcode ../`
+ 1. Open *Xcode* and import the `build_xcode/EtcTest.xcodeproj` file.
+ 1. Open the Product menu and choose Build For -> Running.
+ 1. Once the build succeeds, the binary located at `build_xcode/EtcTool/Debug/EtcTool`
+can be executed.
+
+Optional: Xcode EtcTool ‘Run’ preferences
+Note: if build_xcode/EtcTest.xcodeproj is deleted manually, then some Xcode preferences
+will need to be set by hand after CMake is run (these preferences are retained across
+CMake updates as long as the .xcodeproj is not deleted/removed).
+
+1. Set the active scheme to ‘EtcTool’
+1. Edit the scheme
+1. Select option ‘Run EtcTool’, then tab ‘Arguments’.
+Add this launch argument: ‘-argfile ../../EtcTool/args.txt’
+1. Select tab ‘Options’ and set a custom working directory to: ‘$(SRCROOT)/Build_Xcode/EtcTool’
+
+### Setup for Windows
+
+1. Open a *Terminal* window and navigate to the project directory.
+1. Run `mkdir build_vs`
+1. Run `cd build_vs`
+1. Run CMake, noting which Visual Studio version you need, and pointing to the parent directory as the source root:
+ For VS 2013 : `cmake -G "Visual Studio 12 2013 Win64" ../`
+ For VS 2015 : `cmake -G "Visual Studio 14 2015 Win64" ../`
+   NOTE: To see which generators this version of CMake supports, run `cmake -G`
+1. Open the 'EtcTest' solution
+1. Make the 'EtcTool' project the startup project
+1. (optional) In the project properties, under 'Debugging -> Command Arguments',
+add the argfile text file that's included in the EtcTool directory.
+Example: -argfile C:\etc2\EtcTool\Args.txt
+
+### Setup for Linux
+The Linux build was tested on this config:
+ Ubuntu desktop 14.04
+ gcc/g++ 4.8
+ cmake 2.8.12.2
+
+1. Verify that Linux has CMake and a C++11-capable g++ installed
+1. Open a shell
+1. Run `mkdir build_linux`
+1. Run `cd build_linux`
+1. Run `cmake ../`
+1. Run `make`
+1. Navigate to the newly created EtcTool directory: `cd EtcTool`
+1. Run the executable: `./EtcTool -argfile ../../EtcTool/args.txt`
+
+Skip to the <a href="#usage">Usage</a> section for more information about using the
+tool.
+
+## Usage
+
+### Command Line Usage
+EtcTool can be run from the command line with the following usage:
+ etctool.exe source_image [options ...] -output encoded_image
+
+The encoder will use an array of RGBA floats read from the source_image to create
+an ETC1 or ETC2 encoded image in encoded_image. The RGBA floats should be in the
+range [0:1].
+
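+A minimal illustration (not from the upstream tool) of building such a buffer in C++;
+it assumes the encoder expects interleaved RGBA, row-major, four floats per pixel:
+
+    // Build a width x height RGBA float image with every channel in [0, 1].
+    #include <vector>
+
+    std::vector<float> MakeTestImage(unsigned int width, unsigned int height)
+    {
+        std::vector<float> rgba(width * height * 4);
+        for (unsigned int y = 0; y < height; y++)
+        {
+            for (unsigned int x = 0; x < width; x++)
+            {
+                float *p = &rgba[(y * width + x) * 4];
+                p[0] = (x + 0.5f) / width;   // R: horizontal gradient
+                p[1] = (y + 0.5f) / height;  // G: vertical gradient
+                p[2] = 0.5f;                 // B: constant
+                p[3] = 1.0f;                 // A: fully opaque
+            }
+        }
+        return rgba;
+    }
+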
+Options:
+
+ -analyze <analysis_folder>
+ -argfile <arg_file> additional command line arguments read from a file
+ -blockAtHV <H V> encodes a single block that contains the
+ pixel specified by the H V coordinates
+ -compare <comparison_image> compares source_image to comparison_image
+ -effort <amount> number between 0 and 100 to specify the encoding quality
+ (100 is the highest quality)
+ -errormetric <error_metric> specify the error metric, the options are
+ rgba, rgbx, rec709, numeric and normalxyz
+ -format <etc_format> ETC1, RGB8, SRGB8, RGBA8, SRGB8, RGB8A1,
+ SRGB8A1 or R11
+ -help prints this message
+ -jobs or -j <thread_count> specifies the number of threads (default=1)
+ -normalizexyz normalize RGB to have a length of 1
+ -verbose or -v shows status information during the encoding
+ process
+ -mipmaps or -m <mip_count> sets the maximum number of mipmaps to generate (default=1)
+ -mipwrap or -w <x|y|xy> sets the mipmap filter wrap mode (default=clamp)
+
+* -analyze will run an analysis of the encoding and place it in the folder
+"analysis_folder" (e.g. ../analysis/kodim05). Within the analysis_folder, a folder
+will be created with a name of the current date/time (e.g. 20151204_153306). This
+date/time folder is used to compare encodings of the same texture over time.
+Within the date/time folder is a text file with several encoding stats and a 2x PNG
+image showing the encoding mode for each 4x4 block.
+
+* -argfile allows additional command line arguments to be placed in a text file
+
+* -blockAtHV selects the 4x4 pixel subset of the source image at position (H,V).
+This is mainly used for debugging.
+
+* -compare compares the source image to the created encoded image. The encoding
+will dictate what error analysis is used in the comparison.
+
+* -effort uses an "amount" between 0 and 100 to determine how much additional effort
+to apply during the encoding.
+
+* -errormetric selects the fitting algorithm used by the encoder. "rgba" calculates
+RMS error using RGB components that are weighted by A (a rough sketch of this metric
+appears after these option notes). "rgbx" calculates RMS error using RGBA components,
+where A is treated as an additional data channel instead of as alpha. "rec709" is
+similar to "rgba", except the RGB components are also weighted according to Rec709.
+"numeric" calculates RMS error using unweighted RGBA components. "normalxyz" calculates
+error based on dot product and vector length for RGB and RMS error for A.
+
+* -help prints out the usage message
+
+* -jobs enables multi-threading to speed up image encoding
+
+* -normalizexyz normalizes the source RGB to have a length of 1.
+
+* -verbose shows information on the current encoding process. It will then display the
+PSNR and the time it took to encode the image.
+
+* -mipmaps takes an argument that specifies how many mipmaps to generate from the
+source image. The mipmaps are generated with a Lanczos3 filter using edge clamping.
+If the mipmaps option is not specified, no mipmaps are created.
+
+* -mipwrap takes an argument that specifies the mipmap filter wrap mode. The options
+are "x", "y" and "xy", which specify wrapping in x only, y only, or x and y respectively.
+The default is clamping in both x and y.
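+
+As a rough, illustrative sketch (not the library's implementation), one plausible reading
+of the "rgba" metric described above, with each pixel's RGB error weighted by its source
+alpha, is:
+
+    #include <cmath>
+    #include <cstddef>
+
+    // RMS error over RGB, with each RGB difference scaled by the source pixel's alpha.
+    // src and dst are interleaved RGBA float buffers holding pixelCount pixels in [0, 1].
+    float WeightedRgbRms(const float *src, const float *dst, std::size_t pixelCount)
+    {
+        if (pixelCount == 0)
+        {
+            return 0.0f;
+        }
+        double sum = 0.0;
+        for (std::size_t i = 0; i < pixelCount; i++)
+        {
+            const float *s = &src[i * 4];
+            const float *d = &dst[i * 4];
+            for (int c = 0; c < 3; c++)
+            {
+                double diff = s[3] * (s[c] - d[c]); // weight the RGB error by alpha
+                sum += diff * diff;
+            }
+        }
+        return (float)std::sqrt(sum / (double)(pixelCount * 3));
+    }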
+
+Note: Path names can use slashes or backslashes. The tool will convert the
+slashes to the appropriate direction for the current platform.
+
+
+## API
+
+The library supports two different APIs: a C-like API that is not heavily
+class-based, and a class-based API.
+
+main() in EtcTool.cpp contains an example of both APIs.
+
+The Encode() method now returns an EncodingStatus that contains bit flags for
+reporting warnings and errors encountered while encoding.
+
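+A rough sketch of driving the class-based API from C++. The accessors used here
+(GetEncodingBits, GetEncodingBitsBytes) come from the Image class added by this patch, but
+the header names, constructor arguments, and enum values below are assumptions for
+illustration, not documented guarantees; main() in EtcTool.cpp remains the authoritative
+example.
+
+    #include <cstdio>
+
+    #include "EtcImage.h"        // header name assumed
+    #include "EtcErrorMetric.h"  // header name assumed
+
+    // Encode an interleaved RGBA float image and write the packed bits to a raw file.
+    void EncodeToFile(float *rgbaPixels, unsigned int width, unsigned int height,
+                      const char *path)
+    {
+        // Constructor arguments and enum names are assumptions, not the documented API.
+        Etc::Image image(rgbaPixels, width, height, Etc::ErrorMetric::RGBA);
+
+        image.Encode(Etc::Image::Format::RGBA8,  // target format (assumed enum value)
+                     Etc::ErrorMetric::RGBA,     // error metric (assumed enum value)
+                     90.0f,                      // effort, 0..100
+                     4,                          // jobs
+                     4);                         // max jobs
+
+        // Accessors below are the ones declared on the Image class in this patch.
+        if (FILE *f = std::fopen(path, "wb"))
+        {
+            std::fwrite(image.GetEncodingBits(), 1, image.GetEncodingBitsBytes(), f);
+            std::fclose(f);
+        }
+    }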
+
+## Copyright
+Copyright 2015 Etc2Comp Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/thirdparty/glad/glad.c b/thirdparty/glad/glad.c index 70a93f8d25..2d756ec3f6 100644 --- a/thirdparty/glad/glad.c +++ b/thirdparty/glad/glad.c @@ -1,6 +1,6 @@ /* - OpenGL loader generated by glad 0.1.13a0 on Fri Jan 6 19:27:07 2017. + OpenGL loader generated by glad 0.1.14a0 on Wed Jun 14 20:12:45 2017. Language/Generator: C/C++ Specification: gl @@ -30,7 +30,7 @@ static void* get_proc(const char *namez); static HMODULE libGL; typedef void* (APIENTRYP PFNWGLGETPROCADDRESSPROC_PRIVATE)(const char*); -PFNWGLGETPROCADDRESSPROC_PRIVATE gladGetProcAddressPtr; +static PFNWGLGETPROCADDRESSPROC_PRIVATE gladGetProcAddressPtr; static int open_gl(void) { @@ -57,7 +57,7 @@ static void* libGL; #ifndef __APPLE__ typedef void* (APIENTRYP PFNGLXGETPROCADDRESSPROC_PRIVATE)(const char*); -PFNGLXGETPROCADDRESSPROC_PRIVATE gladGetProcAddressPtr; +static PFNGLXGETPROCADDRESSPROC_PRIVATE gladGetProcAddressPtr; #endif static @@ -152,19 +152,19 @@ static int get_exts(void) { exts = (const char *)glGetString(GL_EXTENSIONS); #ifdef _GLAD_IS_SOME_NEW_VERSION } else { - int index; + unsigned int index; num_exts_i = 0; glGetIntegerv(GL_NUM_EXTENSIONS, &num_exts_i); if (num_exts_i > 0) { - exts_i = (const char **)realloc((void *)exts_i, num_exts_i * sizeof *exts_i); + exts_i = (const char **)realloc((void *)exts_i, (size_t)num_exts_i * (sizeof *exts_i)); } if (exts_i == NULL) { return 0; } - for(index = 0; index < num_exts_i; index++) { + for(index = 0; index < (unsigned)num_exts_i; index++) { exts_i[index] = (const char*)glGetStringi(GL_EXTENSIONS, index); } } @@ -174,7 +174,7 @@ static int get_exts(void) { static void free_exts(void) { if (exts_i != NULL) { - free((char **)exts_i); + free((void *)exts_i); exts_i = NULL; } } diff --git a/thirdparty/glad/glad/glad.h b/thirdparty/glad/glad/glad.h index e5eb22e297..cb78df071e 100644 --- a/thirdparty/glad/glad/glad.h +++ b/thirdparty/glad/glad/glad.h @@ -1,6 +1,6 @@ /* - OpenGL loader generated by glad 0.1.13a0 on Fri Jan 6 19:27:07 2017. + OpenGL loader generated by glad 0.1.14a0 on Wed Jun 14 20:12:45 2017. 
Language/Generator: C/C++ Specification: gl @@ -54,7 +54,7 @@ typedef void* (* GLADloadproc)(const char *name); #ifndef GLAPI # if defined(GLAD_GLAPI_EXPORT) -# if defined(WIN32) || defined(__CYGWIN__) +# if defined(_WIN32) || defined(__CYGWIN__) # if defined(GLAD_GLAPI_EXPORT_BUILD) # if defined(__GNUC__) # define GLAPI __attribute__ ((dllexport)) extern @@ -183,6 +183,7 @@ typedef void (APIENTRY *GLDEBUGPROCKHR)(GLenum source,GLenum type,GLuint id,GLen typedef void (APIENTRY *GLDEBUGPROCAMD)(GLuint id,GLenum category,GLenum severity,GLsizei length,const GLchar *message,void *userParam); typedef unsigned short GLhalfNV; typedef GLintptr GLvdpauSurfaceNV; +typedef void (APIENTRY *GLVULKANPROCNV)(void); #define GL_DEPTH_BUFFER_BIT 0x00000100 #define GL_STENCIL_BUFFER_BIT 0x00000400 #define GL_COLOR_BUFFER_BIT 0x00004000 @@ -264,7 +265,6 @@ typedef GLintptr GLvdpauSurfaceNV; #define GL_BLEND_SRC 0x0BE1 #define GL_BLEND 0x0BE2 #define GL_LOGIC_OP_MODE 0x0BF0 -#define GL_COLOR_LOGIC_OP 0x0BF2 #define GL_DRAW_BUFFER 0x0C01 #define GL_READ_BUFFER 0x0C02 #define GL_SCISSOR_BOX 0x0C10 @@ -292,21 +292,9 @@ typedef GLintptr GLvdpauSurfaceNV; #define GL_SUBPIXEL_BITS 0x0D50 #define GL_TEXTURE_1D 0x0DE0 #define GL_TEXTURE_2D 0x0DE1 -#define GL_POLYGON_OFFSET_UNITS 0x2A00 -#define GL_POLYGON_OFFSET_POINT 0x2A01 -#define GL_POLYGON_OFFSET_LINE 0x2A02 -#define GL_POLYGON_OFFSET_FILL 0x8037 -#define GL_POLYGON_OFFSET_FACTOR 0x8038 -#define GL_TEXTURE_BINDING_1D 0x8068 -#define GL_TEXTURE_BINDING_2D 0x8069 #define GL_TEXTURE_WIDTH 0x1000 #define GL_TEXTURE_HEIGHT 0x1001 -#define GL_TEXTURE_INTERNAL_FORMAT 0x1003 #define GL_TEXTURE_BORDER_COLOR 0x1004 -#define GL_TEXTURE_RED_SIZE 0x805C -#define GL_TEXTURE_GREEN_SIZE 0x805D -#define GL_TEXTURE_BLUE_SIZE 0x805E -#define GL_TEXTURE_ALPHA_SIZE 0x805F #define GL_DONT_CARE 0x1100 #define GL_FASTEST 0x1101 #define GL_NICEST 0x1102 @@ -317,7 +305,6 @@ typedef GLintptr GLvdpauSurfaceNV; #define GL_INT 0x1404 #define GL_UNSIGNED_INT 0x1405 #define GL_FLOAT 0x1406 -#define GL_DOUBLE 0x140A #define GL_STACK_OVERFLOW 0x0503 #define GL_STACK_UNDERFLOW 0x0504 #define GL_CLEAR 0x1500 @@ -369,23 +356,7 @@ typedef GLintptr GLvdpauSurfaceNV; #define GL_TEXTURE_MIN_FILTER 0x2801 #define GL_TEXTURE_WRAP_S 0x2802 #define GL_TEXTURE_WRAP_T 0x2803 -#define GL_PROXY_TEXTURE_1D 0x8063 -#define GL_PROXY_TEXTURE_2D 0x8064 #define GL_REPEAT 0x2901 -#define GL_R3_G3_B2 0x2A10 -#define GL_RGB4 0x804F -#define GL_RGB5 0x8050 -#define GL_RGB8 0x8051 -#define GL_RGB10 0x8052 -#define GL_RGB12 0x8053 -#define GL_RGB16 0x8054 -#define GL_RGBA2 0x8055 -#define GL_RGBA4 0x8056 -#define GL_RGB5_A1 0x8057 -#define GL_RGBA8 0x8058 -#define GL_RGB10_A2 0x8059 -#define GL_RGBA12 0x805A -#define GL_RGBA16 0x805B #define GL_CURRENT_BIT 0x00000001 #define GL_POINT_BIT 0x00000002 #define GL_LINE_BIT 0x00000004 @@ -404,9 +375,6 @@ typedef GLintptr GLvdpauSurfaceNV; #define GL_TEXTURE_BIT 0x00040000 #define GL_SCISSOR_BIT 0x00080000 #define GL_ALL_ATTRIB_BITS 0xFFFFFFFF -#define GL_CLIENT_PIXEL_STORE_BIT 0x00000001 -#define GL_CLIENT_VERTEX_ARRAY_BIT 0x00000002 -#define GL_CLIENT_ALL_ATTRIB_BITS 0xFFFFFFFF #define GL_QUAD_STRIP 0x0008 #define GL_POLYGON 0x0009 #define GL_ACCUM 0x0100 @@ -446,14 +414,6 @@ typedef GLintptr GLvdpauSurfaceNV; #define GL_PIXEL_MAP_G_TO_G 0x0C77 #define GL_PIXEL_MAP_B_TO_B 0x0C78 #define GL_PIXEL_MAP_A_TO_A 0x0C79 -#define GL_VERTEX_ARRAY_POINTER 0x808E -#define GL_NORMAL_ARRAY_POINTER 0x808F -#define GL_COLOR_ARRAY_POINTER 0x8090 -#define GL_INDEX_ARRAY_POINTER 0x8091 
-#define GL_TEXTURE_COORD_ARRAY_POINTER 0x8092 -#define GL_EDGE_FLAG_ARRAY_POINTER 0x8093 -#define GL_FEEDBACK_BUFFER_POINTER 0x0DF0 -#define GL_SELECTION_BUFFER_POINTER 0x0DF3 #define GL_CURRENT_COLOR 0x0B00 #define GL_CURRENT_INDEX 0x0B01 #define GL_CURRENT_NORMAL 0x0B02 @@ -499,11 +459,9 @@ typedef GLintptr GLvdpauSurfaceNV; #define GL_PROJECTION_MATRIX 0x0BA7 #define GL_TEXTURE_MATRIX 0x0BA8 #define GL_ATTRIB_STACK_DEPTH 0x0BB0 -#define GL_CLIENT_ATTRIB_STACK_DEPTH 0x0BB1 #define GL_ALPHA_TEST 0x0BC0 #define GL_ALPHA_TEST_FUNC 0x0BC1 #define GL_ALPHA_TEST_REF 0x0BC2 -#define GL_INDEX_LOGIC_OP 0x0BF1 #define GL_LOGIC_OP 0x0BF1 #define GL_AUX_BUFFERS 0x0C00 #define GL_INDEX_CLEAR_VALUE 0x0C20 @@ -553,7 +511,6 @@ typedef GLintptr GLvdpauSurfaceNV; #define GL_MAX_NAME_STACK_DEPTH 0x0D37 #define GL_MAX_PROJECTION_STACK_DEPTH 0x0D38 #define GL_MAX_TEXTURE_STACK_DEPTH 0x0D39 -#define GL_MAX_CLIENT_ATTRIB_STACK_DEPTH 0x0D3B #define GL_INDEX_BITS 0x0D51 #define GL_RED_BITS 0x0D52 #define GL_GREEN_BITS 0x0D53 @@ -589,35 +546,8 @@ typedef GLintptr GLvdpauSurfaceNV; #define GL_MAP1_GRID_SEGMENTS 0x0DD1 #define GL_MAP2_GRID_DOMAIN 0x0DD2 #define GL_MAP2_GRID_SEGMENTS 0x0DD3 -#define GL_FEEDBACK_BUFFER_SIZE 0x0DF1 -#define GL_FEEDBACK_BUFFER_TYPE 0x0DF2 -#define GL_SELECTION_BUFFER_SIZE 0x0DF4 -#define GL_VERTEX_ARRAY 0x8074 -#define GL_NORMAL_ARRAY 0x8075 -#define GL_COLOR_ARRAY 0x8076 -#define GL_INDEX_ARRAY 0x8077 -#define GL_TEXTURE_COORD_ARRAY 0x8078 -#define GL_EDGE_FLAG_ARRAY 0x8079 -#define GL_VERTEX_ARRAY_SIZE 0x807A -#define GL_VERTEX_ARRAY_TYPE 0x807B -#define GL_VERTEX_ARRAY_STRIDE 0x807C -#define GL_NORMAL_ARRAY_TYPE 0x807E -#define GL_NORMAL_ARRAY_STRIDE 0x807F -#define GL_COLOR_ARRAY_SIZE 0x8081 -#define GL_COLOR_ARRAY_TYPE 0x8082 -#define GL_COLOR_ARRAY_STRIDE 0x8083 -#define GL_INDEX_ARRAY_TYPE 0x8085 -#define GL_INDEX_ARRAY_STRIDE 0x8086 -#define GL_TEXTURE_COORD_ARRAY_SIZE 0x8088 -#define GL_TEXTURE_COORD_ARRAY_TYPE 0x8089 -#define GL_TEXTURE_COORD_ARRAY_STRIDE 0x808A -#define GL_EDGE_FLAG_ARRAY_STRIDE 0x808C #define GL_TEXTURE_COMPONENTS 0x1003 #define GL_TEXTURE_BORDER 0x1005 -#define GL_TEXTURE_LUMINANCE_SIZE 0x8060 -#define GL_TEXTURE_INTENSITY_SIZE 0x8061 -#define GL_TEXTURE_PRIORITY 0x8066 -#define GL_TEXTURE_RESIDENT 0x8067 #define GL_AMBIENT 0x1200 #define GL_DIFFUSE 0x1201 #define GL_SPECULAR 0x1202 @@ -664,6 +594,91 @@ typedef GLintptr GLvdpauSurfaceNV; #define GL_OBJECT_PLANE 0x2501 #define GL_EYE_PLANE 0x2502 #define GL_CLAMP 0x2900 +#define GL_CLIP_PLANE0 0x3000 +#define GL_CLIP_PLANE1 0x3001 +#define GL_CLIP_PLANE2 0x3002 +#define GL_CLIP_PLANE3 0x3003 +#define GL_CLIP_PLANE4 0x3004 +#define GL_CLIP_PLANE5 0x3005 +#define GL_LIGHT0 0x4000 +#define GL_LIGHT1 0x4001 +#define GL_LIGHT2 0x4002 +#define GL_LIGHT3 0x4003 +#define GL_LIGHT4 0x4004 +#define GL_LIGHT5 0x4005 +#define GL_LIGHT6 0x4006 +#define GL_LIGHT7 0x4007 +#define GL_COLOR_LOGIC_OP 0x0BF2 +#define GL_POLYGON_OFFSET_UNITS 0x2A00 +#define GL_POLYGON_OFFSET_POINT 0x2A01 +#define GL_POLYGON_OFFSET_LINE 0x2A02 +#define GL_POLYGON_OFFSET_FILL 0x8037 +#define GL_POLYGON_OFFSET_FACTOR 0x8038 +#define GL_TEXTURE_BINDING_1D 0x8068 +#define GL_TEXTURE_BINDING_2D 0x8069 +#define GL_TEXTURE_INTERNAL_FORMAT 0x1003 +#define GL_TEXTURE_RED_SIZE 0x805C +#define GL_TEXTURE_GREEN_SIZE 0x805D +#define GL_TEXTURE_BLUE_SIZE 0x805E +#define GL_TEXTURE_ALPHA_SIZE 0x805F +#define GL_DOUBLE 0x140A +#define GL_PROXY_TEXTURE_1D 0x8063 +#define GL_PROXY_TEXTURE_2D 0x8064 +#define GL_R3_G3_B2 0x2A10 +#define GL_RGB4 0x804F +#define 
GL_RGB5 0x8050 +#define GL_RGB8 0x8051 +#define GL_RGB10 0x8052 +#define GL_RGB12 0x8053 +#define GL_RGB16 0x8054 +#define GL_RGBA2 0x8055 +#define GL_RGBA4 0x8056 +#define GL_RGB5_A1 0x8057 +#define GL_RGBA8 0x8058 +#define GL_RGB10_A2 0x8059 +#define GL_RGBA12 0x805A +#define GL_RGBA16 0x805B +#define GL_CLIENT_PIXEL_STORE_BIT 0x00000001 +#define GL_CLIENT_VERTEX_ARRAY_BIT 0x00000002 +#define GL_CLIENT_ALL_ATTRIB_BITS 0xFFFFFFFF +#define GL_VERTEX_ARRAY_POINTER 0x808E +#define GL_NORMAL_ARRAY_POINTER 0x808F +#define GL_COLOR_ARRAY_POINTER 0x8090 +#define GL_INDEX_ARRAY_POINTER 0x8091 +#define GL_TEXTURE_COORD_ARRAY_POINTER 0x8092 +#define GL_EDGE_FLAG_ARRAY_POINTER 0x8093 +#define GL_FEEDBACK_BUFFER_POINTER 0x0DF0 +#define GL_SELECTION_BUFFER_POINTER 0x0DF3 +#define GL_CLIENT_ATTRIB_STACK_DEPTH 0x0BB1 +#define GL_INDEX_LOGIC_OP 0x0BF1 +#define GL_MAX_CLIENT_ATTRIB_STACK_DEPTH 0x0D3B +#define GL_FEEDBACK_BUFFER_SIZE 0x0DF1 +#define GL_FEEDBACK_BUFFER_TYPE 0x0DF2 +#define GL_SELECTION_BUFFER_SIZE 0x0DF4 +#define GL_VERTEX_ARRAY 0x8074 +#define GL_NORMAL_ARRAY 0x8075 +#define GL_COLOR_ARRAY 0x8076 +#define GL_INDEX_ARRAY 0x8077 +#define GL_TEXTURE_COORD_ARRAY 0x8078 +#define GL_EDGE_FLAG_ARRAY 0x8079 +#define GL_VERTEX_ARRAY_SIZE 0x807A +#define GL_VERTEX_ARRAY_TYPE 0x807B +#define GL_VERTEX_ARRAY_STRIDE 0x807C +#define GL_NORMAL_ARRAY_TYPE 0x807E +#define GL_NORMAL_ARRAY_STRIDE 0x807F +#define GL_COLOR_ARRAY_SIZE 0x8081 +#define GL_COLOR_ARRAY_TYPE 0x8082 +#define GL_COLOR_ARRAY_STRIDE 0x8083 +#define GL_INDEX_ARRAY_TYPE 0x8085 +#define GL_INDEX_ARRAY_STRIDE 0x8086 +#define GL_TEXTURE_COORD_ARRAY_SIZE 0x8088 +#define GL_TEXTURE_COORD_ARRAY_TYPE 0x8089 +#define GL_TEXTURE_COORD_ARRAY_STRIDE 0x808A +#define GL_EDGE_FLAG_ARRAY_STRIDE 0x808C +#define GL_TEXTURE_LUMINANCE_SIZE 0x8060 +#define GL_TEXTURE_INTENSITY_SIZE 0x8061 +#define GL_TEXTURE_PRIORITY 0x8066 +#define GL_TEXTURE_RESIDENT 0x8067 #define GL_ALPHA4 0x803B #define GL_ALPHA8 0x803C #define GL_ALPHA12 0x803D @@ -697,20 +712,6 @@ typedef GLintptr GLvdpauSurfaceNV; #define GL_T2F_N3F_V3F 0x2A2B #define GL_T2F_C4F_N3F_V3F 0x2A2C #define GL_T4F_C4F_N3F_V4F 0x2A2D -#define GL_CLIP_PLANE0 0x3000 -#define GL_CLIP_PLANE1 0x3001 -#define GL_CLIP_PLANE2 0x3002 -#define GL_CLIP_PLANE3 0x3003 -#define GL_CLIP_PLANE4 0x3004 -#define GL_CLIP_PLANE5 0x3005 -#define GL_LIGHT0 0x4000 -#define GL_LIGHT1 0x4001 -#define GL_LIGHT2 0x4002 -#define GL_LIGHT3 0x4003 -#define GL_LIGHT4 0x4004 -#define GL_LIGHT5 0x4005 -#define GL_LIGHT6 0x4006 -#define GL_LIGHT7 0x4007 #define GL_UNSIGNED_BYTE_3_3_2 0x8032 #define GL_UNSIGNED_SHORT_4_4_4_4 0x8033 #define GL_UNSIGNED_SHORT_5_5_5_1 0x8034 diff --git a/thirdparty/rg-etc1/rg_etc1.cpp b/thirdparty/rg-etc1/rg_etc1.cpp deleted file mode 100644 index 8e28b53f9d..0000000000 --- a/thirdparty/rg-etc1/rg_etc1.cpp +++ /dev/null @@ -1,2446 +0,0 @@ -// File: rg_etc1.cpp - Fast, high quality ETC1 block packer/unpacker - Rich Geldreich <richgel99@gmail.com> -// Please see ZLIB license at the end of rg_etc1.h. -// -// For more information Ericsson Texture Compression (ETC/ETC1), see: -// http://www.khronos.org/registry/gles/extensions/OES/OES_compressed_ETC1_RGB8_texture.txt -// -// v1.04 - 5/15/14 - Fix signed vs. unsigned subtraction problem (noticed when compiled with gcc) in pack_etc1_block_init(). -// This issue would cause an assert when this func. was called in debug. (Note this module was developed/testing with MSVC, -// I still need to test it throughly when compiled with gcc.) 
-// -// v1.03 - 5/12/13 - Initial public release -#include "rg_etc1.h" - -#include <stdlib.h> -#include <memory.h> -#include <assert.h> -//#include <stdio.h> -#include <math.h> - -#pragma warning (disable: 4201) // nonstandard extension used : nameless struct/union - -#if defined(_DEBUG) || defined(DEBUG) -#define RG_ETC1_BUILD_DEBUG -#endif - -#define RG_ETC1_ASSERT assert - -namespace rg_etc1 -{ - typedef unsigned char uint8; - typedef unsigned short uint16; - typedef unsigned int uint; - typedef unsigned int uint32; - typedef long long int64; - typedef unsigned long long uint64; - - const uint32 cUINT32_MAX = 0xFFFFFFFFU; - const uint64 cUINT64_MAX = 0xFFFFFFFFFFFFFFFFULL; //0xFFFFFFFFFFFFFFFFui64; - - template<typename T> inline T minimum(T a, T b) { return (a < b) ? a : b; } - template<typename T> inline T minimum(T a, T b, T c) { return minimum(minimum(a, b), c); } - template<typename T> inline T maximum(T a, T b) { return (a > b) ? a : b; } - template<typename T> inline T maximum(T a, T b, T c) { return maximum(maximum(a, b), c); } - template<typename T> inline T clamp(T value, T low, T high) { return (value < low) ? low : ((value > high) ? high : value); } - template<typename T> inline T square(T value) { return value * value; } - template<typename T> inline void zero_object(T& obj) { memset((void*)&obj, 0, sizeof(obj)); } - template<typename T> inline void zero_this(T* pObj) { memset((void*)pObj, 0, sizeof(*pObj)); } - - template<class T, size_t N> T decay_array_to_subtype(T (&a)[N]); - -#define RG_ETC1_ARRAY_SIZE(X) (sizeof(X) / sizeof(decay_array_to_subtype(X))) - - enum eNoClamp { cNoClamp }; - - struct color_quad_u8 - { - static inline int clamp(int v) { if (v & 0xFFFFFF00U) v = (~(static_cast<int>(v) >> 31)) & 0xFF; return v; } - - struct component_traits { enum { cSigned = false, cFloat = false, cMin = 0U, cMax = 255U }; }; - - public: - typedef unsigned char component_t; - typedef int parameter_t; - - enum { cNumComps = 4 }; - - union - { - struct - { - component_t r; - component_t g; - component_t b; - component_t a; - }; - - component_t c[cNumComps]; - - uint32 m_u32; - }; - - inline color_quad_u8() - { - } - - inline color_quad_u8(const color_quad_u8& other) : m_u32(other.m_u32) - { - } - - explicit inline color_quad_u8(parameter_t y, parameter_t alpha = component_traits::cMax) - { - set(y, alpha); - } - - inline color_quad_u8(parameter_t red, parameter_t green, parameter_t blue, parameter_t alpha = component_traits::cMax) - { - set(red, green, blue, alpha); - } - - explicit inline color_quad_u8(eNoClamp, parameter_t y, parameter_t alpha = component_traits::cMax) - { - set_noclamp_y_alpha(y, alpha); - } - - inline color_quad_u8(eNoClamp, parameter_t red, parameter_t green, parameter_t blue, parameter_t alpha = component_traits::cMax) - { - set_noclamp_rgba(red, green, blue, alpha); - } - - inline void clear() - { - m_u32 = 0; - } - - inline color_quad_u8& operator= (const color_quad_u8& other) - { - m_u32 = other.m_u32; - return *this; - } - - inline color_quad_u8& set_rgb(const color_quad_u8& other) - { - r = other.r; - g = other.g; - b = other.b; - return *this; - } - - inline color_quad_u8& operator= (parameter_t y) - { - set(y, component_traits::cMax); - return *this; - } - - inline color_quad_u8& set(parameter_t y, parameter_t alpha = component_traits::cMax) - { - y = clamp(y); - alpha = clamp(alpha); - r = static_cast<component_t>(y); - g = static_cast<component_t>(y); - b = static_cast<component_t>(y); - a = static_cast<component_t>(alpha); - return *this; - } - - 
inline color_quad_u8& set_noclamp_y_alpha(parameter_t y, parameter_t alpha = component_traits::cMax) - { - RG_ETC1_ASSERT( (y >= component_traits::cMin) && (y <= component_traits::cMax) ); - RG_ETC1_ASSERT( (alpha >= component_traits::cMin) && (alpha <= component_traits::cMax) ); - - r = static_cast<component_t>(y); - g = static_cast<component_t>(y); - b = static_cast<component_t>(y); - a = static_cast<component_t>(alpha); - return *this; - } - - inline color_quad_u8& set(parameter_t red, parameter_t green, parameter_t blue, parameter_t alpha = component_traits::cMax) - { - r = static_cast<component_t>(clamp(red)); - g = static_cast<component_t>(clamp(green)); - b = static_cast<component_t>(clamp(blue)); - a = static_cast<component_t>(clamp(alpha)); - return *this; - } - - inline color_quad_u8& set_noclamp_rgba(parameter_t red, parameter_t green, parameter_t blue, parameter_t alpha) - { - RG_ETC1_ASSERT( (red >= component_traits::cMin) && (red <= component_traits::cMax) ); - RG_ETC1_ASSERT( (green >= component_traits::cMin) && (green <= component_traits::cMax) ); - RG_ETC1_ASSERT( (blue >= component_traits::cMin) && (blue <= component_traits::cMax) ); - RG_ETC1_ASSERT( (alpha >= component_traits::cMin) && (alpha <= component_traits::cMax) ); - - r = static_cast<component_t>(red); - g = static_cast<component_t>(green); - b = static_cast<component_t>(blue); - a = static_cast<component_t>(alpha); - return *this; - } - - inline color_quad_u8& set_noclamp_rgb(parameter_t red, parameter_t green, parameter_t blue) - { - RG_ETC1_ASSERT( (red >= component_traits::cMin) && (red <= component_traits::cMax) ); - RG_ETC1_ASSERT( (green >= component_traits::cMin) && (green <= component_traits::cMax) ); - RG_ETC1_ASSERT( (blue >= component_traits::cMin) && (blue <= component_traits::cMax) ); - - r = static_cast<component_t>(red); - g = static_cast<component_t>(green); - b = static_cast<component_t>(blue); - return *this; - } - - static inline parameter_t get_min_comp() { return component_traits::cMin; } - static inline parameter_t get_max_comp() { return component_traits::cMax; } - static inline bool get_comps_are_signed() { return component_traits::cSigned; } - - inline component_t operator[] (uint i) const { RG_ETC1_ASSERT(i < cNumComps); return c[i]; } - inline component_t& operator[] (uint i) { RG_ETC1_ASSERT(i < cNumComps); return c[i]; } - - inline color_quad_u8& set_component(uint i, parameter_t f) - { - RG_ETC1_ASSERT(i < cNumComps); - - c[i] = static_cast<component_t>(clamp(f)); - - return *this; - } - - inline color_quad_u8& set_grayscale(parameter_t l) - { - component_t x = static_cast<component_t>(clamp(l)); - c[0] = x; - c[1] = x; - c[2] = x; - return *this; - } - - inline color_quad_u8& clamp(const color_quad_u8& l, const color_quad_u8& h) - { - for (uint i = 0; i < cNumComps; i++) - c[i] = static_cast<component_t>(rg_etc1::clamp<parameter_t>(c[i], l[i], h[i])); - return *this; - } - - inline color_quad_u8& clamp(parameter_t l, parameter_t h) - { - for (uint i = 0; i < cNumComps; i++) - c[i] = static_cast<component_t>(rg_etc1::clamp<parameter_t>(c[i], l, h)); - return *this; - } - - // Returns CCIR 601 luma (consistent with color_utils::RGB_To_Y). - inline parameter_t get_luma() const - { - return static_cast<parameter_t>((19595U * r + 38470U * g + 7471U * b + 32768U) >> 16U); - } - - // Returns REC 709 luma. 
- inline parameter_t get_luma_rec709() const - { - return static_cast<parameter_t>((13938U * r + 46869U * g + 4729U * b + 32768U) >> 16U); - } - - inline uint squared_distance_rgb(const color_quad_u8& c) const - { - return rg_etc1::square(r - c.r) + rg_etc1::square(g - c.g) + rg_etc1::square(b - c.b); - } - - inline uint squared_distance_rgba(const color_quad_u8& c) const - { - return rg_etc1::square(r - c.r) + rg_etc1::square(g - c.g) + rg_etc1::square(b - c.b) + rg_etc1::square(a - c.a); - } - - inline bool rgb_equals(const color_quad_u8& rhs) const - { - return (r == rhs.r) && (g == rhs.g) && (b == rhs.b); - } - - inline bool operator== (const color_quad_u8& rhs) const - { - return m_u32 == rhs.m_u32; - } - - color_quad_u8& operator+= (const color_quad_u8& other) - { - for (uint i = 0; i < 4; i++) - c[i] = static_cast<component_t>(clamp(c[i] + other.c[i])); - return *this; - } - - color_quad_u8& operator-= (const color_quad_u8& other) - { - for (uint i = 0; i < 4; i++) - c[i] = static_cast<component_t>(clamp(c[i] - other.c[i])); - return *this; - } - - friend color_quad_u8 operator+ (const color_quad_u8& lhs, const color_quad_u8& rhs) - { - color_quad_u8 result(lhs); - result += rhs; - return result; - } - - friend color_quad_u8 operator- (const color_quad_u8& lhs, const color_quad_u8& rhs) - { - color_quad_u8 result(lhs); - result -= rhs; - return result; - } - }; // class color_quad_u8 - - struct vec3F - { - float m_s[3]; - - inline vec3F() { } - inline vec3F(float s) { m_s[0] = s; m_s[1] = s; m_s[2] = s; } - inline vec3F(float x, float y, float z) { m_s[0] = x; m_s[1] = y; m_s[2] = z; } - - inline float operator[] (uint i) const { RG_ETC1_ASSERT(i < 3); return m_s[i]; } - - inline vec3F& operator += (const vec3F& other) { for (uint i = 0; i < 3; i++) m_s[i] += other.m_s[i]; return *this; } - - inline vec3F& operator *= (float s) { for (uint i = 0; i < 3; i++) m_s[i] *= s; return *this; } - }; - - enum etc_constants - { - cETC1BytesPerBlock = 8U, - - cETC1SelectorBits = 2U, - cETC1SelectorValues = 1U << cETC1SelectorBits, - cETC1SelectorMask = cETC1SelectorValues - 1U, - - cETC1BlockShift = 2U, - cETC1BlockSize = 1U << cETC1BlockShift, - - cETC1LSBSelectorIndicesBitOffset = 0, - cETC1MSBSelectorIndicesBitOffset = 16, - - cETC1FlipBitOffset = 32, - cETC1DiffBitOffset = 33, - - cETC1IntenModifierNumBits = 3, - cETC1IntenModifierValues = 1 << cETC1IntenModifierNumBits, - cETC1RightIntenModifierTableBitOffset = 34, - cETC1LeftIntenModifierTableBitOffset = 37, - - // Base+Delta encoding (5 bit bases, 3 bit delta) - cETC1BaseColorCompNumBits = 5, - cETC1BaseColorCompMax = 1 << cETC1BaseColorCompNumBits, - - cETC1DeltaColorCompNumBits = 3, - cETC1DeltaColorComp = 1 << cETC1DeltaColorCompNumBits, - cETC1DeltaColorCompMax = 1 << cETC1DeltaColorCompNumBits, - - cETC1BaseColor5RBitOffset = 59, - cETC1BaseColor5GBitOffset = 51, - cETC1BaseColor5BBitOffset = 43, - - cETC1DeltaColor3RBitOffset = 56, - cETC1DeltaColor3GBitOffset = 48, - cETC1DeltaColor3BBitOffset = 40, - - // Absolute (non-delta) encoding (two 4-bit per component bases) - cETC1AbsColorCompNumBits = 4, - cETC1AbsColorCompMax = 1 << cETC1AbsColorCompNumBits, - - cETC1AbsColor4R1BitOffset = 60, - cETC1AbsColor4G1BitOffset = 52, - cETC1AbsColor4B1BitOffset = 44, - - cETC1AbsColor4R2BitOffset = 56, - cETC1AbsColor4G2BitOffset = 48, - cETC1AbsColor4B2BitOffset = 40, - - cETC1ColorDeltaMin = -4, - cETC1ColorDeltaMax = 3, - - // Delta3: - // 0 1 2 3 4 5 6 7 - // 000 001 010 011 100 101 110 111 - // 0 1 2 3 -4 -3 -2 -1 - }; - - static uint8 
g_quant5_tab[256+16]; - - static const int g_etc1_inten_tables[cETC1IntenModifierValues][cETC1SelectorValues] = - { - { -8, -2, 2, 8 }, { -17, -5, 5, 17 }, { -29, -9, 9, 29 }, { -42, -13, 13, 42 }, - { -60, -18, 18, 60 }, { -80, -24, 24, 80 }, { -106, -33, 33, 106 }, { -183, -47, 47, 183 } - }; - - static const uint8 g_etc1_to_selector_index[cETC1SelectorValues] = { 2, 3, 1, 0 }; - static const uint8 g_selector_index_to_etc1[cETC1SelectorValues] = { 3, 2, 0, 1 }; - - // Given an ETC1 diff/inten_table/selector, and an 8-bit desired color, this table encodes the best packed_color in the low byte, and the abs error in the high byte. - static uint16 g_etc1_inverse_lookup[2*8*4][256]; // [diff/inten_table/selector][desired_color] - - // g_color8_to_etc_block_config[color][table_index] = Supplies for each 8-bit color value a list of packed ETC1 diff/intensity table/selectors/packed_colors that map to that color. - // To pack: diff | (inten << 1) | (selector << 4) | (packed_c << 8) - static const uint16 g_color8_to_etc_block_config_0_255[2][33] = - { - { 0x0000, 0x0010, 0x0002, 0x0012, 0x0004, 0x0014, 0x0006, 0x0016, 0x0008, 0x0018, 0x000A, 0x001A, 0x000C, 0x001C, 0x000E, 0x001E, - 0x0001, 0x0011, 0x0003, 0x0013, 0x0005, 0x0015, 0x0007, 0x0017, 0x0009, 0x0019, 0x000B, 0x001B, 0x000D, 0x001D, 0x000F, 0x001F, 0xFFFF }, - { 0x0F20, 0x0F30, 0x0E32, 0x0F22, 0x0E34, 0x0F24, 0x0D36, 0x0F26, 0x0C38, 0x0E28, 0x0B3A, 0x0E2A, 0x093C, 0x0E2C, 0x053E, 0x0D2E, - 0x1E31, 0x1F21, 0x1D33, 0x1F23, 0x1C35, 0x1E25, 0x1A37, 0x1E27, 0x1839, 0x1D29, 0x163B, 0x1C2B, 0x133D, 0x1B2D, 0x093F, 0x1A2F, 0xFFFF }, - }; - - // Really only [254][11]. - static const uint16 g_color8_to_etc_block_config_1_to_254[254][12] = - { - { 0x021C, 0x0D0D, 0xFFFF }, { 0x0020, 0x0021, 0x0A0B, 0x061F, 0xFFFF }, { 0x0113, 0x0217, 0xFFFF }, { 0x0116, 0x031E, - 0x0B0E, 0x0405, 0xFFFF }, { 0x0022, 0x0204, 0x050A, 0x0023, 0xFFFF }, { 0x0111, 0x0319, 0x0809, 0x170F, 0xFFFF }, { - 0x0303, 0x0215, 0x0607, 0xFFFF }, { 0x0030, 0x0114, 0x0408, 0x0031, 0x0201, 0x051D, 0xFFFF }, { 0x0100, 0x0024, 0x0306, - 0x0025, 0x041B, 0x0E0D, 0xFFFF }, { 0x021A, 0x0121, 0x0B0B, 0x071F, 0xFFFF }, { 0x0213, 0x0317, 0xFFFF }, { 0x0112, - 0x0505, 0xFFFF }, { 0x0026, 0x070C, 0x0123, 0x0027, 0xFFFF }, { 0x0211, 0x0909, 0xFFFF }, { 0x0110, 0x0315, 0x0707, - 0x0419, 0x180F, 0xFFFF }, { 0x0218, 0x0131, 0x0301, 0x0403, 0x061D, 0xFFFF }, { 0x0032, 0x0202, 0x0033, 0x0125, 0x051B, - 0x0F0D, 0xFFFF }, { 0x0028, 0x031C, 0x0221, 0x0029, 0xFFFF }, { 0x0120, 0x0313, 0x0C0B, 0x081F, 0xFFFF }, { 0x0605, - 0x0417, 0xFFFF }, { 0x0216, 0x041E, 0x0C0E, 0x0223, 0x0127, 0xFFFF }, { 0x0122, 0x0304, 0x060A, 0x0311, 0x0A09, 0xFFFF - }, { 0x0519, 0x190F, 0xFFFF }, { 0x002A, 0x0231, 0x0503, 0x0415, 0x0807, 0x002B, 0x071D, 0xFFFF }, { 0x0130, 0x0214, - 0x0508, 0x0401, 0x0133, 0x0225, 0x061B, 0xFFFF }, { 0x0200, 0x0124, 0x0406, 0x0321, 0x0129, 0x100D, 0xFFFF }, { 0x031A, - 0x0D0B, 0x091F, 0xFFFF }, { 0x0413, 0x0705, 0x0517, 0xFFFF }, { 0x0212, 0x0034, 0x0323, 0x0035, 0x0227, 0xFFFF }, { - 0x0126, 0x080C, 0x0B09, 0xFFFF }, { 0x0411, 0x0619, 0x1A0F, 0xFFFF }, { 0x0210, 0x0331, 0x0603, 0x0515, 0x0907, 0x012B, - 0xFFFF }, { 0x0318, 0x002C, 0x0501, 0x0233, 0x0325, 0x071B, 0x002D, 0x081D, 0xFFFF }, { 0x0132, 0x0302, 0x0229, 0x110D, - 0xFFFF }, { 0x0128, 0x041C, 0x0421, 0x0E0B, 0x0A1F, 0xFFFF }, { 0x0220, 0x0513, 0x0617, 0xFFFF }, { 0x0135, 0x0805, - 0x0327, 0xFFFF }, { 0x0316, 0x051E, 0x0D0E, 0x0423, 0xFFFF }, { 0x0222, 0x0404, 0x070A, 0x0511, 0x0719, 0x0C09, 0x1B0F, - 0xFFFF }, { 0x0703, 0x0615, 0x0A07, 
0x022B, 0xFFFF }, { 0x012A, 0x0431, 0x0601, 0x0333, 0x012D, 0x091D, 0xFFFF }, { - 0x0230, 0x0314, 0x0036, 0x0608, 0x0425, 0x0037, 0x0329, 0x081B, 0x120D, 0xFFFF }, { 0x0300, 0x0224, 0x0506, 0x0521, - 0x0F0B, 0x0B1F, 0xFFFF }, { 0x041A, 0x0613, 0x0717, 0xFFFF }, { 0x0235, 0x0905, 0xFFFF }, { 0x0312, 0x0134, 0x0523, - 0x0427, 0xFFFF }, { 0x0226, 0x090C, 0x002E, 0x0611, 0x0D09, 0x002F, 0xFFFF }, { 0x0715, 0x0B07, 0x0819, 0x032B, 0x1C0F, - 0xFFFF }, { 0x0310, 0x0531, 0x0701, 0x0803, 0x022D, 0x0A1D, 0xFFFF }, { 0x0418, 0x012C, 0x0433, 0x0525, 0x0137, 0x091B, - 0x130D, 0xFFFF }, { 0x0232, 0x0402, 0x0621, 0x0429, 0xFFFF }, { 0x0228, 0x051C, 0x0713, 0x100B, 0x0C1F, 0xFFFF }, { - 0x0320, 0x0335, 0x0A05, 0x0817, 0xFFFF }, { 0x0623, 0x0527, 0xFFFF }, { 0x0416, 0x061E, 0x0E0E, 0x0711, 0x0E09, 0x012F, - 0xFFFF }, { 0x0322, 0x0504, 0x080A, 0x0919, 0x1D0F, 0xFFFF }, { 0x0631, 0x0903, 0x0815, 0x0C07, 0x042B, 0x032D, 0x0B1D, - 0xFFFF }, { 0x022A, 0x0801, 0x0533, 0x0625, 0x0237, 0x0A1B, 0xFFFF }, { 0x0330, 0x0414, 0x0136, 0x0708, 0x0721, 0x0529, - 0x140D, 0xFFFF }, { 0x0400, 0x0324, 0x0606, 0x0038, 0x0039, 0x110B, 0x0D1F, 0xFFFF }, { 0x051A, 0x0813, 0x0B05, 0x0917, - 0xFFFF }, { 0x0723, 0x0435, 0x0627, 0xFFFF }, { 0x0412, 0x0234, 0x0F09, 0x022F, 0xFFFF }, { 0x0326, 0x0A0C, 0x012E, - 0x0811, 0x0A19, 0x1E0F, 0xFFFF }, { 0x0731, 0x0A03, 0x0915, 0x0D07, 0x052B, 0xFFFF }, { 0x0410, 0x0901, 0x0633, 0x0725, - 0x0337, 0x0B1B, 0x042D, 0x0C1D, 0xFFFF }, { 0x0518, 0x022C, 0x0629, 0x150D, 0xFFFF }, { 0x0332, 0x0502, 0x0821, 0x0139, - 0x120B, 0x0E1F, 0xFFFF }, { 0x0328, 0x061C, 0x0913, 0x0A17, 0xFFFF }, { 0x0420, 0x0535, 0x0C05, 0x0727, 0xFFFF }, { - 0x0823, 0x032F, 0xFFFF }, { 0x0516, 0x071E, 0x0F0E, 0x0911, 0x0B19, 0x1009, 0x1F0F, 0xFFFF }, { 0x0422, 0x0604, 0x090A, - 0x0B03, 0x0A15, 0x0E07, 0x062B, 0xFFFF }, { 0x0831, 0x0A01, 0x0733, 0x052D, 0x0D1D, 0xFFFF }, { 0x032A, 0x0825, 0x0437, - 0x0729, 0x0C1B, 0x160D, 0xFFFF }, { 0x0430, 0x0514, 0x0236, 0x0808, 0x0921, 0x0239, 0x130B, 0x0F1F, 0xFFFF }, { 0x0500, - 0x0424, 0x0706, 0x0138, 0x0A13, 0x0B17, 0xFFFF }, { 0x061A, 0x0635, 0x0D05, 0xFFFF }, { 0x0923, 0x0827, 0xFFFF }, { - 0x0512, 0x0334, 0x003A, 0x0A11, 0x1109, 0x003B, 0x042F, 0xFFFF }, { 0x0426, 0x0B0C, 0x022E, 0x0B15, 0x0F07, 0x0C19, - 0x072B, 0xFFFF }, { 0x0931, 0x0B01, 0x0C03, 0x062D, 0x0E1D, 0xFFFF }, { 0x0510, 0x0833, 0x0925, 0x0537, 0x0D1B, 0x170D, - 0xFFFF }, { 0x0618, 0x032C, 0x0A21, 0x0339, 0x0829, 0xFFFF }, { 0x0432, 0x0602, 0x0B13, 0x140B, 0x101F, 0xFFFF }, { - 0x0428, 0x071C, 0x0735, 0x0E05, 0x0C17, 0xFFFF }, { 0x0520, 0x0A23, 0x0927, 0xFFFF }, { 0x0B11, 0x1209, 0x013B, 0x052F, - 0xFFFF }, { 0x0616, 0x081E, 0x0D19, 0xFFFF }, { 0x0522, 0x0704, 0x0A0A, 0x0A31, 0x0D03, 0x0C15, 0x1007, 0x082B, 0x072D, - 0x0F1D, 0xFFFF }, { 0x0C01, 0x0933, 0x0A25, 0x0637, 0x0E1B, 0xFFFF }, { 0x042A, 0x0B21, 0x0929, 0x180D, 0xFFFF }, { - 0x0530, 0x0614, 0x0336, 0x0908, 0x0439, 0x150B, 0x111F, 0xFFFF }, { 0x0600, 0x0524, 0x0806, 0x0238, 0x0C13, 0x0F05, - 0x0D17, 0xFFFF }, { 0x071A, 0x0B23, 0x0835, 0x0A27, 0xFFFF }, { 0x1309, 0x023B, 0x062F, 0xFFFF }, { 0x0612, 0x0434, - 0x013A, 0x0C11, 0x0E19, 0xFFFF }, { 0x0526, 0x0C0C, 0x032E, 0x0B31, 0x0E03, 0x0D15, 0x1107, 0x092B, 0xFFFF }, { 0x0D01, - 0x0A33, 0x0B25, 0x0737, 0x0F1B, 0x082D, 0x101D, 0xFFFF }, { 0x0610, 0x0A29, 0x190D, 0xFFFF }, { 0x0718, 0x042C, 0x0C21, - 0x0539, 0x160B, 0x121F, 0xFFFF }, { 0x0532, 0x0702, 0x0D13, 0x0E17, 0xFFFF }, { 0x0528, 0x081C, 0x0935, 0x1005, 0x0B27, - 0xFFFF }, { 0x0620, 0x0C23, 0x033B, 0x072F, 0xFFFF }, { 0x0D11, 0x0F19, 0x1409, 0xFFFF }, { 0x0716, 
0x003C, 0x091E, - 0x0F03, 0x0E15, 0x1207, 0x0A2B, 0x003D, 0xFFFF }, { 0x0622, 0x0804, 0x0B0A, 0x0C31, 0x0E01, 0x0B33, 0x092D, 0x111D, - 0xFFFF }, { 0x0C25, 0x0837, 0x0B29, 0x101B, 0x1A0D, 0xFFFF }, { 0x052A, 0x0D21, 0x0639, 0x170B, 0x131F, 0xFFFF }, { - 0x0630, 0x0714, 0x0436, 0x0A08, 0x0E13, 0x0F17, 0xFFFF }, { 0x0700, 0x0624, 0x0906, 0x0338, 0x0A35, 0x1105, 0xFFFF }, { - 0x081A, 0x0D23, 0x0C27, 0xFFFF }, { 0x0E11, 0x1509, 0x043B, 0x082F, 0xFFFF }, { 0x0712, 0x0534, 0x023A, 0x0F15, 0x1307, - 0x1019, 0x0B2B, 0x013D, 0xFFFF }, { 0x0626, 0x0D0C, 0x042E, 0x0D31, 0x0F01, 0x1003, 0x0A2D, 0x121D, 0xFFFF }, { 0x0C33, - 0x0D25, 0x0937, 0x111B, 0x1B0D, 0xFFFF }, { 0x0710, 0x0E21, 0x0739, 0x0C29, 0xFFFF }, { 0x0818, 0x052C, 0x0F13, 0x180B, - 0x141F, 0xFFFF }, { 0x0632, 0x0802, 0x0B35, 0x1205, 0x1017, 0xFFFF }, { 0x0628, 0x091C, 0x0E23, 0x0D27, 0xFFFF }, { - 0x0720, 0x0F11, 0x1609, 0x053B, 0x092F, 0xFFFF }, { 0x1119, 0x023D, 0xFFFF }, { 0x0816, 0x013C, 0x0A1E, 0x0E31, 0x1103, - 0x1015, 0x1407, 0x0C2B, 0x0B2D, 0x131D, 0xFFFF }, { 0x0722, 0x0904, 0x0C0A, 0x1001, 0x0D33, 0x0E25, 0x0A37, 0x121B, - 0xFFFF }, { 0x0F21, 0x0D29, 0x1C0D, 0xFFFF }, { 0x062A, 0x0839, 0x190B, 0x151F, 0xFFFF }, { 0x0730, 0x0814, 0x0536, - 0x0B08, 0x1013, 0x1305, 0x1117, 0xFFFF }, { 0x0800, 0x0724, 0x0A06, 0x0438, 0x0F23, 0x0C35, 0x0E27, 0xFFFF }, { 0x091A, - 0x1709, 0x063B, 0x0A2F, 0xFFFF }, { 0x1011, 0x1219, 0x033D, 0xFFFF }, { 0x0812, 0x0634, 0x033A, 0x0F31, 0x1203, 0x1115, - 0x1507, 0x0D2B, 0xFFFF }, { 0x0726, 0x0E0C, 0x052E, 0x1101, 0x0E33, 0x0F25, 0x0B37, 0x131B, 0x0C2D, 0x141D, 0xFFFF }, { - 0x0E29, 0x1D0D, 0xFFFF }, { 0x0810, 0x1021, 0x0939, 0x1A0B, 0x161F, 0xFFFF }, { 0x0918, 0x062C, 0x1113, 0x1217, 0xFFFF - }, { 0x0732, 0x0902, 0x0D35, 0x1405, 0x0F27, 0xFFFF }, { 0x0728, 0x0A1C, 0x1023, 0x073B, 0x0B2F, 0xFFFF }, { 0x0820, - 0x1111, 0x1319, 0x1809, 0xFFFF }, { 0x1303, 0x1215, 0x1607, 0x0E2B, 0x043D, 0xFFFF }, { 0x0916, 0x023C, 0x0B1E, 0x1031, - 0x1201, 0x0F33, 0x0D2D, 0x151D, 0xFFFF }, { 0x0822, 0x0A04, 0x0D0A, 0x1025, 0x0C37, 0x0F29, 0x141B, 0x1E0D, 0xFFFF }, { - 0x1121, 0x0A39, 0x1B0B, 0x171F, 0xFFFF }, { 0x072A, 0x1213, 0x1317, 0xFFFF }, { 0x0830, 0x0914, 0x0636, 0x0C08, 0x0E35, - 0x1505, 0xFFFF }, { 0x0900, 0x0824, 0x0B06, 0x0538, 0x1123, 0x1027, 0xFFFF }, { 0x0A1A, 0x1211, 0x1909, 0x083B, 0x0C2F, - 0xFFFF }, { 0x1315, 0x1707, 0x1419, 0x0F2B, 0x053D, 0xFFFF }, { 0x0912, 0x0734, 0x043A, 0x1131, 0x1301, 0x1403, 0x0E2D, - 0x161D, 0xFFFF }, { 0x0826, 0x0F0C, 0x062E, 0x1033, 0x1125, 0x0D37, 0x151B, 0x1F0D, 0xFFFF }, { 0x1221, 0x0B39, 0x1029, - 0xFFFF }, { 0x0910, 0x1313, 0x1C0B, 0x181F, 0xFFFF }, { 0x0A18, 0x072C, 0x0F35, 0x1605, 0x1417, 0xFFFF }, { 0x0832, - 0x0A02, 0x1223, 0x1127, 0xFFFF }, { 0x0828, 0x0B1C, 0x1311, 0x1A09, 0x093B, 0x0D2F, 0xFFFF }, { 0x0920, 0x1519, 0x063D, - 0xFFFF }, { 0x1231, 0x1503, 0x1415, 0x1807, 0x102B, 0x0F2D, 0x171D, 0xFFFF }, { 0x0A16, 0x033C, 0x0C1E, 0x1401, 0x1133, - 0x1225, 0x0E37, 0x161B, 0xFFFF }, { 0x0922, 0x0B04, 0x0E0A, 0x1321, 0x1129, 0xFFFF }, { 0x0C39, 0x1D0B, 0x191F, 0xFFFF - }, { 0x082A, 0x1413, 0x1705, 0x1517, 0xFFFF }, { 0x0930, 0x0A14, 0x0736, 0x0D08, 0x1323, 0x1035, 0x1227, 0xFFFF }, { - 0x0A00, 0x0924, 0x0C06, 0x0638, 0x1B09, 0x0A3B, 0x0E2F, 0xFFFF }, { 0x0B1A, 0x1411, 0x1619, 0x073D, 0xFFFF }, { 0x1331, - 0x1603, 0x1515, 0x1907, 0x112B, 0xFFFF }, { 0x0A12, 0x0834, 0x053A, 0x1501, 0x1233, 0x1325, 0x0F37, 0x171B, 0x102D, - 0x181D, 0xFFFF }, { 0x0926, 0x072E, 0x1229, 0xFFFF }, { 0x1421, 0x0D39, 0x1E0B, 0x1A1F, 0xFFFF }, { 0x0A10, 0x1513, - 0x1617, 0xFFFF }, { 0x0B18, 0x082C, 
0x1135, 0x1805, 0x1327, 0xFFFF }, { 0x0932, 0x0B02, 0x1423, 0x0B3B, 0x0F2F, 0xFFFF - }, { 0x0928, 0x0C1C, 0x1511, 0x1719, 0x1C09, 0xFFFF }, { 0x0A20, 0x1703, 0x1615, 0x1A07, 0x122B, 0x083D, 0xFFFF }, { - 0x1431, 0x1601, 0x1333, 0x112D, 0x191D, 0xFFFF }, { 0x0B16, 0x043C, 0x0D1E, 0x1425, 0x1037, 0x1329, 0x181B, 0xFFFF }, { - 0x0A22, 0x0C04, 0x0F0A, 0x1521, 0x0E39, 0x1F0B, 0x1B1F, 0xFFFF }, { 0x1613, 0x1717, 0xFFFF }, { 0x092A, 0x1235, 0x1905, - 0xFFFF }, { 0x0A30, 0x0B14, 0x0836, 0x0E08, 0x1523, 0x1427, 0xFFFF }, { 0x0B00, 0x0A24, 0x0D06, 0x0738, 0x1611, 0x1D09, - 0x0C3B, 0x102F, 0xFFFF }, { 0x0C1A, 0x1715, 0x1B07, 0x1819, 0x132B, 0x093D, 0xFFFF }, { 0x1531, 0x1701, 0x1803, 0x122D, - 0x1A1D, 0xFFFF }, { 0x0B12, 0x0934, 0x063A, 0x1433, 0x1525, 0x1137, 0x191B, 0xFFFF }, { 0x0A26, 0x003E, 0x082E, 0x1621, - 0x0F39, 0x1429, 0x003F, 0xFFFF }, { 0x1713, 0x1C1F, 0xFFFF }, { 0x0B10, 0x1335, 0x1A05, 0x1817, 0xFFFF }, { 0x0C18, - 0x092C, 0x1623, 0x1527, 0xFFFF }, { 0x0A32, 0x0C02, 0x1711, 0x1E09, 0x0D3B, 0x112F, 0xFFFF }, { 0x0A28, 0x0D1C, 0x1919, - 0x0A3D, 0xFFFF }, { 0x0B20, 0x1631, 0x1903, 0x1815, 0x1C07, 0x142B, 0x132D, 0x1B1D, 0xFFFF }, { 0x1801, 0x1533, 0x1625, - 0x1237, 0x1A1B, 0xFFFF }, { 0x0C16, 0x053C, 0x0E1E, 0x1721, 0x1529, 0x013F, 0xFFFF }, { 0x0B22, 0x0D04, 0x1039, 0x1D1F, - 0xFFFF }, { 0x1813, 0x1B05, 0x1917, 0xFFFF }, { 0x0A2A, 0x1723, 0x1435, 0x1627, 0xFFFF }, { 0x0B30, 0x0C14, 0x0936, - 0x0F08, 0x1F09, 0x0E3B, 0x122F, 0xFFFF }, { 0x0C00, 0x0B24, 0x0E06, 0x0838, 0x1811, 0x1A19, 0x0B3D, 0xFFFF }, { 0x0D1A, - 0x1731, 0x1A03, 0x1915, 0x1D07, 0x152B, 0xFFFF }, { 0x1901, 0x1633, 0x1725, 0x1337, 0x1B1B, 0x142D, 0x1C1D, 0xFFFF }, { - 0x0C12, 0x0A34, 0x073A, 0x1629, 0x023F, 0xFFFF }, { 0x0B26, 0x013E, 0x092E, 0x1821, 0x1139, 0x1E1F, 0xFFFF }, { 0x1913, - 0x1A17, 0xFFFF }, { 0x0C10, 0x1535, 0x1C05, 0x1727, 0xFFFF }, { 0x0D18, 0x0A2C, 0x1823, 0x0F3B, 0x132F, 0xFFFF }, { - 0x0B32, 0x0D02, 0x1911, 0x1B19, 0xFFFF }, { 0x0B28, 0x0E1C, 0x1B03, 0x1A15, 0x1E07, 0x162B, 0x0C3D, 0xFFFF }, { 0x0C20, - 0x1831, 0x1A01, 0x1733, 0x152D, 0x1D1D, 0xFFFF }, { 0x1825, 0x1437, 0x1729, 0x1C1B, 0x033F, 0xFFFF }, { 0x0D16, 0x063C, - 0x0F1E, 0x1921, 0x1239, 0x1F1F, 0xFFFF }, { 0x0C22, 0x0E04, 0x1A13, 0x1B17, 0xFFFF }, { 0x1635, 0x1D05, 0xFFFF }, { - 0x0B2A, 0x1923, 0x1827, 0xFFFF }, { 0x0C30, 0x0D14, 0x0A36, 0x1A11, 0x103B, 0x142F, 0xFFFF }, { 0x0D00, 0x0C24, 0x0F06, - 0x0938, 0x1B15, 0x1F07, 0x1C19, 0x172B, 0x0D3D, 0xFFFF }, { 0x0E1A, 0x1931, 0x1B01, 0x1C03, 0x162D, 0x1E1D, 0xFFFF }, { - 0x1833, 0x1925, 0x1537, 0x1D1B, 0xFFFF }, { 0x0D12, 0x0B34, 0x083A, 0x1A21, 0x1339, 0x1829, 0x043F, 0xFFFF }, { 0x0C26, - 0x023E, 0x0A2E, 0x1B13, 0xFFFF }, { 0x1735, 0x1E05, 0x1C17, 0xFFFF }, { 0x0D10, 0x1A23, 0x1927, 0xFFFF }, { 0x0E18, - 0x0B2C, 0x1B11, 0x113B, 0x152F, 0xFFFF }, { 0x0C32, 0x0E02, 0x1D19, 0x0E3D, 0xFFFF }, { 0x0C28, 0x0F1C, 0x1A31, 0x1D03, - 0x1C15, 0x182B, 0x172D, 0x1F1D, 0xFFFF }, { 0x0D20, 0x1C01, 0x1933, 0x1A25, 0x1637, 0x1E1B, 0xFFFF }, { 0x1B21, 0x1929, - 0x053F, 0xFFFF }, { 0x0E16, 0x073C, 0x1439, 0xFFFF }, { 0x0D22, 0x0F04, 0x1C13, 0x1F05, 0x1D17, 0xFFFF }, { 0x1B23, - 0x1835, 0x1A27, 0xFFFF }, { 0x0C2A, 0x123B, 0x162F, 0xFFFF }, { 0x0D30, 0x0E14, 0x0B36, 0x1C11, 0x1E19, 0x0F3D, 0xFFFF - }, { 0x0E00, 0x0D24, 0x0A38, 0x1B31, 0x1E03, 0x1D15, 0x192B, 0xFFFF }, { 0x0F1A, 0x1D01, 0x1A33, 0x1B25, 0x1737, 0x1F1B, - 0x182D, 0xFFFF }, { 0x1A29, 0x063F, 0xFFFF }, { 0x0E12, 0x0C34, 0x093A, 0x1C21, 0x1539, 0xFFFF }, { 0x0D26, 0x033E, - 0x0B2E, 0x1D13, 0x1E17, 0xFFFF }, { 0x1935, 0x1B27, 0xFFFF }, { 0x0E10, 0x1C23, 
0x133B, 0x172F, 0xFFFF }, { 0x0F18, - 0x0C2C, 0x1D11, 0x1F19, 0xFFFF }, { 0x0D32, 0x0F02, 0x1F03, 0x1E15, 0x1A2B, 0x103D, 0xFFFF }, { 0x0D28, 0x1C31, 0x1E01, - 0x1B33, 0x192D, 0xFFFF }, { 0x0E20, 0x1C25, 0x1837, 0x1B29, 0x073F, 0xFFFF }, { 0x1D21, 0x1639, 0xFFFF }, { 0x0F16, - 0x083C, 0x1E13, 0x1F17, 0xFFFF }, { 0x0E22, 0x1A35, 0xFFFF }, { 0x1D23, 0x1C27, 0xFFFF }, { 0x0D2A, 0x1E11, 0x143B, - 0x182F, 0xFFFF }, { 0x0E30, 0x0F14, 0x0C36, 0x1F15, 0x1B2B, 0x113D, 0xFFFF }, { 0x0F00, 0x0E24, 0x0B38, 0x1D31, 0x1F01, - 0x1A2D, 0xFFFF }, { 0x1C33, 0x1D25, 0x1937, 0xFFFF }, { 0x1E21, 0x1739, 0x1C29, 0x083F, 0xFFFF }, { 0x0F12, 0x0D34, - 0x0A3A, 0x1F13, 0xFFFF }, { 0x0E26, 0x043E, 0x0C2E, 0x1B35, 0xFFFF }, { 0x1E23, 0x1D27, 0xFFFF }, { 0x0F10, 0x1F11, - 0x153B, 0x192F, 0xFFFF }, { 0x0D2C, 0x123D, 0xFFFF }, - }; - - struct etc1_block - { - // big endian uint64: - // bit ofs: 56 48 40 32 24 16 8 0 - // byte ofs: b0, b1, b2, b3, b4, b5, b6, b7 - union - { - uint64 m_uint64; - uint8 m_bytes[8]; - }; - - uint8 m_low_color[2]; - uint8 m_high_color[2]; - - enum { cNumSelectorBytes = 4 }; - uint8 m_selectors[cNumSelectorBytes]; - - inline void clear() - { - zero_this(this); - } - - inline uint get_byte_bits(uint ofs, uint num) const - { - RG_ETC1_ASSERT((ofs + num) <= 64U); - RG_ETC1_ASSERT(num && (num <= 8U)); - RG_ETC1_ASSERT((ofs >> 3) == ((ofs + num - 1) >> 3)); - const uint byte_ofs = 7 - (ofs >> 3); - const uint byte_bit_ofs = ofs & 7; - return (m_bytes[byte_ofs] >> byte_bit_ofs) & ((1 << num) - 1); - } - - inline void set_byte_bits(uint ofs, uint num, uint bits) - { - RG_ETC1_ASSERT((ofs + num) <= 64U); - RG_ETC1_ASSERT(num && (num < 32U)); - RG_ETC1_ASSERT((ofs >> 3) == ((ofs + num - 1) >> 3)); - RG_ETC1_ASSERT(bits < (1U << num)); - const uint byte_ofs = 7 - (ofs >> 3); - const uint byte_bit_ofs = ofs & 7; - const uint mask = (1 << num) - 1; - m_bytes[byte_ofs] &= ~(mask << byte_bit_ofs); - m_bytes[byte_ofs] |= (bits << byte_bit_ofs); - } - - // false = left/right subblocks - // true = upper/lower subblocks - inline bool get_flip_bit() const - { - return (m_bytes[3] & 1) != 0; - } - - inline void set_flip_bit(bool flip) - { - m_bytes[3] &= ~1; - m_bytes[3] |= static_cast<uint8>(flip); - } - - inline bool get_diff_bit() const - { - return (m_bytes[3] & 2) != 0; - } - - inline void set_diff_bit(bool diff) - { - m_bytes[3] &= ~2; - m_bytes[3] |= (static_cast<uint>(diff) << 1); - } - - // Returns intensity modifier table (0-7) used by subblock subblock_id. - // subblock_id=0 left/top (CW 1), 1=right/bottom (CW 2) - inline uint get_inten_table(uint subblock_id) const - { - RG_ETC1_ASSERT(subblock_id < 2); - const uint ofs = subblock_id ? 2 : 5; - return (m_bytes[3] >> ofs) & 7; - } - - // Sets intensity modifier table (0-7) used by subblock subblock_id (0 or 1) - inline void set_inten_table(uint subblock_id, uint t) - { - RG_ETC1_ASSERT(subblock_id < 2); - RG_ETC1_ASSERT(t < 8); - const uint ofs = subblock_id ? 2 : 5; - m_bytes[3] &= ~(7 << ofs); - m_bytes[3] |= (t << ofs); - } - - // Returned selector value ranges from 0-3 and is a direct index into g_etc1_inten_tables. 
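// The selector for pixel (x, y) is stored as two bit planes inside the 64-bit big-endian
// block: the LSBs occupy bits 0-15 and the MSBs bits 16-31 (cETC1LSBSelectorIndicesBitOffset /
// cETC1MSBSelectorIndicesBitOffset above), and the raw 2-bit ETC1 value is remapped through
// g_etc1_to_selector_index so that selector indices follow the intensity table order used
// throughout this file. A standalone sketch of that read path (illustrative names, not part
// of rg_etc1):
#include <cstdint>

static inline unsigned read_etc1_selector(const uint8_t bytes[8], unsigned x, unsigned y)
{
    static const uint8_t etc1_to_selector_index[4] = { 2, 3, 1, 0 }; // same remap table as above

    const unsigned bit_index = x * 4 + y;           // selector bit position for texel (x, y)
    const unsigned byte_bit_ofs = bit_index & 7;
    const unsigned lsb_byte = 7 - (bit_index >> 3); // LSB plane, bits 0-15 -> bytes 7..6
    const unsigned msb_byte = lsb_byte - 2;         // MSB plane, bits 16-31 -> bytes 5..4

    const unsigned lsb = (bytes[lsb_byte] >> byte_bit_ofs) & 1;
    const unsigned msb = (bytes[msb_byte] >> byte_bit_ofs) & 1;
    return etc1_to_selector_index[lsb | (msb << 1)]; // 0-3, a direct index into g_etc1_inten_tables
}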
- inline uint get_selector(uint x, uint y) const - { - RG_ETC1_ASSERT((x | y) < 4); - - const uint bit_index = x * 4 + y; - const uint byte_bit_ofs = bit_index & 7; - const uint8 *p = &m_bytes[7 - (bit_index >> 3)]; - const uint lsb = (p[0] >> byte_bit_ofs) & 1; - const uint msb = (p[-2] >> byte_bit_ofs) & 1; - const uint val = lsb | (msb << 1); - - return g_etc1_to_selector_index[val]; - } - - // Selector "val" ranges from 0-3 and is a direct index into g_etc1_inten_tables. - inline void set_selector(uint x, uint y, uint val) - { - RG_ETC1_ASSERT((x | y | val) < 4); - const uint bit_index = x * 4 + y; - - uint8 *p = &m_bytes[7 - (bit_index >> 3)]; - - const uint byte_bit_ofs = bit_index & 7; - const uint mask = 1 << byte_bit_ofs; - - const uint etc1_val = g_selector_index_to_etc1[val]; - - const uint lsb = etc1_val & 1; - const uint msb = etc1_val >> 1; - - p[0] &= ~mask; - p[0] |= (lsb << byte_bit_ofs); - - p[-2] &= ~mask; - p[-2] |= (msb << byte_bit_ofs); - } - - inline void set_base4_color(uint idx, uint16 c) - { - if (idx) - { - set_byte_bits(cETC1AbsColor4R2BitOffset, 4, (c >> 8) & 15); - set_byte_bits(cETC1AbsColor4G2BitOffset, 4, (c >> 4) & 15); - set_byte_bits(cETC1AbsColor4B2BitOffset, 4, c & 15); - } - else - { - set_byte_bits(cETC1AbsColor4R1BitOffset, 4, (c >> 8) & 15); - set_byte_bits(cETC1AbsColor4G1BitOffset, 4, (c >> 4) & 15); - set_byte_bits(cETC1AbsColor4B1BitOffset, 4, c & 15); - } - } - - inline uint16 get_base4_color(uint idx) const - { - uint r, g, b; - if (idx) - { - r = get_byte_bits(cETC1AbsColor4R2BitOffset, 4); - g = get_byte_bits(cETC1AbsColor4G2BitOffset, 4); - b = get_byte_bits(cETC1AbsColor4B2BitOffset, 4); - } - else - { - r = get_byte_bits(cETC1AbsColor4R1BitOffset, 4); - g = get_byte_bits(cETC1AbsColor4G1BitOffset, 4); - b = get_byte_bits(cETC1AbsColor4B1BitOffset, 4); - } - return static_cast<uint16>(b | (g << 4U) | (r << 8U)); - } - - inline void set_base5_color(uint16 c) - { - set_byte_bits(cETC1BaseColor5RBitOffset, 5, (c >> 10) & 31); - set_byte_bits(cETC1BaseColor5GBitOffset, 5, (c >> 5) & 31); - set_byte_bits(cETC1BaseColor5BBitOffset, 5, c & 31); - } - - inline uint16 get_base5_color() const - { - const uint r = get_byte_bits(cETC1BaseColor5RBitOffset, 5); - const uint g = get_byte_bits(cETC1BaseColor5GBitOffset, 5); - const uint b = get_byte_bits(cETC1BaseColor5BBitOffset, 5); - return static_cast<uint16>(b | (g << 5U) | (r << 10U)); - } - - void set_delta3_color(uint16 c) - { - set_byte_bits(cETC1DeltaColor3RBitOffset, 3, (c >> 6) & 7); - set_byte_bits(cETC1DeltaColor3GBitOffset, 3, (c >> 3) & 7); - set_byte_bits(cETC1DeltaColor3BBitOffset, 3, c & 7); - } - - inline uint16 get_delta3_color() const - { - const uint r = get_byte_bits(cETC1DeltaColor3RBitOffset, 3); - const uint g = get_byte_bits(cETC1DeltaColor3GBitOffset, 3); - const uint b = get_byte_bits(cETC1DeltaColor3BBitOffset, 3); - return static_cast<uint16>(b | (g << 3U) | (r << 6U)); - } - - // Base color 5 - static uint16 pack_color5(const color_quad_u8& color, bool scaled, uint bias = 127U); - static uint16 pack_color5(uint r, uint g, uint b, bool scaled, uint bias = 127U); - - static color_quad_u8 unpack_color5(uint16 packed_color5, bool scaled, uint alpha = 255U); - static void unpack_color5(uint& r, uint& g, uint& b, uint16 packed_color, bool scaled); - - static bool unpack_color5(color_quad_u8& result, uint16 packed_color5, uint16 packed_delta3, bool scaled, uint alpha = 255U); - static bool unpack_color5(uint& r, uint& g, uint& b, uint16 packed_color5, uint16 packed_delta3, bool 
scaled, uint alpha = 255U); - - // Delta color 3 - // Inputs range from -4 to 3 (cETC1ColorDeltaMin to cETC1ColorDeltaMax) - static uint16 pack_delta3(int r, int g, int b); - - // Results range from -4 to 3 (cETC1ColorDeltaMin to cETC1ColorDeltaMax) - static void unpack_delta3(int& r, int& g, int& b, uint16 packed_delta3); - - // Abs color 4 - static uint16 pack_color4(const color_quad_u8& color, bool scaled, uint bias = 127U); - static uint16 pack_color4(uint r, uint g, uint b, bool scaled, uint bias = 127U); - - static color_quad_u8 unpack_color4(uint16 packed_color4, bool scaled, uint alpha = 255U); - static void unpack_color4(uint& r, uint& g, uint& b, uint16 packed_color4, bool scaled); - - // subblock colors - static void get_diff_subblock_colors(color_quad_u8* pDst, uint16 packed_color5, uint table_idx); - static bool get_diff_subblock_colors(color_quad_u8* pDst, uint16 packed_color5, uint16 packed_delta3, uint table_idx); - static void get_abs_subblock_colors(color_quad_u8* pDst, uint16 packed_color4, uint table_idx); - - static inline void unscaled_to_scaled_color(color_quad_u8& dst, const color_quad_u8& src, bool color4) - { - if (color4) - { - dst.r = src.r | (src.r << 4); - dst.g = src.g | (src.g << 4); - dst.b = src.b | (src.b << 4); - } - else - { - dst.r = (src.r >> 2) | (src.r << 3); - dst.g = (src.g >> 2) | (src.g << 3); - dst.b = (src.b >> 2) | (src.b << 3); - } - dst.a = src.a; - } - }; - - // Returns pointer to sorted array. - template<typename T, typename Q> - T* indirect_radix_sort(uint num_indices, T* pIndices0, T* pIndices1, const Q* pKeys, uint key_ofs, uint key_size, bool init_indices) - { - RG_ETC1_ASSERT((key_ofs >= 0) && (key_ofs < sizeof(T))); - RG_ETC1_ASSERT((key_size >= 1) && (key_size <= 4)); - - if (init_indices) - { - T* p = pIndices0; - T* q = pIndices0 + (num_indices >> 1) * 2; - uint i; - for (i = 0; p != q; p += 2, i += 2) - { - p[0] = static_cast<T>(i); - p[1] = static_cast<T>(i + 1); - } - - if (num_indices & 1) - *p = static_cast<T>(i); - } - - uint hist[256 * 4]; - - memset(hist, 0, sizeof(hist[0]) * 256 * key_size); - -#define RG_ETC1_GET_KEY(p) (*(const uint*)((const uint8*)(pKeys + *(p)) + key_ofs)) -#define RG_ETC1_GET_KEY_FROM_INDEX(i) (*(const uint*)((const uint8*)(pKeys + (i)) + key_ofs)) - - if (key_size == 4) - { - T* p = pIndices0; - T* q = pIndices0 + num_indices; - for ( ; p != q; p++) - { - const uint key = RG_ETC1_GET_KEY(p); - - hist[ key & 0xFF]++; - hist[256 + ((key >> 8) & 0xFF)]++; - hist[512 + ((key >> 16) & 0xFF)]++; - hist[768 + ((key >> 24) & 0xFF)]++; - } - } - else if (key_size == 3) - { - T* p = pIndices0; - T* q = pIndices0 + num_indices; - for ( ; p != q; p++) - { - const uint key = RG_ETC1_GET_KEY(p); - - hist[ key & 0xFF]++; - hist[256 + ((key >> 8) & 0xFF)]++; - hist[512 + ((key >> 16) & 0xFF)]++; - } - } - else if (key_size == 2) - { - T* p = pIndices0; - T* q = pIndices0 + (num_indices >> 1) * 2; - - for ( ; p != q; p += 2) - { - const uint key0 = RG_ETC1_GET_KEY(p); - const uint key1 = RG_ETC1_GET_KEY(p+1); - - hist[ key0 & 0xFF]++; - hist[256 + ((key0 >> 8) & 0xFF)]++; - - hist[ key1 & 0xFF]++; - hist[256 + ((key1 >> 8) & 0xFF)]++; - } - - if (num_indices & 1) - { - const uint key = RG_ETC1_GET_KEY(p); - - hist[ key & 0xFF]++; - hist[256 + ((key >> 8) & 0xFF)]++; - } - } - else - { - RG_ETC1_ASSERT(key_size == 1); - if (key_size != 1) - return NULL; - - T* p = pIndices0; - T* q = pIndices0 + (num_indices >> 1) * 2; - - for ( ; p != q; p += 2) - { - const uint key0 = RG_ETC1_GET_KEY(p); - const uint key1 = 
RG_ETC1_GET_KEY(p+1); - - hist[key0 & 0xFF]++; - hist[key1 & 0xFF]++; - } - - if (num_indices & 1) - { - const uint key = RG_ETC1_GET_KEY(p); - - hist[key & 0xFF]++; - } - } - - T* pCur = pIndices0; - T* pNew = pIndices1; - - for (uint pass = 0; pass < key_size; pass++) - { - const uint* pHist = &hist[pass << 8]; - - uint offsets[256]; - - uint cur_ofs = 0; - for (uint i = 0; i < 256; i += 2) - { - offsets[i] = cur_ofs; - cur_ofs += pHist[i]; - - offsets[i+1] = cur_ofs; - cur_ofs += pHist[i+1]; - } - - const uint pass_shift = pass << 3; - - T* p = pCur; - T* q = pCur + (num_indices >> 1) * 2; - - for ( ; p != q; p += 2) - { - uint index0 = p[0]; - uint index1 = p[1]; - - uint c0 = (RG_ETC1_GET_KEY_FROM_INDEX(index0) >> pass_shift) & 0xFF; - uint c1 = (RG_ETC1_GET_KEY_FROM_INDEX(index1) >> pass_shift) & 0xFF; - - if (c0 == c1) - { - uint dst_offset0 = offsets[c0]; - - offsets[c0] = dst_offset0 + 2; - - pNew[dst_offset0] = static_cast<T>(index0); - pNew[dst_offset0 + 1] = static_cast<T>(index1); - } - else - { - uint dst_offset0 = offsets[c0]++; - uint dst_offset1 = offsets[c1]++; - - pNew[dst_offset0] = static_cast<T>(index0); - pNew[dst_offset1] = static_cast<T>(index1); - } - } - - if (num_indices & 1) - { - uint index = *p; - uint c = (RG_ETC1_GET_KEY_FROM_INDEX(index) >> pass_shift) & 0xFF; - - uint dst_offset = offsets[c]; - offsets[c] = dst_offset + 1; - - pNew[dst_offset] = static_cast<T>(index); - } - - T* t = pCur; - pCur = pNew; - pNew = t; - } - - return pCur; - } - -#undef RG_ETC1_GET_KEY -#undef RG_ETC1_GET_KEY_FROM_INDEX - - uint16 etc1_block::pack_color5(const color_quad_u8& color, bool scaled, uint bias) - { - return pack_color5(color.r, color.g, color.b, scaled, bias); - } - - uint16 etc1_block::pack_color5(uint r, uint g, uint b, bool scaled, uint bias) - { - if (scaled) - { - r = (r * 31U + bias) / 255U; - g = (g * 31U + bias) / 255U; - b = (b * 31U + bias) / 255U; - } - - r = rg_etc1::minimum(r, 31U); - g = rg_etc1::minimum(g, 31U); - b = rg_etc1::minimum(b, 31U); - - return static_cast<uint16>(b | (g << 5U) | (r << 10U)); - } - - color_quad_u8 etc1_block::unpack_color5(uint16 packed_color5, bool scaled, uint alpha) - { - uint b = packed_color5 & 31U; - uint g = (packed_color5 >> 5U) & 31U; - uint r = (packed_color5 >> 10U) & 31U; - - if (scaled) - { - b = (b << 3U) | (b >> 2U); - g = (g << 3U) | (g >> 2U); - r = (r << 3U) | (r >> 2U); - } - - return color_quad_u8(cNoClamp, r, g, b, rg_etc1::minimum(alpha, 255U)); - } - - void etc1_block::unpack_color5(uint& r, uint& g, uint& b, uint16 packed_color5, bool scaled) - { - color_quad_u8 c(unpack_color5(packed_color5, scaled, 0)); - r = c.r; - g = c.g; - b = c.b; - } - - bool etc1_block::unpack_color5(color_quad_u8& result, uint16 packed_color5, uint16 packed_delta3, bool scaled, uint alpha) - { - int dc_r, dc_g, dc_b; - unpack_delta3(dc_r, dc_g, dc_b, packed_delta3); - - int b = (packed_color5 & 31U) + dc_b; - int g = ((packed_color5 >> 5U) & 31U) + dc_g; - int r = ((packed_color5 >> 10U) & 31U) + dc_r; - - bool success = true; - if (static_cast<uint>(r | g | b) > 31U) - { - success = false; - r = rg_etc1::clamp<int>(r, 0, 31); - g = rg_etc1::clamp<int>(g, 0, 31); - b = rg_etc1::clamp<int>(b, 0, 31); - } - - if (scaled) - { - b = (b << 3U) | (b >> 2U); - g = (g << 3U) | (g >> 2U); - r = (r << 3U) | (r >> 2U); - } - - result.set_noclamp_rgba(r, g, b, rg_etc1::minimum(alpha, 255U)); - return success; - } - - bool etc1_block::unpack_color5(uint& r, uint& g, uint& b, uint16 packed_color5, uint16 packed_delta3, bool scaled, uint 
alpha) - { - color_quad_u8 result; - const bool success = unpack_color5(result, packed_color5, packed_delta3, scaled, alpha); - r = result.r; - g = result.g; - b = result.b; - return success; - } - - uint16 etc1_block::pack_delta3(int r, int g, int b) - { - RG_ETC1_ASSERT((r >= cETC1ColorDeltaMin) && (r <= cETC1ColorDeltaMax)); - RG_ETC1_ASSERT((g >= cETC1ColorDeltaMin) && (g <= cETC1ColorDeltaMax)); - RG_ETC1_ASSERT((b >= cETC1ColorDeltaMin) && (b <= cETC1ColorDeltaMax)); - if (r < 0) r += 8; - if (g < 0) g += 8; - if (b < 0) b += 8; - return static_cast<uint16>(b | (g << 3) | (r << 6)); - } - - void etc1_block::unpack_delta3(int& r, int& g, int& b, uint16 packed_delta3) - { - r = (packed_delta3 >> 6) & 7; - g = (packed_delta3 >> 3) & 7; - b = packed_delta3 & 7; - if (r >= 4) r -= 8; - if (g >= 4) g -= 8; - if (b >= 4) b -= 8; - } - - uint16 etc1_block::pack_color4(const color_quad_u8& color, bool scaled, uint bias) - { - return pack_color4(color.r, color.g, color.b, scaled, bias); - } - - uint16 etc1_block::pack_color4(uint r, uint g, uint b, bool scaled, uint bias) - { - if (scaled) - { - r = (r * 15U + bias) / 255U; - g = (g * 15U + bias) / 255U; - b = (b * 15U + bias) / 255U; - } - - r = rg_etc1::minimum(r, 15U); - g = rg_etc1::minimum(g, 15U); - b = rg_etc1::minimum(b, 15U); - - return static_cast<uint16>(b | (g << 4U) | (r << 8U)); - } - - color_quad_u8 etc1_block::unpack_color4(uint16 packed_color4, bool scaled, uint alpha) - { - uint b = packed_color4 & 15U; - uint g = (packed_color4 >> 4U) & 15U; - uint r = (packed_color4 >> 8U) & 15U; - - if (scaled) - { - b = (b << 4U) | b; - g = (g << 4U) | g; - r = (r << 4U) | r; - } - - return color_quad_u8(cNoClamp, r, g, b, rg_etc1::minimum(alpha, 255U)); - } - - void etc1_block::unpack_color4(uint& r, uint& g, uint& b, uint16 packed_color4, bool scaled) - { - color_quad_u8 c(unpack_color4(packed_color4, scaled, 0)); - r = c.r; - g = c.g; - b = c.b; - } - - void etc1_block::get_diff_subblock_colors(color_quad_u8* pDst, uint16 packed_color5, uint table_idx) - { - RG_ETC1_ASSERT(table_idx < cETC1IntenModifierValues); - const int *pInten_modifer_table = &g_etc1_inten_tables[table_idx][0]; - - uint r, g, b; - unpack_color5(r, g, b, packed_color5, true); - - const int ir = static_cast<int>(r), ig = static_cast<int>(g), ib = static_cast<int>(b); - - const int y0 = pInten_modifer_table[0]; - pDst[0].set(ir + y0, ig + y0, ib + y0); - - const int y1 = pInten_modifer_table[1]; - pDst[1].set(ir + y1, ig + y1, ib + y1); - - const int y2 = pInten_modifer_table[2]; - pDst[2].set(ir + y2, ig + y2, ib + y2); - - const int y3 = pInten_modifer_table[3]; - pDst[3].set(ir + y3, ig + y3, ib + y3); - } - - bool etc1_block::get_diff_subblock_colors(color_quad_u8* pDst, uint16 packed_color5, uint16 packed_delta3, uint table_idx) - { - RG_ETC1_ASSERT(table_idx < cETC1IntenModifierValues); - const int *pInten_modifer_table = &g_etc1_inten_tables[table_idx][0]; - - uint r, g, b; - bool success = unpack_color5(r, g, b, packed_color5, packed_delta3, true); - - const int ir = static_cast<int>(r), ig = static_cast<int>(g), ib = static_cast<int>(b); - - const int y0 = pInten_modifer_table[0]; - pDst[0].set(ir + y0, ig + y0, ib + y0); - - const int y1 = pInten_modifer_table[1]; - pDst[1].set(ir + y1, ig + y1, ib + y1); - - const int y2 = pInten_modifer_table[2]; - pDst[2].set(ir + y2, ig + y2, ib + y2); - - const int y3 = pInten_modifer_table[3]; - pDst[3].set(ir + y3, ig + y3, ib + y3); - - return success; - } - - void etc1_block::get_abs_subblock_colors(color_quad_u8* 
pDst, uint16 packed_color4, uint table_idx) - { - RG_ETC1_ASSERT(table_idx < cETC1IntenModifierValues); - const int *pInten_modifer_table = &g_etc1_inten_tables[table_idx][0]; - - uint r, g, b; - unpack_color4(r, g, b, packed_color4, true); - - const int ir = static_cast<int>(r), ig = static_cast<int>(g), ib = static_cast<int>(b); - - const int y0 = pInten_modifer_table[0]; - pDst[0].set(ir + y0, ig + y0, ib + y0); - - const int y1 = pInten_modifer_table[1]; - pDst[1].set(ir + y1, ig + y1, ib + y1); - - const int y2 = pInten_modifer_table[2]; - pDst[2].set(ir + y2, ig + y2, ib + y2); - - const int y3 = pInten_modifer_table[3]; - pDst[3].set(ir + y3, ig + y3, ib + y3); - } - - bool unpack_etc1_block(const void* pETC1_block, unsigned int* pDst_pixels_rgba, bool preserve_alpha) - { - color_quad_u8* pDst = reinterpret_cast<color_quad_u8*>(pDst_pixels_rgba); - const etc1_block& block = *static_cast<const etc1_block*>(pETC1_block); - - const bool diff_flag = block.get_diff_bit(); - const bool flip_flag = block.get_flip_bit(); - const uint table_index0 = block.get_inten_table(0); - const uint table_index1 = block.get_inten_table(1); - - color_quad_u8 subblock_colors0[4]; - color_quad_u8 subblock_colors1[4]; - bool success = true; - - if (diff_flag) - { - const uint16 base_color5 = block.get_base5_color(); - const uint16 delta_color3 = block.get_delta3_color(); - etc1_block::get_diff_subblock_colors(subblock_colors0, base_color5, table_index0); - - if (!etc1_block::get_diff_subblock_colors(subblock_colors1, base_color5, delta_color3, table_index1)) - success = false; - } - else - { - const uint16 base_color4_0 = block.get_base4_color(0); - etc1_block::get_abs_subblock_colors(subblock_colors0, base_color4_0, table_index0); - - const uint16 base_color4_1 = block.get_base4_color(1); - etc1_block::get_abs_subblock_colors(subblock_colors1, base_color4_1, table_index1); - } - - if (preserve_alpha) - { - if (flip_flag) - { - for (uint y = 0; y < 2; y++) - { - pDst[0].set_rgb(subblock_colors0[block.get_selector(0, y)]); - pDst[1].set_rgb(subblock_colors0[block.get_selector(1, y)]); - pDst[2].set_rgb(subblock_colors0[block.get_selector(2, y)]); - pDst[3].set_rgb(subblock_colors0[block.get_selector(3, y)]); - pDst += 4; - } - - for (uint y = 2; y < 4; y++) - { - pDst[0].set_rgb(subblock_colors1[block.get_selector(0, y)]); - pDst[1].set_rgb(subblock_colors1[block.get_selector(1, y)]); - pDst[2].set_rgb(subblock_colors1[block.get_selector(2, y)]); - pDst[3].set_rgb(subblock_colors1[block.get_selector(3, y)]); - pDst += 4; - } - } - else - { - for (uint y = 0; y < 4; y++) - { - pDst[0].set_rgb(subblock_colors0[block.get_selector(0, y)]); - pDst[1].set_rgb(subblock_colors0[block.get_selector(1, y)]); - pDst[2].set_rgb(subblock_colors1[block.get_selector(2, y)]); - pDst[3].set_rgb(subblock_colors1[block.get_selector(3, y)]); - pDst += 4; - } - } - } - else - { - if (flip_flag) - { - // 0000 - // 0000 - // 1111 - // 1111 - for (uint y = 0; y < 2; y++) - { - pDst[0] = subblock_colors0[block.get_selector(0, y)]; - pDst[1] = subblock_colors0[block.get_selector(1, y)]; - pDst[2] = subblock_colors0[block.get_selector(2, y)]; - pDst[3] = subblock_colors0[block.get_selector(3, y)]; - pDst += 4; - } - - for (uint y = 2; y < 4; y++) - { - pDst[0] = subblock_colors1[block.get_selector(0, y)]; - pDst[1] = subblock_colors1[block.get_selector(1, y)]; - pDst[2] = subblock_colors1[block.get_selector(2, y)]; - pDst[3] = subblock_colors1[block.get_selector(3, y)]; - pDst += 4; - } - } - else - { - // 0011 - // 0011 - // 0011 - // 
0011 - for (uint y = 0; y < 4; y++) - { - pDst[0] = subblock_colors0[block.get_selector(0, y)]; - pDst[1] = subblock_colors0[block.get_selector(1, y)]; - pDst[2] = subblock_colors1[block.get_selector(2, y)]; - pDst[3] = subblock_colors1[block.get_selector(3, y)]; - pDst += 4; - } - } - } - - return success; - } - - struct etc1_solution_coordinates - { - inline etc1_solution_coordinates() : - m_unscaled_color(0, 0, 0, 0), - m_inten_table(0), - m_color4(false) - { - } - - inline etc1_solution_coordinates(uint r, uint g, uint b, uint inten_table, bool color4) : - m_unscaled_color(r, g, b, 255), - m_inten_table(inten_table), - m_color4(color4) - { - } - - inline etc1_solution_coordinates(const color_quad_u8& c, uint inten_table, bool color4) : - m_unscaled_color(c), - m_inten_table(inten_table), - m_color4(color4) - { - } - - inline etc1_solution_coordinates(const etc1_solution_coordinates& other) - { - *this = other; - } - - inline etc1_solution_coordinates& operator= (const etc1_solution_coordinates& rhs) - { - m_unscaled_color = rhs.m_unscaled_color; - m_inten_table = rhs.m_inten_table; - m_color4 = rhs.m_color4; - return *this; - } - - inline void clear() - { - m_unscaled_color.clear(); - m_inten_table = 0; - m_color4 = false; - } - - inline color_quad_u8 get_scaled_color() const - { - int br, bg, bb; - if (m_color4) - { - br = m_unscaled_color.r | (m_unscaled_color.r << 4); - bg = m_unscaled_color.g | (m_unscaled_color.g << 4); - bb = m_unscaled_color.b | (m_unscaled_color.b << 4); - } - else - { - br = (m_unscaled_color.r >> 2) | (m_unscaled_color.r << 3); - bg = (m_unscaled_color.g >> 2) | (m_unscaled_color.g << 3); - bb = (m_unscaled_color.b >> 2) | (m_unscaled_color.b << 3); - } - return color_quad_u8(br, bg, bb); - } - - inline void get_block_colors(color_quad_u8* pBlock_colors) - { - int br, bg, bb; - if (m_color4) - { - br = m_unscaled_color.r | (m_unscaled_color.r << 4); - bg = m_unscaled_color.g | (m_unscaled_color.g << 4); - bb = m_unscaled_color.b | (m_unscaled_color.b << 4); - } - else - { - br = (m_unscaled_color.r >> 2) | (m_unscaled_color.r << 3); - bg = (m_unscaled_color.g >> 2) | (m_unscaled_color.g << 3); - bb = (m_unscaled_color.b >> 2) | (m_unscaled_color.b << 3); - } - const int* pInten_table = g_etc1_inten_tables[m_inten_table]; - pBlock_colors[0].set(br + pInten_table[0], bg + pInten_table[0], bb + pInten_table[0]); - pBlock_colors[1].set(br + pInten_table[1], bg + pInten_table[1], bb + pInten_table[1]); - pBlock_colors[2].set(br + pInten_table[2], bg + pInten_table[2], bb + pInten_table[2]); - pBlock_colors[3].set(br + pInten_table[3], bg + pInten_table[3], bb + pInten_table[3]); - } - - color_quad_u8 m_unscaled_color; - uint m_inten_table; - bool m_color4; - }; - - class etc1_optimizer - { - etc1_optimizer(const etc1_optimizer&); - etc1_optimizer& operator= (const etc1_optimizer&); - - public: - etc1_optimizer() - { - clear(); - } - - void clear() - { - m_pParams = NULL; - m_pResult = NULL; - m_pSorted_luma = NULL; - m_pSorted_luma_indices = NULL; - } - - struct params : etc1_pack_params - { - params() - { - clear(); - } - - params(const etc1_pack_params& base_params) : - etc1_pack_params(base_params) - { - clear_optimizer_params(); - } - - void clear() - { - etc1_pack_params::clear(); - clear_optimizer_params(); - } - - void clear_optimizer_params() - { - m_num_src_pixels = 0; - m_pSrc_pixels = 0; - - m_use_color4 = false; - static const int s_default_scan_delta[] = { 0 }; - m_pScan_deltas = s_default_scan_delta; - m_scan_delta_size = 1; - - 
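// The scan deltas configured here drive the lattice search in compute() further below: each
// candidate base color is the quantized average color offset by one delta per component and
// kept only if it stays on the 5-bit (limit 31) or 4-bit (limit 15) lattice. A standalone
// sketch of that candidate enumeration (illustrative, not part of rg_etc1):
#include <vector>

struct CandidateColor { int r, g, b; };

static std::vector<CandidateColor> enumerate_scan_candidates(int br, int bg, int bb, int limit,
    const int *scan_deltas, int scan_delta_size)
{
    std::vector<CandidateColor> out;
    for (int zi = 0; zi < scan_delta_size; zi++) {
        const int b = bb + scan_deltas[zi];
        if (b < 0 || b > limit) continue;
        for (int yi = 0; yi < scan_delta_size; yi++) {
            const int g = bg + scan_deltas[yi];
            if (g < 0 || g > limit) continue;
            for (int xi = 0; xi < scan_delta_size; xi++) {
                const int r = br + scan_deltas[xi];
                if (r < 0 || r > limit) continue;
                out.push_back({ r, g, b }); // one trial 555/444 block color
            }
        }
    }
    return out;
}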
m_base_color5.clear(); - m_constrain_against_base_color5 = false; - } - - uint m_num_src_pixels; - const color_quad_u8* m_pSrc_pixels; - - bool m_use_color4; - const int* m_pScan_deltas; - uint m_scan_delta_size; - - color_quad_u8 m_base_color5; - bool m_constrain_against_base_color5; - }; - - struct results - { - uint64 m_error; - color_quad_u8 m_block_color_unscaled; - uint m_block_inten_table; - uint m_n; - uint8* m_pSelectors; - bool m_block_color4; - - inline results& operator= (const results& rhs) - { - m_block_color_unscaled = rhs.m_block_color_unscaled; - m_block_color4 = rhs.m_block_color4; - m_block_inten_table = rhs.m_block_inten_table; - m_error = rhs.m_error; - RG_ETC1_ASSERT(m_n == rhs.m_n); - memcpy(m_pSelectors, rhs.m_pSelectors, rhs.m_n); - return *this; - } - }; - - void init(const params& params, results& result); - bool compute(); - - private: - struct potential_solution - { - potential_solution() : m_coords(), m_error(cUINT64_MAX), m_valid(false) - { - } - - etc1_solution_coordinates m_coords; - uint8 m_selectors[8]; - uint64 m_error; - bool m_valid; - - void clear() - { - m_coords.clear(); - m_error = cUINT64_MAX; - m_valid = false; - } - }; - - const params* m_pParams; - results* m_pResult; - - int m_limit; - - vec3F m_avg_color; - int m_br, m_bg, m_bb; - uint16 m_luma[8]; - uint32 m_sorted_luma[2][8]; - const uint32* m_pSorted_luma_indices; - uint32* m_pSorted_luma; - - uint8 m_selectors[8]; - uint8 m_best_selectors[8]; - - potential_solution m_best_solution; - potential_solution m_trial_solution; - uint8 m_temp_selectors[8]; - - bool evaluate_solution(const etc1_solution_coordinates& coords, potential_solution& trial_solution, potential_solution* pBest_solution); - bool evaluate_solution_fast(const etc1_solution_coordinates& coords, potential_solution& trial_solution, potential_solution* pBest_solution); - }; - - bool etc1_optimizer::compute() - { - const uint n = m_pParams->m_num_src_pixels; - const int scan_delta_size = m_pParams->m_scan_delta_size; - - // Scan through a subset of the 3D lattice centered around the avg block color trying each 3D (555 or 444) lattice point as a potential block color. - // Each time a better solution is found try to refine the current solution's block color based of the current selectors and intensity table index. - for (int zdi = 0; zdi < scan_delta_size; zdi++) - { - const int zd = m_pParams->m_pScan_deltas[zdi]; - const int mbb = m_bb + zd; - if (mbb < 0) continue; else if (mbb > m_limit) break; - - for (int ydi = 0; ydi < scan_delta_size; ydi++) - { - const int yd = m_pParams->m_pScan_deltas[ydi]; - const int mbg = m_bg + yd; - if (mbg < 0) continue; else if (mbg > m_limit) break; - - for (int xdi = 0; xdi < scan_delta_size; xdi++) - { - const int xd = m_pParams->m_pScan_deltas[xdi]; - const int mbr = m_br + xd; - if (mbr < 0) continue; else if (mbr > m_limit) break; - - etc1_solution_coordinates coords(mbr, mbg, mbb, 0, m_pParams->m_use_color4); - if (m_pParams->m_quality == cHighQuality) - { - if (!evaluate_solution(coords, m_trial_solution, &m_best_solution)) - continue; - } - else - { - if (!evaluate_solution_fast(coords, m_trial_solution, &m_best_solution)) - continue; - } - - // Now we have the input block, the avg. color of the input pixels, a set of trial selector indices, and the block color+intensity index. - // Now, for each component, attempt to refine the current solution by solving a simple linear equation. 
For example, for 4 colors:
- // The goal is:
- // pixel0 - (block_color+inten_table[selector0]) + pixel1 - (block_color+inten_table[selector1]) + pixel2 - (block_color+inten_table[selector2]) + pixel3 - (block_color+inten_table[selector3]) = 0
- // Rearranging this:
- // (pixel0 + pixel1 + pixel2 + pixel3) - (block_color+inten_table[selector0]) - (block_color+inten_table[selector1]) - (block_color+inten_table[selector2]) - (block_color+inten_table[selector3]) = 0
- // (pixel0 + pixel1 + pixel2 + pixel3) - block_color - inten_table[selector0] - block_color-inten_table[selector1] - block_color-inten_table[selector2] - block_color-inten_table[selector3] = 0
- // (pixel0 + pixel1 + pixel2 + pixel3) - 4*block_color - inten_table[selector0] - inten_table[selector1] - inten_table[selector2] - inten_table[selector3] = 0
- // (pixel0 + pixel1 + pixel2 + pixel3) - 4*block_color - (inten_table[selector0] + inten_table[selector1] + inten_table[selector2] + inten_table[selector3]) = 0
- // (pixel0 + pixel1 + pixel2 + pixel3)/4 - block_color - (inten_table[selector0] + inten_table[selector1] + inten_table[selector2] + inten_table[selector3])/4 = 0
- // block_color = (pixel0 + pixel1 + pixel2 + pixel3)/4 - (inten_table[selector0] + inten_table[selector1] + inten_table[selector2] + inten_table[selector3])/4
- // So what this means:
- // optimal_block_color = avg_input - avg_inten_delta
- // So the optimal block color can be computed by taking the average block color and subtracting the current average of the intensity delta.
- // Unfortunately, optimal_block_color must then be quantized to 555 or 444 so it's not always possible to improve matters using this formula.
- // Also, the above formula is for unclamped intensity deltas. The actual implementation takes into account clamping.
-
- const uint max_refinement_trials = (m_pParams->m_quality == cLowQuality) ? 2 : (((xd | yd | zd) == 0) ? 4 : 2);
- for (uint refinement_trial = 0; refinement_trial < max_refinement_trials; refinement_trial++)
- {
- const uint8* pSelectors = m_best_solution.m_selectors;
- const int* pInten_table = g_etc1_inten_tables[m_best_solution.m_coords.m_inten_table];
-
- int delta_sum_r = 0, delta_sum_g = 0, delta_sum_b = 0;
- const color_quad_u8 base_color(m_best_solution.m_coords.get_scaled_color());
- for (uint r = 0; r < n; r++)
- {
- const uint s = *pSelectors++;
- const int yd = pInten_table[s];
- // Compute actual delta being applied to each pixel, taking into account clamping.
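// Standalone restatement of the refinement formula derived above (illustrative, not part of
// rg_etc1): the refined lattice component is the average input component minus the average
// clamped intensity delta, re-quantized to the 5-bit (limit 31) or 4-bit (limit 15) lattice.
// Worked example: avg_component = 100.0, avg_clamped_delta = -12.5, limit = 31 gives
// round(112.5 * 31 / 255) = 14.
static inline int refine_block_component(float avg_component, float avg_clamped_delta, int limit)
{
    const float ideal = avg_component - avg_clamped_delta;   // optimal_block_color = avg_input - avg_inten_delta
    int q = static_cast<int>(ideal * limit / 255.0f + 0.5f); // quantize to the component lattice
    return q < 0 ? 0 : (q > limit ? limit : q);              // clamp, as the code below does
}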
- delta_sum_r += rg_etc1::clamp<int>(base_color.r + yd, 0, 255) - base_color.r; - delta_sum_g += rg_etc1::clamp<int>(base_color.g + yd, 0, 255) - base_color.g; - delta_sum_b += rg_etc1::clamp<int>(base_color.b + yd, 0, 255) - base_color.b; - } - if ((!delta_sum_r) && (!delta_sum_g) && (!delta_sum_b)) - break; - const float avg_delta_r_f = static_cast<float>(delta_sum_r) / n; - const float avg_delta_g_f = static_cast<float>(delta_sum_g) / n; - const float avg_delta_b_f = static_cast<float>(delta_sum_b) / n; - const int br1 = rg_etc1::clamp<int>(static_cast<uint>((m_avg_color[0] - avg_delta_r_f) * m_limit / 255.0f + .5f), 0, m_limit); - const int bg1 = rg_etc1::clamp<int>(static_cast<uint>((m_avg_color[1] - avg_delta_g_f) * m_limit / 255.0f + .5f), 0, m_limit); - const int bb1 = rg_etc1::clamp<int>(static_cast<uint>((m_avg_color[2] - avg_delta_b_f) * m_limit / 255.0f + .5f), 0, m_limit); - - bool skip = false; - - if ((mbr == br1) && (mbg == bg1) && (mbb == bb1)) - skip = true; - else if ((br1 == m_best_solution.m_coords.m_unscaled_color.r) && (bg1 == m_best_solution.m_coords.m_unscaled_color.g) && (bb1 == m_best_solution.m_coords.m_unscaled_color.b)) - skip = true; - else if ((m_br == br1) && (m_bg == bg1) && (m_bb == bb1)) - skip = true; - - if (skip) - break; - - etc1_solution_coordinates coords1(br1, bg1, bb1, 0, m_pParams->m_use_color4); - if (m_pParams->m_quality == cHighQuality) - { - if (!evaluate_solution(coords1, m_trial_solution, &m_best_solution)) - break; - } - else - { - if (!evaluate_solution_fast(coords1, m_trial_solution, &m_best_solution)) - break; - } - - } // refinement_trial - - } // xdi - } // ydi - } // zdi - - if (!m_best_solution.m_valid) - { - m_pResult->m_error = cUINT32_MAX; - return false; - } - - const uint8* pSelectors = m_best_solution.m_selectors; - -#ifdef RG_ETC1_BUILD_DEBUG - { - color_quad_u8 block_colors[4]; - m_best_solution.m_coords.get_block_colors(block_colors); - - const color_quad_u8* pSrc_pixels = m_pParams->m_pSrc_pixels; - uint64 actual_error = 0; - for (uint i = 0; i < n; i++) - actual_error += pSrc_pixels[i].squared_distance_rgb(block_colors[pSelectors[i]]); - - RG_ETC1_ASSERT(actual_error == m_best_solution.m_error); - } -#endif - - m_pResult->m_error = m_best_solution.m_error; - - m_pResult->m_block_color_unscaled = m_best_solution.m_coords.m_unscaled_color; - m_pResult->m_block_color4 = m_best_solution.m_coords.m_color4; - - m_pResult->m_block_inten_table = m_best_solution.m_coords.m_inten_table; - memcpy(m_pResult->m_pSelectors, pSelectors, n); - m_pResult->m_n = n; - - return true; - } - - void etc1_optimizer::init(const params& p, results& r) - { - // This version is hardcoded for 8 pixel subblocks. - RG_ETC1_ASSERT(p.m_num_src_pixels == 8); - - m_pParams = &p; - m_pResult = &r; - - const uint n = 8; - - m_limit = m_pParams->m_use_color4 ? 
15 : 31; - - vec3F avg_color(0.0f); - - for (uint i = 0; i < n; i++) - { - const color_quad_u8& c = m_pParams->m_pSrc_pixels[i]; - const vec3F fc(c.r, c.g, c.b); - - avg_color += fc; - - m_luma[i] = static_cast<uint16>(c.r + c.g + c.b); - m_sorted_luma[0][i] = i; - } - avg_color *= (1.0f / static_cast<float>(n)); - m_avg_color = avg_color; - - m_br = rg_etc1::clamp<int>(static_cast<uint>(m_avg_color[0] * m_limit / 255.0f + .5f), 0, m_limit); - m_bg = rg_etc1::clamp<int>(static_cast<uint>(m_avg_color[1] * m_limit / 255.0f + .5f), 0, m_limit); - m_bb = rg_etc1::clamp<int>(static_cast<uint>(m_avg_color[2] * m_limit / 255.0f + .5f), 0, m_limit); - - if (m_pParams->m_quality <= cMediumQuality) - { - m_pSorted_luma_indices = indirect_radix_sort(n, m_sorted_luma[0], m_sorted_luma[1], m_luma, 0, sizeof(m_luma[0]), false); - m_pSorted_luma = m_sorted_luma[0]; - if (m_pSorted_luma_indices == m_sorted_luma[0]) - m_pSorted_luma = m_sorted_luma[1]; - - for (uint i = 0; i < n; i++) - m_pSorted_luma[i] = m_luma[m_pSorted_luma_indices[i]]; - } - - m_best_solution.m_coords.clear(); - m_best_solution.m_valid = false; - m_best_solution.m_error = cUINT64_MAX; - } - - bool etc1_optimizer::evaluate_solution(const etc1_solution_coordinates& coords, potential_solution& trial_solution, potential_solution* pBest_solution) - { - trial_solution.m_valid = false; - - if (m_pParams->m_constrain_against_base_color5) - { - const int dr = coords.m_unscaled_color.r - m_pParams->m_base_color5.r; - const int dg = coords.m_unscaled_color.g - m_pParams->m_base_color5.g; - const int db = coords.m_unscaled_color.b - m_pParams->m_base_color5.b; - - if ((rg_etc1::minimum(dr, dg, db) < cETC1ColorDeltaMin) || (rg_etc1::maximum(dr, dg, db) > cETC1ColorDeltaMax)) - return false; - } - - const color_quad_u8 base_color(coords.get_scaled_color()); - - const uint n = 8; - - trial_solution.m_error = cUINT64_MAX; - - for (uint inten_table = 0; inten_table < cETC1IntenModifierValues; inten_table++) - { - const int* pInten_table = g_etc1_inten_tables[inten_table]; - - color_quad_u8 block_colors[4]; - for (uint s = 0; s < 4; s++) - { - const int yd = pInten_table[s]; - block_colors[s].set(base_color.r + yd, base_color.g + yd, base_color.b + yd, 0); - } - - uint64 total_error = 0; - - const color_quad_u8* pSrc_pixels = m_pParams->m_pSrc_pixels; - for (uint c = 0; c < n; c++) - { - const color_quad_u8& src_pixel = *pSrc_pixels++; - - uint best_selector_index = 0; - uint best_error = rg_etc1::square(src_pixel.r - block_colors[0].r) + rg_etc1::square(src_pixel.g - block_colors[0].g) + rg_etc1::square(src_pixel.b - block_colors[0].b); - - uint trial_error = rg_etc1::square(src_pixel.r - block_colors[1].r) + rg_etc1::square(src_pixel.g - block_colors[1].g) + rg_etc1::square(src_pixel.b - block_colors[1].b); - if (trial_error < best_error) - { - best_error = trial_error; - best_selector_index = 1; - } - - trial_error = rg_etc1::square(src_pixel.r - block_colors[2].r) + rg_etc1::square(src_pixel.g - block_colors[2].g) + rg_etc1::square(src_pixel.b - block_colors[2].b); - if (trial_error < best_error) - { - best_error = trial_error; - best_selector_index = 2; - } - - trial_error = rg_etc1::square(src_pixel.r - block_colors[3].r) + rg_etc1::square(src_pixel.g - block_colors[3].g) + rg_etc1::square(src_pixel.b - block_colors[3].b); - if (trial_error < best_error) - { - best_error = trial_error; - best_selector_index = 3; - } - - m_temp_selectors[c] = static_cast<uint8>(best_selector_index); - - total_error += best_error; - if (total_error >= 
trial_solution.m_error) - break; - } - - if (total_error < trial_solution.m_error) - { - trial_solution.m_error = total_error; - trial_solution.m_coords.m_inten_table = inten_table; - memcpy(trial_solution.m_selectors, m_temp_selectors, 8); - trial_solution.m_valid = true; - } - } - trial_solution.m_coords.m_unscaled_color = coords.m_unscaled_color; - trial_solution.m_coords.m_color4 = m_pParams->m_use_color4; - - bool success = false; - if (pBest_solution) - { - if (trial_solution.m_error < pBest_solution->m_error) - { - *pBest_solution = trial_solution; - success = true; - } - } - - return success; - } - - bool etc1_optimizer::evaluate_solution_fast(const etc1_solution_coordinates& coords, potential_solution& trial_solution, potential_solution* pBest_solution) - { - if (m_pParams->m_constrain_against_base_color5) - { - const int dr = coords.m_unscaled_color.r - m_pParams->m_base_color5.r; - const int dg = coords.m_unscaled_color.g - m_pParams->m_base_color5.g; - const int db = coords.m_unscaled_color.b - m_pParams->m_base_color5.b; - - if ((rg_etc1::minimum(dr, dg, db) < cETC1ColorDeltaMin) || (rg_etc1::maximum(dr, dg, db) > cETC1ColorDeltaMax)) - { - trial_solution.m_valid = false; - return false; - } - } - - const color_quad_u8 base_color(coords.get_scaled_color()); - - const uint n = 8; - - trial_solution.m_error = cUINT64_MAX; - - for (int inten_table = cETC1IntenModifierValues - 1; inten_table >= 0; --inten_table) - { - const int* pInten_table = g_etc1_inten_tables[inten_table]; - - uint block_inten[4]; - color_quad_u8 block_colors[4]; - for (uint s = 0; s < 4; s++) - { - const int yd = pInten_table[s]; - color_quad_u8 block_color(base_color.r + yd, base_color.g + yd, base_color.b + yd, 0); - block_colors[s] = block_color; - block_inten[s] = block_color.r + block_color.g + block_color.b; - } - - // evaluate_solution_fast() enforces/assumesd a total ordering of the input colors along the intensity (1,1,1) axis to more quickly classify the inputs to selectors. - // The inputs colors have been presorted along the projection onto this axis, and ETC1 block colors are always ordered along the intensity axis, so this classification is fast. 
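// Standalone sketch of that classification (illustrative, not part of rg_etc1): because the
// four block colors are monotonically ordered along the (1,1,1) axis, a pixel's selector can
// be found by comparing twice its luma (r+g+b) against the sums of adjacent block intensities,
// i.e. against the midpoints between consecutive block colors.
static inline unsigned classify_selector_by_luma(unsigned pixel_luma, const unsigned block_inten[4])
{
    const unsigned midpoints2x[3] = {
        block_inten[0] + block_inten[1], // 2 * midpoint between selectors 0 and 1
        block_inten[1] + block_inten[2], // 2 * midpoint between selectors 1 and 2
        block_inten[2] + block_inten[3]  // 2 * midpoint between selectors 2 and 3
    };
    unsigned s = 0;
    while (s < 3 && pixel_luma * 2 >= midpoints2x[s])
        s++;
    return s; // 0..3, the same ordering the presorted-luma loop below produces
}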
- // 0 1 2 3 - // 01 12 23 - const uint block_inten_midpoints[3] = { block_inten[0] + block_inten[1], block_inten[1] + block_inten[2], block_inten[2] + block_inten[3] }; - - uint64 total_error = 0; - const color_quad_u8* pSrc_pixels = m_pParams->m_pSrc_pixels; - if ((m_pSorted_luma[n - 1] * 2) < block_inten_midpoints[0]) - { - if (block_inten[0] > m_pSorted_luma[n - 1]) - { - const uint min_error = labs(block_inten[0] - m_pSorted_luma[n - 1]); - if (min_error >= trial_solution.m_error) - continue; - } - - memset(&m_temp_selectors[0], 0, n); - - for (uint c = 0; c < n; c++) - total_error += block_colors[0].squared_distance_rgb(pSrc_pixels[c]); - } - else if ((m_pSorted_luma[0] * 2) >= block_inten_midpoints[2]) - { - if (m_pSorted_luma[0] > block_inten[3]) - { - const uint min_error = labs(m_pSorted_luma[0] - block_inten[3]); - if (min_error >= trial_solution.m_error) - continue; - } - - memset(&m_temp_selectors[0], 3, n); - - for (uint c = 0; c < n; c++) - total_error += block_colors[3].squared_distance_rgb(pSrc_pixels[c]); - } - else - { - uint cur_selector = 0, c; - for (c = 0; c < n; c++) - { - const uint y = m_pSorted_luma[c]; - while ((y * 2) >= block_inten_midpoints[cur_selector]) - if (++cur_selector > 2) - goto done; - const uint sorted_pixel_index = m_pSorted_luma_indices[c]; - m_temp_selectors[sorted_pixel_index] = static_cast<uint8>(cur_selector); - total_error += block_colors[cur_selector].squared_distance_rgb(pSrc_pixels[sorted_pixel_index]); - } -done: - while (c < n) - { - const uint sorted_pixel_index = m_pSorted_luma_indices[c]; - m_temp_selectors[sorted_pixel_index] = 3; - total_error += block_colors[3].squared_distance_rgb(pSrc_pixels[sorted_pixel_index]); - ++c; - } - } - - if (total_error < trial_solution.m_error) - { - trial_solution.m_error = total_error; - trial_solution.m_coords.m_inten_table = inten_table; - memcpy(trial_solution.m_selectors, m_temp_selectors, n); - trial_solution.m_valid = true; - if (!total_error) - break; - } - } - trial_solution.m_coords.m_unscaled_color = coords.m_unscaled_color; - trial_solution.m_coords.m_color4 = m_pParams->m_use_color4; - - bool success = false; - if (pBest_solution) - { - if (trial_solution.m_error < pBest_solution->m_error) - { - *pBest_solution = trial_solution; - success = true; - } - } - - return success; - } - - static uint etc1_decode_value(uint diff, uint inten, uint selector, uint packed_c) - { - const uint limit = diff ? 32 : 16; limit; - RG_ETC1_ASSERT((diff < 2) && (inten < 8) && (selector < 4) && (packed_c < limit)); - int c; - if (diff) - c = (packed_c >> 2) | (packed_c << 3); - else - c = packed_c | (packed_c << 4); - c += g_etc1_inten_tables[inten][selector]; - c = rg_etc1::clamp<int>(c, 0, 255); - return c; - } - - static inline int mul_8bit(int a, int b) { int t = a*b + 128; return (t + (t >> 8)) >> 8; } - - void pack_etc1_block_init() - { - for (uint diff = 0; diff < 2; diff++) - { - const uint limit = diff ? 
32 : 16; - - for (uint inten = 0; inten < 8; inten++) - { - for (uint selector = 0; selector < 4; selector++) - { - const uint inverse_table_index = diff + (inten << 1) + (selector << 4); - for (uint color = 0; color < 256; color++) - { - uint best_error = cUINT32_MAX, best_packed_c = 0; - for (uint packed_c = 0; packed_c < limit; packed_c++) - { - int v = etc1_decode_value(diff, inten, selector, packed_c); - uint err = labs(v - static_cast<int>(color)); - if (err < best_error) - { - best_error = err; - best_packed_c = packed_c; - if (!best_error) - break; - } - } - RG_ETC1_ASSERT(best_error <= 255); - g_etc1_inverse_lookup[inverse_table_index][color] = static_cast<uint16>(best_packed_c | (best_error << 8)); - } - } - } - } - - uint expand5[32]; - for(int i = 0; i < 32; i++) - expand5[i] = (i << 3) | (i >> 2); - - for(int i = 0; i < 256 + 16; i++) - { - int v = clamp<int>(i - 8, 0, 255); - g_quant5_tab[i] = static_cast<uint8>(expand5[mul_8bit(v,31)]); - } - } - - // Packs solid color blocks efficiently using a set of small precomputed tables. - // For random 888 inputs, MSE results are better than Erricson's ETC1 packer in "slow" mode ~9.5% of the time, is slightly worse only ~.01% of the time, and is equal the rest of the time. - static uint64 pack_etc1_block_solid_color(etc1_block& block, const uint8* pColor, etc1_pack_params& pack_params) - { - pack_params; - RG_ETC1_ASSERT(g_etc1_inverse_lookup[0][255]); - - static uint s_next_comp[4] = { 1, 2, 0, 1 }; - - uint best_error = cUINT32_MAX, best_i = 0; - int best_x = 0, best_packed_c1 = 0, best_packed_c2 = 0; - - // For each possible 8-bit value, there is a precomputed list of diff/inten/selector configurations that allow that 8-bit value to be encoded with no error. - for (uint i = 0; i < 3; i++) - { - const uint c1 = pColor[s_next_comp[i]], c2 = pColor[s_next_comp[i + 1]]; - - const int delta_range = 1; - for (int delta = -delta_range; delta <= delta_range; delta++) - { - const int c_plus_delta = rg_etc1::clamp<int>(pColor[i] + delta, 0, 255); - - const uint16* pTable; - if (!c_plus_delta) - pTable = g_color8_to_etc_block_config_0_255[0]; - else if (c_plus_delta == 255) - pTable = g_color8_to_etc_block_config_0_255[1]; - else - pTable = g_color8_to_etc_block_config_1_to_254[c_plus_delta - 1]; - - do - { - const uint x = *pTable++; - -#ifdef RG_ETC1_BUILD_DEBUG - const uint diff = x & 1; - const uint inten = (x >> 1) & 7; - const uint selector = (x >> 4) & 3; - const uint p0 = (x >> 8) & 255; - RG_ETC1_ASSERT(etc1_decode_value(diff, inten, selector, p0) == (uint)c_plus_delta); -#endif - - const uint16* pInverse_table = g_etc1_inverse_lookup[x & 0xFF]; - uint16 p1 = pInverse_table[c1]; - uint16 p2 = pInverse_table[c2]; - const uint trial_error = rg_etc1::square(c_plus_delta - pColor[i]) + rg_etc1::square(p1 >> 8) + rg_etc1::square(p2 >> 8); - if (trial_error < best_error) - { - best_error = trial_error; - best_x = x; - best_packed_c1 = p1 & 0xFF; - best_packed_c2 = p2 & 0xFF; - best_i = i; - if (!best_error) - goto found_perfect_match; - } - } while (*pTable != 0xFFFF); - } - } -found_perfect_match: - - const uint diff = best_x & 1; - const uint inten = (best_x >> 1) & 7; - - block.m_bytes[3] = static_cast<uint8>(((inten | (inten << 3)) << 2) | (diff << 1)); - - const uint etc1_selector = g_selector_index_to_etc1[(best_x >> 4) & 3]; - *reinterpret_cast<uint16*>(&block.m_bytes[4]) = (etc1_selector & 2) ? 0xFFFF : 0; - *reinterpret_cast<uint16*>(&block.m_bytes[6]) = (etc1_selector & 1) ? 
0xFFFF : 0; - - const uint best_packed_c0 = (best_x >> 8) & 255; - if (diff) - { - block.m_bytes[best_i] = static_cast<uint8>(best_packed_c0 << 3); - block.m_bytes[s_next_comp[best_i]] = static_cast<uint8>(best_packed_c1 << 3); - block.m_bytes[s_next_comp[best_i+1]] = static_cast<uint8>(best_packed_c2 << 3); - } - else - { - block.m_bytes[best_i] = static_cast<uint8>(best_packed_c0 | (best_packed_c0 << 4)); - block.m_bytes[s_next_comp[best_i]] = static_cast<uint8>(best_packed_c1 | (best_packed_c1 << 4)); - block.m_bytes[s_next_comp[best_i+1]] = static_cast<uint8>(best_packed_c2 | (best_packed_c2 << 4)); - } - - return best_error; - } - - static uint pack_etc1_block_solid_color_constrained( - etc1_optimizer::results& results, - uint num_colors, const uint8* pColor, - etc1_pack_params& pack_params, - bool use_diff, - const color_quad_u8* pBase_color5_unscaled) - { - RG_ETC1_ASSERT(g_etc1_inverse_lookup[0][255]); - - pack_params; - static uint s_next_comp[4] = { 1, 2, 0, 1 }; - - uint best_error = cUINT32_MAX, best_i = 0; - int best_x = 0, best_packed_c1 = 0, best_packed_c2 = 0; - - // For each possible 8-bit value, there is a precomputed list of diff/inten/selector configurations that allow that 8-bit value to be encoded with no error. - for (uint i = 0; i < 3; i++) - { - const uint c1 = pColor[s_next_comp[i]], c2 = pColor[s_next_comp[i + 1]]; - - const int delta_range = 1; - for (int delta = -delta_range; delta <= delta_range; delta++) - { - const int c_plus_delta = rg_etc1::clamp<int>(pColor[i] + delta, 0, 255); - - const uint16* pTable; - if (!c_plus_delta) - pTable = g_color8_to_etc_block_config_0_255[0]; - else if (c_plus_delta == 255) - pTable = g_color8_to_etc_block_config_0_255[1]; - else - pTable = g_color8_to_etc_block_config_1_to_254[c_plus_delta - 1]; - - do - { - const uint x = *pTable++; - const uint diff = x & 1; - if (static_cast<uint>(use_diff) != diff) - { - if (*pTable == 0xFFFF) - break; - continue; - } - - if ((diff) && (pBase_color5_unscaled)) - { - const int p0 = (x >> 8) & 255; - int delta = p0 - static_cast<int>(pBase_color5_unscaled->c[i]); - if ((delta < cETC1ColorDeltaMin) || (delta > cETC1ColorDeltaMax)) - { - if (*pTable == 0xFFFF) - break; - continue; - } - } - -#ifdef RG_ETC1_BUILD_DEBUG - { - const uint inten = (x >> 1) & 7; - const uint selector = (x >> 4) & 3; - const uint p0 = (x >> 8) & 255; - RG_ETC1_ASSERT(etc1_decode_value(diff, inten, selector, p0) == (uint)c_plus_delta); - } -#endif - - const uint16* pInverse_table = g_etc1_inverse_lookup[x & 0xFF]; - uint16 p1 = pInverse_table[c1]; - uint16 p2 = pInverse_table[c2]; - - if ((diff) && (pBase_color5_unscaled)) - { - int delta1 = (p1 & 0xFF) - static_cast<int>(pBase_color5_unscaled->c[s_next_comp[i]]); - int delta2 = (p2 & 0xFF) - static_cast<int>(pBase_color5_unscaled->c[s_next_comp[i + 1]]); - if ((delta1 < cETC1ColorDeltaMin) || (delta1 > cETC1ColorDeltaMax) || (delta2 < cETC1ColorDeltaMin) || (delta2 > cETC1ColorDeltaMax)) - { - if (*pTable == 0xFFFF) - break; - continue; - } - } - - const uint trial_error = rg_etc1::square(c_plus_delta - pColor[i]) + rg_etc1::square(p1 >> 8) + rg_etc1::square(p2 >> 8); - if (trial_error < best_error) - { - best_error = trial_error; - best_x = x; - best_packed_c1 = p1 & 0xFF; - best_packed_c2 = p2 & 0xFF; - best_i = i; - if (!best_error) - goto found_perfect_match; - } - } while (*pTable != 0xFFFF); - } - } -found_perfect_match: - - if (best_error == cUINT32_MAX) - return best_error; - - best_error *= num_colors; - - results.m_n = num_colors; - 
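// The packed block-config word decoded from best_x just below follows the format documented
// with the g_color8_to_etc_block_config_* tables above: diff | (inten << 1) | (selector << 4)
// | (packed_c << 8). The matching g_etc1_inverse_lookup entries keep the best packed component
// value in the low byte and its absolute error in the high byte. A standalone decode sketch
// (illustrative names, not part of rg_etc1):
#include <cstdint>

struct Etc1BlockConfig { unsigned diff, inten, selector, packed_c; };

static inline Etc1BlockConfig decode_block_config(uint16_t x)
{
    Etc1BlockConfig c;
    c.diff     = x & 1;          // 0 = absolute 444 mode, 1 = differential 555 mode
    c.inten    = (x >> 1) & 7;   // intensity modifier table index
    c.selector = (x >> 4) & 3;   // selector that reproduces the target component value
    c.packed_c = (x >> 8) & 255; // packed 4- or 5-bit base component
    return c;
}

static inline unsigned inverse_lookup_packed_color(uint16_t entry) { return entry & 0xFF; } // low byte
static inline unsigned inverse_lookup_abs_error(uint16_t entry)    { return entry >> 8; }   // high byte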
results.m_block_color4 = !(best_x & 1); - results.m_block_inten_table = (best_x >> 1) & 7; - memset(results.m_pSelectors, (best_x >> 4) & 3, num_colors); - - const uint best_packed_c0 = (best_x >> 8) & 255; - results.m_block_color_unscaled[best_i] = static_cast<uint8>(best_packed_c0); - results.m_block_color_unscaled[s_next_comp[best_i]] = static_cast<uint8>(best_packed_c1); - results.m_block_color_unscaled[s_next_comp[best_i + 1]] = static_cast<uint8>(best_packed_c2); - results.m_error = best_error; - - return best_error; - } - - // Function originally from RYG's public domain real-time DXT1 compressor, modified for 555. - static void dither_block_555(color_quad_u8* dest, const color_quad_u8* block) - { - int err[8],*ep1 = err,*ep2 = err+4; - uint8 *quant = g_quant5_tab+8; - - memset(dest, 0xFF, sizeof(color_quad_u8)*16); - - // process channels seperately - for(int ch=0;ch<3;ch++) - { - uint8* bp = (uint8*)block; - uint8* dp = (uint8*)dest; - - bp += ch; dp += ch; - - memset(err,0, sizeof(err)); - for(int y = 0; y < 4; y++) - { - // pixel 0 - dp[ 0] = quant[bp[ 0] + ((3*ep2[1] + 5*ep2[0]) >> 4)]; - ep1[0] = bp[ 0] - dp[ 0]; - - // pixel 1 - dp[ 4] = quant[bp[ 4] + ((7*ep1[0] + 3*ep2[2] + 5*ep2[1] + ep2[0]) >> 4)]; - ep1[1] = bp[ 4] - dp[ 4]; - - // pixel 2 - dp[ 8] = quant[bp[ 8] + ((7*ep1[1] + 3*ep2[3] + 5*ep2[2] + ep2[1]) >> 4)]; - ep1[2] = bp[ 8] - dp[ 8]; - - // pixel 3 - dp[12] = quant[bp[12] + ((7*ep1[2] + 5*ep2[3] + ep2[2]) >> 4)]; - ep1[3] = bp[12] - dp[12]; - - // advance to next line - int* tmp = ep1; ep1 = ep2; ep2 = tmp; - bp += 16; - dp += 16; - } - } - } - - unsigned int pack_etc1_block(void* pETC1_block, const unsigned int* pSrc_pixels_rgba, etc1_pack_params& pack_params) - { - const color_quad_u8* pSrc_pixels = reinterpret_cast<const color_quad_u8*>(pSrc_pixels_rgba); - etc1_block& dst_block = *static_cast<etc1_block*>(pETC1_block); - -#ifdef RG_ETC1_BUILD_DEBUG - // Ensure all alpha values are 0xFF. - for (uint i = 0; i < 16; i++) - { - RG_ETC1_ASSERT(pSrc_pixels[i].a == 255); - } -#endif - - color_quad_u8 src_pixel0(pSrc_pixels[0]); - - // Check for solid block. 
- const uint32 first_pixel_u32 = pSrc_pixels->m_u32; - int r; - for (r = 15; r >= 1; --r) - if (pSrc_pixels[r].m_u32 != first_pixel_u32) - break; - if (!r) - return static_cast<unsigned int>(16 * pack_etc1_block_solid_color(dst_block, &pSrc_pixels[0].r, pack_params)); - - color_quad_u8 dithered_pixels[16]; - if (pack_params.m_dithering) - { - dither_block_555(dithered_pixels, pSrc_pixels); - pSrc_pixels = dithered_pixels; - } - - etc1_optimizer optimizer; - - uint64 best_error = cUINT64_MAX; - uint best_flip = false, best_use_color4 = false; - - uint8 best_selectors[2][8]; - etc1_optimizer::results best_results[2]; - for (uint i = 0; i < 2; i++) - { - best_results[i].m_n = 8; - best_results[i].m_pSelectors = best_selectors[i]; - } - - uint8 selectors[3][8]; - etc1_optimizer::results results[3]; - - for (uint i = 0; i < 3; i++) - { - results[i].m_n = 8; - results[i].m_pSelectors = selectors[i]; - } - - color_quad_u8 subblock_pixels[8]; - - etc1_optimizer::params params(pack_params); - params.m_num_src_pixels = 8; - params.m_pSrc_pixels = subblock_pixels; - - for (uint flip = 0; flip < 2; flip++) - { - for (uint use_color4 = 0; use_color4 < 2; use_color4++) - { - uint64 trial_error = 0; - - uint subblock; - for (subblock = 0; subblock < 2; subblock++) - { - if (flip) - memcpy(subblock_pixels, pSrc_pixels + subblock * 8, sizeof(color_quad_u8) * 8); - else - { - const color_quad_u8* pSrc_col = pSrc_pixels + subblock * 2; - subblock_pixels[0] = pSrc_col[0]; subblock_pixels[1] = pSrc_col[4]; subblock_pixels[2] = pSrc_col[8]; subblock_pixels[3] = pSrc_col[12]; - subblock_pixels[4] = pSrc_col[1]; subblock_pixels[5] = pSrc_col[5]; subblock_pixels[6] = pSrc_col[9]; subblock_pixels[7] = pSrc_col[13]; - } - - results[2].m_error = cUINT64_MAX; - if ((params.m_quality >= cMediumQuality) && ((subblock) || (use_color4))) - { - const uint32 subblock_pixel0_u32 = subblock_pixels[0].m_u32; - for (r = 7; r >= 1; --r) - if (subblock_pixels[r].m_u32 != subblock_pixel0_u32) - break; - if (!r) - { - pack_etc1_block_solid_color_constrained(results[2], 8, &subblock_pixels[0].r, pack_params, !use_color4, (subblock && !use_color4) ? &results[0].m_block_color_unscaled : NULL); - } - } - - params.m_use_color4 = (use_color4 != 0); - params.m_constrain_against_base_color5 = false; - - if ((!use_color4) && (subblock)) - { - params.m_constrain_against_base_color5 = true; - params.m_base_color5 = results[0].m_block_color_unscaled; - } - - if (params.m_quality == cHighQuality) - { - static const int s_scan_delta_0_to_4[] = { -4, -3, -2, -1, 0, 1, 2, 3, 4 }; - params.m_scan_delta_size = RG_ETC1_ARRAY_SIZE(s_scan_delta_0_to_4); - params.m_pScan_deltas = s_scan_delta_0_to_4; - } - else if (params.m_quality == cMediumQuality) - { - static const int s_scan_delta_0_to_1[] = { -1, 0, 1 }; - params.m_scan_delta_size = RG_ETC1_ARRAY_SIZE(s_scan_delta_0_to_1); - params.m_pScan_deltas = s_scan_delta_0_to_1; - } - else - { - static const int s_scan_delta_0[] = { 0 }; - params.m_scan_delta_size = RG_ETC1_ARRAY_SIZE(s_scan_delta_0); - params.m_pScan_deltas = s_scan_delta_0; - } - - optimizer.init(params, results[subblock]); - if (!optimizer.compute()) - break; - - if (params.m_quality >= cMediumQuality) - { - // TODO: Fix fairly arbitrary/unrefined thresholds that control how far away to scan for potentially better solutions. 
- const uint refinement_error_thresh0 = 3000; - const uint refinement_error_thresh1 = 6000; - if (results[subblock].m_error > refinement_error_thresh0) - { - if (params.m_quality == cMediumQuality) - { - static const int s_scan_delta_2_to_3[] = { -3, -2, 2, 3 }; - params.m_scan_delta_size = RG_ETC1_ARRAY_SIZE(s_scan_delta_2_to_3); - params.m_pScan_deltas = s_scan_delta_2_to_3; - } - else - { - static const int s_scan_delta_5_to_5[] = { -5, 5 }; - static const int s_scan_delta_5_to_8[] = { -8, -7, -6, -5, 5, 6, 7, 8 }; - if (results[subblock].m_error > refinement_error_thresh1) - { - params.m_scan_delta_size = RG_ETC1_ARRAY_SIZE(s_scan_delta_5_to_8); - params.m_pScan_deltas = s_scan_delta_5_to_8; - } - else - { - params.m_scan_delta_size = RG_ETC1_ARRAY_SIZE(s_scan_delta_5_to_5); - params.m_pScan_deltas = s_scan_delta_5_to_5; - } - } - - if (!optimizer.compute()) - break; - } - - if (results[2].m_error < results[subblock].m_error) - results[subblock] = results[2]; - } - - trial_error += results[subblock].m_error; - if (trial_error >= best_error) - break; - } - - if (subblock < 2) - continue; - - best_error = trial_error; - best_results[0] = results[0]; - best_results[1] = results[1]; - best_flip = flip; - best_use_color4 = use_color4; - - } // use_color4 - - } // flip - - int dr = best_results[1].m_block_color_unscaled.r - best_results[0].m_block_color_unscaled.r; - int dg = best_results[1].m_block_color_unscaled.g - best_results[0].m_block_color_unscaled.g; - int db = best_results[1].m_block_color_unscaled.b - best_results[0].m_block_color_unscaled.b; - RG_ETC1_ASSERT(best_use_color4 || (rg_etc1::minimum(dr, dg, db) >= cETC1ColorDeltaMin) && (rg_etc1::maximum(dr, dg, db) <= cETC1ColorDeltaMax)); - - if (best_use_color4) - { - dst_block.m_bytes[0] = static_cast<uint8>(best_results[1].m_block_color_unscaled.r | (best_results[0].m_block_color_unscaled.r << 4)); - dst_block.m_bytes[1] = static_cast<uint8>(best_results[1].m_block_color_unscaled.g | (best_results[0].m_block_color_unscaled.g << 4)); - dst_block.m_bytes[2] = static_cast<uint8>(best_results[1].m_block_color_unscaled.b | (best_results[0].m_block_color_unscaled.b << 4)); - } - else - { - if (dr < 0) dr += 8; dst_block.m_bytes[0] = static_cast<uint8>((best_results[0].m_block_color_unscaled.r << 3) | dr); - if (dg < 0) dg += 8; dst_block.m_bytes[1] = static_cast<uint8>((best_results[0].m_block_color_unscaled.g << 3) | dg); - if (db < 0) db += 8; dst_block.m_bytes[2] = static_cast<uint8>((best_results[0].m_block_color_unscaled.b << 3) | db); - } - - dst_block.m_bytes[3] = static_cast<uint8>( (best_results[1].m_block_inten_table << 2) | (best_results[0].m_block_inten_table << 5) | ((~best_use_color4 & 1) << 1) | best_flip ); - - uint selector0 = 0, selector1 = 0; - if (best_flip) - { - // flipped: - // { 0, 0 }, { 1, 0 }, { 2, 0 }, { 3, 0 }, - // { 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1 } - // - // { 0, 2 }, { 1, 2 }, { 2, 2 }, { 3, 2 }, - // { 0, 3 }, { 1, 3 }, { 2, 3 }, { 3, 3 } - const uint8* pSelectors0 = best_results[0].m_pSelectors; - const uint8* pSelectors1 = best_results[1].m_pSelectors; - for (int x = 3; x >= 0; --x) - { - uint b; - b = g_selector_index_to_etc1[pSelectors1[4 + x]]; - selector0 = (selector0 << 1) | (b & 1); selector1 = (selector1 << 1) | (b >> 1); - - b = g_selector_index_to_etc1[pSelectors1[x]]; - selector0 = (selector0 << 1) | (b & 1); selector1 = (selector1 << 1) | (b >> 1); - - b = g_selector_index_to_etc1[pSelectors0[4 + x]]; - selector0 = (selector0 << 1) | (b & 1); selector1 = (selector1 << 1) | (b >> 1); - - 
b = g_selector_index_to_etc1[pSelectors0[x]]; - selector0 = (selector0 << 1) | (b & 1); selector1 = (selector1 << 1) | (b >> 1); - } - } - else - { - // non-flipped: - // { 0, 0 }, { 0, 1 }, { 0, 2 }, { 0, 3 }, - // { 1, 0 }, { 1, 1 }, { 1, 2 }, { 1, 3 } - // - // { 2, 0 }, { 2, 1 }, { 2, 2 }, { 2, 3 }, - // { 3, 0 }, { 3, 1 }, { 3, 2 }, { 3, 3 } - for (int subblock = 1; subblock >= 0; --subblock) - { - const uint8* pSelectors = best_results[subblock].m_pSelectors + 4; - for (uint i = 0; i < 2; i++) - { - uint b; - b = g_selector_index_to_etc1[pSelectors[3]]; - selector0 = (selector0 << 1) | (b & 1); selector1 = (selector1 << 1) | (b >> 1); - - b = g_selector_index_to_etc1[pSelectors[2]]; - selector0 = (selector0 << 1) | (b & 1); selector1 = (selector1 << 1) | (b >> 1); - - b = g_selector_index_to_etc1[pSelectors[1]]; - selector0 = (selector0 << 1) | (b & 1); selector1 = (selector1 << 1) | (b >> 1); - - b = g_selector_index_to_etc1[pSelectors[0]]; - selector0 = (selector0 << 1) | (b & 1);selector1 = (selector1 << 1) | (b >> 1); - - pSelectors -= 4; - } - } - } - - dst_block.m_bytes[4] = static_cast<uint8>(selector1 >> 8); dst_block.m_bytes[5] = static_cast<uint8>(selector1 & 0xFF); - dst_block.m_bytes[6] = static_cast<uint8>(selector0 >> 8); dst_block.m_bytes[7] = static_cast<uint8>(selector0 & 0xFF); - - return static_cast<unsigned int>(best_error); - } - -} // namespace rg_etc1 diff --git a/thirdparty/rg-etc1/rg_etc1.h b/thirdparty/rg-etc1/rg_etc1.h deleted file mode 100644 index 9ce89a6cc6..0000000000 --- a/thirdparty/rg-etc1/rg_etc1.h +++ /dev/null @@ -1,76 +0,0 @@ -// File: rg_etc1.h - Fast, high quality ETC1 block packer/unpacker - Rich Geldreich <richgel99@gmail.com> -// Please see ZLIB license at the end of this file. -#pragma once - -namespace rg_etc1 -{ - // Unpacks an 8-byte ETC1 compressed block to a block of 4x4 32bpp RGBA pixels. - // Returns false if the block is invalid. Invalid blocks will still be unpacked with clamping. - // This function is thread safe, and does not dynamically allocate any memory. - // If preserve_alpha is true, the alpha channel of the destination pixels will not be overwritten. Otherwise, alpha will be set to 255. - bool unpack_etc1_block(const void *pETC1_block, unsigned int* pDst_pixels_rgba, bool preserve_alpha = false); - - // Quality setting = the higher the quality, the slower. - // To pack large textures, it is highly recommended to call pack_etc1_block() in parallel, on different blocks, from multiple threads (particularly when using cHighQuality). - enum etc1_quality - { - cLowQuality, - cMediumQuality, - cHighQuality, - }; - - struct etc1_pack_params - { - etc1_quality m_quality; - bool m_dithering; - - inline etc1_pack_params() - { - clear(); - } - - void clear() - { - m_quality = cHighQuality; - m_dithering = false; - } - }; - - // Important: pack_etc1_block_init() must be called before calling pack_etc1_block(). - void pack_etc1_block_init(); - - // Packs a 4x4 block of 32bpp RGBA pixels to an 8-byte ETC1 block. - // 32-bit RGBA pixels must always be arranged as (R,G,B,A) (R first, A last) in memory, independent of platform endianness. A should always be 255. - // Returns squared error of result. - // This function is thread safe, and does not dynamically allocate any memory. - // pack_etc1_block() does not currently support "perceptual" colorspace metrics - it primarily optimizes for RGB RMSE. 
- unsigned int pack_etc1_block(void* pETC1_block, const unsigned int* pSrc_pixels_rgba, etc1_pack_params& pack_params); - -} // namespace rg_etc1 - -//------------------------------------------------------------------------------ -// -// rg_etc1 uses the ZLIB license: -// http://opensource.org/licenses/Zlib -// -// Copyright (c) 2012 Rich Geldreich -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// -// 3. This notice may not be removed or altered from any source distribution. -// -//------------------------------------------------------------------------------ diff --git a/thirdparty/zstd/LICENSE b/thirdparty/zstd/LICENSE new file mode 100644 index 0000000000..a793a80289 --- /dev/null +++ b/thirdparty/zstd/LICENSE @@ -0,0 +1,30 @@ +BSD License + +For Zstandard software + +Copyright (c) 2016-present, Facebook, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name Facebook nor the names of its contributors may be used to + endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/thirdparty/zstd/PATENTS b/thirdparty/zstd/PATENTS new file mode 100644 index 0000000000..15b4a2ea5c --- /dev/null +++ b/thirdparty/zstd/PATENTS @@ -0,0 +1,33 @@ +Additional Grant of Patent Rights Version 2 + +"Software" means the Zstandard software distributed by Facebook, Inc. + +Facebook, Inc. 
("Facebook") hereby grants to each recipient of the Software +("you") a perpetual, worldwide, royalty-free, non-exclusive, irrevocable +(subject to the termination provision below) license under any Necessary +Claims, to make, have made, use, sell, offer to sell, import, and otherwise +transfer the Software. For avoidance of doubt, no license is granted under +Facebook’s rights in any patent claims that are infringed by (i) modifications +to the Software made by you or any third party or (ii) the Software in +combination with any software or other technology. + +The license granted hereunder will terminate, automatically and without notice, +if you (or any of your subsidiaries, corporate affiliates or agents) initiate +directly or indirectly, or take a direct financial interest in, any Patent +Assertion: (i) against Facebook or any of its subsidiaries or corporate +affiliates, (ii) against any party if such Patent Assertion arises in whole or +in part from any software, technology, product or service of Facebook or any of +its subsidiaries or corporate affiliates, or (iii) against any party relating +to the Software. Notwithstanding the foregoing, if Facebook or any of its +subsidiaries or corporate affiliates files a lawsuit alleging patent +infringement against you in the first instance, and you respond by filing a +patent infringement counterclaim in that lawsuit against that party that is +unrelated to the Software, the license granted hereunder will not terminate +under section (i) of this paragraph due to such counterclaim. + +A "Necessary Claim" is a claim of a patent owned by Facebook that is +necessarily infringed by the Software standing alone. + +A "Patent Assertion" is any lawsuit or other action alleging direct, indirect, +or contributory infringement or inducement to infringe any patent, including a +cross-claim or counterclaim. diff --git a/thirdparty/zstd/README.md b/thirdparty/zstd/README.md new file mode 100644 index 0000000000..7caee5fd3f --- /dev/null +++ b/thirdparty/zstd/README.md @@ -0,0 +1,146 @@ + __Zstandard__, or `zstd` as short version, is a fast lossless compression algorithm, + targeting real-time compression scenarios at zlib-level and better compression ratios. + +It is provided as an open-source BSD-licensed **C** library, +and a command line utility producing and decoding `.zst` and `.gz` files. +For other programming languages, +you can consult a list of known ports on [Zstandard homepage](http://www.zstd.net/#other-languages). + +|Branch |Status | +|------------|---------| +|master | [![Build Status](https://travis-ci.org/facebook/zstd.svg?branch=master)](https://travis-ci.org/facebook/zstd) | +|dev | [![Build Status](https://travis-ci.org/facebook/zstd.svg?branch=dev)](https://travis-ci.org/facebook/zstd) | + +As a reference, several fast compression algorithms were tested and compared +on a server running Linux Debian (`Linux version 4.8.0-1-amd64`), +with a Core i7-6700K CPU @ 4.0GHz, +using [lzbench], an open-source in-memory benchmark by @inikep +compiled with GCC 6.3.0, +on the [Silesia compression corpus]. 
+ +[lzbench]: https://github.com/inikep/lzbench +[Silesia compression corpus]: http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia + +| Compressor name | Ratio | Compression| Decompress.| +| --------------- | ------| -----------| ---------- | +| **zstd 1.1.3 -1** | 2.877 | 430 MB/s | 1110 MB/s | +| zlib 1.2.8 -1 | 2.743 | 110 MB/s | 400 MB/s | +| brotli 0.5.2 -0 | 2.708 | 400 MB/s | 430 MB/s | +| quicklz 1.5.0 -1 | 2.238 | 550 MB/s | 710 MB/s | +| lzo1x 2.09 -1 | 2.108 | 650 MB/s | 830 MB/s | +| lz4 1.7.5 | 2.101 | 720 MB/s | 3600 MB/s | +| snappy 1.1.3 | 2.091 | 500 MB/s | 1650 MB/s | +| lzf 3.6 -1 | 2.077 | 400 MB/s | 860 MB/s | + +[zlib]:http://www.zlib.net/ +[LZ4]: http://www.lz4.org/ + +Zstd can also offer stronger compression ratios at the cost of compression speed. +Speed vs Compression trade-off is configurable by small increments. Decompression speed is preserved and remains roughly the same at all settings, a property shared by most LZ compression algorithms, such as [zlib] or lzma. + +The following tests were run +on a server running Linux Debian (`Linux version 4.8.0-1-amd64`) +with a Core i7-6700K CPU @ 4.0GHz, +using [lzbench], an open-source in-memory benchmark by @inikep +compiled with GCC 6.3.0, +on the [Silesia compression corpus]. + +Compression Speed vs Ratio | Decompression Speed +---------------------------|-------------------- +![Compression Speed vs Ratio](doc/images/Cspeed4.png "Compression Speed vs Ratio") | ![Decompression Speed](doc/images/Dspeed4.png "Decompression Speed") + +Several algorithms can produce higher compression ratios, but at slower speeds, falling outside of the graph. +For a larger picture including very slow modes, [click on this link](doc/images/DCspeed5.png) . + + +### The case for Small Data compression + +Previous charts provide results applicable to typical file and stream scenarios (several MB). Small data comes with different perspectives. + +The smaller the amount of data to compress, the more difficult it is to compress. This problem is common to all compression algorithms, and reason is, compression algorithms learn from past data how to compress future data. But at the beginning of a new data set, there is no "past" to build upon. + +To solve this situation, Zstd offers a __training mode__, which can be used to tune the algorithm for a selected type of data. +Training Zstandard is achieved by provide it with a few samples (one file per sample). The result of this training is stored in a file called "dictionary", which must be loaded before compression and decompression. +Using this dictionary, the compression ratio achievable on small data improves dramatically. + +The following example uses the `github-users` [sample set](https://github.com/facebook/zstd/releases/tag/v1.1.3), created from [github public API](https://developer.github.com/v3/users/#get-all-users). +It consists of roughly 10K records weighting about 1KB each. + +Compression Ratio | Compression Speed | Decompression Speed +------------------|-------------------|-------------------- +![Compression Ratio](doc/images/dict-cr.png "Compression Ratio") | ![Compression Speed](doc/images/dict-cs.png "Compression Speed") | ![Decompression Speed](doc/images/dict-ds.png "Decompression Speed") + + +These compression gains are achieved while simultaneously providing _faster_ compression and decompression speeds. + +Training works if there is some correlation in a family of small data samples. 
The more data-specific a dictionary is, the more efficient it is (there is no _universal dictionary_). +Hence, deploying one dictionary per type of data will provide the greatest benefits. +Dictionary gains are mostly effective in the first few KB. Then, the compression algorithm will gradually use previously decoded content to better compress the rest of the file. + +#### Dictionary compression How To : + +1) Create the dictionary + +`zstd --train FullPathToTrainingSet/* -o dictionaryName` + +2) Compress with dictionary + +`zstd -D dictionaryName FILE` + +3) Decompress with dictionary + +`zstd -D dictionaryName --decompress FILE.zst` + + +### Build + +Once you have the repository cloned, there are multiple ways provided to build Zstandard. + +#### Makefile + +If your system is compatible with a standard `make` (or `gmake`) binary generator, +you can simply run it at the root directory. +It will generate `zstd` within root directory. + +Other available options include : +- `make install` : create and install zstd binary, library and man page +- `make test` : create and run `zstd` and test tools on local platform + +#### cmake + +A `cmake` project generator is provided within `build/cmake`. +It can generate Makefiles or other build scripts +to create `zstd` binary, and `libzstd` dynamic and static libraries. + +#### Meson + +A Meson project is provided within `contrib/meson`. + +#### Visual Studio (Windows) + +Going into `build` directory, you will find additional possibilities : +- Projects for Visual Studio 2005, 2008 and 2010 + + VS2010 project is compatible with VS2012, VS2013 and VS2015 +- Automated build scripts for Visual compiler by @KrzysFR , in `build/VS_scripts`, + which will build `zstd` cli and `libzstd` library without any need to open Visual Studio solution. + + +### Status + +Zstandard is currently deployed within Facebook. It is used daily to compress and decompress very large amounts of data in multiple formats and use cases. +Zstandard is considered safe for production environments. + +### License + +Zstandard is [BSD-licensed](LICENSE). We also provide an [additional patent grant](PATENTS). + +### Contributing + +The "dev" branch is the one where all contributions will be merged before reaching "master". +If you plan to propose a patch, please commit into the "dev" branch or its own feature branch. +Direct commit to "master" are not permitted. +For more information, please read [CONTRIBUTING](CONTRIBUTING.md). + +### Miscellaneous + +Zstd entropy stage is provided by [Huff0 and FSE, from Finite State Entropy library](https://github.com/Cyan4973/FiniteStateEntropy). diff --git a/thirdparty/zstd/common/bitstream.h b/thirdparty/zstd/common/bitstream.h new file mode 100644 index 0000000000..ca42850df3 --- /dev/null +++ b/thirdparty/zstd/common/bitstream.h @@ -0,0 +1,446 @@ +/* ****************************************************************** + bitstream + Part of FSE library + header file (to include) + Copyright (C) 2013-2017, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - Source repository : https://github.com/Cyan4973/FiniteStateEntropy +****************************************************************** */ +#ifndef BITSTREAM_H_MODULE +#define BITSTREAM_H_MODULE + +#if defined (__cplusplus) +extern "C" { +#endif + + +/* +* This API consists of small unitary functions, which must be inlined for best performance. +* Since link-time-optimization is not available for all compilers, +* these functions are defined into a .h to be included. +*/ + +/*-**************************************** +* Dependencies +******************************************/ +#include "mem.h" /* unaligned access routines */ +#include "error_private.h" /* error codes and messages */ + + +/*-************************************* +* Debug +***************************************/ +#if defined(BIT_DEBUG) && (BIT_DEBUG>=1) +# include <assert.h> +#else +# define assert(condition) ((void)0) +#endif + + +/*========================================= +* Target specific +=========================================*/ +#if defined(__BMI__) && defined(__GNUC__) +# include <immintrin.h> /* support for bextr (experimental) */ +#endif + +#define STREAM_ACCUMULATOR_MIN_32 25 +#define STREAM_ACCUMULATOR_MIN_64 57 +#define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64)) + +/*-****************************************** +* bitStream encoding API (write forward) +********************************************/ +/* bitStream can mix input from multiple sources. +* A critical property of these streams is that they encode and decode in **reverse** direction. +* So the first bit sequence you add will be the last to be read, like a LIFO stack. +*/ +typedef struct +{ + size_t bitContainer; + unsigned bitPos; + char* startPtr; + char* ptr; + char* endPtr; +} BIT_CStream_t; + +MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity); +MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits); +MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC); +MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC); + +/* Start with initCStream, providing the size of buffer to write into. +* bitStream will never write outside of this buffer. +* `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code. +* +* bits are first added to a local register. +* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems. 
+* Writing data into memory is an explicit operation, performed by the flushBits function. +* Hence keep track how many bits are potentially stored into local register to avoid register overflow. +* After a flushBits, a maximum of 7 bits might still be stored into local register. +* +* Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers. +* +* Last operation is to close the bitStream. +* The function returns the final size of CStream in bytes. +* If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable) +*/ + + +/*-******************************************** +* bitStream decoding API (read backward) +**********************************************/ +typedef struct +{ + size_t bitContainer; + unsigned bitsConsumed; + const char* ptr; + const char* start; + const char* limitPtr; +} BIT_DStream_t; + +typedef enum { BIT_DStream_unfinished = 0, + BIT_DStream_endOfBuffer = 1, + BIT_DStream_completed = 2, + BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */ + /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */ + +MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize); +MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD); +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); + + +/* Start by invoking BIT_initDStream(). +* A chunk of the bitStream is then stored into a local register. +* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t). +* You can then retrieve bitFields stored into the local register, **in reverse order**. +* Local register is explicitly reloaded from memory by the BIT_reloadDStream() method. +* A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished. +* Otherwise, it can be less than that, so proceed accordingly. +* Checking if DStream has reached its end can be performed with BIT_endOfDStream(). 
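To make the encode/decode protocol described in the comments above concrete, here is a small round-trip sketch (not part of the upstream header; it assumes compilation inside zstd's lib/common so that bitstream.h and its dependencies resolve). The point to notice is the LIFO property: the value added last is the first one read back.

    #include <assert.h>
    #include "bitstream.h"   /* assumed available via zstd's lib/common include path */

    static void bit_roundtrip_demo(void)
    {
        char buf[16];                     /* capacity must exceed sizeof(size_t), so 16 is enough */
        BIT_CStream_t c;
        BIT_initCStream(&c, buf, sizeof(buf));

        BIT_addBits(&c, 5, 3);            /* written first ...       */
        BIT_addBits(&c, 1, 2);            /* ... written second      */
        BIT_flushBits(&c);
        {   size_t const written = BIT_closeCStream(&c);   /* 0 would mean "did not fit" */
            BIT_DStream_t d;
            BIT_initDStream(&d, buf, written);
            {   size_t const v2 = BIT_readBits(&d, 2);     /* LIFO: last value added is read first -> 1 */
                size_t const v1 = BIT_readBits(&d, 3);     /* then the first value -> 5 */
                assert(v2 == 1 && v1 == 5);
                assert(BIT_endOfDStream(&d));              /* every payload bit has been consumed */
                (void)v1; (void)v2;
            }
        }
    }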
+*/ + + +/*-**************************************** +* unsafe API +******************************************/ +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits); +/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */ + +MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC); +/* unsafe version; does not check buffer overflow */ + +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); +/* faster, but works only if nbBits >= 1 */ + + + +/*-************************************************************** +* Internal functions +****************************************************************/ +MEM_STATIC unsigned BIT_highbit32 (register U32 val) +{ +# if defined(_MSC_VER) /* Visual */ + unsigned long r=0; + _BitScanReverse ( &r, val ); + return (unsigned) r; +# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ + return 31 - __builtin_clz (val); +# else /* Software version */ + static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, + 11, 14, 16, 18, 22, 25, 3, 30, + 8, 12, 20, 28, 15, 17, 24, 7, + 19, 27, 23, 6, 26, 5, 4, 31 }; + U32 v = val; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; +# endif +} + +/*===== Local Constants =====*/ +static const unsigned BIT_mask[] = { 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, + 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, + 0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, + 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF }; /* up to 26 bits */ + + +/*-************************************************************** +* bitStream encoding +****************************************************************/ +/*! BIT_initCStream() : + * `dstCapacity` must be > sizeof(size_t) + * @return : 0 if success, + otherwise an error code (can be tested using ERR_isError() ) */ +MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, + void* startPtr, size_t dstCapacity) +{ + bitC->bitContainer = 0; + bitC->bitPos = 0; + bitC->startPtr = (char*)startPtr; + bitC->ptr = bitC->startPtr; + bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer); + if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall); + return 0; +} + +/*! BIT_addBits() : + can add up to 26 bits into `bitC`. + Does not check for register overflow ! */ +MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, + size_t value, unsigned nbBits) +{ + bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos; + bitC->bitPos += nbBits; +} + +/*! BIT_addBitsFast() : + * works only if `value` is _clean_, meaning all high bits above nbBits are 0 */ +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, + size_t value, unsigned nbBits) +{ + assert((value>>nbBits) == 0); + bitC->bitContainer |= value << bitC->bitPos; + bitC->bitPos += nbBits; +} + +/*! BIT_flushBitsFast() : + * assumption : bitContainer has not overflowed + * unsafe version; does not check buffer overflow */ +MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC) +{ + size_t const nbBytes = bitC->bitPos >> 3; + assert( bitC->bitPos <= (sizeof(bitC->bitContainer)*8) ); + MEM_writeLEST(bitC->ptr, bitC->bitContainer); + bitC->ptr += nbBytes; + assert(bitC->ptr <= bitC->endPtr); + bitC->bitPos &= 7; + bitC->bitContainer >>= nbBytes*8; +} + +/*! BIT_flushBits() : + * assumption : bitContainer has not overflowed + * safe version; check for buffer overflow, and prevents it. 
+ * note : does not signal buffer overflow. + * overflow will be revealed later on using BIT_closeCStream() */ +MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC) +{ + size_t const nbBytes = bitC->bitPos >> 3; + assert( bitC->bitPos <= (sizeof(bitC->bitContainer)*8) ); + MEM_writeLEST(bitC->ptr, bitC->bitContainer); + bitC->ptr += nbBytes; + if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr; + bitC->bitPos &= 7; + bitC->bitContainer >>= nbBytes*8; +} + +/*! BIT_closeCStream() : + * @return : size of CStream, in bytes, + or 0 if it could not fit into dstBuffer */ +MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC) +{ + BIT_addBitsFast(bitC, 1, 1); /* endMark */ + BIT_flushBits(bitC); + if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ + return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0); +} + + +/*-******************************************************** +* bitStream decoding +**********************************************************/ +/*! BIT_initDStream() : +* Initialize a BIT_DStream_t. +* `bitD` : a pointer to an already allocated BIT_DStream_t structure. +* `srcSize` must be the *exact* size of the bitStream, in bytes. +* @return : size of stream (== srcSize) or an errorCode if a problem is detected +*/ +MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize) +{ + if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } + + bitD->start = (const char*)srcBuffer; + bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer); + + if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */ + bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer); + bitD->bitContainer = MEM_readLEST(bitD->ptr); + { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; + bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */ + if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ } + } else { + bitD->ptr = bitD->start; + bitD->bitContainer = *(const BYTE*)(bitD->start); + switch(srcSize) + { + case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16); + case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24); + case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32); + case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; + case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; + case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; + default:; + } + { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; + bitD->bitsConsumed = lastByte ? 
8 - BIT_highbit32(lastByte) : 0; + if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ } + bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8; + } + + return srcSize; +} + +MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) +{ + return bitContainer >> start; +} + +MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) +{ +#if defined(__BMI__) && defined(__GNUC__) && __GNUC__*1000+__GNUC_MINOR__ >= 4008 /* experimental */ +# if defined(__x86_64__) + if (sizeof(bitContainer)==8) + return _bextr_u64(bitContainer, start, nbBits); + else +# endif + return _bextr_u32(bitContainer, start, nbBits); +#else + return (bitContainer >> start) & BIT_mask[nbBits]; +#endif +} + +MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) +{ + return bitContainer & BIT_mask[nbBits]; +} + +/*! BIT_lookBits() : + * Provides next n bits from local register. + * local register is not modified. + * On 32-bits, maxNbBits==24. + * On 64-bits, maxNbBits==56. + * @return : value extracted + */ + MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits) +{ +#if defined(__BMI__) && defined(__GNUC__) /* experimental; fails if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8 */ + return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits); +#else + U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; + return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask); +#endif +} + +/*! BIT_lookBitsFast() : + * unsafe version; only works if nbBits >= 1 */ +MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits) +{ + U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; + assert(nbBits >= 1); + return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask); +} + +MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) +{ + bitD->bitsConsumed += nbBits; +} + +/*! BIT_readBits() : + * Read (consume) next n bits from local register and update. + * Pay attention to not read more than nbBits contained into local register. + * @return : extracted value. + */ +MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits) +{ + size_t const value = BIT_lookBits(bitD, nbBits); + BIT_skipBits(bitD, nbBits); + return value; +} + +/*! BIT_readBitsFast() : +* unsafe version; only works only if nbBits >= 1 */ +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) +{ + size_t const value = BIT_lookBitsFast(bitD, nbBits); + assert(nbBits >= 1); + BIT_skipBits(bitD, nbBits); + return value; +} + +/*! BIT_reloadDStream() : +* Refill `bitD` from buffer previously set in BIT_initDStream() . +* This function is safe, it guarantees it will not read beyond src buffer. +* @return : status of `BIT_DStream_t` internal register. 
+ if status == BIT_DStream_unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */ +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) +{ + if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */ + return BIT_DStream_overflow; + + if (bitD->ptr >= bitD->limitPtr) { + bitD->ptr -= bitD->bitsConsumed >> 3; + bitD->bitsConsumed &= 7; + bitD->bitContainer = MEM_readLEST(bitD->ptr); + return BIT_DStream_unfinished; + } + if (bitD->ptr == bitD->start) { + if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer; + return BIT_DStream_completed; + } + /* start < ptr < limitPtr */ + { U32 nbBytes = bitD->bitsConsumed >> 3; + BIT_DStream_status result = BIT_DStream_unfinished; + if (bitD->ptr - nbBytes < bitD->start) { + nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ + result = BIT_DStream_endOfBuffer; + } + bitD->ptr -= nbBytes; + bitD->bitsConsumed -= nbBytes*8; + bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */ + return result; + } +} + +/*! BIT_endOfDStream() : +* @return Tells if DStream has exactly reached its end (all bits consumed). +*/ +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) +{ + return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); +} + +#if defined (__cplusplus) +} +#endif + +#endif /* BITSTREAM_H_MODULE */ diff --git a/thirdparty/zstd/common/entropy_common.c b/thirdparty/zstd/common/entropy_common.c new file mode 100644 index 0000000000..b37a082fee --- /dev/null +++ b/thirdparty/zstd/common/entropy_common.c @@ -0,0 +1,221 @@ +/* + Common functions of New Generation Entropy library + Copyright (C) 2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + You can contact the author at : + - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +*************************************************************************** */ + +/* ************************************* +* Dependencies +***************************************/ +#include "mem.h" +#include "error_private.h" /* ERR_*, ERROR */ +#define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */ +#include "fse.h" +#define HUF_STATIC_LINKING_ONLY /* HUF_TABLELOG_ABSOLUTEMAX */ +#include "huf.h" + + +/*=== Version ===*/ +unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; } + + +/*=== Error Management ===*/ +unsigned FSE_isError(size_t code) { return ERR_isError(code); } +const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); } + +unsigned HUF_isError(size_t code) { return ERR_isError(code); } +const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); } + + +/*-************************************************************** +* FSE NCount encoding-decoding +****************************************************************/ +size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, + const void* headerBuffer, size_t hbSize) +{ + const BYTE* const istart = (const BYTE*) headerBuffer; + const BYTE* const iend = istart + hbSize; + const BYTE* ip = istart; + int nbBits; + int remaining; + int threshold; + U32 bitStream; + int bitCount; + unsigned charnum = 0; + int previous0 = 0; + + if (hbSize < 4) return ERROR(srcSize_wrong); + bitStream = MEM_readLE32(ip); + nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */ + if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge); + bitStream >>= 4; + bitCount = 4; + *tableLogPtr = nbBits; + remaining = (1<<nbBits)+1; + threshold = 1<<nbBits; + nbBits++; + + while ((remaining>1) & (charnum<=*maxSVPtr)) { + if (previous0) { + unsigned n0 = charnum; + while ((bitStream & 0xFFFF) == 0xFFFF) { + n0 += 24; + if (ip < iend-5) { + ip += 2; + bitStream = MEM_readLE32(ip) >> bitCount; + } else { + bitStream >>= 16; + bitCount += 16; + } } + while ((bitStream & 3) == 3) { + n0 += 3; + bitStream >>= 2; + bitCount += 2; + } + n0 += bitStream & 3; + bitCount += 2; + if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall); + while (charnum < n0) normalizedCounter[charnum++] = 0; + if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { + ip += bitCount>>3; + bitCount &= 7; + bitStream = MEM_readLE32(ip) >> bitCount; + } else { + bitStream >>= 2; + } } + { int const max = (2*threshold-1) - remaining; + int count; + + if ((bitStream & (threshold-1)) < (U32)max) { + count = bitStream & (threshold-1); + bitCount += nbBits-1; + } else { + count = bitStream & (2*threshold-1); + if (count >= threshold) count -= max; + bitCount += nbBits; + } + + count--; /* extra accuracy */ + remaining -= count < 0 ? 
-count : count; /* -1 means +1 */ + normalizedCounter[charnum++] = (short)count; + previous0 = !count; + while (remaining < threshold) { + nbBits--; + threshold >>= 1; + } + + if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { + ip += bitCount>>3; + bitCount &= 7; + } else { + bitCount -= (int)(8 * (iend - 4 - ip)); + ip = iend - 4; + } + bitStream = MEM_readLE32(ip) >> (bitCount & 31); + } } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */ + if (remaining != 1) return ERROR(corruption_detected); + if (bitCount > 32) return ERROR(corruption_detected); + *maxSVPtr = charnum-1; + + ip += (bitCount+7)>>3; + return ip-istart; +} + + +/*! HUF_readStats() : + Read compact Huffman tree, saved by HUF_writeCTable(). + `huffWeight` is destination buffer. + `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32. + @return : size read from `src` , or an error Code . + Note : Needed by HUF_readCTable() and HUF_readDTableX?() . +*/ +size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, + U32* nbSymbolsPtr, U32* tableLogPtr, + const void* src, size_t srcSize) +{ + U32 weightTotal; + const BYTE* ip = (const BYTE*) src; + size_t iSize; + size_t oSize; + + if (!srcSize) return ERROR(srcSize_wrong); + iSize = ip[0]; + /* memset(huffWeight, 0, hwSize); *//* is not necessary, even though some analyzer complain ... */ + + if (iSize >= 128) { /* special header */ + oSize = iSize - 127; + iSize = ((oSize+1)/2); + if (iSize+1 > srcSize) return ERROR(srcSize_wrong); + if (oSize >= hwSize) return ERROR(corruption_detected); + ip += 1; + { U32 n; + for (n=0; n<oSize; n+=2) { + huffWeight[n] = ip[n/2] >> 4; + huffWeight[n+1] = ip[n/2] & 15; + } } } + else { /* header compressed with FSE (normal case) */ + FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)]; /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */ + if (iSize+1 > srcSize) return ERROR(srcSize_wrong); + oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6); /* max (hwSize-1) values decoded, as last one is implied */ + if (FSE_isError(oSize)) return oSize; + } + + /* collect weight stats */ + memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32)); + weightTotal = 0; + { U32 n; for (n=0; n<oSize; n++) { + if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected); + rankStats[huffWeight[n]]++; + weightTotal += (1 << huffWeight[n]) >> 1; + } } + if (weightTotal == 0) return ERROR(corruption_detected); + + /* get last non-null symbol weight (implied, total must be 2^n) */ + { U32 const tableLog = BIT_highbit32(weightTotal) + 1; + if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected); + *tableLogPtr = tableLog; + /* determine last weight */ + { U32 const total = 1 << tableLog; + U32 const rest = total - weightTotal; + U32 const verif = 1 << BIT_highbit32(rest); + U32 const lastWeight = BIT_highbit32(rest) + 1; + if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */ + huffWeight[oSize] = (BYTE)lastWeight; + rankStats[lastWeight]++; + } } + + /* check tree construction validity */ + if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */ + + /* results */ + *nbSymbolsPtr = (U32)(oSize+1); + return iSize+1; +} diff --git a/thirdparty/zstd/common/error_private.c b/thirdparty/zstd/common/error_private.c new file mode 100644 index 0000000000..b3287245f1 --- /dev/null +++ b/thirdparty/zstd/common/error_private.c @@ -0,0 +1,44 
@@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +/* The purpose of this file is to have a single list of error strings embedded in binary */ + +#include "error_private.h" + +const char* ERR_getErrorString(ERR_enum code) +{ + static const char* const notErrorCode = "Unspecified error code"; + switch( code ) + { + case PREFIX(no_error): return "No error detected"; + case PREFIX(GENERIC): return "Error (generic)"; + case PREFIX(prefix_unknown): return "Unknown frame descriptor"; + case PREFIX(version_unsupported): return "Version not supported"; + case PREFIX(parameter_unknown): return "Unknown parameter type"; + case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter"; + case PREFIX(frameParameter_unsupportedBy32bits): return "Frame parameter unsupported in 32-bits mode"; + case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding"; + case PREFIX(compressionParameter_unsupported): return "Compression parameter is out of bound"; + case PREFIX(init_missing): return "Context should be init first"; + case PREFIX(memory_allocation): return "Allocation error : not enough memory"; + case PREFIX(stage_wrong): return "Operation not authorized at current processing stage"; + case PREFIX(dstSize_tooSmall): return "Destination buffer is too small"; + case PREFIX(srcSize_wrong): return "Src size is incorrect"; + case PREFIX(corruption_detected): return "Corrupted block detected"; + case PREFIX(checksum_wrong): return "Restored data doesn't match checksum"; + case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported"; + case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large"; + case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small"; + case PREFIX(dictionary_corrupted): return "Dictionary is corrupted"; + case PREFIX(dictionary_wrong): return "Dictionary mismatch"; + case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples"; + case PREFIX(maxCode): + default: return notErrorCode; + } +} diff --git a/thirdparty/zstd/common/error_private.h b/thirdparty/zstd/common/error_private.h new file mode 100644 index 0000000000..1bc2e49548 --- /dev/null +++ b/thirdparty/zstd/common/error_private.h @@ -0,0 +1,76 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ + +/* Note : this module is expected to remain private, do not expose it */ + +#ifndef ERROR_H_MODULE +#define ERROR_H_MODULE + +#if defined (__cplusplus) +extern "C" { +#endif + + +/* **************************************** +* Dependencies +******************************************/ +#include <stddef.h> /* size_t */ +#include "zstd_errors.h" /* enum list */ + + +/* **************************************** +* Compiler-specific +******************************************/ +#if defined(__GNUC__) +# define ERR_STATIC static __attribute__((unused)) +#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define ERR_STATIC static inline +#elif defined(_MSC_VER) +# define ERR_STATIC static __inline +#else +# define ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ +#endif + + +/*-**************************************** +* Customization (error_public.h) +******************************************/ +typedef ZSTD_ErrorCode ERR_enum; +#define PREFIX(name) ZSTD_error_##name + + +/*-**************************************** +* Error codes handling +******************************************/ +#ifdef ERROR +# undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */ +#endif +#define ERROR(name) ((size_t)-PREFIX(name)) + +ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } + +ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); } + + +/*-**************************************** +* Error Strings +******************************************/ + +const char* ERR_getErrorString(ERR_enum code); /* error_private.c */ + +ERR_STATIC const char* ERR_getErrorName(size_t code) +{ + return ERR_getErrorString(ERR_getErrorCode(code)); +} + +#if defined (__cplusplus) +} +#endif + +#endif /* ERROR_H_MODULE */ diff --git a/thirdparty/zstd/common/fse.h b/thirdparty/zstd/common/fse.h new file mode 100644 index 0000000000..6d5d41def1 --- /dev/null +++ b/thirdparty/zstd/common/fse.h @@ -0,0 +1,698 @@ +/* ****************************************************************** + FSE : Finite State Entropy codec + Public Prototypes declaration + Copyright (C) 2013-2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
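The error_private.h helpers shown above encode failures as negated enum values cast to size_t, so any function returning size_t can report errors in-band. A small sketch of how that convention round-trips (illustrative only; it assumes error_private.h and zstd_errors.h are reachable via zstd's lib/common include path):

    #include <assert.h>
    #include <stdio.h>
    #include "error_private.h"

    static void err_convention_demo(void)
    {
        size_t const failure = ERROR(srcSize_wrong);      /* == (size_t)-ZSTD_error_srcSize_wrong */
        assert(ERR_isError(failure));
        assert(ERR_getErrorCode(failure) == ZSTD_error_srcSize_wrong);
        printf("%s\n", ERR_getErrorName(failure));        /* "Src size is incorrect" */
    }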
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - Source repository : https://github.com/Cyan4973/FiniteStateEntropy +****************************************************************** */ +#ifndef FSE_H +#define FSE_H + +#if defined (__cplusplus) +extern "C" { +#endif + + +/*-***************************************** +* Dependencies +******************************************/ +#include <stddef.h> /* size_t, ptrdiff_t */ + + +/*-***************************************** +* FSE_PUBLIC_API : control library symbols visibility +******************************************/ +#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) +# define FSE_PUBLIC_API __attribute__ ((visibility ("default"))) +#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */ +# define FSE_PUBLIC_API __declspec(dllexport) +#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) +# define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +#else +# define FSE_PUBLIC_API +#endif + +/*------ Version ------*/ +#define FSE_VERSION_MAJOR 0 +#define FSE_VERSION_MINOR 9 +#define FSE_VERSION_RELEASE 0 + +#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE +#define FSE_QUOTE(str) #str +#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str) +#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION) + +#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE) +FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */ + +/*-**************************************** +* FSE simple functions +******************************************/ +/*! FSE_compress() : + Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'. + 'dst' buffer must be already allocated. Compression runs faster is dstCapacity >= FSE_compressBound(srcSize). + @return : size of compressed data (<= dstCapacity). + Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! + if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead. + if FSE_isError(return), compression failed (more details using FSE_getErrorName()) +*/ +FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity, + const void* src, size_t srcSize); + +/*! FSE_decompress(): + Decompress FSE data from buffer 'cSrc', of size 'cSrcSize', + into already allocated destination buffer 'dst', of size 'dstCapacity'. + @return : size of regenerated data (<= maxDstSize), + or an error code, which can be tested using FSE_isError() . + + ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!! + Why ? : making this distinction requires a header. + Header management is intentionally delegated to the user layer, which can better manage special cases. 
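Putting the FSE_compress()/FSE_decompress() contracts described above together, a caller typically distinguishes the two special return values before attempting decompression. A hedged sketch (not from the upstream header; buffer handling simplified, `fse_roundtrip` is an illustrative name):

    #include <stdlib.h>
    #include <string.h>
    #include "fse.h"   /* assumed compiled within zstd's lib/common */

    /* Returns 0 on success. `dst` must hold at least `srcSize` bytes. */
    static int fse_roundtrip(void* dst, const void* src, size_t srcSize)
    {
        size_t const cBound = FSE_compressBound(srcSize);
        void* const cBuf = malloc(cBound);
        if (cBuf == NULL) return -1;

        {   size_t const cSize = FSE_compress(cBuf, cBound, src, srcSize);
            int ret = 0;
            if (FSE_isError(cSize)) {
                ret = -1;                                        /* real failure */
            } else if (cSize == 0) {
                memcpy(dst, src, srcSize);                       /* not compressible: store raw */
            } else if (cSize == 1) {
                memset(dst, ((const char*)src)[0], srcSize);     /* single repeated symbol: RLE */
            } else {
                size_t const dSize = FSE_decompress(dst, srcSize, cBuf, cSize);
                if (FSE_isError(dSize) || dSize != srcSize) ret = -1;
            }
            free(cBuf);
            return ret;
        }
    }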
+*/ +FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity, + const void* cSrc, size_t cSrcSize); + + +/*-***************************************** +* Tool functions +******************************************/ +FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */ + +/* Error Management */ +FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */ +FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */ + + +/*-***************************************** +* FSE advanced functions +******************************************/ +/*! FSE_compress2() : + Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog' + Both parameters can be defined as '0' to mean : use default value + @return : size of compressed data + Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!! + if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression. + if FSE_isError(return), it's an error code. +*/ +FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); + + +/*-***************************************** +* FSE detailed API +******************************************/ +/*! +FSE_compress() does the following: +1. count symbol occurrence from source[] into table count[] +2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog) +3. save normalized counters to memory buffer using writeNCount() +4. build encoding table 'CTable' from normalized counters +5. encode the data stream using encoding table 'CTable' + +FSE_decompress() does the following: +1. read normalized counters with readNCount() +2. build decoding table 'DTable' from normalized counters +3. decode the data stream using decoding table 'DTable' + +The following API allows targeting specific sub-functions for advanced tasks. +For example, it's possible to compress several blocks using the same 'CTable', +or to save and provide normalized distribution using external method. +*/ + +/* *** COMPRESSION *** */ + +/*! FSE_count(): + Provides the precise count of each byte within a table 'count'. + 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1). + *maxSymbolValuePtr will be updated if detected smaller than initial value. + @return : the count of the most frequent symbol (which is not identified). + if return == srcSize, there is only one symbol. + Can also return an error code, which can be tested with FSE_isError(). */ +FSE_PUBLIC_API size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); + +/*! FSE_optimalTableLog(): + dynamically downsize 'tableLog' when conditions are met. + It saves CPU time, by using smaller tables, while preserving or even improving compression ratio. + @return : recommended tableLog (necessarily <= 'maxTableLog') */ +FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); + +/*! FSE_normalizeCount(): + normalize counts so that sum(count[]) == Power_of_2 (2^tableLog) + 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1). + @return : tableLog, + or an errorCode, which can be tested using FSE_isError() */ +FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, const unsigned* count, size_t srcSize, unsigned maxSymbolValue); + +/*! 
FSE_NCountWriteBound(): + Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'. + Typically useful for allocation purpose. */ +FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog); + +/*! FSE_writeNCount(): + Compactly save 'normalizedCounter' into 'buffer'. + @return : size of the compressed table, + or an errorCode, which can be tested using FSE_isError(). */ +FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); + + +/*! Constructor and Destructor of FSE_CTable. + Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */ +typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */ +FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned tableLog, unsigned maxSymbolValue); +FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct); + +/*! FSE_buildCTable(): + Builds `ct`, which must be already allocated, using FSE_createCTable(). + @return : 0, or an errorCode, which can be tested using FSE_isError() */ +FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); + +/*! FSE_compress_usingCTable(): + Compress `src` using `ct` into `dst` which must be already allocated. + @return : size of compressed data (<= `dstCapacity`), + or 0 if compressed data could not fit into `dst`, + or an errorCode, which can be tested using FSE_isError() */ +FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct); + +/*! +Tutorial : +---------- +The first step is to count all symbols. FSE_count() does this job very fast. +Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells. +'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0] +maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value) +FSE_count() will return the number of occurrence of the most frequent symbol. +This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility. +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). + +The next step is to normalize the frequencies. +FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'. +It also guarantees a minimum of 1 to any Symbol with frequency >= 1. +You can use 'tableLog'==0 to mean "use default tableLog value". +If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(), +which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default"). + +The result of FSE_normalizeCount() will be saved into a table, +called 'normalizedCounter', which is a table of signed short. +'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells. +The return value is tableLog if everything proceeded as expected. +It is 0 if there is a single symbol within distribution. +If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()). + +'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount(). +'buffer' must be already allocated. 
+For guaranteed success, buffer size must be at least FSE_headerBound(). +The result of the function is the number of bytes written into 'buffer'. +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small). + +'normalizedCounter' can then be used to create the compression table 'CTable'. +The space required by 'CTable' must be already allocated, using FSE_createCTable(). +You can then use FSE_buildCTable() to fill 'CTable'. +If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()). + +'CTable' can then be used to compress 'src', with FSE_compress_usingCTable(). +Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize' +The function returns the size of compressed data (without header), necessarily <= `dstCapacity`. +If it returns '0', compressed data could not fit into 'dst'. +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). +*/ + + +/* *** DECOMPRESSION *** */ + +/*! FSE_readNCount(): + Read compactly saved 'normalizedCounter' from 'rBuffer'. + @return : size read from 'rBuffer', + or an errorCode, which can be tested using FSE_isError(). + maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */ +FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize); + +/*! Constructor and Destructor of FSE_DTable. + Note that its size depends on 'tableLog' */ +typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ +FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog); +FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt); + +/*! FSE_buildDTable(): + Builds 'dt', which must be already allocated, using FSE_createDTable(). + return : 0, or an errorCode, which can be tested using FSE_isError() */ +FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); + +/*! FSE_decompress_usingDTable(): + Decompress compressed source `cSrc` of size `cSrcSize` using `dt` + into `dst` which must be already allocated. + @return : size of regenerated data (necessarily <= `dstCapacity`), + or an errorCode, which can be tested using FSE_isError() */ +FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt); + +/*! +Tutorial : +---------- +(Note : these functions only decompress FSE-compressed blocks. + If block is uncompressed, use memcpy() instead + If block is a single repeated byte, use memset() instead ) + +The first step is to obtain the normalized frequencies of symbols. +This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount(). +'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short. +In practice, that means it's necessary to know 'maxSymbolValue' beforehand, +or size the table to handle worst case situations (typically 256). +FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'. +The result of FSE_readNCount() is the number of bytes read from 'rBuffer'. +Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that. +If there is an error, the function will return an error code, which can be tested using FSE_isError(). 
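+
+For illustration, a worst-case read sketch ('rBuffer' and 'rBufferSize' below are placeholder names,
+error handling elided) :
+    short normalizedCounter[256];          /* worst case : all 256 byte values may be present */
+    unsigned maxSymbolValue = 255;
+    unsigned tableLog;
+    size_t const headerSize = FSE_readNCount(normalizedCounter, &maxSymbolValue, &tableLog, rBuffer, rBufferSize);
+    if (FSE_isError(headerSize)) { /* invalid or truncated header */ }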
+ +The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'. +This is performed by the function FSE_buildDTable(). +The space required by 'FSE_DTable' must be already allocated using FSE_createDTable(). +If there is an error, the function will return an error code, which can be tested using FSE_isError(). + +`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable(). +`cSrcSize` must be strictly correct, otherwise decompression will fail. +FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`). +If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small) +*/ + + +#ifdef FSE_STATIC_LINKING_ONLY + +/* *** Dependency *** */ +#include "bitstream.h" + + +/* ***************************************** +* Static allocation +*******************************************/ +/* FSE buffer bounds */ +#define FSE_NCOUNTBOUND 512 +#define FSE_BLOCKBOUND(size) (size + (size>>7)) +#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ + +/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */ +#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2)) +#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog)) + +/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */ +#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue) (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable)) +#define FSE_DTABLE_SIZE(maxTableLog) (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable)) + + +/* ***************************************** +* FSE advanced API +*******************************************/ +/* FSE_count_wksp() : + * Same as FSE_count(), but using an externally provided scratch buffer. + * `workSpace` size must be table of >= `1024` unsigned + */ +size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize, unsigned* workSpace); + +/** FSE_countFast() : + * same as FSE_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr + */ +size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); + +/* FSE_countFast_wksp() : + * Same as FSE_countFast(), but using an externally provided scratch buffer. + * `workSpace` must be a table of minimum `1024` unsigned + */ +size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* workSpace); + +/*! FSE_count_simple + * Same as FSE_countFast(), but does not use any additional memory (not even on stack). + * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`). +*/ +size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); + + + +unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus); +/**< same as FSE_optimalTableLog(), which used `minus==2` */ + +/* FSE_compress_wksp() : + * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`). + * FSE_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable. 
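+ * Illustrative call sketch (buffer names are placeholders, and `wkspSize` is assumed to be expressed in bytes) :
+ *     FSE_CTable wksp[ FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE) ];
+ *     size_t const cSize = FSE_compress_wksp(dst, dstCapacity, src, srcSize,
+ *                                            FSE_MAX_SYMBOL_VALUE, FSE_MAX_TABLELOG,
+ *                                            wksp, sizeof(wksp));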
+ */ +#define FSE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) ) +size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); + +size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits); +/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */ + +size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue); +/**< build a fake FSE_CTable, designed to compress always the same symbolValue */ + +/* FSE_buildCTable_wksp() : + * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). + * `wkspSize` must be >= `(1<<tableLog)`. + */ +size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); + +size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits); +/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */ + +size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue); +/**< build a fake FSE_DTable, designed to always generate the same symbolValue */ + +size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog); +/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */ + + +/* ***************************************** +* FSE symbol compression API +*******************************************/ +/*! + This API consists of small unitary functions, which highly benefit from being inlined. + Hence their body are included in next section. +*/ +typedef struct { + ptrdiff_t value; + const void* stateTable; + const void* symbolTT; + unsigned stateLog; +} FSE_CState_t; + +static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct); + +static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol); + +static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr); + +/**< +These functions are inner components of FSE_compress_usingCTable(). +They allow the creation of custom streams, mixing multiple tables and bit sources. + +A key property to keep in mind is that encoding and decoding are done **in reverse direction**. +So the first symbol you will encode is the last you will decode, like a LIFO stack. + +You will need a few variables to track your CStream. They are : + +FSE_CTable ct; // Provided by FSE_buildCTable() +BIT_CStream_t bitStream; // bitStream tracking structure +FSE_CState_t state; // State tracking structure (can have several) + + +The first thing to do is to init bitStream and state. + size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize); + FSE_initCState(&state, ct); + +Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError(); +You can then encode your input data, byte after byte. +FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time. +Remember decoding will be done in reverse direction. + FSE_encodeByte(&bitStream, &state, symbol); + +At any time, you can also add any bit sequence. 
+Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders + BIT_addBits(&bitStream, bitField, nbBits); + +The above methods don't commit data to memory, they just store it into local register, for speed. +Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t). +Writing data to memory is a manual operation, performed by the flushBits function. + BIT_flushBits(&bitStream); + +Your last FSE encoding operation shall be to flush your last state value(s). + FSE_flushState(&bitStream, &state); + +Finally, you must close the bitStream. +The function returns the size of CStream in bytes. +If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible) +If there is an error, it returns an errorCode (which can be tested using FSE_isError()). + size_t size = BIT_closeCStream(&bitStream); +*/ + + +/* ***************************************** +* FSE symbol decompression API +*******************************************/ +typedef struct { + size_t state; + const void* table; /* precise table may vary, depending on U16 */ +} FSE_DState_t; + + +static void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt); + +static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD); + +static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr); + +/**< +Let's now decompose FSE_decompress_usingDTable() into its unitary components. +You will decode FSE-encoded symbols from the bitStream, +and also any other bitFields you put in, **in reverse order**. + +You will need a few variables to track your bitStream. They are : + +BIT_DStream_t DStream; // Stream context +FSE_DState_t DState; // State context. Multiple ones are possible +FSE_DTable* DTablePtr; // Decoding table, provided by FSE_buildDTable() + +The first thing to do is to init the bitStream. + errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize); + +You should then retrieve your initial state(s) +(in reverse flushing order if you have several ones) : + errorCode = FSE_initDState(&DState, &DStream, DTablePtr); + +You can then decode your data, symbol after symbol. +For information the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'. +Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out). + unsigned char symbol = FSE_decodeSymbol(&DState, &DStream); + +You can retrieve any bitfield you eventually stored into the bitStream (in reverse order) +Note : maximum allowed nbBits is 25, for 32-bits compatibility + size_t bitField = BIT_readBits(&DStream, nbBits); + +All above operations only read from local register (which size depends on size_t). +Refueling the register from memory is manually performed by the reload method. + endSignal = FSE_reloadDStream(&DStream); + +BIT_reloadDStream() result tells if there is still some more data to read from DStream. +BIT_DStream_unfinished : there is still some data left into the DStream. +BIT_DStream_endOfBuffer : Dstream reached end of buffer. Its container may no longer be completely filled. +BIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed. +BIT_DStream_tooFar : Dstream went too far. Decompression result is corrupted. + +When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop, +to properly detect the exact end of stream. 
+After each decoded symbol, check if DStream is fully consumed using this simple test : + BIT_reloadDStream(&DStream) >= BIT_DStream_completed + +When it's done, verify decompression is fully completed, by checking both DStream and the relevant states. +Checking if DStream has reached its end is performed by : + BIT_endOfDStream(&DStream); +Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible. + FSE_endOfDState(&DState); +*/ + + +/* ***************************************** +* FSE unsafe API +*******************************************/ +static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD); +/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */ + + +/* ***************************************** +* Implementation of inlined functions +*******************************************/ +typedef struct { + int deltaFindState; + U32 deltaNbBits; +} FSE_symbolCompressionTransform; /* total 8 bytes */ + +MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct) +{ + const void* ptr = ct; + const U16* u16ptr = (const U16*) ptr; + const U32 tableLog = MEM_read16(ptr); + statePtr->value = (ptrdiff_t)1<<tableLog; + statePtr->stateTable = u16ptr+2; + statePtr->symbolTT = ((const U32*)ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1)); + statePtr->stateLog = tableLog; +} + + +/*! FSE_initCState2() : +* Same as FSE_initCState(), but the first symbol to include (which will be the last to be read) +* uses the smallest state value possible, saving the cost of this symbol */ +MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol) +{ + FSE_initCState(statePtr, ct); + { const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; + const U16* stateTable = (const U16*)(statePtr->stateTable); + U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16); + statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits; + statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; + } +} + +MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, U32 symbol) +{ + FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; + const U16* const stateTable = (const U16*)(statePtr->stateTable); + U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16); + BIT_addBits(bitC, statePtr->value, nbBitsOut); + statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; +} + +MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr) +{ + BIT_addBits(bitC, statePtr->value, statePtr->stateLog); + BIT_flushBits(bitC); +} + + +/* ====== Decompression ====== */ + +typedef struct { + U16 tableLog; + U16 fastMode; +} FSE_DTableHeader; /* sizeof U32 */ + +typedef struct +{ + unsigned short newState; + unsigned char symbol; + unsigned char nbBits; +} FSE_decode_t; /* size == U32 */ + +MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt) +{ + const void* ptr = dt; + const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr; + DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog); + BIT_reloadDStream(bitD); + DStatePtr->table = dt + 1; +} + +MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr) +{ + FSE_decode_t const DInfo = ((const 
FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + return DInfo.symbol; +} + +MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) +{ + FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + U32 const nbBits = DInfo.nbBits; + size_t const lowBits = BIT_readBits(bitD, nbBits); + DStatePtr->state = DInfo.newState + lowBits; +} + +MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) +{ + FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + U32 const nbBits = DInfo.nbBits; + BYTE const symbol = DInfo.symbol; + size_t const lowBits = BIT_readBits(bitD, nbBits); + + DStatePtr->state = DInfo.newState + lowBits; + return symbol; +} + +/*! FSE_decodeSymbolFast() : + unsafe, only works if no symbol has a probability > 50% */ +MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) +{ + FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + U32 const nbBits = DInfo.nbBits; + BYTE const symbol = DInfo.symbol; + size_t const lowBits = BIT_readBitsFast(bitD, nbBits); + + DStatePtr->state = DInfo.newState + lowBits; + return symbol; +} + +MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) +{ + return DStatePtr->state == 0; +} + + + +#ifndef FSE_COMMONDEFS_ONLY + +/* ************************************************************** +* Tuning parameters +****************************************************************/ +/*!MEMORY_USAGE : +* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) +* Increasing memory usage improves compression ratio +* Reduced memory usage can improve speed, due to cache effect +* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ +#ifndef FSE_MAX_MEMORY_USAGE +# define FSE_MAX_MEMORY_USAGE 14 +#endif +#ifndef FSE_DEFAULT_MEMORY_USAGE +# define FSE_DEFAULT_MEMORY_USAGE 13 +#endif + +/*!FSE_MAX_SYMBOL_VALUE : +* Maximum symbol value authorized. 
+* Required for proper stack allocation */ +#ifndef FSE_MAX_SYMBOL_VALUE +# define FSE_MAX_SYMBOL_VALUE 255 +#endif + +/* ************************************************************** +* template functions type & suffix +****************************************************************/ +#define FSE_FUNCTION_TYPE BYTE +#define FSE_FUNCTION_EXTENSION +#define FSE_DECODE_TYPE FSE_decode_t + + +#endif /* !FSE_COMMONDEFS_ONLY */ + + +/* *************************************************************** +* Constants +*****************************************************************/ +#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) +#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG) +#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1) +#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2) +#define FSE_MIN_TABLELOG 5 + +#define FSE_TABLELOG_ABSOLUTE_MAX 15 +#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX +# error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" +#endif + +#define FSE_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3) + + +#endif /* FSE_STATIC_LINKING_ONLY */ + + +#if defined (__cplusplus) +} +#endif + +#endif /* FSE_H */ diff --git a/thirdparty/zstd/common/fse_decompress.c b/thirdparty/zstd/common/fse_decompress.c new file mode 100644 index 0000000000..8474a4c079 --- /dev/null +++ b/thirdparty/zstd/common/fse_decompress.c @@ -0,0 +1,328 @@ +/* ****************************************************************** + FSE : Finite State Entropy decoder + Copyright (C) 2013-2015, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + You can contact the author at : + - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ + + +/* ************************************************************** +* Compiler specifics +****************************************************************/ +#ifdef _MSC_VER /* Visual Studio */ +# define FORCE_INLINE static __forceinline +# include <intrin.h> /* For Visual 2005 */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ +#else +# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# ifdef __GNUC__ +# define FORCE_INLINE static inline __attribute__((always_inline)) +# else +# define FORCE_INLINE static inline +# endif +# else +# define FORCE_INLINE static +# endif /* __STDC_VERSION__ */ +#endif + + +/* ************************************************************** +* Includes +****************************************************************/ +#include <stdlib.h> /* malloc, free, qsort */ +#include <string.h> /* memcpy, memset */ +#include "bitstream.h" +#define FSE_STATIC_LINKING_ONLY +#include "fse.h" + + +/* ************************************************************** +* Error Management +****************************************************************/ +#define FSE_isError ERR_isError +#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ + +/* check and forward error code */ +#define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; } + + +/* ************************************************************** +* Templates +****************************************************************/ +/* + designed to be included + for type-specific functions (template emulation in C) + Objective is to write these functions only once, for improved maintenance +*/ + +/* safety checks */ +#ifndef FSE_FUNCTION_EXTENSION +# error "FSE_FUNCTION_EXTENSION must be defined" +#endif +#ifndef FSE_FUNCTION_TYPE +# error "FSE_FUNCTION_TYPE must be defined" +#endif + +/* Function names */ +#define FSE_CAT(X,Y) X##Y +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) + + +/* Function templates */ +FSE_DTable* FSE_createDTable (unsigned tableLog) +{ + if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX; + return (FSE_DTable*)malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) ); +} + +void FSE_freeDTable (FSE_DTable* dt) +{ + free(dt); +} + +size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) +{ + void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */ + FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr); + U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1]; + + U32 const maxSV1 = maxSymbolValue + 1; + U32 const tableSize = 1 << tableLog; + U32 highThreshold = tableSize-1; + + /* Sanity Checks */ + if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge); + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); + + /* Init, lay down lowprob symbols */ + { FSE_DTableHeader DTableH; + DTableH.tableLog = (U16)tableLog; + DTableH.fastMode = 1; + { S16 const largeLimit= (S16)(1 << (tableLog-1)); + U32 s; + for (s=0; s<maxSV1; s++) { + if 
(normalizedCounter[s]==-1) { + tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s; + symbolNext[s] = 1; + } else { + if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0; + symbolNext[s] = normalizedCounter[s]; + } } } + memcpy(dt, &DTableH, sizeof(DTableH)); + } + + /* Spread symbols */ + { U32 const tableMask = tableSize-1; + U32 const step = FSE_TABLESTEP(tableSize); + U32 s, position = 0; + for (s=0; s<maxSV1; s++) { + int i; + for (i=0; i<normalizedCounter[s]; i++) { + tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s; + position = (position + step) & tableMask; + while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */ + } } + if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ + } + + /* Build Decoding table */ + { U32 u; + for (u=0; u<tableSize; u++) { + FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol); + U16 nextState = symbolNext[symbol]++; + tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) ); + tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize); + } } + + return 0; +} + + +#ifndef FSE_COMMONDEFS_ONLY + +/*-******************************************************* +* Decompression (Byte symbols) +*********************************************************/ +size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue) +{ + void* ptr = dt; + FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; + void* dPtr = dt + 1; + FSE_decode_t* const cell = (FSE_decode_t*)dPtr; + + DTableH->tableLog = 0; + DTableH->fastMode = 0; + + cell->newState = 0; + cell->symbol = symbolValue; + cell->nbBits = 0; + + return 0; +} + + +size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) +{ + void* ptr = dt; + FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; + void* dPtr = dt + 1; + FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr; + const unsigned tableSize = 1 << nbBits; + const unsigned tableMask = tableSize - 1; + const unsigned maxSV1 = tableMask+1; + unsigned s; + + /* Sanity checks */ + if (nbBits < 1) return ERROR(GENERIC); /* min size */ + + /* Build Decoding Table */ + DTableH->tableLog = (U16)nbBits; + DTableH->fastMode = 1; + for (s=0; s<maxSV1; s++) { + dinfo[s].newState = 0; + dinfo[s].symbol = (BYTE)s; + dinfo[s].nbBits = (BYTE)nbBits; + } + + return 0; +} + +FORCE_INLINE size_t FSE_decompress_usingDTable_generic( + void* dst, size_t maxDstSize, + const void* cSrc, size_t cSrcSize, + const FSE_DTable* dt, const unsigned fast) +{ + BYTE* const ostart = (BYTE*) dst; + BYTE* op = ostart; + BYTE* const omax = op + maxDstSize; + BYTE* const olimit = omax-3; + + BIT_DStream_t bitD; + FSE_DState_t state1; + FSE_DState_t state2; + + /* Init */ + CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize)); + + FSE_initDState(&state1, &bitD, dt); + FSE_initDState(&state2, &bitD, dt); + +#define FSE_GETSYMBOL(statePtr) fast ? 
FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) + + /* 4 symbols per loop */ + for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) { + op[0] = FSE_GETSYMBOL(&state1); + + if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ + BIT_reloadDStream(&bitD); + + op[1] = FSE_GETSYMBOL(&state2); + + if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ + { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } } + + op[2] = FSE_GETSYMBOL(&state1); + + if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ + BIT_reloadDStream(&bitD); + + op[3] = FSE_GETSYMBOL(&state2); + } + + /* tail */ + /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */ + while (1) { + if (op>(omax-2)) return ERROR(dstSize_tooSmall); + *op++ = FSE_GETSYMBOL(&state1); + if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) { + *op++ = FSE_GETSYMBOL(&state2); + break; + } + + if (op>(omax-2)) return ERROR(dstSize_tooSmall); + *op++ = FSE_GETSYMBOL(&state2); + if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) { + *op++ = FSE_GETSYMBOL(&state1); + break; + } } + + return op-ostart; +} + + +size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, + const void* cSrc, size_t cSrcSize, + const FSE_DTable* dt) +{ + const void* ptr = dt; + const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr; + const U32 fastMode = DTableH->fastMode; + + /* select fast mode (static) */ + if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); + return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); +} + + +size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog) +{ + const BYTE* const istart = (const BYTE*)cSrc; + const BYTE* ip = istart; + short counting[FSE_MAX_SYMBOL_VALUE+1]; + unsigned tableLog; + unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; + + /* normal FSE decoding mode */ + size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); + if (FSE_isError(NCountLength)) return NCountLength; + //if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */ + if (tableLog > maxLog) return ERROR(tableLog_tooLarge); + ip += NCountLength; + cSrcSize -= NCountLength; + + CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) ); + + return FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace); /* always return, even if it is an error code */ +} + + +typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; + +size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize) +{ + DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ + return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, FSE_MAX_TABLELOG); +} + + + +#endif /* FSE_COMMONDEFS_ONLY */ diff --git a/thirdparty/zstd/common/huf.h b/thirdparty/zstd/common/huf.h new file mode 100644 index 0000000000..7873ca3d42 --- /dev/null +++ b/thirdparty/zstd/common/huf.h @@ -0,0 +1,283 @@ +/* ****************************************************************** + Huffman coder, part of New Generation Entropy library + header file 
+ Copyright (C) 2013-2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - Source repository : https://github.com/Cyan4973/FiniteStateEntropy +****************************************************************** */ +#ifndef HUF_H_298734234 +#define HUF_H_298734234 + +#if defined (__cplusplus) +extern "C" { +#endif + + +/* *** Dependencies *** */ +#include <stddef.h> /* size_t */ + + +/* *** library symbols visibility *** */ +/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual, + * HUF symbols remain "private" (internal symbols for library only). + * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */ +#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) +# define HUF_PUBLIC_API __attribute__ ((visibility ("default"))) +#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */ +# define HUF_PUBLIC_API __declspec(dllexport) +#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) +# define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */ +#else +# define HUF_PUBLIC_API +#endif + + +/* *** simple functions *** */ +/** +HUF_compress() : + Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'. + 'dst' buffer must be already allocated. + Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize). + `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB. + @return : size of compressed data (<= `dstCapacity`). + Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! + if return == 1, srcData is a single repeated byte symbol (RLE compression). + if HUF_isError(return), compression failed (more details using HUF_getErrorName()) +*/ +HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity, + const void* src, size_t srcSize); + +/** +HUF_decompress() : + Decompress HUF data from buffer 'cSrc', of size 'cSrcSize', + into already allocated buffer 'dst', of minimum size 'dstSize'. + `originalSize` : **must** be the ***exact*** size of original (uncompressed) data. 
+ Note : in contrast with FSE, HUF_decompress can regenerate + RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, + because it knows size to regenerate. + @return : size of regenerated data (== originalSize), + or an error code, which can be tested using HUF_isError() +*/ +HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize, + const void* cSrc, size_t cSrcSize); + + +/* *** Tool functions *** */ +#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ +HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ + +/* Error Management */ +HUF_PUBLIC_API unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ +HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */ + + +/* *** Advanced function *** */ + +/** HUF_compress2() : + * Same as HUF_compress(), but offers direct control over `maxSymbolValue` and `tableLog`. + * `tableLog` must be `<= HUF_TABLELOG_MAX` . */ +HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); + +/** HUF_compress4X_wksp() : + * Same as HUF_compress2(), but uses externally allocated `workSpace`. + * `workspace` must have minimum alignment of 4, and be at least as large as following macro */ +#define HUF_WORKSPACE_SIZE (6 << 10) +#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32)) +HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); + + + +/* ****************************************************************** + * WARNING !! + * The following section contains advanced and experimental definitions + * which shall never be used in the context of dll + * because they are not guaranteed to remain stable in the future. + * Only consider them in association with static linking. + *******************************************************************/ +#ifdef HUF_STATIC_LINKING_ONLY + +/* *** Dependencies *** */ +#include "mem.h" /* U32 */ + + +/* *** Constants *** */ +#define HUF_TABLELOG_MAX 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */ +#define HUF_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */ +#define HUF_SYMBOLVALUE_MAX 255 + +#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ +#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX) +# error "HUF_TABLELOG_MAX is too large !" 
+#endif + + +/* **************************************** +* Static allocation +******************************************/ +/* HUF buffer bounds */ +#define HUF_CTABLEBOUND 129 +#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true when incompressible is pre-filtered with fast heuristic */ +#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ + +/* static allocation of HUF's Compression Table */ +#define HUF_CTABLE_SIZE_U32(maxSymbolValue) ((maxSymbolValue)+1) /* Use tables of U32, for proper alignment */ +#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32)) +#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \ + U32 name##hb[HUF_CTABLE_SIZE_U32(maxSymbolValue)]; \ + void* name##hv = &(name##hb); \ + HUF_CElt* name = (HUF_CElt*)(name##hv) /* no final ; */ + +/* static allocation of HUF's DTable */ +typedef U32 HUF_DTable; +#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog))) +#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \ + HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) } +#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \ + HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) } + + +/* **************************************** +* Advanced decompression functions +******************************************/ +size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ +size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ + +size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */ +size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */ +size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ +size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ + + +/* **************************************** +* HUF detailed API +******************************************/ +/*! +HUF_compress() does the following: +1. count symbol occurrence from source[] into table count[] using FSE_count() +2. (optional) refine tableLog using HUF_optimalTableLog() +3. build Huffman table from count using HUF_buildCTable() +4. save Huffman table to memory buffer using HUF_writeCTable() +5. encode the data stream using HUF_compress4X_usingCTable() + +The following API allows targeting specific sub-functions for advanced tasks. +For example, it's possible to compress several blocks using the same 'CTable', +or to save and regenerate 'CTable' using external methods. 
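+
+As an illustrative sketch of that flow (error checks elided, buffer names are placeholders,
+and HUF_buildCTable() is assumed to return the resulting maximum bit count, reused as huffLog) :
+    unsigned count[HUF_SYMBOLVALUE_MAX+1];
+    unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+    HUF_CREATE_STATIC_CTABLE(hufTable, HUF_SYMBOLVALUE_MAX);
+    FSE_count(count, &maxSymbolValue, src, srcSize);                                              /* 1. histogram */
+    unsigned const huffLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, maxSymbolValue);  /* 2. */
+    size_t const maxNbBits = HUF_buildCTable(hufTable, count, maxSymbolValue, huffLog);           /* 3. */
+    size_t const hSize = HUF_writeCTable(dst, dstCapacity, hufTable, maxSymbolValue, (unsigned)maxNbBits);            /* 4. */
+    size_t const cSize = HUF_compress4X_usingCTable((char*)dst + hSize, dstCapacity - hSize, src, srcSize, hufTable); /* 5. */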
+*/ +/* FSE_count() : find it within "fse.h" */ +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); +typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */ +size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); +size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog); +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); + +typedef enum { + HUF_repeat_none, /**< Cannot use the previous table */ + HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ + HUF_repeat_valid /**< Can use the previous table and it is asumed to be valid */ + } HUF_repeat; +/** HUF_compress4X_repeat() : +* Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. +* If it uses hufTable it does not modify hufTable or repeat. +* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. +* If preferRepeat then the old table will always be used if valid. */ +size_t HUF_compress4X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ + +/** HUF_buildCTable_wksp() : + * Same as HUF_buildCTable(), but using externally allocated scratch buffer. + * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned. + */ +size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize); + +/*! HUF_readStats() : + Read compact Huffman tree, saved by HUF_writeCTable(). + `huffWeight` is destination buffer. + @return : size read from `src` , or an error Code . + Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */ +size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, + U32* nbSymbolsPtr, U32* tableLogPtr, + const void* src, size_t srcSize); + +/** HUF_readCTable() : +* Loading a CTable saved with HUF_writeCTable() */ +size_t HUF_readCTable (HUF_CElt* CTable, unsigned maxSymbolValue, const void* src, size_t srcSize); + + +/* +HUF_decompress() does the following: +1. select the decompression algorithm (X2, X4) based on pre-computed heuristics +2. build Huffman table from save, using HUF_readDTableXn() +3. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable +*/ + +/** HUF_selectDecoder() : +* Tells which decoder is likely to decode faster, +* based on a set of pre-determined metrics. +* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 . 
+* Assumption : 0 < cSrcSize < dstSize <= 128 KB */ +U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize); + +size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize); +size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize); + +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); +size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); +size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); + + +/* single stream variants */ + +size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); +size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); +/** HUF_compress1X_repeat() : +* Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. +* If it uses hufTable it does not modify hufTable or repeat. +* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. +* If preferRepeat then the old table will always be used if valid. */ +size_t HUF_compress1X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ + +size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */ +size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */ + +size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); +size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ +size_t HUF_decompress1X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ + +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /**< automatic selection of sing or double symbol decoder, based on DTable */ +size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); +size_t HUF_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); + +#endif /* HUF_STATIC_LINKING_ONLY */ + + +#if defined (__cplusplus) +} +#endif + +#endif /* HUF_H_298734234 */ diff --git a/thirdparty/zstd/common/mem.h b/thirdparty/zstd/common/mem.h new file mode 100644 index 0000000000..4773a8b930 --- /dev/null +++ b/thirdparty/zstd/common/mem.h @@ -0,0 +1,373 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +#ifndef MEM_H_MODULE +#define MEM_H_MODULE + +#if defined (__cplusplus) +extern "C" { +#endif + +/*-**************************************** +* Dependencies +******************************************/ +#include <stddef.h> /* size_t, ptrdiff_t */ +#include <string.h> /* memcpy */ + + +/*-**************************************** +* Compiler specifics +******************************************/ +#if defined(_MSC_VER) /* Visual Studio */ +# include <stdlib.h> /* _byteswap_ulong */ +# include <intrin.h> /* _byteswap_* */ +#endif +#if defined(__GNUC__) +# define MEM_STATIC static __inline __attribute__((unused)) +#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define MEM_STATIC static inline +#elif defined(_MSC_VER) +# define MEM_STATIC static __inline +#else +# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ +#endif + +/* code only tested on 32 and 64 bits systems */ +#define MEM_STATIC_ASSERT(c) { enum { MEM_static_assert = 1/(int)(!!(c)) }; } +MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); } + + +/*-************************************************************** +* Basic Types +*****************************************************************/ +#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include <stdint.h> + typedef uint8_t BYTE; + typedef uint16_t U16; + typedef int16_t S16; + typedef uint32_t U32; + typedef int32_t S32; + typedef uint64_t U64; + typedef int64_t S64; + typedef intptr_t iPtrDiff; + typedef uintptr_t uPtrDiff; +#else + typedef unsigned char BYTE; + typedef unsigned short U16; + typedef signed short S16; + typedef unsigned int U32; + typedef signed int S32; + typedef unsigned long long U64; + typedef signed long long S64; + typedef ptrdiff_t iPtrDiff; + typedef size_t uPtrDiff; +#endif + + +/*-************************************************************** +* Memory I/O +*****************************************************************/ +/* MEM_FORCE_MEMORY_ACCESS : + * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. + * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. + * The below switch allow to select different access method for improved performance. + * Method 0 (default) : use `memcpy()`. Safe and portable. + * Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable). + * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. + * Method 2 : direct access. This method is portable but violate C standard. + * It can generate buggy code on targets depending on alignment. + * In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6) + * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. 
+ * Prefer these methods in priority order (0 > 1 > 2) + */ +#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ +# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) +# define MEM_FORCE_MEMORY_ACCESS 2 +# elif defined(__INTEL_COMPILER) || defined(__GNUC__) +# define MEM_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; } +MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } + +MEM_STATIC unsigned MEM_isLittleEndian(void) +{ + const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ + return one.c[0]; +} + +#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) + +/* violates C standard, by lying on structure alignment. +Only use if no other choice to achieve best performance on target platform */ +MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } +MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } +MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } +MEM_STATIC U64 MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; } + +MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } +MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } +MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; } + +#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32)) + __pragma( pack(push, 1) ) + typedef union { U16 u16; U32 u32; U64 u64; size_t st; } unalign; + __pragma( pack(pop) ) +#else + typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign; +#endif + +MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } +MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } +MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } +MEM_STATIC U64 MEM_readST(const void* ptr) { return ((const unalign*)ptr)->st; } + +MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } +MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; } +MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign*)memPtr)->u64 = value; } + +#else + +/* default method, safe and standard. 
+ can sometimes prove slower */ + +MEM_STATIC U16 MEM_read16(const void* memPtr) +{ + U16 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +MEM_STATIC U32 MEM_read32(const void* memPtr) +{ + U32 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +MEM_STATIC U64 MEM_read64(const void* memPtr) +{ + U64 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +MEM_STATIC size_t MEM_readST(const void* memPtr) +{ + size_t val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +MEM_STATIC void MEM_write16(void* memPtr, U16 value) +{ + memcpy(memPtr, &value, sizeof(value)); +} + +MEM_STATIC void MEM_write32(void* memPtr, U32 value) +{ + memcpy(memPtr, &value, sizeof(value)); +} + +MEM_STATIC void MEM_write64(void* memPtr, U64 value) +{ + memcpy(memPtr, &value, sizeof(value)); +} + +#endif /* MEM_FORCE_MEMORY_ACCESS */ + +MEM_STATIC U32 MEM_swap32(U32 in) +{ +#if defined(_MSC_VER) /* Visual Studio */ + return _byteswap_ulong(in); +#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403) + return __builtin_bswap32(in); +#else + return ((in << 24) & 0xff000000 ) | + ((in << 8) & 0x00ff0000 ) | + ((in >> 8) & 0x0000ff00 ) | + ((in >> 24) & 0x000000ff ); +#endif +} + +MEM_STATIC U64 MEM_swap64(U64 in) +{ +#if defined(_MSC_VER) /* Visual Studio */ + return _byteswap_uint64(in); +#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403) + return __builtin_bswap64(in); +#else + return ((in << 56) & 0xff00000000000000ULL) | + ((in << 40) & 0x00ff000000000000ULL) | + ((in << 24) & 0x0000ff0000000000ULL) | + ((in << 8) & 0x000000ff00000000ULL) | + ((in >> 8) & 0x00000000ff000000ULL) | + ((in >> 24) & 0x0000000000ff0000ULL) | + ((in >> 40) & 0x000000000000ff00ULL) | + ((in >> 56) & 0x00000000000000ffULL); +#endif +} + +MEM_STATIC size_t MEM_swapST(size_t in) +{ + if (MEM_32bits()) + return (size_t)MEM_swap32((U32)in); + else + return (size_t)MEM_swap64((U64)in); +} + +/*=== Little endian r/w ===*/ + +MEM_STATIC U16 MEM_readLE16(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_read16(memPtr); + else { + const BYTE* p = (const BYTE*)memPtr; + return (U16)(p[0] + (p[1]<<8)); + } +} + +MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) +{ + if (MEM_isLittleEndian()) { + MEM_write16(memPtr, val); + } else { + BYTE* p = (BYTE*)memPtr; + p[0] = (BYTE)val; + p[1] = (BYTE)(val>>8); + } +} + +MEM_STATIC U32 MEM_readLE24(const void* memPtr) +{ + return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16); +} + +MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val) +{ + MEM_writeLE16(memPtr, (U16)val); + ((BYTE*)memPtr)[2] = (BYTE)(val>>16); +} + +MEM_STATIC U32 MEM_readLE32(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_read32(memPtr); + else + return MEM_swap32(MEM_read32(memPtr)); +} + +MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32) +{ + if (MEM_isLittleEndian()) + MEM_write32(memPtr, val32); + else + MEM_write32(memPtr, MEM_swap32(val32)); +} + +MEM_STATIC U64 MEM_readLE64(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_read64(memPtr); + else + return MEM_swap64(MEM_read64(memPtr)); +} + +MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64) +{ + if (MEM_isLittleEndian()) + MEM_write64(memPtr, val64); + else + MEM_write64(memPtr, MEM_swap64(val64)); +} + +MEM_STATIC size_t MEM_readLEST(const void* memPtr) +{ + if (MEM_32bits()) + return (size_t)MEM_readLE32(memPtr); + else + return (size_t)MEM_readLE64(memPtr); +} + +MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val) +{ + if (MEM_32bits()) + MEM_writeLE32(memPtr, 
(U32)val); + else + MEM_writeLE64(memPtr, (U64)val); +} + +/*=== Big endian r/w ===*/ + +MEM_STATIC U32 MEM_readBE32(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_swap32(MEM_read32(memPtr)); + else + return MEM_read32(memPtr); +} + +MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32) +{ + if (MEM_isLittleEndian()) + MEM_write32(memPtr, MEM_swap32(val32)); + else + MEM_write32(memPtr, val32); +} + +MEM_STATIC U64 MEM_readBE64(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_swap64(MEM_read64(memPtr)); + else + return MEM_read64(memPtr); +} + +MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64) +{ + if (MEM_isLittleEndian()) + MEM_write64(memPtr, MEM_swap64(val64)); + else + MEM_write64(memPtr, val64); +} + +MEM_STATIC size_t MEM_readBEST(const void* memPtr) +{ + if (MEM_32bits()) + return (size_t)MEM_readBE32(memPtr); + else + return (size_t)MEM_readBE64(memPtr); +} + +MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val) +{ + if (MEM_32bits()) + MEM_writeBE32(memPtr, (U32)val); + else + MEM_writeBE64(memPtr, (U64)val); +} + + +/* function safe only for comparisons */ +MEM_STATIC U32 MEM_readMINMATCH(const void* memPtr, U32 length) +{ + switch (length) + { + default : + case 4 : return MEM_read32(memPtr); + case 3 : if (MEM_isLittleEndian()) + return MEM_read32(memPtr)<<8; + else + return MEM_read32(memPtr)>>8; + } +} + +#if defined (__cplusplus) +} +#endif + +#endif /* MEM_H_MODULE */ diff --git a/thirdparty/zstd/common/pool.c b/thirdparty/zstd/common/pool.c new file mode 100644 index 0000000000..e439fe1b0d --- /dev/null +++ b/thirdparty/zstd/common/pool.c @@ -0,0 +1,194 @@ +/** + * Copyright (c) 2016-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + + +/* ====== Dependencies ======= */ +#include <stddef.h> /* size_t */ +#include <stdlib.h> /* malloc, calloc, free */ +#include "pool.h" + +/* ====== Compiler specifics ====== */ +#if defined(_MSC_VER) +# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ +#endif + + +#ifdef ZSTD_MULTITHREAD + +#include "threading.h" /* pthread adaptation */ + +/* A job is a function and an opaque argument */ +typedef struct POOL_job_s { + POOL_function function; + void *opaque; +} POOL_job; + +struct POOL_ctx_s { + /* Keep track of the threads */ + pthread_t *threads; + size_t numThreads; + + /* The queue is a circular buffer */ + POOL_job *queue; + size_t queueHead; + size_t queueTail; + size_t queueSize; + /* The mutex protects the queue */ + pthread_mutex_t queueMutex; + /* Condition variable for pushers to wait on when the queue is full */ + pthread_cond_t queuePushCond; + /* Condition variables for poppers to wait on when the queue is empty */ + pthread_cond_t queuePopCond; + /* Indicates if the queue is shutting down */ + int shutdown; +}; + +/* POOL_thread() : + Work thread for the thread pool. + Waits for jobs and executes them. + @returns : NULL on failure else non-null. 
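mem.h above is zstd's portable layer for unaligned and endian-aware memory access. The snippet below is only an illustrative sketch, not part of the upstream sources; it assumes nothing beyond mem.h being on the include path.

#include <stdio.h>
#include "mem.h"

int main(void)
{
    BYTE buf[8] = { 0 };

    MEM_writeLE32(buf, 0x11223344U);     /* stored as 44 33 22 11 regardless of host endianness */
    MEM_writeLE24(buf + 4, 0xABCDEFU);   /* 3-byte little-endian value */

    printf("read back: %08x / %06x\n",
           (unsigned)MEM_readLE32(buf),
           (unsigned)MEM_readLE24(buf + 4));
    printf("host is %s-endian, %u-bit size_t\n",
           MEM_isLittleEndian() ? "little" : "big",
           MEM_32bits() ? 32u : 64u);
    return 0;
}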
+*/ +static void* POOL_thread(void* opaque) { + POOL_ctx* const ctx = (POOL_ctx*)opaque; + if (!ctx) { return NULL; } + for (;;) { + /* Lock the mutex and wait for a non-empty queue or until shutdown */ + pthread_mutex_lock(&ctx->queueMutex); + while (ctx->queueHead == ctx->queueTail && !ctx->shutdown) { + pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex); + } + /* empty => shutting down: so stop */ + if (ctx->queueHead == ctx->queueTail) { + pthread_mutex_unlock(&ctx->queueMutex); + return opaque; + } + /* Pop a job off the queue */ + { POOL_job const job = ctx->queue[ctx->queueHead]; + ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize; + /* Unlock the mutex, signal a pusher, and run the job */ + pthread_mutex_unlock(&ctx->queueMutex); + pthread_cond_signal(&ctx->queuePushCond); + job.function(job.opaque); + } + } + /* Unreachable */ +} + +POOL_ctx *POOL_create(size_t numThreads, size_t queueSize) { + POOL_ctx *ctx; + /* Check the parameters */ + if (!numThreads || !queueSize) { return NULL; } + /* Allocate the context and zero initialize */ + ctx = (POOL_ctx *)calloc(1, sizeof(POOL_ctx)); + if (!ctx) { return NULL; } + /* Initialize the job queue. + * It needs one extra space since one space is wasted to differentiate empty + * and full queues. + */ + ctx->queueSize = queueSize + 1; + ctx->queue = (POOL_job *)malloc(ctx->queueSize * sizeof(POOL_job)); + ctx->queueHead = 0; + ctx->queueTail = 0; + pthread_mutex_init(&ctx->queueMutex, NULL); + pthread_cond_init(&ctx->queuePushCond, NULL); + pthread_cond_init(&ctx->queuePopCond, NULL); + ctx->shutdown = 0; + /* Allocate space for the thread handles */ + ctx->threads = (pthread_t *)malloc(numThreads * sizeof(pthread_t)); + ctx->numThreads = 0; + /* Check for errors */ + if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; } + /* Initialize the threads */ + { size_t i; + for (i = 0; i < numThreads; ++i) { + if (pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) { + ctx->numThreads = i; + POOL_free(ctx); + return NULL; + } } + ctx->numThreads = numThreads; + } + return ctx; +} + +/*! POOL_join() : + Shutdown the queue, wake any sleeping threads, and join all of the threads. 
+*/ +static void POOL_join(POOL_ctx *ctx) { + /* Shut down the queue */ + pthread_mutex_lock(&ctx->queueMutex); + ctx->shutdown = 1; + pthread_mutex_unlock(&ctx->queueMutex); + /* Wake up sleeping threads */ + pthread_cond_broadcast(&ctx->queuePushCond); + pthread_cond_broadcast(&ctx->queuePopCond); + /* Join all of the threads */ + { size_t i; + for (i = 0; i < ctx->numThreads; ++i) { + pthread_join(ctx->threads[i], NULL); + } } +} + +void POOL_free(POOL_ctx *ctx) { + if (!ctx) { return; } + POOL_join(ctx); + pthread_mutex_destroy(&ctx->queueMutex); + pthread_cond_destroy(&ctx->queuePushCond); + pthread_cond_destroy(&ctx->queuePopCond); + if (ctx->queue) free(ctx->queue); + if (ctx->threads) free(ctx->threads); + free(ctx); +} + +void POOL_add(void *ctxVoid, POOL_function function, void *opaque) { + POOL_ctx *ctx = (POOL_ctx *)ctxVoid; + if (!ctx) { return; } + + pthread_mutex_lock(&ctx->queueMutex); + { POOL_job const job = {function, opaque}; + /* Wait until there is space in the queue for the new job */ + size_t newTail = (ctx->queueTail + 1) % ctx->queueSize; + while (ctx->queueHead == newTail && !ctx->shutdown) { + pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex); + newTail = (ctx->queueTail + 1) % ctx->queueSize; + } + /* The queue is still going => there is space */ + if (!ctx->shutdown) { + ctx->queue[ctx->queueTail] = job; + ctx->queueTail = newTail; + } + } + pthread_mutex_unlock(&ctx->queueMutex); + pthread_cond_signal(&ctx->queuePopCond); +} + +#else /* ZSTD_MULTITHREAD not defined */ +/* No multi-threading support */ + +/* We don't need any data, but if it is empty malloc() might return NULL. */ +struct POOL_ctx_s { + int data; +}; + +POOL_ctx *POOL_create(size_t numThreads, size_t queueSize) { + (void)numThreads; + (void)queueSize; + return (POOL_ctx *)malloc(sizeof(POOL_ctx)); +} + +void POOL_free(POOL_ctx *ctx) { + if (ctx) free(ctx); +} + +void POOL_add(void *ctx, POOL_function function, void *opaque) { + (void)ctx; + function(opaque); +} + +#endif /* ZSTD_MULTITHREAD */ diff --git a/thirdparty/zstd/common/pool.h b/thirdparty/zstd/common/pool.h new file mode 100644 index 0000000000..50cb25b12c --- /dev/null +++ b/thirdparty/zstd/common/pool.h @@ -0,0 +1,56 @@ +/** + * Copyright (c) 2016-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ +#ifndef POOL_H +#define POOL_H + +#if defined (__cplusplus) +extern "C" { +#endif + + +#include <stddef.h> /* size_t */ + +typedef struct POOL_ctx_s POOL_ctx; + +/*! POOL_create() : + Create a thread pool with at most `numThreads` threads. + `numThreads` must be at least 1. + The maximum number of queued jobs before blocking is `queueSize`. + `queueSize` must be at least 1. + @return : The POOL_ctx pointer on success else NULL. +*/ +POOL_ctx *POOL_create(size_t numThreads, size_t queueSize); + +/*! POOL_free() : + Free a thread pool returned by POOL_create(). +*/ +void POOL_free(POOL_ctx *ctx); + +/*! POOL_function : + The function type that can be added to a thread pool. +*/ +typedef void (*POOL_function)(void *); +/*! POOL_add_function : + The function type for a generic thread pool add function. +*/ +typedef void (*POOL_add_function)(void *, POOL_function, void *); + +/*! POOL_add() : + Add the job `function(opaque)` to the thread pool. + Possibly blocks until there is room in the queue. 
+ Note : The function may be executed asynchronously, so `opaque` must live until the function has been completed. +*/ +void POOL_add(void *ctx, POOL_function function, void *opaque); + + +#if defined (__cplusplus) +} +#endif + +#endif diff --git a/thirdparty/zstd/common/threading.c b/thirdparty/zstd/common/threading.c new file mode 100644 index 0000000000..32d58796a9 --- /dev/null +++ b/thirdparty/zstd/common/threading.c @@ -0,0 +1,80 @@ + +/** + * Copyright (c) 2016 Tino Reichardt + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + * + * You can contact the author at: + * - zstdmt source repository: https://github.com/mcmilk/zstdmt + */ + +/** + * This file will hold wrapper for systems, which do not support pthreads + */ + +/* When ZSTD_MULTITHREAD is not defined, this file would become an empty translation unit. +* Include some ISO C header code to prevent this and portably avoid related warnings. +* (Visual C++: C4206 / GCC: -Wpedantic / Clang: -Wempty-translation-unit) +*/ +#include <stddef.h> + + +#if defined(ZSTD_MULTITHREAD) && defined(_WIN32) + +/** + * Windows minimalist Pthread Wrapper, based on : + * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html + */ + + +/* === Dependencies === */ +#include <process.h> +#include <errno.h> +#include "threading.h" + + +/* === Implementation === */ + +static unsigned __stdcall worker(void *arg) +{ + pthread_t* const thread = (pthread_t*) arg; + thread->arg = thread->start_routine(thread->arg); + return 0; +} + +int pthread_create(pthread_t* thread, const void* unused, + void* (*start_routine) (void*), void* arg) +{ + (void)unused; + thread->arg = arg; + thread->start_routine = start_routine; + thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL); + + if (!thread->handle) + return errno; + else + return 0; +} + +int _pthread_join(pthread_t * thread, void **value_ptr) +{ + DWORD result; + + if (!thread->handle) return 0; + + result = WaitForSingleObject(thread->handle, INFINITE); + switch (result) { + case WAIT_OBJECT_0: + if (value_ptr) *value_ptr = thread->arg; + return 0; + case WAIT_ABANDONED: + return EINVAL; + default: + return GetLastError(); + } +} + +#endif /* ZSTD_MULTITHREAD */ diff --git a/thirdparty/zstd/common/threading.h b/thirdparty/zstd/common/threading.h new file mode 100644 index 0000000000..c0086139ea --- /dev/null +++ b/thirdparty/zstd/common/threading.h @@ -0,0 +1,104 @@ + +/** + * Copyright (c) 2016 Tino Reichardt + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
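pool.h and pool.c above implement a small fixed-capacity job queue on top of the pthread shim. The sketch below is not part of the upstream sources and only shows how a caller might drive it; it works with or without ZSTD_MULTITHREAD, since the single-threaded fallback simply runs each job inside POOL_add.

#include <stdio.h>
#include "pool.h"

static void printJob(void* opaque)
{
    int* const id = (int*)opaque;      /* opaque must stay valid until the job has run */
    printf("job %d done\n", *id);
}

int main(void)
{
    int ids[4] = { 0, 1, 2, 3 };
    int i;
    POOL_ctx* const pool = POOL_create(2 /* threads */, 2 /* queued jobs */);
    if (pool == NULL) return 1;

    for (i = 0; i < 4; i++)
        POOL_add(pool, printJob, &ids[i]);   /* blocks while the queue is full */

    POOL_free(pool);                         /* drains remaining jobs and joins the workers */
    return 0;
}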
+ * + * You can contact the author at: + * - zstdmt source repository: https://github.com/mcmilk/zstdmt + */ + +#ifndef THREADING_H_938743 +#define THREADING_H_938743 + +#if defined (__cplusplus) +extern "C" { +#endif + +#if defined(ZSTD_MULTITHREAD) && defined(_WIN32) + +/** + * Windows minimalist Pthread Wrapper, based on : + * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html + */ +#ifdef WINVER +# undef WINVER +#endif +#define WINVER 0x0600 + +#ifdef _WIN32_WINNT +# undef _WIN32_WINNT +#endif +#define _WIN32_WINNT 0x0600 + +#ifndef WIN32_LEAN_AND_MEAN +# define WIN32_LEAN_AND_MEAN +#endif + +#include <windows.h> + +/* mutex */ +#define pthread_mutex_t CRITICAL_SECTION +#define pthread_mutex_init(a,b) InitializeCriticalSection((a)) +#define pthread_mutex_destroy(a) DeleteCriticalSection((a)) +#define pthread_mutex_lock(a) EnterCriticalSection((a)) +#define pthread_mutex_unlock(a) LeaveCriticalSection((a)) + +/* condition variable */ +#define pthread_cond_t CONDITION_VARIABLE +#define pthread_cond_init(a, b) InitializeConditionVariable((a)) +#define pthread_cond_destroy(a) /* No delete */ +#define pthread_cond_wait(a, b) SleepConditionVariableCS((a), (b), INFINITE) +#define pthread_cond_signal(a) WakeConditionVariable((a)) +#define pthread_cond_broadcast(a) WakeAllConditionVariable((a)) + +/* pthread_create() and pthread_join() */ +typedef struct { + HANDLE handle; + void* (*start_routine)(void*); + void* arg; +} pthread_t; + +int pthread_create(pthread_t* thread, const void* unused, + void* (*start_routine) (void*), void* arg); + +#define pthread_join(a, b) _pthread_join(&(a), (b)) +int _pthread_join(pthread_t* thread, void** value_ptr); + +/** + * add here more wrappers as required + */ + + +#elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */ +/* === POSIX Systems === */ +# include <pthread.h> + +#else /* ZSTD_MULTITHREAD not defined */ +/* No multithreading support */ + +#define pthread_mutex_t int /* #define rather than typedef, as sometimes pthread support is implicit, resulting in duplicated symbols */ +#define pthread_mutex_init(a,b) +#define pthread_mutex_destroy(a) +#define pthread_mutex_lock(a) +#define pthread_mutex_unlock(a) + +#define pthread_cond_t int +#define pthread_cond_init(a,b) +#define pthread_cond_destroy(a) +#define pthread_cond_wait(a,b) +#define pthread_cond_signal(a) +#define pthread_cond_broadcast(a) + +/* do not use pthread_t */ + +#endif /* ZSTD_MULTITHREAD */ + +#if defined (__cplusplus) +} +#endif + +#endif /* THREADING_H_938743 */ diff --git a/thirdparty/zstd/common/xxhash.c b/thirdparty/zstd/common/xxhash.c new file mode 100644 index 0000000000..eb44222c5f --- /dev/null +++ b/thirdparty/zstd/common/xxhash.c @@ -0,0 +1,869 @@ +/* +* xxHash - Fast Hash algorithm +* Copyright (C) 2012-2016, Yann Collet +* +* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following disclaimer +* in the documentation and/or other materials provided with the +* distribution. 
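threading.h and threading.c above give zstd a minimal pthread-compatible layer: real pthreads on POSIX, a CRITICAL_SECTION / CONDITION_VARIABLE wrapper on Windows. A usage sketch follows (not part of the upstream sources); it assumes ZSTD_MULTITHREAD is defined, because without it pthread_t is intentionally left undefined.

#include <stdio.h>
#include "threading.h"   /* selects the Windows shim or <pthread.h> at compile time */

static void* sayHello(void* arg)
{
    printf("hello from %s\n", (const char*)arg);
    return NULL;
}

int main(void)
{
    pthread_t t;
    if (pthread_create(&t, NULL, sayHello, (void*)"worker")) return 1;
    pthread_join(t, NULL);   /* expands to _pthread_join(&t, NULL) on Windows */
    return 0;
}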
+* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +* You can contact the author at : +* - xxHash homepage: http://www.xxhash.com +* - xxHash source repository : https://github.com/Cyan4973/xxHash +*/ + + +/* ************************************* +* Tuning parameters +***************************************/ +/*!XXH_FORCE_MEMORY_ACCESS : + * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. + * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. + * The below switch allow to select different access method for improved performance. + * Method 0 (default) : use `memcpy()`. Safe and portable. + * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). + * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. + * Method 2 : direct access. This method doesn't depend on compiler but violate C standard. + * It can generate buggy code on targets which do not support unaligned memory accesses. + * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) + * See http://stackoverflow.com/a/32095106/646947 for details. + * Prefer these methods in priority order (0 > 1 > 2) + */ +#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ +# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) +# define XXH_FORCE_MEMORY_ACCESS 2 +# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ + (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) +# define XXH_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +/*!XXH_ACCEPT_NULL_INPUT_POINTER : + * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer. + * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input. + * By default, this option is disabled. To enable it, uncomment below define : + */ +/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */ + +/*!XXH_FORCE_NATIVE_FORMAT : + * By default, xxHash library provides endian-independant Hash values, based on little-endian convention. + * Results are therefore identical for little-endian and big-endian CPU. + * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. + * Should endian-independance be of no importance for your application, you may set the #define below to 1, + * to improve speed for Big-endian CPU. 
+ * This option has no impact on Little_Endian CPU. + */ +#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */ +# define XXH_FORCE_NATIVE_FORMAT 0 +#endif + +/*!XXH_FORCE_ALIGN_CHECK : + * This is a minor performance trick, only useful with lots of very small keys. + * It means : check for aligned/unaligned input. + * The check costs one initial branch per hash; set to 0 when the input data + * is guaranteed to be aligned. + */ +#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ +# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) +# define XXH_FORCE_ALIGN_CHECK 0 +# else +# define XXH_FORCE_ALIGN_CHECK 1 +# endif +#endif + + +/* ************************************* +* Includes & Memory related functions +***************************************/ +/* Modify the local functions below should you wish to use some other memory routines */ +/* for malloc(), free() */ +#include <stdlib.h> +static void* XXH_malloc(size_t s) { return malloc(s); } +static void XXH_free (void* p) { free(p); } +/* for memcpy() */ +#include <string.h> +static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } + +#ifndef XXH_STATIC_LINKING_ONLY +# define XXH_STATIC_LINKING_ONLY +#endif +#include "xxhash.h" + + +/* ************************************* +* Compiler Specific Options +***************************************/ +#ifdef _MSC_VER /* Visual Studio */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# define FORCE_INLINE static __forceinline +#else +# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# ifdef __GNUC__ +# define FORCE_INLINE static inline __attribute__((always_inline)) +# else +# define FORCE_INLINE static inline +# endif +# else +# define FORCE_INLINE static +# endif /* __STDC_VERSION__ */ +#endif + + +/* ************************************* +* Basic Types +***************************************/ +#ifndef MEM_MODULE +# define MEM_MODULE +# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include <stdint.h> + typedef uint8_t BYTE; + typedef uint16_t U16; + typedef uint32_t U32; + typedef int32_t S32; + typedef uint64_t U64; +# else + typedef unsigned char BYTE; + typedef unsigned short U16; + typedef unsigned int U32; + typedef signed int S32; + typedef unsigned long long U64; /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */ +# endif +#endif + + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ +static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; } +static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign; + +static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } +static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } + +#else + +/* portable and safe solution. Generally efficient. 
+ * see : http://stackoverflow.com/a/32095106/646947 + */ + +static U32 XXH_read32(const void* memPtr) +{ + U32 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +static U64 XXH_read64(const void* memPtr) +{ + U64 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + + +/* **************************************** +* Compiler-specific Functions and Macros +******************************************/ +#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) + +/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ +#if defined(_MSC_VER) +# define XXH_rotl32(x,r) _rotl(x,r) +# define XXH_rotl64(x,r) _rotl64(x,r) +#else +# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) +# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) +#endif + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap32 _byteswap_ulong +# define XXH_swap64 _byteswap_uint64 +#elif GCC_VERSION >= 403 +# define XXH_swap32 __builtin_bswap32 +# define XXH_swap64 __builtin_bswap64 +#else +static U32 XXH_swap32 (U32 x) +{ + return ((x << 24) & 0xff000000 ) | + ((x << 8) & 0x00ff0000 ) | + ((x >> 8) & 0x0000ff00 ) | + ((x >> 24) & 0x000000ff ); +} +static U64 XXH_swap64 (U64 x) +{ + return ((x << 56) & 0xff00000000000000ULL) | + ((x << 40) & 0x00ff000000000000ULL) | + ((x << 24) & 0x0000ff0000000000ULL) | + ((x << 8) & 0x000000ff00000000ULL) | + ((x >> 8) & 0x00000000ff000000ULL) | + ((x >> 24) & 0x0000000000ff0000ULL) | + ((x >> 40) & 0x000000000000ff00ULL) | + ((x >> 56) & 0x00000000000000ffULL); +} +#endif + + +/* ************************************* +* Architecture Macros +***************************************/ +typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; + +/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ +#ifndef XXH_CPU_LITTLE_ENDIAN + static const int g_one = 1; +# define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one)) +#endif + + +/* *************************** +* Memory reads +*****************************/ +typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; + +FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) +{ + if (align==XXH_unaligned) + return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); + else + return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); +} + +FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) +{ + return XXH_readLE32_align(ptr, endian, XXH_unaligned); +} + +static U32 XXH_readBE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); +} + +FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) +{ + if (align==XXH_unaligned) + return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); + else + return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); +} + +FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) +{ + return XXH_readLE64_align(ptr, endian, XXH_unaligned); +} + +static U64 XXH_readBE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); +} + + +/* ************************************* +* Macros +***************************************/ +#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ + + +/* ************************************* +* Constants +***************************************/ +static const U32 PRIME32_1 = 2654435761U; +static const U32 PRIME32_2 = 2246822519U; +static const U32 PRIME32_3 = 3266489917U; +static const U32 PRIME32_4 = 668265263U; +static const U32 PRIME32_5 = 374761393U; + +static const U64 PRIME64_1 = 11400714785074694791ULL; +static const U64 PRIME64_2 = 14029467366897019727ULL; +static const U64 PRIME64_3 = 1609587929392839161ULL; +static const U64 PRIME64_4 = 9650029242287828579ULL; +static const U64 PRIME64_5 = 2870177450012600261ULL; + +XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } + + +/* ************************** +* Utils +****************************/ +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} + +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} + + +/* *************************** +* Simple Hash Functions +*****************************/ + +static U32 XXH32_round(U32 seed, U32 input) +{ + seed += input * PRIME32_2; + seed = XXH_rotl32(seed, 13); + seed *= PRIME32_1; + return seed; +} + +FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* bEnd = p + len; + U32 h32; +#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (p==NULL) { + len=0; + bEnd=p=(const BYTE*)(size_t)16; + } +#endif + + if (len>=16) { + const BYTE* const limit = bEnd - 16; + U32 v1 = seed + PRIME32_1 + PRIME32_2; + U32 v2 = seed + PRIME32_2; + U32 v3 = seed + 0; + U32 v4 = seed - PRIME32_1; + + do { + v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4; + v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4; + v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4; + v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4; + } while (p<=limit); + + h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); + } else { + h32 = seed + PRIME32_5; + } + + h32 += (U32) len; + + while (p+4<=bEnd) { + h32 += XXH_get32bits(p) * PRIME32_3; + h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; + p+=4; + } + + while (p<bEnd) { + h32 += (*p) * PRIME32_5; + h32 = XXH_rotl32(h32, 11) * PRIME32_1 ; + p++; + } + + h32 ^= h32 >> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + + return h32; +} + + +XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH32_CREATESTATE_STATIC(state); + XXH32_reset(state, seed); + XXH32_update(state, input, len); + return XXH32_digest(state); +#else + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); + else + 
return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); + } } + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); + else + return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); +#endif +} + + +static U64 XXH64_round(U64 acc, U64 input) +{ + acc += input * PRIME64_2; + acc = XXH_rotl64(acc, 31); + acc *= PRIME64_1; + return acc; +} + +static U64 XXH64_mergeRound(U64 acc, U64 val) +{ + val = XXH64_round(0, val); + acc ^= val; + acc = acc * PRIME64_1 + PRIME64_4; + return acc; +} + +FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + U64 h64; +#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (p==NULL) { + len=0; + bEnd=p=(const BYTE*)(size_t)32; + } +#endif + + if (len>=32) { + const BYTE* const limit = bEnd - 32; + U64 v1 = seed + PRIME64_1 + PRIME64_2; + U64 v2 = seed + PRIME64_2; + U64 v3 = seed + 0; + U64 v4 = seed - PRIME64_1; + + do { + v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; + v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; + v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; + v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; + } while (p<=limit); + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + + } else { + h64 = seed + PRIME64_5; + } + + h64 += (U64) len; + + while (p+8<=bEnd) { + U64 const k1 = XXH64_round(0, XXH_get64bits(p)); + h64 ^= k1; + h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; + p+=8; + } + + if (p+4<=bEnd) { + h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; + h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + p+=4; + } + + while (p<bEnd) { + h64 ^= (*p) * PRIME64_5; + h64 = XXH_rotl64(h64, 11) * PRIME64_1; + p++; + } + + h64 ^= h64 >> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + + return h64; +} + + +XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH64_CREATESTATE_STATIC(state); + XXH64_reset(state, seed); + XXH64_update(state, input, len); + return XXH64_digest(state); +#else + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); + else + return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); + } } + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); + else + return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); +#endif +} + + +/* ************************************************** +* Advanced Hash Functions +****************************************************/ + +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) +{ + return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); +} +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* 
statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) +{ + return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); +} +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + + +/*** Hash feed ***/ + +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed) +{ + XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */ + state.v1 = seed + PRIME32_1 + PRIME32_2; + state.v2 = seed + PRIME32_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME32_1; + memcpy(statePtr, &state, sizeof(state)); + return XXH_OK; +} + + +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) +{ + XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */ + state.v1 = seed + PRIME64_1 + PRIME64_2; + state.v2 = seed + PRIME64_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME64_1; + memcpy(statePtr, &state, sizeof(state)); + return XXH_OK; +} + + +FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (input==NULL) return XXH_ERROR; +#endif + + state->total_len_32 += (unsigned)len; + state->large_len |= (len>=16) | (state->total_len_32>=16); + + if (state->memsize + len < 16) { /* fill in tmp buffer */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); + state->memsize += (unsigned)len; + return XXH_OK; + } + + if (state->memsize) { /* some data left from previous update */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); + { const U32* p32 = state->mem32; + state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; + state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; + state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; + state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++; + } + p += 16-state->memsize; + state->memsize = 0; + } + + if (p <= bEnd-16) { + const BYTE* const limit = bEnd - 16; + U32 v1 = state->v1; + U32 v2 = state->v2; + U32 v3 = state->v3; + U32 v4 = state->v4; + + do { + v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; + v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; + v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; + v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_update_endian(state_in, input, len, XXH_littleEndian); + else + return XXH32_update_endian(state_in, input, len, XXH_bigEndian); +} + + + +FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) +{ + const BYTE * p = (const BYTE*)state->mem32; + const BYTE* 
const bEnd = (const BYTE*)(state->mem32) + state->memsize; + U32 h32; + + if (state->large_len) { + h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18); + } else { + h32 = state->v3 /* == seed */ + PRIME32_5; + } + + h32 += state->total_len_32; + + while (p+4<=bEnd) { + h32 += XXH_readLE32(p, endian) * PRIME32_3; + h32 = XXH_rotl32(h32, 17) * PRIME32_4; + p+=4; + } + + while (p<bEnd) { + h32 += (*p) * PRIME32_5; + h32 = XXH_rotl32(h32, 11) * PRIME32_1; + p++; + } + + h32 ^= h32 >> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + + return h32; +} + + +XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_digest_endian(state_in, XXH_littleEndian); + else + return XXH32_digest_endian(state_in, XXH_bigEndian); +} + + + +/* **** XXH64 **** */ + +FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (input==NULL) return XXH_ERROR; +#endif + + state->total_len += len; + + if (state->memsize + len < 32) { /* fill in tmp buffer */ + XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); + state->memsize += (U32)len; + return XXH_OK; + } + + if (state->memsize) { /* tmp buffer is full */ + XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); + state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); + state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); + state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); + state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); + p += 32-state->memsize; + state->memsize = 0; + } + + if (p+32 <= bEnd) { + const BYTE* const limit = bEnd - 32; + U64 v1 = state->v1; + U64 v2 = state->v2; + U64 v3 = state->v3; + U64 v4 = state->v4; + + do { + v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; + v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; + v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; + v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_update_endian(state_in, input, len, XXH_littleEndian); + else + return XXH64_update_endian(state_in, input, len, XXH_bigEndian); +} + + + +FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) +{ + const BYTE * p = (const BYTE*)state->mem64; + const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize; + U64 h64; + + if (state->total_len >= 32) { + U64 const v1 = state->v1; + U64 const v2 = state->v2; + U64 const v3 = state->v3; + U64 const v4 = state->v4; + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); 
+ h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + } else { + h64 = state->v3 + PRIME64_5; + } + + h64 += (U64) state->total_len; + + while (p+8<=bEnd) { + U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian)); + h64 ^= k1; + h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; + p+=8; + } + + if (p+4<=bEnd) { + h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1; + h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + p+=4; + } + + while (p<bEnd) { + h64 ^= (*p) * PRIME64_5; + h64 = XXH_rotl64(h64, 11) * PRIME64_1; + p++; + } + + h64 ^= h64 >> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + + return h64; +} + + +XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_digest_endian(state_in, XXH_littleEndian); + else + return XXH64_digest_endian(state_in, XXH_bigEndian); +} + + +/* ************************** +* Canonical representation +****************************/ + +/*! Default XXH result types are basic unsigned 32 and 64 bits. +* The canonical representation follows human-readable write convention, aka big-endian (large digits first). +* These functions allow transformation of hash result into and from its canonical format. +* This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs. +*/ + +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); + memcpy(dst, &hash, sizeof(*dst)); +} + +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); + memcpy(dst, &hash, sizeof(*dst)); +} + +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) +{ + return XXH_readBE32(src); +} + +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) +{ + return XXH_readBE64(src); +} diff --git a/thirdparty/zstd/common/xxhash.h b/thirdparty/zstd/common/xxhash.h new file mode 100644 index 0000000000..9bad1f59f6 --- /dev/null +++ b/thirdparty/zstd/common/xxhash.h @@ -0,0 +1,305 @@ +/* + xxHash - Extremely Fast Hash algorithm + Header File + Copyright (C) 2012-2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - xxHash source repository : https://github.com/Cyan4973/xxHash +*/ + +/* Notice extracted from xxHash homepage : + +xxHash is an extremely fast Hash algorithm, running at RAM speed limits. +It also successfully passes all tests from the SMHasher suite. + +Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) + +Name Speed Q.Score Author +xxHash 5.4 GB/s 10 +CrapWow 3.2 GB/s 2 Andrew +MumurHash 3a 2.7 GB/s 10 Austin Appleby +SpookyHash 2.0 GB/s 10 Bob Jenkins +SBox 1.4 GB/s 9 Bret Mulvey +Lookup3 1.2 GB/s 9 Bob Jenkins +SuperFastHash 1.2 GB/s 1 Paul Hsieh +CityHash64 1.05 GB/s 10 Pike & Alakuijala +FNV 0.55 GB/s 5 Fowler, Noll, Vo +CRC32 0.43 GB/s 9 +MD5-32 0.33 GB/s 10 Ronald L. Rivest +SHA1-32 0.28 GB/s 10 + +Q.Score is a measure of quality of the hash function. +It depends on successfully passing SMHasher test set. +10 is a perfect score. + +A 64-bits version, named XXH64, is available since r35. +It offers much better speed, but for 64-bits applications only. +Name Speed on 64 bits Speed on 32 bits +XXH64 13.8 GB/s 1.9 GB/s +XXH32 6.8 GB/s 6.0 GB/s +*/ + +#if defined (__cplusplus) +extern "C" { +#endif + +#ifndef XXHASH_H_5627135585666179 +#define XXHASH_H_5627135585666179 1 + + +/* **************************** +* Definitions +******************************/ +#include <stddef.h> /* size_t */ +typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; + + +/* **************************** +* API modifier +******************************/ +/** XXH_PRIVATE_API +* This is useful if you want to include xxhash functions in `static` mode +* in order to inline them, and remove their symbol from the public list. +* Methodology : +* #define XXH_PRIVATE_API +* #include "xxhash.h" +* `xxhash.c` is automatically included. +* It's not useful to compile and link it as a separate module anymore. +*/ +#ifdef XXH_PRIVATE_API +# ifndef XXH_STATIC_LINKING_ONLY +# define XXH_STATIC_LINKING_ONLY +# endif +# if defined(__GNUC__) +# define XXH_PUBLIC_API static __inline __attribute__((unused)) +# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define XXH_PUBLIC_API static inline +# elif defined(_MSC_VER) +# define XXH_PUBLIC_API static __inline +# else +# define XXH_PUBLIC_API static /* this version may generate warnings for unused static functions; disable the relevant warning */ +# endif +#else +# define XXH_PUBLIC_API /* do nothing */ +#endif /* XXH_PRIVATE_API */ + +/*!XXH_NAMESPACE, aka Namespace Emulation : + +If you want to include _and expose_ xxHash functions from within your own library, +but also want to avoid symbol collisions with another library which also includes xxHash, + +you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library +with the value of XXH_NAMESPACE (so avoid to keep it NULL and avoid numeric values). 
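The XXH_NAMESPACE mechanism described above is plain token pasting, so a short sketch is enough to show the intent. This is not part of the upstream sources, and MYLIB_ is a made-up prefix; the macro must be defined identically when compiling xxhash.c and when including xxhash.h.

/* Build both xxhash.c and this file with: -DXXH_NAMESPACE=MYLIB_   (MYLIB_ is hypothetical) */
#include <stddef.h>
#include "xxhash.h"

unsigned long long hashBlob(const void* p, size_t n)
{
    /* the call site is unchanged; the header maps XXH64 to the exported MYLIB_XXH64 */
    return XXH64(p, n, 0);
}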
+ +Note that no change is required within the calling program as long as it includes `xxhash.h` : +regular symbol name will be automatically translated by this header. +*/ +#ifdef XXH_NAMESPACE +# define XXH_CAT(A,B) A##B +# define XXH_NAME2(A,B) XXH_CAT(A,B) +# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) +# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) +# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) +# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) +# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) +# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) +# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) +# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) +# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) +# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) +# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) +# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) +# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) +# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) +# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) +# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) +# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) +# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) +# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) +#endif + + +/* ************************************* +* Version +***************************************/ +#define XXH_VERSION_MAJOR 0 +#define XXH_VERSION_MINOR 6 +#define XXH_VERSION_RELEASE 2 +#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) +XXH_PUBLIC_API unsigned XXH_versionNumber (void); + + +/* **************************** +* Simple Hash Functions +******************************/ +typedef unsigned int XXH32_hash_t; +typedef unsigned long long XXH64_hash_t; + +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed); +XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); + +/*! +XXH32() : + Calculate the 32-bits hash of sequence "length" bytes stored at memory address "input". + The memory between input & input+length must be valid (allocated and read-accessible). + "seed" can be used to alter the result predictably. + Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s +XXH64() : + Calculate the 64-bits hash of sequence of length "len" stored at memory address "input". + "seed" can be used to alter the result predictably. + This function runs 2x faster on 64-bits systems, but slower on 32-bits systems (see benchmark). +*/ + + +/* **************************** +* Streaming Hash Functions +******************************/ +typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ +typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ + +/*! 
State allocation, compatible with dynamic libraries */ + +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); + +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); + + +/* hash streaming */ + +XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed); +XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); + +XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); + +/* +These functions generate the xxHash of an input provided in multiple segments. +Note that, for small input, they are slower than single-call functions, due to state management. +For small input, prefer `XXH32()` and `XXH64()` . + +XXH state must first be allocated, using XXH*_createState() . + +Start a new hash by initializing state with a seed, using XXH*_reset(). + +Then, feed the hash state by calling XXH*_update() as many times as necessary. +Obviously, input must be allocated and read accessible. +The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. + +Finally, a hash value can be produced anytime, by using XXH*_digest(). +This function returns the nn-bits hash as an int or long long. + +It's still possible to continue inserting input into the hash state after a digest, +and generate some new hashes later on, by calling again XXH*_digest(). + +When done, free XXH state space if it was allocated dynamically. +*/ + + +/* ************************** +* Utils +****************************/ +#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* ! C99 */ +# define restrict /* disable restrict */ +#endif + +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dst_state, const XXH32_state_t* restrict src_state); +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dst_state, const XXH64_state_t* restrict src_state); + + +/* ************************** +* Canonical representation +****************************/ +/* Default result type for XXH functions are primitive unsigned 32 and 64 bits. +* The canonical representation uses human-readable write convention, aka big-endian (large digits first). +* These functions allow transformation of hash result into and from its canonical format. +* This way, hash values can be written into a file / memory, and remain comparable on different systems and programs. +*/ +typedef struct { unsigned char digest[4]; } XXH32_canonical_t; +typedef struct { unsigned char digest[8]; } XXH64_canonical_t; + +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); + +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); + +#endif /* XXHASH_H_5627135585666179 */ + + + +/* ================================================================================================ + This section contains definitions which are not guaranteed to remain stable. 
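The streaming flow spelled out above (createState, reset, update as often as needed, digest, freeState) maps directly onto a chunked-hashing loop. The sketch below is not part of the upstream sources; the input file name is purely illustrative.

#include <stdio.h>
#include "xxhash.h"

int main(void)
{
    unsigned char buf[4096];
    size_t n;
    FILE* const f = fopen("data.bin", "rb");     /* hypothetical input file */
    XXH64_state_t* const state = XXH64_createState();
    if (f == NULL || state == NULL) return 1;

    XXH64_reset(state, 0);                       /* seed 0 */
    while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
        XXH64_update(state, buf, n);             /* feed as many segments as needed */

    printf("XXH64 = %016llx\n", XXH64_digest(state));

    XXH64_freeState(state);
    fclose(f);
    return 0;
}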
+ They may change in future versions, becoming incompatible with a different version of the library. + They shall only be used with static linking. + Never use these definitions in association with dynamic linking ! +=================================================================================================== */ +#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXH_STATIC_H_3543687687345) +#define XXH_STATIC_H_3543687687345 + +/* These definitions are only meant to allow allocation of XXH state + statically, on stack, or in a struct for example. + Do not use members directly. */ + + struct XXH32_state_s { + unsigned total_len_32; + unsigned large_len; + unsigned v1; + unsigned v2; + unsigned v3; + unsigned v4; + unsigned mem32[4]; /* buffer defined as U32 for alignment */ + unsigned memsize; + unsigned reserved; /* never read nor write, will be removed in a future version */ + }; /* typedef'd to XXH32_state_t */ + + struct XXH64_state_s { + unsigned long long total_len; + unsigned long long v1; + unsigned long long v2; + unsigned long long v3; + unsigned long long v4; + unsigned long long mem64[4]; /* buffer defined as U64 for alignment */ + unsigned memsize; + unsigned reserved[2]; /* never read nor write, will be removed in a future version */ + }; /* typedef'd to XXH64_state_t */ + + +# ifdef XXH_PRIVATE_API +# include "xxhash.c" /* include xxhash functions as `static`, for inlining */ +# endif + +#endif /* XXH_STATIC_LINKING_ONLY && XXH_STATIC_H_3543687687345 */ + + +#if defined (__cplusplus) +} +#endif diff --git a/thirdparty/zstd/common/zstd_common.c b/thirdparty/zstd/common/zstd_common.c new file mode 100644 index 0000000000..8408a589ae --- /dev/null +++ b/thirdparty/zstd/common/zstd_common.c @@ -0,0 +1,73 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + + + +/*-************************************* +* Dependencies +***************************************/ +#include <stdlib.h> /* malloc */ +#include "error_private.h" +#define ZSTD_STATIC_LINKING_ONLY +#include "zstd.h" /* declaration of ZSTD_isError, ZSTD_getErrorName, ZSTD_getErrorCode, ZSTD_getErrorString, ZSTD_versionNumber */ + + +/*-**************************************** +* Version +******************************************/ +unsigned ZSTD_versionNumber (void) { return ZSTD_VERSION_NUMBER; } + + +/*-**************************************** +* ZSTD Error Management +******************************************/ +/*! ZSTD_isError() : +* tells if a return value is an error code */ +unsigned ZSTD_isError(size_t code) { return ERR_isError(code); } + +/*! ZSTD_getErrorName() : +* provides error code string from function result (useful for debugging) */ +const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); } + +/*! ZSTD_getError() : +* convert a `size_t` function result into a proper ZSTD_errorCode enum */ +ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); } + +/*! 
ZSTD_getErrorString() : +* provides error code string from enum */ +const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); } + + +/*=************************************************************** +* Custom allocator +****************************************************************/ +/* default uses stdlib */ +void* ZSTD_defaultAllocFunction(void* opaque, size_t size) +{ + void* address = malloc(size); + (void)opaque; + return address; +} + +void ZSTD_defaultFreeFunction(void* opaque, void* address) +{ + (void)opaque; + free(address); +} + +void* ZSTD_malloc(size_t size, ZSTD_customMem customMem) +{ + return customMem.customAlloc(customMem.opaque, size); +} + +void ZSTD_free(void* ptr, ZSTD_customMem customMem) +{ + if (ptr!=NULL) + customMem.customFree(customMem.opaque, ptr); +} diff --git a/thirdparty/zstd/common/zstd_errors.h b/thirdparty/zstd/common/zstd_errors.h new file mode 100644 index 0000000000..3d579d9693 --- /dev/null +++ b/thirdparty/zstd/common/zstd_errors.h @@ -0,0 +1,75 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +#ifndef ZSTD_ERRORS_H_398273423 +#define ZSTD_ERRORS_H_398273423 + +#if defined (__cplusplus) +extern "C" { +#endif + +/*===== dependency =====*/ +#include <stddef.h> /* size_t */ + + +/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */ +#if defined(__GNUC__) && (__GNUC__ >= 4) +# define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default"))) +#else +# define ZSTDERRORLIB_VISIBILITY +#endif +#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) +# define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY +#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) +# define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +#else +# define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY +#endif + +/*-**************************************** +* error codes list +******************************************/ +typedef enum { + ZSTD_error_no_error, + ZSTD_error_GENERIC, + ZSTD_error_prefix_unknown, + ZSTD_error_version_unsupported, + ZSTD_error_parameter_unknown, + ZSTD_error_frameParameter_unsupported, + ZSTD_error_frameParameter_unsupportedBy32bits, + ZSTD_error_frameParameter_windowTooLarge, + ZSTD_error_compressionParameter_unsupported, + ZSTD_error_init_missing, + ZSTD_error_memory_allocation, + ZSTD_error_stage_wrong, + ZSTD_error_dstSize_tooSmall, + ZSTD_error_srcSize_wrong, + ZSTD_error_corruption_detected, + ZSTD_error_checksum_wrong, + ZSTD_error_tableLog_tooLarge, + ZSTD_error_maxSymbolValue_tooLarge, + ZSTD_error_maxSymbolValue_tooSmall, + ZSTD_error_dictionary_corrupted, + ZSTD_error_dictionary_wrong, + ZSTD_error_dictionaryCreation_failed, + ZSTD_error_maxCode +} ZSTD_ErrorCode; + +/*! 
ZSTD_getErrorCode() : + convert a `size_t` function result into a `ZSTD_ErrorCode` enum type, + which can be used to compare directly with enum list published into "error_public.h" */ +ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); +ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); + + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_ERRORS_H_398273423 */ diff --git a/thirdparty/zstd/common/zstd_internal.h b/thirdparty/zstd/common/zstd_internal.h new file mode 100644 index 0000000000..2533333ba8 --- /dev/null +++ b/thirdparty/zstd/common/zstd_internal.h @@ -0,0 +1,284 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +#ifndef ZSTD_CCOMMON_H_MODULE +#define ZSTD_CCOMMON_H_MODULE + +/*-******************************************************* +* Compiler specifics +*********************************************************/ +#ifdef _MSC_VER /* Visual Studio */ +# define FORCE_INLINE static __forceinline +# include <intrin.h> /* For Visual 2005 */ +# pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# pragma warning(disable : 4324) /* disable: C4324: padded structure */ +#else +# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# ifdef __GNUC__ +# define FORCE_INLINE static inline __attribute__((always_inline)) +# else +# define FORCE_INLINE static inline +# endif +# else +# define FORCE_INLINE static +# endif /* __STDC_VERSION__ */ +#endif + +#ifdef _MSC_VER +# define FORCE_NOINLINE static __declspec(noinline) +#else +# ifdef __GNUC__ +# define FORCE_NOINLINE static __attribute__((__noinline__)) +# else +# define FORCE_NOINLINE static +# endif +#endif + + +/*-************************************* +* Dependencies +***************************************/ +#include "mem.h" +#include "error_private.h" +#define ZSTD_STATIC_LINKING_ONLY +#include "zstd.h" +#ifndef XXH_STATIC_LINKING_ONLY +# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ +#endif +#include "xxhash.h" /* XXH_reset, update, digest */ + + +/*-************************************* +* shared macros +***************************************/ +#undef MIN +#undef MAX +#define MIN(a,b) ((a)<(b) ? (a) : (b)) +#define MAX(a,b) ((a)>(b) ? 
(a) : (b)) +#define CHECK_F(f) { size_t const errcod = f; if (ERR_isError(errcod)) return errcod; } /* check and Forward error code */ +#define CHECK_E(f, e) { size_t const errcod = f; if (ERR_isError(errcod)) return ERROR(e); } /* check and send Error code */ + + +/*-************************************* +* Common constants +***************************************/ +#define ZSTD_OPT_NUM (1<<12) +#define ZSTD_DICT_MAGIC 0xEC30A437 /* v0.7+ */ + +#define ZSTD_REP_NUM 3 /* number of repcodes */ +#define ZSTD_REP_CHECK (ZSTD_REP_NUM) /* number of repcodes to check by the optimal parser */ +#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1) +#define ZSTD_REP_MOVE_OPT (ZSTD_REP_NUM) +static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 }; + +#define KB *(1 <<10) +#define MB *(1 <<20) +#define GB *(1U<<30) + +#define BIT7 128 +#define BIT6 64 +#define BIT5 32 +#define BIT4 16 +#define BIT1 2 +#define BIT0 1 + +#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10 +static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 }; +static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 }; + +#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */ +static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE; +typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e; + +#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ +#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ + +#define HufLog 12 +typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e; + +#define LONGNBSEQ 0x7F00 + +#define MINMATCH 3 + +#define Litbits 8 +#define MaxLit ((1<<Litbits) - 1) +#define MaxML 52 +#define MaxLL 35 +#define MaxOff 28 +#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */ +#define MLFSELog 9 +#define LLFSELog 9 +#define OffFSELog 8 + +static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9,10,11,12, + 13,14,15,16 }; +static const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1,-1,-1,-1 }; +#define LL_DEFAULTNORMLOG 6 /* for static allocation */ +static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG; + +static const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9,10,11, + 12,13,14,15,16 }; +static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1, + -1,-1,-1,-1,-1 }; +#define ML_DEFAULTNORMLOG 6 /* for static allocation */ +static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG; + +static const S16 OF_defaultNorm[MaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 }; +#define OF_DEFAULTNORMLOG 5 /* for static allocation */ +static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG; + + +/*-******************************************* +* Shared functions to include for inlining +*********************************************/ +static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); } +#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; } + +/*! 
ZSTD_wildcopy() : +* custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */ +#define WILDCOPY_OVERLENGTH 8 +MEM_STATIC void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length) +{ + const BYTE* ip = (const BYTE*)src; + BYTE* op = (BYTE*)dst; + BYTE* const oend = op + length; + do + COPY8(op, ip) + while (op < oend); +} + +MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd) /* should be faster for decoding, but strangely, not verified on all platform */ +{ + const BYTE* ip = (const BYTE*)src; + BYTE* op = (BYTE*)dst; + BYTE* const oend = (BYTE*)dstEnd; + do + COPY8(op, ip) + while (op < oend); +} + + +/*-******************************************* +* Private interfaces +*********************************************/ +typedef struct ZSTD_stats_s ZSTD_stats_t; + +typedef struct { + U32 off; + U32 len; +} ZSTD_match_t; + +typedef struct { + U32 price; + U32 off; + U32 mlen; + U32 litlen; + U32 rep[ZSTD_REP_NUM]; +} ZSTD_optimal_t; + + +typedef struct seqDef_s { + U32 offset; + U16 litLength; + U16 matchLength; +} seqDef; + + +typedef struct { + seqDef* sequencesStart; + seqDef* sequences; + BYTE* litStart; + BYTE* lit; + BYTE* llCode; + BYTE* mlCode; + BYTE* ofCode; + U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */ + U32 longLengthPos; + /* opt */ + ZSTD_optimal_t* priceTable; + ZSTD_match_t* matchTable; + U32* matchLengthFreq; + U32* litLengthFreq; + U32* litFreq; + U32* offCodeFreq; + U32 matchLengthSum; + U32 matchSum; + U32 litLengthSum; + U32 litSum; + U32 offCodeSum; + U32 log2matchLengthSum; + U32 log2matchSum; + U32 log2litLengthSum; + U32 log2litSum; + U32 log2offCodeSum; + U32 factor; + U32 staticPrices; + U32 cachedPrice; + U32 cachedLitLength; + const BYTE* cachedLiterals; +} seqStore_t; + +const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); +void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); +int ZSTD_isSkipFrame(ZSTD_DCtx* dctx); + +/* custom memory allocation functions */ +void* ZSTD_defaultAllocFunction(void* opaque, size_t size); +void ZSTD_defaultFreeFunction(void* opaque, void* address); +#ifndef ZSTD_DLL_IMPORT +static const ZSTD_customMem defaultCustomMem = { ZSTD_defaultAllocFunction, ZSTD_defaultFreeFunction, NULL }; +#endif +void* ZSTD_malloc(size_t size, ZSTD_customMem customMem); +void ZSTD_free(void* ptr, ZSTD_customMem customMem); + + +/*====== common function ======*/ + +MEM_STATIC U32 ZSTD_highbit32(U32 val) +{ +# if defined(_MSC_VER) /* Visual */ + unsigned long r=0; + _BitScanReverse(&r, val); + return (unsigned)r; +# elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */ + return 31 - __builtin_clz(val); +# else /* Software version */ + static const int DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; + U32 v = val; + int r; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + r = DeBruijnClz[(U32)(v * 0x07C4ACDDU) >> 27]; + return r; +# endif +} + + +/* hidden functions */ + +/* ZSTD_invalidateRepCodes() : + * ensures next compression will not use repcodes from previous block. + * Note : only works with regular variant; + * do not use with extDict variant ! 
*/ +void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); + + +#endif /* ZSTD_CCOMMON_H_MODULE */ diff --git a/thirdparty/zstd/compress/fse_compress.c b/thirdparty/zstd/compress/fse_compress.c new file mode 100644 index 0000000000..26e8052ddc --- /dev/null +++ b/thirdparty/zstd/compress/fse_compress.c @@ -0,0 +1,857 @@ +/* ****************************************************************** + FSE : Finite State Entropy encoder + Copyright (C) 2013-2015, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + You can contact the author at : + - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ + +/* ************************************************************** +* Compiler specifics +****************************************************************/ +#ifdef _MSC_VER /* Visual Studio */ +# define FORCE_INLINE static __forceinline +# include <intrin.h> /* For Visual 2005 */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ +#else +# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# ifdef __GNUC__ +# define FORCE_INLINE static inline __attribute__((always_inline)) +# else +# define FORCE_INLINE static inline +# endif +# else +# define FORCE_INLINE static +# endif /* __STDC_VERSION__ */ +#endif + + +/* ************************************************************** +* Includes +****************************************************************/ +#include <stdlib.h> /* malloc, free, qsort */ +#include <string.h> /* memcpy, memset */ +#include <stdio.h> /* printf (debug) */ +#include "bitstream.h" +#define FSE_STATIC_LINKING_ONLY +#include "fse.h" + + +/* ************************************************************** +* Error Management +****************************************************************/ +#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ + + +/* ************************************************************** +* Templates +****************************************************************/ +/* + designed to be included + for type-specific functions (template emulation in C) + Objective is to write these functions only once, for improved maintenance +*/ + +/* safety checks */ +#ifndef FSE_FUNCTION_EXTENSION +# error "FSE_FUNCTION_EXTENSION must be defined" +#endif +#ifndef FSE_FUNCTION_TYPE +# error "FSE_FUNCTION_TYPE must be defined" +#endif + +/* Function names */ +#define FSE_CAT(X,Y) X##Y +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) + + +/* Function templates */ + +/* FSE_buildCTable_wksp() : + * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). + * wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)` + * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements + */ +size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize) +{ + U32 const tableSize = 1 << tableLog; + U32 const tableMask = tableSize - 1; + void* const ptr = ct; + U16* const tableU16 = ( (U16*) ptr) + 2; + void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? 
tableSize>>1 : 1) ; + FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT); + U32 const step = FSE_TABLESTEP(tableSize); + U32 cumul[FSE_MAX_SYMBOL_VALUE+2]; + + FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)workSpace; + U32 highThreshold = tableSize-1; + + /* CTable header */ + if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge); + tableU16[-2] = (U16) tableLog; + tableU16[-1] = (U16) maxSymbolValue; + + /* For explanations on how to distribute symbol values over the table : + * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ + + /* symbol start positions */ + { U32 u; + cumul[0] = 0; + for (u=1; u<=maxSymbolValue+1; u++) { + if (normalizedCounter[u-1]==-1) { /* Low proba symbol */ + cumul[u] = cumul[u-1] + 1; + tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1); + } else { + cumul[u] = cumul[u-1] + normalizedCounter[u-1]; + } } + cumul[maxSymbolValue+1] = tableSize+1; + } + + /* Spread symbols */ + { U32 position = 0; + U32 symbol; + for (symbol=0; symbol<=maxSymbolValue; symbol++) { + int nbOccurences; + for (nbOccurences=0; nbOccurences<normalizedCounter[symbol]; nbOccurences++) { + tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol; + position = (position + step) & tableMask; + while (position > highThreshold) position = (position + step) & tableMask; /* Low proba area */ + } } + + if (position!=0) return ERROR(GENERIC); /* Must have gone through all positions */ + } + + /* Build table */ + { U32 u; for (u=0; u<tableSize; u++) { + FSE_FUNCTION_TYPE s = tableSymbol[u]; /* note : static analyzer may not understand tableSymbol is properly initialized */ + tableU16[cumul[s]++] = (U16) (tableSize+u); /* TableU16 : sorted by symbol order; gives next state value */ + } } + + /* Build Symbol Transformation Table */ + { unsigned total = 0; + unsigned s; + for (s=0; s<=maxSymbolValue; s++) { + switch (normalizedCounter[s]) + { + case 0: break; + + case -1: + case 1: + symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog); + symbolTT[s].deltaFindState = total - 1; + total ++; + break; + default : + { + U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1); + U32 const minStatePlus = normalizedCounter[s] << maxBitsOut; + symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus; + symbolTT[s].deltaFindState = total - normalizedCounter[s]; + total += normalizedCounter[s]; + } } } } + + return 0; +} + + +size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) +{ + FSE_FUNCTION_TYPE tableSymbol[FSE_MAX_TABLESIZE]; /* memset() is not necessary, even if static analyzer complain about it */ + return FSE_buildCTable_wksp(ct, normalizedCounter, maxSymbolValue, tableLog, tableSymbol, sizeof(tableSymbol)); +} + + + +#ifndef FSE_COMMONDEFS_ONLY + +/*-************************************************************** +* FSE NCount encoding-decoding +****************************************************************/ +size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog) +{ + size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3; + return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? 
use default */ +} + +static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize, + const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, + unsigned writeIsSafe) +{ + BYTE* const ostart = (BYTE*) header; + BYTE* out = ostart; + BYTE* const oend = ostart + headerBufferSize; + int nbBits; + const int tableSize = 1 << tableLog; + int remaining; + int threshold; + U32 bitStream; + int bitCount; + unsigned charnum = 0; + int previous0 = 0; + + bitStream = 0; + bitCount = 0; + /* Table Size */ + bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount; + bitCount += 4; + + /* Init */ + remaining = tableSize+1; /* +1 for extra accuracy */ + threshold = tableSize; + nbBits = tableLog+1; + + while (remaining>1) { /* stops at 1 */ + if (previous0) { + unsigned start = charnum; + while (!normalizedCounter[charnum]) charnum++; + while (charnum >= start+24) { + start+=24; + bitStream += 0xFFFFU << bitCount; + if ((!writeIsSafe) && (out > oend-2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ + out[0] = (BYTE) bitStream; + out[1] = (BYTE)(bitStream>>8); + out+=2; + bitStream>>=16; + } + while (charnum >= start+3) { + start+=3; + bitStream += 3 << bitCount; + bitCount += 2; + } + bitStream += (charnum-start) << bitCount; + bitCount += 2; + if (bitCount>16) { + if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ + out[0] = (BYTE)bitStream; + out[1] = (BYTE)(bitStream>>8); + out += 2; + bitStream >>= 16; + bitCount -= 16; + } } + { int count = normalizedCounter[charnum++]; + int const max = (2*threshold-1)-remaining; + remaining -= count < 0 ? -count : count; + count++; /* +1 for extra accuracy */ + if (count>=threshold) count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */ + bitStream += count << bitCount; + bitCount += nbBits; + bitCount -= (count<max); + previous0 = (count==1); + if (remaining<1) return ERROR(GENERIC); + while (remaining<threshold) nbBits--, threshold>>=1; + } + if (bitCount>16) { + if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ + out[0] = (BYTE)bitStream; + out[1] = (BYTE)(bitStream>>8); + out += 2; + bitStream >>= 16; + bitCount -= 16; + } } + + /* flush remaining bitStream */ + if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ + out[0] = (BYTE)bitStream; + out[1] = (BYTE)(bitStream>>8); + out+= (bitCount+7) /8; + + if (charnum > maxSymbolValue + 1) return ERROR(GENERIC); + + return (out-ostart); +} + + +size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) +{ + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported */ + if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported */ + + if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog)) + return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0); + + return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1); +} + + + +/*-************************************************************** +* Counting histogram +****************************************************************/ +/*! FSE_count_simple + This function counts byte values within `src`, and store the histogram into table `count`. + It doesn't use any additional memory. + But this function is unsafe : it doesn't check that all values within `src` can fit into `count`. 
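+    A minimal calling sketch, assuming `buf` / `bufSize` describe the caller's data :
+
+        unsigned count[256];
+        unsigned maxSymbolValue = 255;   // covers the full byte range, so no write can land outside count[]
+        size_t const maxCount = FSE_count_simple(count, &maxSymbolValue, buf, bufSize);
+        // on return, maxSymbolValue has been lowered to the largest byte value actually present
+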
+ For this reason, prefer using a table `count` with 256 elements. + @return : count of most numerous element +*/ +size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, + const void* src, size_t srcSize) +{ + const BYTE* ip = (const BYTE*)src; + const BYTE* const end = ip + srcSize; + unsigned maxSymbolValue = *maxSymbolValuePtr; + unsigned max=0; + + memset(count, 0, (maxSymbolValue+1)*sizeof(*count)); + if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; } + + while (ip<end) count[*ip++]++; + + while (!count[maxSymbolValue]) maxSymbolValue--; + *maxSymbolValuePtr = maxSymbolValue; + + { U32 s; for (s=0; s<=maxSymbolValue; s++) if (count[s] > max) max = count[s]; } + + return (size_t)max; +} + + +/* FSE_count_parallel_wksp() : + * Same as FSE_count_parallel(), but using an externally provided scratch buffer. + * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`` */ +static size_t FSE_count_parallel_wksp( + unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize, + unsigned checkMax, unsigned* const workSpace) +{ + const BYTE* ip = (const BYTE*)source; + const BYTE* const iend = ip+sourceSize; + unsigned maxSymbolValue = *maxSymbolValuePtr; + unsigned max=0; + U32* const Counting1 = workSpace; + U32* const Counting2 = Counting1 + 256; + U32* const Counting3 = Counting2 + 256; + U32* const Counting4 = Counting3 + 256; + + memset(Counting1, 0, 4*256*sizeof(unsigned)); + + /* safety checks */ + if (!sourceSize) { + memset(count, 0, maxSymbolValue + 1); + *maxSymbolValuePtr = 0; + return 0; + } + if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */ + + /* by stripes of 16 bytes */ + { U32 cached = MEM_read32(ip); ip += 4; + while (ip < iend-15) { + U32 c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) ]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) ]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) ]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) ]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + } + ip-=4; + } + + /* finish last symbols */ + while (ip<iend) Counting1[*ip++]++; + + if (checkMax) { /* verify stats will fit into destination table */ + U32 s; for (s=255; s>maxSymbolValue; s--) { + Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s]; + if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall); + } } + + { U32 s; for (s=0; s<=maxSymbolValue; s++) { + count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s]; + if (count[s] > max) max = count[s]; + } } + + while (!count[maxSymbolValue]) maxSymbolValue--; + *maxSymbolValuePtr = maxSymbolValue; + return (size_t)max; +} + +/* FSE_countFast_wksp() : + * Same as FSE_countFast(), but using an externally provided scratch buffer. 
+ * `workSpace` size must be table of >= `1024` unsigned */ +size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize, unsigned* workSpace) +{ + if (sourceSize < 1500) return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize); + return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace); +} + +/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */ +size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize) +{ + unsigned tmpCounters[1024]; + return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters); +} + +/* FSE_count_wksp() : + * Same as FSE_count(), but using an externally provided scratch buffer. + * `workSpace` size must be table of >= `1024` unsigned */ +size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize, unsigned* workSpace) +{ + if (*maxSymbolValuePtr < 255) + return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace); + *maxSymbolValuePtr = 255; + return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace); +} + +size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr, + const void* src, size_t srcSize) +{ + unsigned tmpCounters[1024]; + return FSE_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters); +} + + + +/*-************************************************************** +* FSE Compression Code +****************************************************************/ +/*! FSE_sizeof_CTable() : + FSE_CTable is a variable size structure which contains : + `U16 tableLog;` + `U16 maxSymbolValue;` + `U16 nextStateNumber[1 << tableLog];` // This size is variable + `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];` // This size is variable +Allocation is manual (C standard does not support variable-size structures). +*/ +size_t FSE_sizeof_CTable (unsigned maxSymbolValue, unsigned tableLog) +{ + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); + return FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32); +} + +FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog) +{ + size_t size; + if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX; + size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32); + return (FSE_CTable*)malloc(size); +} + +void FSE_freeCTable (FSE_CTable* ct) { free(ct); } + +/* provides the minimum logSize to safely represent a distribution */ +static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue) +{ + U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1; + U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2; + U32 minBits = minBitsSrc < minBitsSymbols ? 
minBitsSrc : minBitsSymbols; + return minBits; +} + +unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus) +{ + U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus; + U32 tableLog = maxTableLog; + U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue); + if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG; + if (maxBitsSrc < tableLog) tableLog = maxBitsSrc; /* Accuracy can be reduced */ + if (minBits > tableLog) tableLog = minBits; /* Need a minimum to safely represent all symbol values */ + if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG; + if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG; + return tableLog; +} + +unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) +{ + return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2); +} + + +/* Secondary normalization method. + To be used when primary method fails. */ + +static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue) +{ + short const NOT_YET_ASSIGNED = -2; + U32 s; + U32 distributed = 0; + U32 ToDistribute; + + /* Init */ + U32 const lowThreshold = (U32)(total >> tableLog); + U32 lowOne = (U32)((total * 3) >> (tableLog + 1)); + + for (s=0; s<=maxSymbolValue; s++) { + if (count[s] == 0) { + norm[s]=0; + continue; + } + if (count[s] <= lowThreshold) { + norm[s] = -1; + distributed++; + total -= count[s]; + continue; + } + if (count[s] <= lowOne) { + norm[s] = 1; + distributed++; + total -= count[s]; + continue; + } + + norm[s]=NOT_YET_ASSIGNED; + } + ToDistribute = (1 << tableLog) - distributed; + + if ((total / ToDistribute) > lowOne) { + /* risk of rounding to zero */ + lowOne = (U32)((total * 3) / (ToDistribute * 2)); + for (s=0; s<=maxSymbolValue; s++) { + if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) { + norm[s] = 1; + distributed++; + total -= count[s]; + continue; + } } + ToDistribute = (1 << tableLog) - distributed; + } + + if (distributed == maxSymbolValue+1) { + /* all values are pretty poor; + probably incompressible data (should have already been detected); + find max, then give all remaining points to max */ + U32 maxV = 0, maxC = 0; + for (s=0; s<=maxSymbolValue; s++) + if (count[s] > maxC) maxV=s, maxC=count[s]; + norm[maxV] += (short)ToDistribute; + return 0; + } + + if (total == 0) { + /* all of the symbols were low enough for the lowOne or lowThreshold */ + for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1)) + if (norm[s] > 0) ToDistribute--, norm[s]++; + return 0; + } + + { U64 const vStepLog = 62 - tableLog; + U64 const mid = (1ULL << (vStepLog-1)) - 1; + U64 const rStep = ((((U64)1<<vStepLog) * ToDistribute) + mid) / total; /* scale on remaining */ + U64 tmpTotal = mid; + for (s=0; s<=maxSymbolValue; s++) { + if (norm[s]==NOT_YET_ASSIGNED) { + U64 const end = tmpTotal + (count[s] * rStep); + U32 const sStart = (U32)(tmpTotal >> vStepLog); + U32 const sEnd = (U32)(end >> vStepLog); + U32 const weight = sEnd - sStart; + if (weight < 1) + return ERROR(GENERIC); + norm[s] = (short)weight; + tmpTotal = end; + } } } + + return 0; +} + + +size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog, + const unsigned* count, size_t total, + unsigned maxSymbolValue) +{ + /* Sanity checks */ + if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG; + if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported size */ + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* 
Unsupported size */ + if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */ + + { U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 }; + U64 const scale = 62 - tableLog; + U64 const step = ((U64)1<<62) / total; /* <== here, one division ! */ + U64 const vStep = 1ULL<<(scale-20); + int stillToDistribute = 1<<tableLog; + unsigned s; + unsigned largest=0; + short largestP=0; + U32 lowThreshold = (U32)(total >> tableLog); + + for (s=0; s<=maxSymbolValue; s++) { + if (count[s] == total) return 0; /* rle special case */ + if (count[s] == 0) { normalizedCounter[s]=0; continue; } + if (count[s] <= lowThreshold) { + normalizedCounter[s] = -1; + stillToDistribute--; + } else { + short proba = (short)((count[s]*step) >> scale); + if (proba<8) { + U64 restToBeat = vStep * rtbTable[proba]; + proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat; + } + if (proba > largestP) largestP=proba, largest=s; + normalizedCounter[s] = proba; + stillToDistribute -= proba; + } } + if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) { + /* corner case, need another normalization method */ + size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue); + if (FSE_isError(errorCode)) return errorCode; + } + else normalizedCounter[largest] += (short)stillToDistribute; + } + +#if 0 + { /* Print Table (debug) */ + U32 s; + U32 nTotal = 0; + for (s=0; s<=maxSymbolValue; s++) + printf("%3i: %4i \n", s, normalizedCounter[s]); + for (s=0; s<=maxSymbolValue; s++) + nTotal += abs(normalizedCounter[s]); + if (nTotal != (1U<<tableLog)) + printf("Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog); + getchar(); + } +#endif + + return tableLog; +} + + +/* fake FSE_CTable, for raw (uncompressed) input */ +size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits) +{ + const unsigned tableSize = 1 << nbBits; + const unsigned tableMask = tableSize - 1; + const unsigned maxSymbolValue = tableMask; + void* const ptr = ct; + U16* const tableU16 = ( (U16*) ptr) + 2; + void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1); /* assumption : tableLog >= 1 */ + FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT); + unsigned s; + + /* Sanity checks */ + if (nbBits < 1) return ERROR(GENERIC); /* min size */ + + /* header */ + tableU16[-2] = (U16) nbBits; + tableU16[-1] = (U16) maxSymbolValue; + + /* Build table */ + for (s=0; s<tableSize; s++) + tableU16[s] = (U16)(tableSize + s); + + /* Build Symbol Transformation Table */ + { const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits); + for (s=0; s<=maxSymbolValue; s++) { + symbolTT[s].deltaNbBits = deltaNbBits; + symbolTT[s].deltaFindState = s-1; + } } + + return 0; +} + +/* fake FSE_CTable, for rle input (always same symbol) */ +size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue) +{ + void* ptr = ct; + U16* tableU16 = ( (U16*) ptr) + 2; + void* FSCTptr = (U32*)ptr + 2; + FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr; + + /* header */ + tableU16[-2] = (U16) 0; + tableU16[-1] = (U16) symbolValue; + + /* Build table */ + tableU16[0] = 0; + tableU16[1] = 0; /* just in case */ + + /* Build Symbol Transformation Table */ + symbolTT[symbolValue].deltaNbBits = 0; + symbolTT[symbolValue].deltaFindState = 0; + + return 0; +} + + +static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize, + const void* src, size_t 
srcSize, + const FSE_CTable* ct, const unsigned fast) +{ + const BYTE* const istart = (const BYTE*) src; + const BYTE* const iend = istart + srcSize; + const BYTE* ip=iend; + + BIT_CStream_t bitC; + FSE_CState_t CState1, CState2; + + /* init */ + if (srcSize <= 2) return 0; + { size_t const initError = BIT_initCStream(&bitC, dst, dstSize); + if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ } + +#define FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s)) + + if (srcSize & 1) { + FSE_initCState2(&CState1, ct, *--ip); + FSE_initCState2(&CState2, ct, *--ip); + FSE_encodeSymbol(&bitC, &CState1, *--ip); + FSE_FLUSHBITS(&bitC); + } else { + FSE_initCState2(&CState2, ct, *--ip); + FSE_initCState2(&CState1, ct, *--ip); + } + + /* join to mod 4 */ + srcSize -= 2; + if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) { /* test bit 2 */ + FSE_encodeSymbol(&bitC, &CState2, *--ip); + FSE_encodeSymbol(&bitC, &CState1, *--ip); + FSE_FLUSHBITS(&bitC); + } + + /* 2 or 4 encoding per loop */ + while ( ip>istart ) { + + FSE_encodeSymbol(&bitC, &CState2, *--ip); + + if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 ) /* this test must be static */ + FSE_FLUSHBITS(&bitC); + + FSE_encodeSymbol(&bitC, &CState1, *--ip); + + if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) { /* this test must be static */ + FSE_encodeSymbol(&bitC, &CState2, *--ip); + FSE_encodeSymbol(&bitC, &CState1, *--ip); + } + + FSE_FLUSHBITS(&bitC); + } + + FSE_flushCState(&bitC, &CState2); + FSE_flushCState(&bitC, &CState1); + return BIT_closeCStream(&bitC); +} + +size_t FSE_compress_usingCTable (void* dst, size_t dstSize, + const void* src, size_t srcSize, + const FSE_CTable* ct) +{ + unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize)); + + if (fast) + return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1); + else + return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0); +} + + +size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); } + +#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return f +#define CHECK_F(f) { CHECK_V_F(_var_err__, f); } + +/* FSE_compress_wksp() : + * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`). + * `wkspSize` size must be `(1<<tableLog)`. 
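+ *  A minimal calling sketch, assuming `src`/`srcSize` hold the input, `dst`/`dstCapacity`
+ *  were sized with FSE_compressBound(srcSize), and FSE_WKSP_SIZE_U32() (from fse.h) gives
+ *  the required workspace element count :
+ *
+ *      U32 wksp[ FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE) ];
+ *      size_t const cSize = FSE_compress_wksp(dst, dstCapacity, src, srcSize,
+ *                                             FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG,
+ *                                             wksp, sizeof(wksp));
+ *      // cSize==0 : not compressible ; cSize==1 : src is a single repeated symbol ; otherwise compressed size
+ *
+ *  FSE_compress2() below wraps this same pattern around a maximally-sized stack workspace.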
+ */ +size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize) +{ + BYTE* const ostart = (BYTE*) dst; + BYTE* op = ostart; + BYTE* const oend = ostart + dstSize; + + U32 count[FSE_MAX_SYMBOL_VALUE+1]; + S16 norm[FSE_MAX_SYMBOL_VALUE+1]; + FSE_CTable* CTable = (FSE_CTable*)workSpace; + size_t const CTableSize = FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue); + void* scratchBuffer = (void*)(CTable + CTableSize); + size_t const scratchBufferSize = wkspSize - (CTableSize * sizeof(FSE_CTable)); + + /* init conditions */ + if (wkspSize < FSE_WKSP_SIZE_U32(tableLog, maxSymbolValue)) return ERROR(tableLog_tooLarge); + if (srcSize <= 1) return 0; /* Not compressible */ + if (!maxSymbolValue) maxSymbolValue = FSE_MAX_SYMBOL_VALUE; + if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG; + + /* Scan input and build symbol stats */ + { CHECK_V_F(maxCount, FSE_count_wksp(count, &maxSymbolValue, src, srcSize, (unsigned*)scratchBuffer) ); + if (maxCount == srcSize) return 1; /* only a single symbol in src : rle */ + if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */ + if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */ + } + + tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue); + CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) ); + + /* Write table description header */ + { CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); + op += nc_err; + } + + /* Compress */ + CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) ); + { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) ); + if (cSize == 0) return 0; /* not enough space for compressed data */ + op += cSize; + } + + /* check compressibility */ + if ( (size_t)(op-ostart) >= srcSize-1 ) return 0; + + return op-ostart; +} + +typedef struct { + FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)]; + BYTE scratchBuffer[1 << FSE_MAX_TABLELOG]; +} fseWkspMax_t; + +size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog) +{ + fseWkspMax_t scratchBuffer; + FSE_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */ + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); + return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer)); +} + +size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG); +} + + +#endif /* FSE_COMMONDEFS_ONLY */ diff --git a/thirdparty/zstd/compress/huf_compress.c b/thirdparty/zstd/compress/huf_compress.c new file mode 100644 index 0000000000..fe11aafb8f --- /dev/null +++ b/thirdparty/zstd/compress/huf_compress.c @@ -0,0 +1,684 @@ +/* ****************************************************************** + Huffman encoder, part of New Generation Entropy library + Copyright (C) 2013-2016, Yann Collet. 
+ + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ + +/* ************************************************************** +* Compiler specifics +****************************************************************/ +#ifdef _MSC_VER /* Visual Studio */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +#endif + + +/* ************************************************************** +* Includes +****************************************************************/ +#include <string.h> /* memcpy, memset */ +#include <stdio.h> /* printf (debug) */ +#include "bitstream.h" +#define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */ +#include "fse.h" /* header compression */ +#define HUF_STATIC_LINKING_ONLY +#include "huf.h" + + +/* ************************************************************** +* Error Management +****************************************************************/ +#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ +#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return f +#define CHECK_F(f) { CHECK_V_F(_var_err__, f); } + + +/* ************************************************************** +* Utils +****************************************************************/ +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) +{ + return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); +} + + +/* ******************************************************* +* HUF : Huffman block compression +*********************************************************/ +/* HUF_compressWeights() : + * Same as FSE_compress(), but dedicated to huff0's weights compression. + * The use case needs much less stack memory. + * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX. 
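+ *  A minimal calling sketch (hypothetical `weights[]`, one weight per symbol, each <= HUF_TABLELOG_MAX) :
+ *
+ *      BYTE const weights[16] = { 1, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4 };
+ *      BYTE header[HUF_SYMBOLVALUE_MAX];
+ *      size_t const hSize = HUF_compressWeights(header, sizeof(header), weights, sizeof(weights));
+ *      // hSize <= 1 : weights not FSE-compressible ; HUF_writeCTable() below then falls back to raw 4-bit storage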
+ */ +#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6 +size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize) +{ + BYTE* const ostart = (BYTE*) dst; + BYTE* op = ostart; + BYTE* const oend = ostart + dstSize; + + U32 maxSymbolValue = HUF_TABLELOG_MAX; + U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER; + + FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)]; + BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER]; + + U32 count[HUF_TABLELOG_MAX+1]; + S16 norm[HUF_TABLELOG_MAX+1]; + + /* init conditions */ + if (wtSize <= 1) return 0; /* Not compressible */ + + /* Scan input and build symbol stats */ + { CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize) ); + if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */ + if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */ + } + + tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); + CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) ); + + /* Write table description header */ + { CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); + op += hSize; + } + + /* Compress */ + CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) ); + { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) ); + if (cSize == 0) return 0; /* not enough space for compressed data */ + op += cSize; + } + + return op-ostart; +} + + +struct HUF_CElt_s { + U16 val; + BYTE nbBits; +}; /* typedef'd to HUF_CElt within "huf.h" */ + +/*! HUF_writeCTable() : + `CTable` : Huffman tree to save, using huf representation. + @return : size of saved CTable */ +size_t HUF_writeCTable (void* dst, size_t maxDstSize, + const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog) +{ + BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */ + BYTE huffWeight[HUF_SYMBOLVALUE_MAX]; + BYTE* op = (BYTE*)dst; + U32 n; + + /* check conditions */ + if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); + + /* convert to weight */ + bitsToWeight[0] = 0; + for (n=1; n<huffLog+1; n++) + bitsToWeight[n] = (BYTE)(huffLog + 1 - n); + for (n=0; n<maxSymbolValue; n++) + huffWeight[n] = bitsToWeight[CTable[n].nbBits]; + + /* attempt weights compression by FSE */ + { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) ); + if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */ + op[0] = (BYTE)hSize; + return hSize+1; + } } + + /* write raw values as 4-bits (max : 15) */ + if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */ + if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */ + op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1)); + huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */ + for (n=0; n<maxSymbolValue; n+=2) + op[(n/2)+1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n+1]); + return ((maxSymbolValue+1)/2) + 1; +} + + +size_t HUF_readCTable (HUF_CElt* CTable, U32 maxSymbolValue, const void* src, size_t srcSize) +{ + BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */ + U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */ + U32 tableLog 
= 0; + U32 nbSymbols = 0; + + /* get symbol weights */ + CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize)); + + /* check result */ + if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); + if (nbSymbols > maxSymbolValue+1) return ERROR(maxSymbolValue_tooSmall); + + /* Prepare base value per rank */ + { U32 n, nextRankStart = 0; + for (n=1; n<=tableLog; n++) { + U32 current = nextRankStart; + nextRankStart += (rankVal[n] << (n-1)); + rankVal[n] = current; + } } + + /* fill nbBits */ + { U32 n; for (n=0; n<nbSymbols; n++) { + const U32 w = huffWeight[n]; + CTable[n].nbBits = (BYTE)(tableLog + 1 - w); + } } + + /* fill val */ + { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */ + U16 valPerRank[HUF_TABLELOG_MAX+2] = {0}; + { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; } + /* determine stating value per rank */ + valPerRank[tableLog+1] = 0; /* for w==0 */ + { U16 min = 0; + U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */ + valPerRank[n] = min; /* get starting value within each rank */ + min += nbPerRank[n]; + min >>= 1; + } } + /* assign value within rank, symbol order */ + { U32 n; for (n=0; n<=maxSymbolValue; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; } + } + + return readSize; +} + + +typedef struct nodeElt_s { + U32 count; + U16 parent; + BYTE byte; + BYTE nbBits; +} nodeElt; + +static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits) +{ + const U32 largestBits = huffNode[lastNonNull].nbBits; + if (largestBits <= maxNbBits) return largestBits; /* early exit : no elt > maxNbBits */ + + /* there are several too large elements (at least >= 2) */ + { int totalCost = 0; + const U32 baseCost = 1 << (largestBits - maxNbBits); + U32 n = lastNonNull; + + while (huffNode[n].nbBits > maxNbBits) { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)); + huffNode[n].nbBits = (BYTE)maxNbBits; + n --; + } /* n stops at huffNode[n].nbBits <= maxNbBits */ + while (huffNode[n].nbBits == maxNbBits) n--; /* n end at index of smallest symbol using < maxNbBits */ + + /* renorm totalCost */ + totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */ + + /* repay normalized cost */ + { U32 const noSymbol = 0xF0F0F0F0; + U32 rankLast[HUF_TABLELOG_MAX+2]; + int pos; + + /* Get pos of last (smallest) symbol per rank */ + memset(rankLast, 0xF0, sizeof(rankLast)); + { U32 currentNbBits = maxNbBits; + for (pos=n ; pos >= 0; pos--) { + if (huffNode[pos].nbBits >= currentNbBits) continue; + currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */ + rankLast[maxNbBits-currentNbBits] = pos; + } } + + while (totalCost > 0) { + U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1; + for ( ; nBitsToDecrease > 1; nBitsToDecrease--) { + U32 highPos = rankLast[nBitsToDecrease]; + U32 lowPos = rankLast[nBitsToDecrease-1]; + if (highPos == noSymbol) continue; + if (lowPos == noSymbol) break; + { U32 const highTotal = huffNode[highPos].count; + U32 const lowTotal = 2 * huffNode[lowPos].count; + if (highTotal <= lowTotal) break; + } } + /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
*/ + while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol)) /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */ + nBitsToDecrease ++; + totalCost -= 1 << (nBitsToDecrease-1); + if (rankLast[nBitsToDecrease-1] == noSymbol) + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */ + huffNode[rankLast[nBitsToDecrease]].nbBits ++; + if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol; + else { + rankLast[nBitsToDecrease]--; + if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease) + rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */ + } } /* while (totalCost > 0) */ + + while (totalCost < 0) { /* Sometimes, cost correction overshoot */ + if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + while (huffNode[n].nbBits == maxNbBits) n--; + huffNode[n+1].nbBits--; + rankLast[1] = n+1; + totalCost++; + continue; + } + huffNode[ rankLast[1] + 1 ].nbBits--; + rankLast[1]++; + totalCost ++; + } } } /* there are several too large elements (at least >= 2) */ + + return maxNbBits; +} + + +typedef struct { + U32 base; + U32 current; +} rankPos; + +static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue) +{ + rankPos rank[32]; + U32 n; + + memset(rank, 0, sizeof(rank)); + for (n=0; n<=maxSymbolValue; n++) { + U32 r = BIT_highbit32(count[n] + 1); + rank[r].base ++; + } + for (n=30; n>0; n--) rank[n-1].base += rank[n].base; + for (n=0; n<32; n++) rank[n].current = rank[n].base; + for (n=0; n<=maxSymbolValue; n++) { + U32 const c = count[n]; + U32 const r = BIT_highbit32(c+1) + 1; + U32 pos = rank[r].current++; + while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) huffNode[pos]=huffNode[pos-1], pos--; + huffNode[pos].count = c; + huffNode[pos].byte = (BYTE)n; + } +} + + +/** HUF_buildCTable_wksp() : + * Same as HUF_buildCTable(), but using externally allocated scratch buffer. + * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned. 
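+ *  A minimal calling sketch, assuming `count[]` / `maxSymbolValue` come from a prior counting
+ *  pass (e.g. FSE_count_simple()) :
+ *
+ *      HUF_CElt tree[HUF_SYMBOLVALUE_MAX + 1];
+ *      U32 wksp[1024];                            // 4-byte aligned, 1024 unsigned as required above
+ *      size_t const maxBits = HUF_buildCTable_wksp(tree, count, maxSymbolValue,
+ *                                                  HUF_TABLELOG_DEFAULT, wksp, sizeof(wksp));
+ *      // on success, maxBits is the largest code length used (<= HUF_TABLELOG_DEFAULT) ;
+ *      // an error code (to be tested with HUF_isError()) otherwise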
+ */ +#define STARTNODE (HUF_SYMBOLVALUE_MAX+1) +typedef nodeElt huffNodeTable[2*HUF_SYMBOLVALUE_MAX+1 +1]; +size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize) +{ + nodeElt* const huffNode0 = (nodeElt*)workSpace; + nodeElt* const huffNode = huffNode0+1; + U32 n, nonNullRank; + int lowS, lowN; + U16 nodeNb = STARTNODE; + U32 nodeRoot; + + /* safety checks */ + if (wkspSize < sizeof(huffNodeTable)) return ERROR(GENERIC); /* workSpace is not large enough */ + if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT; + if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(GENERIC); + memset(huffNode0, 0, sizeof(huffNodeTable)); + + /* sort, decreasing order */ + HUF_sort(huffNode, count, maxSymbolValue); + + /* init for parents */ + nonNullRank = maxSymbolValue; + while(huffNode[nonNullRank].count == 0) nonNullRank--; + lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb; + huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count; + huffNode[lowS].parent = huffNode[lowS-1].parent = nodeNb; + nodeNb++; lowS-=2; + for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30); + huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */ + + /* create parents */ + while (nodeNb <= nodeRoot) { + U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; + U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; + huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count; + huffNode[n1].parent = huffNode[n2].parent = nodeNb; + nodeNb++; + } + + /* distribute weights (unlimited tree height) */ + huffNode[nodeRoot].nbBits = 0; + for (n=nodeRoot-1; n>=STARTNODE; n--) + huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; + for (n=0; n<=nonNullRank; n++) + huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; + + /* enforce maxTableLog */ + maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits); + + /* fill result into tree (val, nbBits) */ + { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; + U16 valPerRank[HUF_TABLELOG_MAX+1] = {0}; + if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ + for (n=0; n<=nonNullRank; n++) + nbPerRank[huffNode[n].nbBits]++; + /* determine stating value per rank */ + { U16 min = 0; + for (n=maxNbBits; n>0; n--) { + valPerRank[n] = min; /* get starting value within each rank */ + min += nbPerRank[n]; + min >>= 1; + } } + for (n=0; n<=maxSymbolValue; n++) + tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */ + for (n=0; n<=maxSymbolValue; n++) + tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */ + } + + return maxNbBits; +} + +/** HUF_buildCTable() : + * Note : count is used before tree is written, so they can safely overlap + */ +size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits) +{ + huffNodeTable nodeTable; + return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable)); +} + +static size_t HUF_estimateCompressedSize(HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) +{ + size_t nbBits = 0; + int s; + for (s = 0; s <= (int)maxSymbolValue; ++s) { + nbBits += CTable[s].nbBits * count[s]; + } + return nbBits >> 3; +} + +static int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { + int bad = 0; + int s; + for (s = 0; s <= (int)maxSymbolValue; ++s) { + bad 
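+        /* note : a previously-built CTable can only be reused on a block if every symbol
+         * that actually occurs there (count[s] != 0) has a code assigned (nbBits != 0);
+         * violations are accumulated branch-free into bad */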
|= (count[s] != 0) & (CTable[s].nbBits == 0); + } + return !bad; +} + +static void HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable) +{ + BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits); +} + +size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); } + +#define HUF_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s)) + +#define HUF_FLUSHBITS_1(stream) \ + if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream) + +#define HUF_FLUSHBITS_2(stream) \ + if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream) + +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) +{ + const BYTE* ip = (const BYTE*) src; + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = ostart + dstSize; + BYTE* op = ostart; + size_t n; + const unsigned fast = (dstSize >= HUF_BLOCKBOUND(srcSize)); + BIT_CStream_t bitC; + + /* init */ + if (dstSize < 8) return 0; /* not enough space to compress */ + { size_t const initErr = BIT_initCStream(&bitC, op, oend-op); + if (HUF_isError(initErr)) return 0; } + + n = srcSize & ~3; /* join to mod 4 */ + switch (srcSize & 3) + { + case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable); + HUF_FLUSHBITS_2(&bitC); + case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable); + HUF_FLUSHBITS_1(&bitC); + case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable); + HUF_FLUSHBITS(&bitC); + case 0 : + default: ; + } + + for (; n>0; n-=4) { /* note : n&3==0 at this stage */ + HUF_encodeSymbol(&bitC, ip[n- 1], CTable); + HUF_FLUSHBITS_1(&bitC); + HUF_encodeSymbol(&bitC, ip[n- 2], CTable); + HUF_FLUSHBITS_2(&bitC); + HUF_encodeSymbol(&bitC, ip[n- 3], CTable); + HUF_FLUSHBITS_1(&bitC); + HUF_encodeSymbol(&bitC, ip[n- 4], CTable); + HUF_FLUSHBITS(&bitC); + } + + return BIT_closeCStream(&bitC); +} + + +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) +{ + size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */ + const BYTE* ip = (const BYTE*) src; + const BYTE* const iend = ip + srcSize; + BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + BYTE* op = ostart; + + if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */ + if (srcSize < 12) return 0; /* no saving possible : too small input */ + op += 6; /* jumpTable */ + + { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); + if (cSize==0) return 0; + MEM_writeLE16(ostart, (U16)cSize); + op += cSize; + } + + ip += segmentSize; + { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); + if (cSize==0) return 0; + MEM_writeLE16(ostart+2, (U16)cSize); + op += cSize; + } + + ip += segmentSize; + { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); + if (cSize==0) return 0; + MEM_writeLE16(ostart+4, (U16)cSize); + op += cSize; + } + + ip += segmentSize; + { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, iend-ip, CTable) ); + if (cSize==0) return 0; + op += cSize; + } + + return op-ostart; +} + + +static size_t HUF_compressCTable_internal( + BYTE* const ostart, BYTE* op, BYTE* const oend, + const void* src, size_t srcSize, + unsigned singleStream, const HUF_CElt* CTable) +{ + size_t const cSize = singleStream ? 
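+        /* note : the 4-stream variant cuts the input into 4 segments compressed independently,
+         * preceded by a 6-byte jump table (three little-endian 16-bit segment sizes); the small
+         * overhead lets decoders process 4 streams in parallel. The single-stream path skips
+         * that overhead, which tends to win on small inputs. */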
+ HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) : + HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable); + if (HUF_isError(cSize)) { return cSize; } + if (cSize==0) { return 0; } /* uncompressible */ + op += cSize; + /* check compressibility */ + if ((size_t)(op-ostart) >= srcSize-1) { return 0; } + return op-ostart; +} + + +/* `workSpace` must a table of at least 1024 unsigned */ +static size_t HUF_compress_internal ( + void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + unsigned singleStream, + void* workSpace, size_t wkspSize, + HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat) +{ + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = ostart + dstSize; + BYTE* op = ostart; + + U32* count; + size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1); + HUF_CElt* CTable; + size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1); + + /* checks & inits */ + if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize) return ERROR(GENERIC); + if (!srcSize) return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */ + if (!dstSize) return 0; /* cannot fit within dst budget */ + if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */ + if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); + if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX; + if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT; + + count = (U32*)workSpace; + workSpace = (BYTE*)workSpace + countSize; + wkspSize -= countSize; + CTable = (HUF_CElt*)workSpace; + workSpace = (BYTE*)workSpace + CTableSize; + wkspSize -= CTableSize; + + /* Heuristic : If we don't need to check the validity of the old table use the old table for small inputs */ + if (preferRepeat && repeat && *repeat == HUF_repeat_valid) { + return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); + } + + /* Scan input and build symbol stats */ + { CHECK_V_F(largest, FSE_count_wksp (count, &maxSymbolValue, (const BYTE*)src, srcSize, (U32*)workSpace) ); + if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ + if (largest <= (srcSize >> 7)+1) return 0; /* Fast heuristic : not compressible enough */ + } + + /* Check validity of previous table */ + if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) { + *repeat = HUF_repeat_none; + } + /* Heuristic : use existing table for small inputs */ + if (preferRepeat && repeat && *repeat != HUF_repeat_none) { + return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); + } + + /* Build Huffman Tree */ + huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); + { CHECK_V_F(maxBits, HUF_buildCTable_wksp (CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize) ); + huffLog = (U32)maxBits; + /* Zero the unused symbols so we can check it for validity */ + memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt)); + } + + /* Write table description header */ + { CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, CTable, maxSymbolValue, huffLog) ); + /* Check if using the previous table will be beneficial */ + if (repeat && *repeat != HUF_repeat_none) { + size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue); + size_t const newSize = HUF_estimateCompressedSize(CTable, count, maxSymbolValue); + if (oldSize <= 
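+            /* keep the previous table when re-sending a table header (hSize bytes) plus the
+             * new table's estimated payload would not beat the old table's estimate, or when
+             * the header alone is already too large relative to srcSize */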
hSize + newSize || hSize + 12 >= srcSize) { + return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); + } + } + /* Use the new table */ + if (hSize + 12ul >= srcSize) { return 0; } + op += hSize; + if (repeat) { *repeat = HUF_repeat_none; } + if (oldHufTable) { memcpy(oldHufTable, CTable, CTableSize); } /* Save the new table */ + } + return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, CTable); +} + + +size_t HUF_compress1X_wksp (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + void* workSpace, size_t wkspSize) +{ + return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0); +} + +size_t HUF_compress1X_repeat (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + void* workSpace, size_t wkspSize, + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat) +{ + return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat, preferRepeat); +} + +size_t HUF_compress1X (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog) +{ + unsigned workSpace[1024]; + return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); +} + +size_t HUF_compress4X_wksp (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + void* workSpace, size_t wkspSize) +{ + return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0); +} + +size_t HUF_compress4X_repeat (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + void* workSpace, size_t wkspSize, + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat) +{ + return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, repeat, preferRepeat); +} + +size_t HUF_compress2 (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog) +{ + unsigned workSpace[1024]; + return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); +} + +size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize) +{ + return HUF_compress2(dst, maxDstSize, src, (U32)srcSize, 255, HUF_TABLELOG_DEFAULT); +} diff --git a/thirdparty/zstd/compress/zstd_compress.c b/thirdparty/zstd/compress/zstd_compress.c new file mode 100644 index 0000000000..c08b315dab --- /dev/null +++ b/thirdparty/zstd/compress/zstd_compress.c @@ -0,0 +1,3598 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ + + +/*-************************************* +* Dependencies +***************************************/ +#include <string.h> /* memset */ +#include "mem.h" +#define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */ +#include "fse.h" +#define HUF_STATIC_LINKING_ONLY +#include "huf.h" +#include "zstd_internal.h" /* includes zstd.h */ + + +/*-************************************* +* Debug +***************************************/ +#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1) +# include <assert.h> +#else +# define assert(condition) ((void)0) +#endif + +#define ZSTD_STATIC_ASSERT(c) { enum { ZSTD_static_assert = 1/(int)(!!(c)) }; } + +#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2) +# include <stdio.h> + static unsigned g_debugLevel = ZSTD_DEBUG; +# define DEBUGLOG(l, ...) if (l<=g_debugLevel) { fprintf(stderr, __FILE__ ": "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, " \n"); } +#else +# define DEBUGLOG(l, ...) {} /* disabled */ +#endif + + +/*-************************************* +* Constants +***************************************/ +static const U32 g_searchStrength = 8; /* control skip over incompressible data */ +#define HASH_READ_SIZE 8 +typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e; + +/* entropy tables always have same size */ +static size_t const hufCTable_size = HUF_CTABLE_SIZE(255); +static size_t const litlengthCTable_size = FSE_CTABLE_SIZE(LLFSELog, MaxLL); +static size_t const offcodeCTable_size = FSE_CTABLE_SIZE(OffFSELog, MaxOff); +static size_t const matchlengthCTable_size = FSE_CTABLE_SIZE(MLFSELog, MaxML); +static size_t const entropyScratchSpace_size = HUF_WORKSPACE_SIZE; + + +/*-************************************* +* Helper functions +***************************************/ +size_t ZSTD_compressBound(size_t srcSize) { + size_t const lowLimit = 256 KB; + size_t const margin = (srcSize < lowLimit) ? 
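+                    /* worst-case bound : srcSize + srcSize/256, plus a small margin for inputs
+                     * under 256 KB; (lowLimit-srcSize) >> 12 shrinks linearly from 64 bytes at
+                     * srcSize==0 down to 0.
+                     * e.g. srcSize = 100 KB -> 102400 + 400 + 39 = 102839 bytes */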
(lowLimit-srcSize) >> 12 : 0; /* from 64 to 0 */ + return srcSize + (srcSize >> 8) + margin; +} + + +/*-************************************* +* Sequence storage +***************************************/ +static void ZSTD_resetSeqStore(seqStore_t* ssPtr) +{ + ssPtr->lit = ssPtr->litStart; + ssPtr->sequences = ssPtr->sequencesStart; + ssPtr->longLengthID = 0; +} + + +/*-************************************* +* Context memory management +***************************************/ +struct ZSTD_CCtx_s { + const BYTE* nextSrc; /* next block here to continue on current prefix */ + const BYTE* base; /* All regular indexes relative to this position */ + const BYTE* dictBase; /* extDict indexes relative to this position */ + U32 dictLimit; /* below that point, need extDict */ + U32 lowLimit; /* below that point, no more data */ + U32 nextToUpdate; /* index from which to continue dictionary update */ + U32 nextToUpdate3; /* index from which to continue dictionary update */ + U32 hashLog3; /* dispatch table : larger == faster, more memory */ + U32 loadedDictEnd; /* index of end of dictionary */ + U32 forceWindow; /* force back-references to respect limit of 1<<wLog, even for dictionary */ + U32 forceRawDict; /* Force loading dictionary in "content-only" mode (no header analysis) */ + ZSTD_compressionStage_e stage; + U32 rep[ZSTD_REP_NUM]; + U32 repToConfirm[ZSTD_REP_NUM]; + U32 dictID; + ZSTD_parameters params; + void* workSpace; + size_t workSpaceSize; + size_t blockSize; + U64 frameContentSize; + U64 consumedSrcSize; + XXH64_state_t xxhState; + ZSTD_customMem customMem; + + seqStore_t seqStore; /* sequences storage ptrs */ + U32* hashTable; + U32* hashTable3; + U32* chainTable; + HUF_repeat hufCTable_repeatMode; + HUF_CElt* hufCTable; + U32 fseCTables_ready; + FSE_CTable* offcodeCTable; + FSE_CTable* matchlengthCTable; + FSE_CTable* litlengthCTable; + unsigned* entropyScratchSpace; +}; + +ZSTD_CCtx* ZSTD_createCCtx(void) +{ + return ZSTD_createCCtx_advanced(defaultCustomMem); +} + +ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem) +{ + ZSTD_CCtx* cctx; + + if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem; + if (!customMem.customAlloc || !customMem.customFree) return NULL; + + cctx = (ZSTD_CCtx*) ZSTD_malloc(sizeof(ZSTD_CCtx), customMem); + if (!cctx) return NULL; + memset(cctx, 0, sizeof(ZSTD_CCtx)); + cctx->customMem = customMem; + return cctx; +} + +size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx) +{ + if (cctx==NULL) return 0; /* support free on NULL */ + ZSTD_free(cctx->workSpace, cctx->customMem); + ZSTD_free(cctx, cctx->customMem); + return 0; /* reserved as a potential error code in the future */ +} + +size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx) +{ + if (cctx==NULL) return 0; /* support sizeof on NULL */ + return sizeof(*cctx) + cctx->workSpaceSize; +} + +size_t ZSTD_setCCtxParameter(ZSTD_CCtx* cctx, ZSTD_CCtxParameter param, unsigned value) +{ + switch(param) + { + case ZSTD_p_forceWindow : cctx->forceWindow = value>0; cctx->loadedDictEnd = 0; return 0; + case ZSTD_p_forceRawDict : cctx->forceRawDict = value>0; return 0; + default: return ERROR(parameter_unknown); + } +} + +const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) /* hidden interface */ +{ + return &(ctx->seqStore); +} + +static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx* cctx) +{ + return cctx->params; +} + + +/** ZSTD_checkParams() : + ensure param values remain within authorized range. 
+ @return : 0, or an error code if one value is beyond authorized range */ +size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) +{ +# define CLAMPCHECK(val,min,max) { if ((val<min) | (val>max)) return ERROR(compressionParameter_unsupported); } + CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX); + CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX); + CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX); + CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX); + CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX); + CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX); + if ((U32)(cParams.strategy) > (U32)ZSTD_btopt2) return ERROR(compressionParameter_unsupported); + return 0; +} + + +/** ZSTD_cycleLog() : + * condition for correct operation : hashLog > 1 */ +static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat) +{ + U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2); + return hashLog - btScale; +} + +/** ZSTD_adjustCParams() : + optimize `cPar` for a given input (`srcSize` and `dictSize`). + mostly downsizing to reduce memory consumption and initialization. + Both `srcSize` and `dictSize` are optional (use 0 if unknown), + but if both are 0, no optimization can be done. + Note : cPar is considered validated at this stage. Use ZSTD_checkParams() to ensure that. */ +ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize) +{ + if (srcSize+dictSize == 0) return cPar; /* no size information available : no adjustment */ + + /* resize params, to use less memory when necessary */ + { U32 const minSrcSize = (srcSize==0) ? 500 : 0; + U64 const rSize = srcSize + dictSize + minSrcSize; + if (rSize < ((U64)1<<ZSTD_WINDOWLOG_MAX)) { + U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1); + if (cPar.windowLog > srcLog) cPar.windowLog = srcLog; + } } + if (cPar.hashLog > cPar.windowLog) cPar.hashLog = cPar.windowLog; + { U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); + if (cycleLog > cPar.windowLog) cPar.chainLog -= (cycleLog - cPar.windowLog); + } + + if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */ + + return cPar; +} + + +size_t ZSTD_estimateCCtxSize(ZSTD_compressionParameters cParams) +{ + size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << cParams.windowLog); + U32 const divider = (cParams.searchLength==3) ? 3 : 4; + size_t const maxNbSeq = blockSize / divider; + size_t const tokenSpace = blockSize + 11*maxNbSeq; + + size_t const chainSize = (cParams.strategy == ZSTD_fast) ? 0 : (1 << cParams.chainLog); + size_t const hSize = ((size_t)1) << cParams.hashLog; + U32 const hashLog3 = (cParams.searchLength>3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog); + size_t const h3Size = ((size_t)1) << hashLog3; + size_t const entropySpace = hufCTable_size + litlengthCTable_size + + offcodeCTable_size + matchlengthCTable_size + + entropyScratchSpace_size; + size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); + + size_t const optSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32) + + (ZSTD_OPT_NUM+1)*(sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t)); + size_t const neededSpace = entropySpace + tableSpace + tokenSpace + + (((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btopt2)) ? 
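+                            /* only the optimal-parser strategies (btopt, btopt2) need the extra
+                             * match/price tables accounted for in optSpace */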
optSpace : 0); + + return sizeof(ZSTD_CCtx) + neededSpace; +} + + +static U32 ZSTD_equivalentParams(ZSTD_parameters param1, ZSTD_parameters param2) +{ + return (param1.cParams.hashLog == param2.cParams.hashLog) + & (param1.cParams.chainLog == param2.cParams.chainLog) + & (param1.cParams.strategy == param2.cParams.strategy) + & ((param1.cParams.searchLength==3) == (param2.cParams.searchLength==3)); +} + +/*! ZSTD_continueCCtx() : + reuse CCtx without reset (note : requires no dictionary) */ +static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_parameters params, U64 frameContentSize) +{ + U32 const end = (U32)(cctx->nextSrc - cctx->base); + cctx->params = params; + cctx->frameContentSize = frameContentSize; + cctx->consumedSrcSize = 0; + cctx->lowLimit = end; + cctx->dictLimit = end; + cctx->nextToUpdate = end+1; + cctx->stage = ZSTDcs_init; + cctx->dictID = 0; + cctx->loadedDictEnd = 0; + { int i; for (i=0; i<ZSTD_REP_NUM; i++) cctx->rep[i] = repStartValue[i]; } + cctx->seqStore.litLengthSum = 0; /* force reset of btopt stats */ + XXH64_reset(&cctx->xxhState, 0); + return 0; +} + +typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e; + +/*! ZSTD_resetCCtx_internal() : + note : `params` must be validated */ +static size_t ZSTD_resetCCtx_internal (ZSTD_CCtx* zc, + ZSTD_parameters params, U64 frameContentSize, + ZSTD_compResetPolicy_e const crp) +{ + if (crp == ZSTDcrp_continue) + if (ZSTD_equivalentParams(params, zc->params)) { + zc->fseCTables_ready = 0; + zc->hufCTable_repeatMode = HUF_repeat_none; + return ZSTD_continueCCtx(zc, params, frameContentSize); + } + + { size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog); + U32 const divider = (params.cParams.searchLength==3) ? 3 : 4; + size_t const maxNbSeq = blockSize / divider; + size_t const tokenSpace = blockSize + 11*maxNbSeq; + size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? 0 : (1 << params.cParams.chainLog); + size_t const hSize = ((size_t)1) << params.cParams.hashLog; + U32 const hashLog3 = (params.cParams.searchLength>3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog); + size_t const h3Size = ((size_t)1) << hashLog3; + size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); + void* ptr; + + /* Check if workSpace is large enough, alloc a new one if needed */ + { size_t const entropySpace = hufCTable_size + litlengthCTable_size + + offcodeCTable_size + matchlengthCTable_size + + entropyScratchSpace_size; + size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits)) * sizeof(U32) + + (ZSTD_OPT_NUM+1) * (sizeof(ZSTD_match_t)+sizeof(ZSTD_optimal_t)); + size_t const optSpace = ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) ? 
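+                              /* same rule as in ZSTD_estimateCCtxSize() : opt-parser tables are
+                               * budgeted only for btopt / btopt2. The workspace is then carved up
+                               * in a fixed order (entropy tables, opt stats, hash/chain tables,
+                               * sequence storage) and only reallocated when the needed size grows. */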
optPotentialSpace : 0; + size_t const neededSpace = entropySpace + optSpace + tableSpace + tokenSpace; + if (zc->workSpaceSize < neededSpace) { + zc->workSpaceSize = 0; + ZSTD_free(zc->workSpace, zc->customMem); + zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem); + if (zc->workSpace == NULL) return ERROR(memory_allocation); + zc->workSpaceSize = neededSpace; + ptr = zc->workSpace; + + /* entropy space */ + zc->hufCTable = (HUF_CElt*)ptr; + ptr = (char*)zc->hufCTable + hufCTable_size; /* note : HUF_CElt* is incomplete type, size is estimated via macro */ + zc->offcodeCTable = (FSE_CTable*) ptr; + ptr = (char*)ptr + offcodeCTable_size; + zc->matchlengthCTable = (FSE_CTable*) ptr; + ptr = (char*)ptr + matchlengthCTable_size; + zc->litlengthCTable = (FSE_CTable*) ptr; + ptr = (char*)ptr + litlengthCTable_size; + assert(((size_t)ptr & 3) == 0); /* ensure correct alignment */ + zc->entropyScratchSpace = (unsigned*) ptr; + } } + + /* init params */ + zc->params = params; + zc->blockSize = blockSize; + zc->frameContentSize = frameContentSize; + zc->consumedSrcSize = 0; + + XXH64_reset(&zc->xxhState, 0); + zc->stage = ZSTDcs_init; + zc->dictID = 0; + zc->loadedDictEnd = 0; + zc->fseCTables_ready = 0; + zc->hufCTable_repeatMode = HUF_repeat_none; + zc->nextToUpdate = 1; + zc->nextSrc = NULL; + zc->base = NULL; + zc->dictBase = NULL; + zc->dictLimit = 0; + zc->lowLimit = 0; + { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = repStartValue[i]; } + zc->hashLog3 = hashLog3; + zc->seqStore.litLengthSum = 0; + + /* ensure entropy tables are close together at the beginning */ + assert((void*)zc->hufCTable == zc->workSpace); + assert((char*)zc->offcodeCTable == (char*)zc->hufCTable + hufCTable_size); + assert((char*)zc->matchlengthCTable == (char*)zc->offcodeCTable + offcodeCTable_size); + assert((char*)zc->litlengthCTable == (char*)zc->matchlengthCTable + matchlengthCTable_size); + assert((char*)zc->entropyScratchSpace == (char*)zc->litlengthCTable + litlengthCTable_size); + ptr = (char*)zc->entropyScratchSpace + entropyScratchSpace_size; + + /* opt parser space */ + if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) { + assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */ + zc->seqStore.litFreq = (U32*)ptr; + zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1<<Litbits); + zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL+1); + zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML+1); + ptr = zc->seqStore.offCodeFreq + (MaxOff+1); + zc->seqStore.matchTable = (ZSTD_match_t*)ptr; + ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM+1; + zc->seqStore.priceTable = (ZSTD_optimal_t*)ptr; + ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM+1; + } + + /* table Space */ + if (crp!=ZSTDcrp_noMemset) memset(ptr, 0, tableSpace); /* reset tables only */ + assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */ + zc->hashTable = (U32*)(ptr); + zc->chainTable = zc->hashTable + hSize; + zc->hashTable3 = zc->chainTable + chainSize; + ptr = zc->hashTable3 + h3Size; + + /* sequences storage */ + zc->seqStore.sequencesStart = (seqDef*)ptr; + ptr = zc->seqStore.sequencesStart + maxNbSeq; + zc->seqStore.llCode = (BYTE*) ptr; + zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq; + zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq; + zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq; + + return 0; + } +} + +/* ZSTD_invalidateRepCodes() : + * ensures next compression will not use repcodes from previous block. 
+ * Note : only works with regular variant; + * do not use with extDict variant ! */ +void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) { + int i; + for (i=0; i<ZSTD_REP_NUM; i++) cctx->rep[i] = 0; +} + + +/*! ZSTD_copyCCtx_internal() : + * Duplicate an existing context `srcCCtx` into another one `dstCCtx`. + * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). + * pledgedSrcSize=0 means "empty" if fParams.contentSizeFlag=1 + * @return : 0, or an error code */ +size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, + ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize) +{ + if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong); + + memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem)); + { ZSTD_parameters params = srcCCtx->params; + params.fParams = fParams; + DEBUGLOG(5, "ZSTD_resetCCtx_internal : dictIDFlag : %u \n", !fParams.noDictIDFlag); + ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset); + } + + /* copy tables */ + { size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog); + size_t const hSize = (size_t)1 << srcCCtx->params.cParams.hashLog; + size_t const h3Size = (size_t)1 << srcCCtx->hashLog3; + size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); + assert((U32*)dstCCtx->chainTable == (U32*)dstCCtx->hashTable + hSize); /* chainTable must follow hashTable */ + assert((U32*)dstCCtx->hashTable3 == (U32*)dstCCtx->chainTable + chainSize); + memcpy(dstCCtx->hashTable, srcCCtx->hashTable, tableSpace); /* presumes all tables follow each other */ + } + + /* copy dictionary offsets */ + dstCCtx->nextToUpdate = srcCCtx->nextToUpdate; + dstCCtx->nextToUpdate3= srcCCtx->nextToUpdate3; + dstCCtx->nextSrc = srcCCtx->nextSrc; + dstCCtx->base = srcCCtx->base; + dstCCtx->dictBase = srcCCtx->dictBase; + dstCCtx->dictLimit = srcCCtx->dictLimit; + dstCCtx->lowLimit = srcCCtx->lowLimit; + dstCCtx->loadedDictEnd= srcCCtx->loadedDictEnd; + dstCCtx->dictID = srcCCtx->dictID; + + /* copy entropy tables */ + dstCCtx->fseCTables_ready = srcCCtx->fseCTables_ready; + if (srcCCtx->fseCTables_ready) { + memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, litlengthCTable_size); + memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, matchlengthCTable_size); + memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, offcodeCTable_size); + } + dstCCtx->hufCTable_repeatMode = srcCCtx->hufCTable_repeatMode; + if (srcCCtx->hufCTable_repeatMode) { + memcpy(dstCCtx->hufCTable, srcCCtx->hufCTable, hufCTable_size); + } + + return 0; +} + +/*! ZSTD_copyCCtx() : + * Duplicate an existing context `srcCCtx` into another one `dstCCtx`. + * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). + * pledgedSrcSize==0 means "unknown". +* @return : 0, or an error code */ +size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize) +{ + ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; + fParams.contentSizeFlag = pledgedSrcSize>0; + + return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx, fParams, pledgedSrcSize); +} + + +/*! 
ZSTD_reduceTable() : + * reduce table indexes by `reducerValue` */ +static void ZSTD_reduceTable (U32* const table, U32 const size, U32 const reducerValue) +{ + U32 u; + for (u=0 ; u < size ; u++) { + if (table[u] < reducerValue) table[u] = 0; + else table[u] -= reducerValue; + } +} + +/*! ZSTD_reduceIndex() : +* rescale all indexes to avoid future overflow (indexes are U32) */ +static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue) +{ + { U32 const hSize = 1 << zc->params.cParams.hashLog; + ZSTD_reduceTable(zc->hashTable, hSize, reducerValue); } + + { U32 const chainSize = (zc->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->params.cParams.chainLog); + ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue); } + + { U32 const h3Size = (zc->hashLog3) ? 1 << zc->hashLog3 : 0; + ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue); } +} + + +/*-******************************************************* +* Block entropic compression +*********************************************************/ + +/* See doc/zstd_compression_format.md for detailed format description */ + +size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall); + memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize); + MEM_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw); + return ZSTD_blockHeaderSize+srcSize; +} + + +static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + BYTE* const ostart = (BYTE* const)dst; + U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); + + if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall); + + switch(flSize) + { + case 1: /* 2 - 1 - 5 */ + ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3)); + break; + case 2: /* 2 - 2 - 12 */ + MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4))); + break; + default: /*note : should not be necessary : flSize is within {1,2,3} */ + case 3: /* 2 - 2 - 20 */ + MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4))); + break; + } + + memcpy(ostart + flSize, src, srcSize); + return srcSize + flSize; +} + +static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + BYTE* const ostart = (BYTE* const)dst; + U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); + + (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */ + + switch(flSize) + { + case 1: /* 2 - 1 - 5 */ + ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3)); + break; + case 2: /* 2 - 2 - 12 */ + MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4))); + break; + default: /*note : should not be necessary : flSize is necessarily within {1,2,3} */ + case 3: /* 2 - 2 - 20 */ + MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4))); + break; + } + + ostart[flSize] = *(const BYTE*)src; + return flSize+1; +} + + +static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; } + +static size_t ZSTD_compressLiterals (ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + size_t const minGain = ZSTD_minGain(srcSize); + size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); + BYTE* const ostart = (BYTE*)dst; + U32 singleStream = srcSize < 256; + symbolEncodingType_e hType = set_compressed; + size_t cLitSize; + + + /* small ? 
don't even attempt compression (speed opt) */ +# define LITERAL_NOENTROPY 63 + { size_t const minLitSize = zc->hufCTable_repeatMode == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY; + if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + } + + if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall); /* not enough space for compression */ + { HUF_repeat repeat = zc->hufCTable_repeatMode; + int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0; + if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1; + cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, + zc->entropyScratchSpace, entropyScratchSpace_size, zc->hufCTable, &repeat, preferRepeat) + : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, + zc->entropyScratchSpace, entropyScratchSpace_size, zc->hufCTable, &repeat, preferRepeat); + if (repeat != HUF_repeat_none) { hType = set_repeat; } /* reused the existing table */ + else { zc->hufCTable_repeatMode = HUF_repeat_check; } /* now have a table to reuse */ + } + + if ((cLitSize==0) | (cLitSize >= srcSize - minGain)) { + zc->hufCTable_repeatMode = HUF_repeat_none; + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + } + if (cLitSize==1) { + zc->hufCTable_repeatMode = HUF_repeat_none; + return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); + } + + /* Build header */ + switch(lhSize) + { + case 3: /* 2 - 2 - 10 - 10 */ + { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14); + MEM_writeLE24(ostart, lhc); + break; + } + case 4: /* 2 - 2 - 14 - 14 */ + { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18); + MEM_writeLE32(ostart, lhc); + break; + } + default: /* should not be necessary, lhSize is only {3,4,5} */ + case 5: /* 2 - 2 - 18 - 18 */ + { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22); + MEM_writeLE32(ostart, lhc); + ostart[4] = (BYTE)(cLitSize >> 10); + break; + } + } + return lhSize+cLitSize; +} + +static const BYTE LL_Code[64] = { 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24 }; + +static const BYTE ML_Code[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 }; + + +void ZSTD_seqToCodes(const seqStore_t* seqStorePtr) +{ + BYTE const LL_deltaCode = 19; + BYTE const ML_deltaCode = 36; + const seqDef* const sequences = seqStorePtr->sequencesStart; + BYTE* const llCodeTable = seqStorePtr->llCode; + BYTE* const ofCodeTable = seqStorePtr->ofCode; + BYTE* const mlCodeTable = seqStorePtr->mlCode; + U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + U32 u; + for (u=0; u<nbSeq; u++) { + U32 const llv = sequences[u].litLength; + U32 const mlv = sequences[u].matchLength; + llCodeTable[u] = (llv> 63) ? 
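+                          /* lengths are bucketed into a small code alphabet so FSE only encodes
+                           * the bucket id; the remaining low bits are written raw into the
+                           * bitstream later. Values <= 63 (literal lengths) / <= 127 (match
+                           * lengths) go through the lookup tables above, larger values fall back
+                           * to highbit32 + delta. */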
(BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv]; + ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset); + mlCodeTable[u] = (mlv>127) ? (BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv]; + } + if (seqStorePtr->longLengthID==1) + llCodeTable[seqStorePtr->longLengthPos] = MaxLL; + if (seqStorePtr->longLengthID==2) + mlCodeTable[seqStorePtr->longLengthPos] = MaxML; +} + +MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + size_t srcSize) +{ + const int longOffsets = zc->params.cParams.windowLog > STREAM_ACCUMULATOR_MIN; + const seqStore_t* seqStorePtr = &(zc->seqStore); + U32 count[MaxSeq+1]; + S16 norm[MaxSeq+1]; + FSE_CTable* CTable_LitLength = zc->litlengthCTable; + FSE_CTable* CTable_OffsetBits = zc->offcodeCTable; + FSE_CTable* CTable_MatchLength = zc->matchlengthCTable; + U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */ + const seqDef* const sequences = seqStorePtr->sequencesStart; + const BYTE* const ofCodeTable = seqStorePtr->ofCode; + const BYTE* const llCodeTable = seqStorePtr->llCode; + const BYTE* const mlCodeTable = seqStorePtr->mlCode; + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = ostart + dstCapacity; + BYTE* op = ostart; + size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; + BYTE* seqHead; + BYTE scratchBuffer[1<<MAX(MLFSELog,LLFSELog)]; + + /* Compress literals */ + { const BYTE* const literals = seqStorePtr->litStart; + size_t const litSize = seqStorePtr->lit - literals; + size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize); + if (ZSTD_isError(cSize)) return cSize; + op += cSize; + } + + /* Sequences Header */ + if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */) return ERROR(dstSize_tooSmall); + if (nbSeq < 0x7F) *op++ = (BYTE)nbSeq; + else if (nbSeq < LONGNBSEQ) op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2; + else op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3; + if (nbSeq==0) goto _check_compressibility; + + /* seqHead : flags for FSE encoding type */ + seqHead = op++; + +#define MIN_SEQ_FOR_DYNAMIC_FSE 64 +#define MAX_SEQ_FOR_STATIC_FSE 1000 + + /* convert length/distances into codes */ + ZSTD_seqToCodes(seqStorePtr); + + /* CTable for Literal Lengths */ + { U32 max = MaxLL; + size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, zc->entropyScratchSpace); + if ((mostFrequent == nbSeq) && (nbSeq > 2)) { + *op++ = llCodeTable[0]; + FSE_buildCTable_rle(CTable_LitLength, (BYTE)max); + LLtype = set_rle; + } else if ((zc->fseCTables_ready) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { + LLtype = set_repeat; + } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog-1)))) { + FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, scratchBuffer, sizeof(scratchBuffer)); + LLtype = set_basic; + } else { + size_t nbSeq_1 = nbSeq; + const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max); + if (count[llCodeTable[nbSeq-1]]>1) { count[llCodeTable[nbSeq-1]]--; nbSeq_1--; } + FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max); + { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */ + if (FSE_isError(NCountSize)) return NCountSize; + op += NCountSize; } + FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer)); + LLtype = set_compressed; + } } + + /* CTable for Offsets */ + { U32 max = MaxOff; + size_t const mostFrequent = 
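+          /* as for literal lengths above, each sequence field independently picks one of four
+           * modes : rle (one code repeated), repeat (reuse the previous block's FSE table),
+           * basic (predefined distribution), or compressed (transmit a fresh normalized table) */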
FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, zc->entropyScratchSpace); + if ((mostFrequent == nbSeq) && (nbSeq > 2)) { + *op++ = ofCodeTable[0]; + FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max); + Offtype = set_rle; + } else if ((zc->fseCTables_ready) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { + Offtype = set_repeat; + } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog-1)))) { + FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, scratchBuffer, sizeof(scratchBuffer)); + Offtype = set_basic; + } else { + size_t nbSeq_1 = nbSeq; + const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max); + if (count[ofCodeTable[nbSeq-1]]>1) { count[ofCodeTable[nbSeq-1]]--; nbSeq_1--; } + FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max); + { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */ + if (FSE_isError(NCountSize)) return NCountSize; + op += NCountSize; } + FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer)); + Offtype = set_compressed; + } } + + /* CTable for MatchLengths */ + { U32 max = MaxML; + size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, zc->entropyScratchSpace); + if ((mostFrequent == nbSeq) && (nbSeq > 2)) { + *op++ = *mlCodeTable; + FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max); + MLtype = set_rle; + } else if ((zc->fseCTables_ready) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { + MLtype = set_repeat; + } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog-1)))) { + FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, scratchBuffer, sizeof(scratchBuffer)); + MLtype = set_basic; + } else { + size_t nbSeq_1 = nbSeq; + const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max); + if (count[mlCodeTable[nbSeq-1]]>1) { count[mlCodeTable[nbSeq-1]]--; nbSeq_1--; } + FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max); + { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */ + if (FSE_isError(NCountSize)) return NCountSize; + op += NCountSize; } + FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer)); + MLtype = set_compressed; + } } + + *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2)); + zc->fseCTables_ready = 0; + + /* Encoding Sequences */ + { BIT_CStream_t blockStream; + FSE_CState_t stateMatchLength; + FSE_CState_t stateOffsetBits; + FSE_CState_t stateLitLength; + + CHECK_E(BIT_initCStream(&blockStream, op, oend-op), dstSize_tooSmall); /* not enough space remaining */ + + /* first symbols */ + FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]); + FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]); + FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]); + BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]); + if (MEM_32bits()) BIT_flushBits(&blockStream); + BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]); + if (MEM_32bits()) BIT_flushBits(&blockStream); + if (longOffsets) { + U32 const ofBits = ofCodeTable[nbSeq-1]; + int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); + if (extraBits) { + BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits); + BIT_flushBits(&blockStream); + } + BIT_addBits(&blockStream, 
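+                        /* longOffsets mode : when windowLog can exceed what the bit accumulator
+                         * holds between flushes (STREAM_ACCUMULATOR_MIN), the offset's extra bits
+                         * are split into two writes with a flush in between */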
sequences[nbSeq-1].offset >> extraBits, + ofBits - extraBits); + } else { + BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]); + } + BIT_flushBits(&blockStream); + + { size_t n; + for (n=nbSeq-2 ; n<nbSeq ; n--) { /* intentional underflow */ + BYTE const llCode = llCodeTable[n]; + BYTE const ofCode = ofCodeTable[n]; + BYTE const mlCode = mlCodeTable[n]; + U32 const llBits = LL_bits[llCode]; + U32 const ofBits = ofCode; /* 32b*/ /* 64b*/ + U32 const mlBits = ML_bits[mlCode]; + /* (7)*/ /* (7)*/ + FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */ + FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */ + if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/ + FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */ + if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog))) + BIT_flushBits(&blockStream); /* (7)*/ + BIT_addBits(&blockStream, sequences[n].litLength, llBits); + if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream); + BIT_addBits(&blockStream, sequences[n].matchLength, mlBits); + if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/ + if (longOffsets) { + int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); + if (extraBits) { + BIT_addBits(&blockStream, sequences[n].offset, extraBits); + BIT_flushBits(&blockStream); /* (7)*/ + } + BIT_addBits(&blockStream, sequences[n].offset >> extraBits, + ofBits - extraBits); /* 31 */ + } else { + BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */ + } + BIT_flushBits(&blockStream); /* (7)*/ + } } + + FSE_flushCState(&blockStream, &stateMatchLength); + FSE_flushCState(&blockStream, &stateOffsetBits); + FSE_flushCState(&blockStream, &stateLitLength); + + { size_t const streamSize = BIT_closeCStream(&blockStream); + if (streamSize==0) return ERROR(dstSize_tooSmall); /* not enough space */ + op += streamSize; + } } + + /* check compressibility */ +_check_compressibility: + { size_t const minGain = ZSTD_minGain(srcSize); + size_t const maxCSize = srcSize - minGain; + if ((size_t)(op-ostart) >= maxCSize) { + zc->hufCTable_repeatMode = HUF_repeat_none; + return 0; + } } + + /* confirm repcodes */ + { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = zc->repToConfirm[i]; } + + return op - ostart; +} + +#if 0 /* for debug */ +# define STORESEQ_DEBUG +#include <stdio.h> /* fprintf */ +U32 g_startDebug = 0; +const BYTE* g_start = NULL; +#endif + +/*! ZSTD_storeSeq() : + Store a sequence (literal length, literals, offset code and match length code) into seqStore_t. + `offsetCode` : distance to match, or 0 == repCode. 
+ `matchCode` : matchLength - MINMATCH +*/ +MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t matchCode) +{ +#ifdef STORESEQ_DEBUG + if (g_startDebug) { + const U32 pos = (U32)((const BYTE*)literals - g_start); + if (g_start==NULL) g_start = (const BYTE*)literals; + if ((pos > 1895000) && (pos < 1895300)) + DEBUGLOG(5, "Cpos %6u :%5u literals & match %3u bytes at distance %6u \n", + pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode); + } +#endif + /* copy Literals */ + ZSTD_wildcopy(seqStorePtr->lit, literals, litLength); + seqStorePtr->lit += litLength; + + /* literal Length */ + if (litLength>0xFFFF) { + seqStorePtr->longLengthID = 1; + seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + } + seqStorePtr->sequences[0].litLength = (U16)litLength; + + /* match offset */ + seqStorePtr->sequences[0].offset = offsetCode + 1; + + /* match Length */ + if (matchCode>0xFFFF) { + seqStorePtr->longLengthID = 2; + seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + } + seqStorePtr->sequences[0].matchLength = (U16)matchCode; + + seqStorePtr->sequences++; +} + + +/*-************************************* +* Match length counter +***************************************/ +static unsigned ZSTD_NbCommonBytes (register size_t val) +{ + if (MEM_isLittleEndian()) { + if (MEM_64bits()) { +# if defined(_MSC_VER) && defined(_WIN64) + unsigned long r = 0; + _BitScanForward64( &r, (U64)val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_ctzll((U64)val) >> 3); +# else + static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, + 0, 3, 1, 3, 1, 4, 2, 7, + 0, 2, 3, 6, 1, 5, 3, 5, + 1, 3, 4, 4, 2, 5, 6, 7, + 7, 0, 1, 2, 3, 3, 4, 6, + 2, 6, 5, 5, 3, 4, 5, 6, + 7, 1, 2, 4, 6, 4, 4, 5, + 7, 2, 6, 5, 7, 6, 7, 7 }; + return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; +# endif + } else { /* 32 bits */ +# if defined(_MSC_VER) + unsigned long r=0; + _BitScanForward( &r, (U32)val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_ctz((U32)val) >> 3); +# else + static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, + 3, 2, 2, 1, 3, 2, 0, 1, + 3, 3, 1, 2, 2, 2, 2, 0, + 3, 1, 2, 0, 1, 0, 1, 1 }; + return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; +# endif + } + } else { /* Big Endian CPU */ + if (MEM_64bits()) { +# if defined(_MSC_VER) && defined(_WIN64) + unsigned long r = 0; + _BitScanReverse64( &r, val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_clzll(val) >> 3); +# else + unsigned r; + const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ + if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } + if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } + r += (!val); + return r; +# endif + } else { /* 32 bits */ +# if defined(_MSC_VER) + unsigned long r = 0; + _BitScanReverse( &r, (unsigned long)val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_clz((U32)val) >> 3); +# else + unsigned r; + if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } + r += (!val); + return r; +# endif + } } +} + + +static size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit) +{ + const BYTE* const pStart = pIn; + const BYTE* const pInLoopLimit = pInLimit 
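+                          /* match length is counted one machine word at a time : XOR of the two
+                           * words plus ZSTD_NbCommonBytes() locates the first mismatching byte;
+                           * the loop limit is pulled back by wordsize-1 so reads stay in bounds,
+                           * and the tail is finished 4/2/1 bytes at a time below */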
- (sizeof(size_t)-1); + + while (pIn < pInLoopLimit) { + size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn); + if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; } + pIn += ZSTD_NbCommonBytes(diff); + return (size_t)(pIn - pStart); + } + if (MEM_64bits()) if ((pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; } + if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; } + if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++; + return (size_t)(pIn - pStart); +} + +/** ZSTD_count_2segments() : +* can count match length with `ip` & `match` in 2 different segments. +* convention : on reaching mEnd, match count continue starting from iStart +*/ +static size_t ZSTD_count_2segments(const BYTE* ip, const BYTE* match, const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart) +{ + const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd); + size_t const matchLength = ZSTD_count(ip, match, vEnd); + if (match + matchLength != mEnd) return matchLength; + return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd); +} + + +/*-************************************* +* Hashes +***************************************/ +static const U32 prime3bytes = 506832829U; +static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; } +MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */ + +static const U32 prime4bytes = 2654435761U; +static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; } +static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); } + +static const U64 prime5bytes = 889523592379ULL; +static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; } +static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); } + +static const U64 prime6bytes = 227718039650203ULL; +static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; } +static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); } + +static const U64 prime7bytes = 58295818150454627ULL; +static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; } +static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); } + +static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; +static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; } +static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); } + +static size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls) +{ + switch(mls) + { + default: + case 4: return ZSTD_hash4Ptr(p, hBits); + case 5: return ZSTD_hash5Ptr(p, hBits); + case 6: return ZSTD_hash6Ptr(p, hBits); + case 7: return ZSTD_hash7Ptr(p, hBits); + case 8: return ZSTD_hash8Ptr(p, hBits); + } +} + + +/*-************************************* +* Fast Scan +***************************************/ +static void ZSTD_fillHashTable (ZSTD_CCtx* zc, const void* end, const U32 mls) +{ + U32* const hashTable = zc->hashTable; + U32 const hBits = zc->params.cParams.hashLog; + const BYTE* const base = zc->base; + const BYTE* ip = base + zc->nextToUpdate; + const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; + const size_t fastHashFillStep = 3; + + while(ip <= iend) { + hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base); + ip += 
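+               /* dictionary / segment loading for the fast strategy inserts only every
+                * fastHashFillStep-th (3rd) position into the hash table : cheaper to load,
+                * at the cost of a sparser match index */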
fastHashFillStep; + } +} + + +FORCE_INLINE +void ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx, + const void* src, size_t srcSize, + const U32 mls) +{ + U32* const hashTable = cctx->hashTable; + U32 const hBits = cctx->params.cParams.hashLog; + seqStore_t* seqStorePtr = &(cctx->seqStore); + const BYTE* const base = cctx->base; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const U32 lowestIndex = cctx->dictLimit; + const BYTE* const lowest = base + lowestIndex; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - HASH_READ_SIZE; + U32 offset_1=cctx->rep[0], offset_2=cctx->rep[1]; + U32 offsetSaved = 0; + + /* init */ + ip += (ip==lowest); + { U32 const maxRep = (U32)(ip-lowest); + if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; + } + + /* Main Search Loop */ + while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ + size_t mLength; + size_t const h = ZSTD_hashPtr(ip, hBits, mls); + U32 const current = (U32)(ip-base); + U32 const matchIndex = hashTable[h]; + const BYTE* match = base + matchIndex; + hashTable[h] = current; /* update hash table */ + + if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { + mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; + ip++; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); + } else { + U32 offset; + if ( (matchIndex <= lowestIndex) || (MEM_read32(match) != MEM_read32(ip)) ) { + ip += ((ip-anchor) >> g_searchStrength) + 1; + continue; + } + mLength = ZSTD_count(ip+4, match+4, iend) + 4; + offset = (U32)(ip-match); + while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + offset_2 = offset_1; + offset_1 = offset; + + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + } + + /* match found */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Fill Table */ + hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2; /* here because current+2 could be > iend-8 */ + hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base); + /* check immediate repcode */ + while ( (ip <= ilimit) + && ( (offset_2>0) + & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { + /* store sequence */ + size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; + { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */ + hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip-base); + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH); + ip += rLength; + anchor = ip; + continue; /* faster when present ... (?) */ + } } } + + /* save reps for next block */ + cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved; + cctx->repToConfirm[1] = offset_2 ? 
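+                       /* repcodes are staged in repToConfirm rather than rep[] : they are promoted
+                        * in ZSTD_compressSequences() only once the block is confirmed compressible,
+                        * so emitting a raw block never corrupts repcode history. offsetSaved
+                        * restores a repcode that was zeroed at block start because it reached
+                        * beyond the valid window. */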
offset_2 : offsetSaved; + + /* Last Literals */ + { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } +} + + +static void ZSTD_compressBlock_fast(ZSTD_CCtx* ctx, + const void* src, size_t srcSize) +{ + const U32 mls = ctx->params.cParams.searchLength; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return; + case 5 : + ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return; + case 6 : + ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return; + case 7 : + ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return; + } +} + + +static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx, + const void* src, size_t srcSize, + const U32 mls) +{ + U32* hashTable = ctx->hashTable; + const U32 hBits = ctx->params.cParams.hashLog; + seqStore_t* seqStorePtr = &(ctx->seqStore); + const BYTE* const base = ctx->base; + const BYTE* const dictBase = ctx->dictBase; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const U32 lowestIndex = ctx->lowLimit; + const BYTE* const dictStart = dictBase + lowestIndex; + const U32 dictLimit = ctx->dictLimit; + const BYTE* const lowPrefixPtr = base + dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + U32 offset_1=ctx->rep[0], offset_2=ctx->rep[1]; + + /* Search Loop */ + while (ip < ilimit) { /* < instead of <=, because (ip+1) */ + const size_t h = ZSTD_hashPtr(ip, hBits, mls); + const U32 matchIndex = hashTable[h]; + const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base; + const BYTE* match = matchBase + matchIndex; + const U32 current = (U32)(ip-base); + const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */ + const BYTE* repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* repMatch = repBase + repIndex; + size_t mLength; + hashTable[h] = current; /* update hash table */ + + if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) + && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { + const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend; + mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4; + ip++; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); + } else { + if ( (matchIndex < lowestIndex) || + (MEM_read32(match) != MEM_read32(ip)) ) { + ip += ((ip-anchor) >> g_searchStrength) + 1; + continue; + } + { const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend; + const BYTE* lowMatchPtr = matchIndex < dictLimit ? 
dictStart : lowPrefixPtr; + U32 offset; + mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4; + while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + offset = current - matchIndex; + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + } } + + /* found a match : store it */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Fill Table */ + hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2; + hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base); + /* check immediate repcode */ + while (ip <= ilimit) { + U32 const current2 = (U32)(ip-base); + U32 const repIndex2 = current2 - offset_2; + const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2; + if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { + const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend; + size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4; + U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH); + hashTable[ZSTD_hashPtr(ip, hBits, mls)] = current2; + ip += repLength2; + anchor = ip; + continue; + } + break; + } } } + + /* save reps for next block */ + ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2; + + /* Last Literals */ + { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } +} + + +static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx, + const void* src, size_t srcSize) +{ + U32 const mls = ctx->params.cParams.searchLength; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return; + case 5 : + ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return; + case 6 : + ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return; + case 7 : + ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return; + } +} + + +/*-************************************* +* Double Fast +***************************************/ +static void ZSTD_fillDoubleHashTable (ZSTD_CCtx* cctx, const void* end, const U32 mls) +{ + U32* const hashLarge = cctx->hashTable; + U32 const hBitsL = cctx->params.cParams.hashLog; + U32* const hashSmall = cctx->chainTable; + U32 const hBitsS = cctx->params.cParams.chainLog; + const BYTE* const base = cctx->base; + const BYTE* ip = base + cctx->nextToUpdate; + const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; + const size_t fastHashFillStep = 3; + + while(ip <= iend) { + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base); + hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base); + ip += fastHashFillStep; + } +} + + +FORCE_INLINE +void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx, + const void* src, size_t srcSize, + const U32 mls) +{ + U32* const hashLong = cctx->hashTable; + const U32 hBitsL = cctx->params.cParams.hashLog; + U32* const hashSmall = cctx->chainTable; + const U32 hBitsS = cctx->params.cParams.chainLog; + seqStore_t* seqStorePtr = &(cctx->seqStore); + const BYTE* const base = cctx->base; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const 
BYTE* anchor = istart; + const U32 lowestIndex = cctx->dictLimit; + const BYTE* const lowest = base + lowestIndex; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - HASH_READ_SIZE; + U32 offset_1=cctx->rep[0], offset_2=cctx->rep[1]; + U32 offsetSaved = 0; + + /* init */ + ip += (ip==lowest); + { U32 const maxRep = (U32)(ip-lowest); + if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; + } + + /* Main Search Loop */ + while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ + size_t mLength; + size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8); + size_t const h = ZSTD_hashPtr(ip, hBitsS, mls); + U32 const current = (U32)(ip-base); + U32 const matchIndexL = hashLong[h2]; + U32 const matchIndexS = hashSmall[h]; + const BYTE* matchLong = base + matchIndexL; + const BYTE* match = base + matchIndexS; + hashLong[h2] = hashSmall[h] = current; /* update hash tables */ + + assert(offset_1 <= current); /* supposed guaranteed by construction */ + if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { + /* favor repcode */ + mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; + ip++; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); + } else { + U32 offset; + if ( (matchIndexL > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip)) ) { + mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8; + offset = (U32)(ip-matchLong); + while (((ip>anchor) & (matchLong>lowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ + } else if ( (matchIndexS > lowestIndex) && (MEM_read32(match) == MEM_read32(ip)) ) { + size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8); + U32 const matchIndexL3 = hashLong[hl3]; + const BYTE* matchL3 = base + matchIndexL3; + hashLong[hl3] = current + 1; + if ( (matchIndexL3 > lowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1)) ) { + mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8; + ip++; + offset = (U32)(ip-matchL3); + while (((ip>anchor) & (matchL3>lowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */ + } else { + mLength = ZSTD_count(ip+4, match+4, iend) + 4; + offset = (U32)(ip-match); + while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + } + } else { + ip += ((ip-anchor) >> g_searchStrength) + 1; + continue; + } + + offset_2 = offset_1; + offset_1 = offset; + + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + } + + /* match found */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Fill Table */ + hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = + hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; /* here because current+2 could be > iend-8 */ + hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = + hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base); + + /* check immediate repcode */ + while ( (ip <= ilimit) + && ( (offset_2>0) + & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { + /* store sequence */ + size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; + { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */ + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base); + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base); + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH); + ip += rLength; + anchor = ip; + continue; /* faster when present ... (?) 
*/ + } } } + + /* save reps for next block */ + cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved; + cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved; + + /* Last Literals */ + { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } +} + + +static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + const U32 mls = ctx->params.cParams.searchLength; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return; + case 5 : + ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return; + case 6 : + ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return; + case 7 : + ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return; + } +} + + +static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx, + const void* src, size_t srcSize, + const U32 mls) +{ + U32* const hashLong = ctx->hashTable; + U32 const hBitsL = ctx->params.cParams.hashLog; + U32* const hashSmall = ctx->chainTable; + U32 const hBitsS = ctx->params.cParams.chainLog; + seqStore_t* seqStorePtr = &(ctx->seqStore); + const BYTE* const base = ctx->base; + const BYTE* const dictBase = ctx->dictBase; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const U32 lowestIndex = ctx->lowLimit; + const BYTE* const dictStart = dictBase + lowestIndex; + const U32 dictLimit = ctx->dictLimit; + const BYTE* const lowPrefixPtr = base + dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + U32 offset_1=ctx->rep[0], offset_2=ctx->rep[1]; + + /* Search Loop */ + while (ip < ilimit) { /* < instead of <=, because (ip+1) */ + const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls); + const U32 matchIndex = hashSmall[hSmall]; + const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base; + const BYTE* match = matchBase + matchIndex; + + const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8); + const U32 matchLongIndex = hashLong[hLong]; + const BYTE* matchLongBase = matchLongIndex < dictLimit ? dictBase : base; + const BYTE* matchLong = matchLongBase + matchLongIndex; + + const U32 current = (U32)(ip-base); + const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */ + const BYTE* repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* repMatch = repBase + repIndex; + size_t mLength; + hashSmall[hSmall] = hashLong[hLong] = current; /* update hash table */ + + if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) + && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { + const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend; + mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4; + ip++; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); + } else { + if ((matchLongIndex > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) { + const BYTE* matchEnd = matchLongIndex < dictLimit ? dictEnd : iend; + const BYTE* lowMatchPtr = matchLongIndex < dictLimit ? 
dictStart : lowPrefixPtr; + U32 offset; + mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, lowPrefixPtr) + 8; + offset = current - matchLongIndex; + while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + + } else if ((matchIndex > lowestIndex) && (MEM_read32(match) == MEM_read32(ip))) { + size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8); + U32 const matchIndex3 = hashLong[h3]; + const BYTE* const match3Base = matchIndex3 < dictLimit ? dictBase : base; + const BYTE* match3 = match3Base + matchIndex3; + U32 offset; + hashLong[h3] = current + 1; + if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) { + const BYTE* matchEnd = matchIndex3 < dictLimit ? dictEnd : iend; + const BYTE* lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr; + mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, lowPrefixPtr) + 8; + ip++; + offset = current+1 - matchIndex3; + while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */ + } else { + const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend; + const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr; + mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4; + offset = current - matchIndex; + while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + } + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + + } else { + ip += ((ip-anchor) >> g_searchStrength) + 1; + continue; + } } + + /* found a match : store it */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Fill Table */ + hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; + hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2; + hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base); + hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); + /* check immediate repcode */ + while (ip <= ilimit) { + U32 const current2 = (U32)(ip-base); + U32 const repIndex2 = current2 - offset_2; + const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2; + if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { + const BYTE* const repEnd2 = repIndex2 < dictLimit ? 
dictEnd : iend; + size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4; + U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH); + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; + ip += repLength2; + anchor = ip; + continue; + } + break; + } } } + + /* save reps for next block */ + ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2; + + /* Last Literals */ + { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } +} + + +static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx* ctx, + const void* src, size_t srcSize) +{ + U32 const mls = ctx->params.cParams.searchLength; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return; + case 5 : + ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return; + case 6 : + ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return; + case 7 : + ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return; + } +} + + +/*-************************************* +* Binary Tree search +***************************************/ +/** ZSTD_insertBt1() : add one or multiple positions to tree. +* ip : assumed <= iend-8 . +* @return : nb of positions added */ +static U32 ZSTD_insertBt1(ZSTD_CCtx* zc, const BYTE* const ip, const U32 mls, const BYTE* const iend, U32 nbCompares, + U32 extDict) +{ + U32* const hashTable = zc->hashTable; + U32 const hashLog = zc->params.cParams.hashLog; + size_t const h = ZSTD_hashPtr(ip, hashLog, mls); + U32* const bt = zc->chainTable; + U32 const btLog = zc->params.cParams.chainLog - 1; + U32 const btMask = (1 << btLog) - 1; + U32 matchIndex = hashTable[h]; + size_t commonLengthSmaller=0, commonLengthLarger=0; + const BYTE* const base = zc->base; + const BYTE* const dictBase = zc->dictBase; + const U32 dictLimit = zc->dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* match; + const U32 current = (U32)(ip-base); + const U32 btLow = btMask >= current ? 
0 : current - btMask; + U32* smallerPtr = bt + 2*(current&btMask); + U32* largerPtr = smallerPtr + 1; + U32 dummy32; /* to be nullified at the end */ + U32 const windowLow = zc->lowLimit; + U32 matchEndIdx = current+8; + size_t bestLength = 8; +#ifdef ZSTD_C_PREDICT + U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0); + U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1); + predictedSmall += (predictedSmall>0); + predictedLarge += (predictedLarge>0); +#endif /* ZSTD_C_PREDICT */ + + hashTable[h] = current; /* Update Hash Table */ + + while (nbCompares-- && (matchIndex > windowLow)) { + U32* const nextPtr = bt + 2*(matchIndex & btMask); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + +#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */ + const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */ + if (matchIndex == predictedSmall) { + /* no need to check length, result known */ + *smallerPtr = matchIndex; + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ + matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ + predictedSmall = predictPtr[1] + (predictPtr[1]>0); + continue; + } + if (matchIndex == predictedLarge) { + *largerPtr = matchIndex; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + predictedLarge = predictPtr[0] + (predictPtr[0]>0); + continue; + } +#endif + if ((!extDict) || (matchIndex+matchLength >= dictLimit)) { + match = base + matchIndex; + if (match[matchLength] == ip[matchLength]) + matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1; + } else { + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); + if (matchIndex+matchLength >= dictLimit) + match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ + } + + if (matchLength > bestLength) { + bestLength = matchLength; + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (U32)matchLength; + } + + if (ip+matchLength == iend) /* equal : no way to know if inf or sup */ + break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt the tree */ + + if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */ + /* match is smaller than current */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ + matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ + } else { + /* match is larger than current */ + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } } + + *smallerPtr = *largerPtr = 0; + if (bestLength > 384) return MIN(192, (U32)(bestLength - 384)); /* speed optimization */ + if (matchEndIdx > current + 8) return matchEndIdx - current - 8; + return 1; +} + + +static size_t 
ZSTD_insertBtAndFindBestMatch ( + ZSTD_CCtx* zc, + const BYTE* const ip, const BYTE* const iend, + size_t* offsetPtr, + U32 nbCompares, const U32 mls, + U32 extDict) +{ + U32* const hashTable = zc->hashTable; + U32 const hashLog = zc->params.cParams.hashLog; + size_t const h = ZSTD_hashPtr(ip, hashLog, mls); + U32* const bt = zc->chainTable; + U32 const btLog = zc->params.cParams.chainLog - 1; + U32 const btMask = (1 << btLog) - 1; + U32 matchIndex = hashTable[h]; + size_t commonLengthSmaller=0, commonLengthLarger=0; + const BYTE* const base = zc->base; + const BYTE* const dictBase = zc->dictBase; + const U32 dictLimit = zc->dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const U32 current = (U32)(ip-base); + const U32 btLow = btMask >= current ? 0 : current - btMask; + const U32 windowLow = zc->lowLimit; + U32* smallerPtr = bt + 2*(current&btMask); + U32* largerPtr = bt + 2*(current&btMask) + 1; + U32 matchEndIdx = current+8; + U32 dummy32; /* to be nullified at the end */ + size_t bestLength = 0; + + hashTable[h] = current; /* Update Hash Table */ + + while (nbCompares-- && (matchIndex > windowLow)) { + U32* const nextPtr = bt + 2*(matchIndex & btMask); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + const BYTE* match; + + if ((!extDict) || (matchIndex+matchLength >= dictLimit)) { + match = base + matchIndex; + if (match[matchLength] == ip[matchLength]) + matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1; + } else { + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); + if (matchIndex+matchLength >= dictLimit) + match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ + } + + if (matchLength > bestLength) { + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (U32)matchLength; + if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) + bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex; + if (ip+matchLength == iend) /* equal : no way to know if inf or sup */ + break; /* drop, to guarantee consistency (miss a little bit of compression) */ + } + + if (match[matchLength] < ip[matchLength]) { + /* match is smaller than current */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ + matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ + } else { + /* match is larger than current */ + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } } + + *smallerPtr = *largerPtr = 0; + + zc->nextToUpdate = (matchEndIdx > current + 8) ? 
matchEndIdx - 8 : current+1; + return bestLength; +} + + +static void ZSTD_updateTree(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls) +{ + const BYTE* const base = zc->base; + const U32 target = (U32)(ip - base); + U32 idx = zc->nextToUpdate; + + while(idx < target) + idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 0); +} + +/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ +static size_t ZSTD_BtFindBestMatch ( + ZSTD_CCtx* zc, + const BYTE* const ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 mls) +{ + if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ + ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls); + return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0); +} + + +static size_t ZSTD_BtFindBestMatch_selectMLS ( + ZSTD_CCtx* zc, /* Index table will be updated */ + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 matchLengthSearch) +{ + switch(matchLengthSearch) + { + default : /* includes case 3 */ + case 4 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4); + case 5 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5); + case 7 : + case 6 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6); + } +} + + +static void ZSTD_updateTree_extDict(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls) +{ + const BYTE* const base = zc->base; + const U32 target = (U32)(ip - base); + U32 idx = zc->nextToUpdate; + + while (idx < target) idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 1); +} + + +/** Tree updater, providing best match */ +static size_t ZSTD_BtFindBestMatch_extDict ( + ZSTD_CCtx* zc, + const BYTE* const ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 mls) +{ + if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ + ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls); + return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1); +} + + +static size_t ZSTD_BtFindBestMatch_selectMLS_extDict ( + ZSTD_CCtx* zc, /* Index table will be updated */ + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 matchLengthSearch) +{ + switch(matchLengthSearch) + { + default : /* includes case 3 */ + case 4 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4); + case 5 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5); + case 7 : + case 6 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6); + } +} + + + +/* ********************************* +* Hash Chain +***********************************/ +#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & mask] + +/* Update chains up to ip (excluded) + Assumption : always within prefix (i.e. 
not within extDict) */ +FORCE_INLINE +U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls) +{ + U32* const hashTable = zc->hashTable; + const U32 hashLog = zc->params.cParams.hashLog; + U32* const chainTable = zc->chainTable; + const U32 chainMask = (1 << zc->params.cParams.chainLog) - 1; + const BYTE* const base = zc->base; + const U32 target = (U32)(ip - base); + U32 idx = zc->nextToUpdate; + + while(idx < target) { /* catch up */ + size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls); + NEXT_IN_CHAIN(idx, chainMask) = hashTable[h]; + hashTable[h] = idx; + idx++; + } + + zc->nextToUpdate = target; + return hashTable[ZSTD_hashPtr(ip, hashLog, mls)]; +} + + + +FORCE_INLINE /* inlining is important to hardwire a hot branch (template emulation) */ +size_t ZSTD_HcFindBestMatch_generic ( + ZSTD_CCtx* zc, /* Index table will be updated */ + const BYTE* const ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 mls, const U32 extDict) +{ + U32* const chainTable = zc->chainTable; + const U32 chainSize = (1 << zc->params.cParams.chainLog); + const U32 chainMask = chainSize-1; + const BYTE* const base = zc->base; + const BYTE* const dictBase = zc->dictBase; + const U32 dictLimit = zc->dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const U32 lowLimit = zc->lowLimit; + const U32 current = (U32)(ip-base); + const U32 minChain = current > chainSize ? current - chainSize : 0; + int nbAttempts=maxNbAttempts; + size_t ml=4-1; + + /* HC4 match finder */ + U32 matchIndex = ZSTD_insertAndFindFirstIndex (zc, ip, mls); + + for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) { + const BYTE* match; + size_t currentMl=0; + if ((!extDict) || matchIndex >= dictLimit) { + match = base + matchIndex; + if (match[ml] == ip[ml]) /* potentially better */ + currentMl = ZSTD_count(ip, match, iLimit); + } else { + match = dictBase + matchIndex; + if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ + currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4; + } + + /* save best solution */ + if (currentMl > ml) { + ml = currentMl; + *offsetPtr = current - matchIndex + ZSTD_REP_MOVE; + if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ + } + + if (matchIndex <= minChain) break; + matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask); + } + + return ml; +} + + +FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS ( + ZSTD_CCtx* zc, + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 matchLengthSearch) +{ + switch(matchLengthSearch) + { + default : /* includes case 3 */ + case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0); + case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0); + case 7 : + case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0); + } +} + + +FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS ( + ZSTD_CCtx* zc, + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 matchLengthSearch) +{ + switch(matchLengthSearch) + { + default : /* includes case 3 */ + case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1); + case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1); 
+ case 7 : + case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1); + } +} + + +/* ******************************* +* Common parser - lazy strategy +*********************************/ +FORCE_INLINE +void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx, + const void* src, size_t srcSize, + const U32 searchMethod, const U32 depth) +{ + seqStore_t* seqStorePtr = &(ctx->seqStore); + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + const BYTE* const base = ctx->base + ctx->dictLimit; + + U32 const maxSearches = 1 << ctx->params.cParams.searchLog; + U32 const mls = ctx->params.cParams.searchLength; + + typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit, + size_t* offsetPtr, + U32 maxNbAttempts, U32 matchLengthSearch); + searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS; + U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset=0; + + /* init */ + ip += (ip==base); + ctx->nextToUpdate3 = ctx->nextToUpdate; + { U32 const maxRep = (U32)(ip-base); + if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0; + if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0; + } + + /* Match Loop */ + while (ip < ilimit) { + size_t matchLength=0; + size_t offset=0; + const BYTE* start=ip+1; + + /* check repCode */ + if ((offset_1>0) & (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))) { + /* repcode : we take it */ + matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; + if (depth==0) goto _storeSequence; + } + + /* first search (depth 0) */ + { size_t offsetFound = 99999999; + size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls); + if (ml2 > matchLength) + matchLength = ml2, start = ip, offset=offsetFound; + } + + if (matchLength < 4) { + ip += ((ip-anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */ + continue; + } + + /* let's try to find a better solution */ + if (depth>=1) + while (ip<ilimit) { + ip ++; + if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { + size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; + int const gain2 = (int)(mlRep * 3); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); + if ((mlRep >= 4) && (gain2 > gain1)) + matchLength = mlRep, offset = 0, start = ip; + } + { size_t offset2=99999999; + size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); + if ((ml2 >= 4) && (gain2 > gain1)) { + matchLength = ml2, offset = offset2, start = ip; + continue; /* search a better one */ + } } + + /* let's find an even better one */ + if ((depth==2) && (ip<ilimit)) { + ip ++; + if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { + size_t const ml2 = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; + int const gain2 = (int)(ml2 * 4); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); + if ((ml2 >= 4) && (gain2 > gain1)) + matchLength = ml2, offset = 0, start = ip; + } + { size_t offset2=99999999; + size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 
= (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); + if ((ml2 >= 4) && (gain2 > gain1)) { + matchLength = ml2, offset = offset2, start = ip; + continue; + } } } + break; /* nothing found : store previous solution */ + } + + /* catch up */ + if (offset) { + while ( (start > anchor) + && (start > base+offset-ZSTD_REP_MOVE) + && (start[-1] == start[-1-offset+ZSTD_REP_MOVE]) ) /* only search for offset within prefix */ + { start--; matchLength++; } + offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); + } + + /* store sequence */ +_storeSequence: + { size_t const litLength = start - anchor; + ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH); + anchor = ip = start + matchLength; + } + + /* check immediate repcode */ + while ( (ip <= ilimit) + && ((offset_2>0) + & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { + /* store sequence */ + matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; + offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */ + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH); + ip += matchLength; + anchor = ip; + continue; /* faster when present ... (?) */ + } } + + /* Save reps for next block */ + ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset; + ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset; + + /* Last Literals */ + { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } +} + + +static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2); +} + +static void ZSTD_compressBlock_lazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2); +} + +static void ZSTD_compressBlock_lazy(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1); +} + +static void ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0); +} + + +FORCE_INLINE +void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx, + const void* src, size_t srcSize, + const U32 searchMethod, const U32 depth) +{ + seqStore_t* seqStorePtr = &(ctx->seqStore); + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + const BYTE* const base = ctx->base; + const U32 dictLimit = ctx->dictLimit; + const U32 lowestIndex = ctx->lowLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* const dictBase = ctx->dictBase; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const dictStart = dictBase + ctx->lowLimit; + + const U32 maxSearches = 1 << ctx->params.cParams.searchLog; + const U32 mls = ctx->params.cParams.searchLength; + + typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit, + size_t* offsetPtr, + U32 maxNbAttempts, U32 matchLengthSearch); + searchMax_f searchMax = searchMethod ? 
ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS; + + U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1]; + + /* init */ + ctx->nextToUpdate3 = ctx->nextToUpdate; + ip += (ip == prefixStart); + + /* Match Loop */ + while (ip < ilimit) { + size_t matchLength=0; + size_t offset=0; + const BYTE* start=ip+1; + U32 current = (U32)(ip-base); + + /* check repCode */ + { const U32 repIndex = (U32)(current+1 - offset_1); + const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* const repMatch = repBase + repIndex; + if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ + if (MEM_read32(ip+1) == MEM_read32(repMatch)) { + /* repcode detected we should take it */ + const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; + matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4; + if (depth==0) goto _storeSequence; + } } + + /* first search (depth 0) */ + { size_t offsetFound = 99999999; + size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls); + if (ml2 > matchLength) + matchLength = ml2, start = ip, offset=offsetFound; + } + + if (matchLength < 4) { + ip += ((ip-anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */ + continue; + } + + /* let's try to find a better solution */ + if (depth>=1) + while (ip<ilimit) { + ip ++; + current++; + /* check repCode */ + if (offset) { + const U32 repIndex = (U32)(current - offset_1); + const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* const repMatch = repBase + repIndex; + if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ + if (MEM_read32(ip) == MEM_read32(repMatch)) { + /* repcode detected */ + const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; + size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; + int const gain2 = (int)(repLength * 3); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); + if ((repLength >= 4) && (gain2 > gain1)) + matchLength = repLength, offset = 0, start = ip; + } } + + /* search match, depth 1 */ + { size_t offset2=99999999; + size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); + if ((ml2 >= 4) && (gain2 > gain1)) { + matchLength = ml2, offset = offset2, start = ip; + continue; /* search a better one */ + } } + + /* let's find an even better one */ + if ((depth==2) && (ip<ilimit)) { + ip ++; + current++; + /* check repCode */ + if (offset) { + const U32 repIndex = (U32)(current - offset_1); + const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* const repMatch = repBase + repIndex; + if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ + if (MEM_read32(ip) == MEM_read32(repMatch)) { + /* repcode detected */ + const BYTE* const repEnd = repIndex < dictLimit ? 
dictEnd : iend; + size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; + int const gain2 = (int)(repLength * 4); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); + if ((repLength >= 4) && (gain2 > gain1)) + matchLength = repLength, offset = 0, start = ip; + } } + + /* search match, depth 2 */ + { size_t offset2=99999999; + size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); + if ((ml2 >= 4) && (gain2 > gain1)) { + matchLength = ml2, offset = offset2, start = ip; + continue; + } } } + break; /* nothing found : store previous solution */ + } + + /* catch up */ + if (offset) { + U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE)); + const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex; + const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart; + while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ + offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); + } + + /* store sequence */ +_storeSequence: + { size_t const litLength = start - anchor; + ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH); + anchor = ip = start + matchLength; + } + + /* check immediate repcode */ + while (ip <= ilimit) { + const U32 repIndex = (U32)((ip-base) - offset_2); + const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* const repMatch = repBase + repIndex; + if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ + if (MEM_read32(ip) == MEM_read32(repMatch)) { + /* repcode detected we should take it */ + const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; + matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; + offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */ + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH); + ip += matchLength; + anchor = ip; + continue; /* faster when present ... (?) 
*/ + } + break; + } } + + /* Save reps for next block */ + ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2; + + /* Last Literals */ + { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } +} + + +void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0); +} + +static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1); +} + +static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2); +} + +static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2); +} + + +/* The optimal parser */ +#include "zstd_opt.h" + +static void ZSTD_compressBlock_btopt(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ +#ifdef ZSTD_OPT_H_91842398743 + ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0); +#else + (void)ctx; (void)src; (void)srcSize; + return; +#endif +} + +static void ZSTD_compressBlock_btopt2(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ +#ifdef ZSTD_OPT_H_91842398743 + ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1); +#else + (void)ctx; (void)src; (void)srcSize; + return; +#endif +} + +static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ +#ifdef ZSTD_OPT_H_91842398743 + ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0); +#else + (void)ctx; (void)src; (void)srcSize; + return; +#endif +} + +static void ZSTD_compressBlock_btopt2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ +#ifdef ZSTD_OPT_H_91842398743 + ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1); +#else + (void)ctx; (void)src; (void)srcSize; + return; +#endif +} + + +typedef void (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, const void* src, size_t srcSize); + +static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict) +{ + static const ZSTD_blockCompressor blockCompressor[2][8] = { + { ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, + ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2, + ZSTD_compressBlock_btopt, ZSTD_compressBlock_btopt2 }, + { ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, + ZSTD_compressBlock_lazy_extDict,ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, + ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btopt2_extDict } + }; + + return blockCompressor[extDict][(U32)strat]; +} + + +static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit); + const BYTE* const base = zc->base; + const BYTE* const istart = (const BYTE*)src; + const U32 current = (U32)(istart-base); + if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) return 0; /* don't even attempt compression below a certain srcSize */ + ZSTD_resetSeqStore(&(zc->seqStore)); + if (current > zc->nextToUpdate + 384) + zc->nextToUpdate = current - MIN(192, (U32)(current - zc->nextToUpdate - 384)); /* limited update after 
finding a very long match */ + blockCompressor(zc, src, srcSize); + return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize); +} + + +/*! ZSTD_compress_generic() : +* Compress a chunk of data into one or multiple blocks. +* All blocks will be terminated, all input will be consumed. +* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. +* Frame is supposed already started (header already produced) +* @return : compressed size, or an error code +*/ +static size_t ZSTD_compress_generic (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + U32 lastFrameChunk) +{ + size_t blockSize = cctx->blockSize; + size_t remaining = srcSize; + const BYTE* ip = (const BYTE*)src; + BYTE* const ostart = (BYTE*)dst; + BYTE* op = ostart; + U32 const maxDist = 1 << cctx->params.cParams.windowLog; + + if (cctx->params.fParams.checksumFlag && srcSize) + XXH64_update(&cctx->xxhState, src, srcSize); + + while (remaining) { + U32 const lastBlock = lastFrameChunk & (blockSize >= remaining); + size_t cSize; + + if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE) + return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */ + if (remaining < blockSize) blockSize = remaining; + + /* preemptive overflow correction */ + if (cctx->lowLimit > (3U<<29)) { + U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1; + U32 const current = (U32)(ip - cctx->base); + U32 const newCurrent = (current & cycleMask) + (1 << cctx->params.cParams.windowLog); + U32 const correction = current - newCurrent; + ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30); + ZSTD_reduceIndex(cctx, correction); + cctx->base += correction; + cctx->dictBase += correction; + cctx->lowLimit -= correction; + cctx->dictLimit -= correction; + if (cctx->nextToUpdate < correction) cctx->nextToUpdate = 0; + else cctx->nextToUpdate -= correction; + } + + if ((U32)(ip+blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) { + /* enforce maxDist */ + U32 const newLowLimit = (U32)(ip+blockSize - cctx->base) - maxDist; + if (cctx->lowLimit < newLowLimit) cctx->lowLimit = newLowLimit; + if (cctx->dictLimit < cctx->lowLimit) cctx->dictLimit = cctx->lowLimit; + } + + cSize = ZSTD_compressBlock_internal(cctx, op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize, ip, blockSize); + if (ZSTD_isError(cSize)) return cSize; + + if (cSize == 0) { /* block is not compressible */ + U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(blockSize << 3); + if (blockSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall); + MEM_writeLE32(op, cBlockHeader24); /* no pb, 4th byte will be overwritten */ + memcpy(op + ZSTD_blockHeaderSize, ip, blockSize); + cSize = ZSTD_blockHeaderSize+blockSize; + } else { + U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); + MEM_writeLE24(op, cBlockHeader24); + cSize += ZSTD_blockHeaderSize; + } + + remaining -= blockSize; + dstCapacity -= cSize; + ip += blockSize; + op += cSize; + } + + if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending; + return op-ostart; +} + + +static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity, + ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID) +{ BYTE* const op = (BYTE*)dst; + U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */ + U32 const dictIDSizeCode = params.fParams.noDictIDFlag ? 
0 : dictIDSizeCodeLength; /* 0-3 */ + U32 const checksumFlag = params.fParams.checksumFlag>0; + U32 const windowSize = 1U << params.cParams.windowLog; + U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize); + BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3); + U32 const fcsCode = params.fParams.contentSizeFlag ? + (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : /* 0-3 */ + 0; + BYTE const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) ); + size_t pos; + + if (dstCapacity < ZSTD_frameHeaderSize_max) return ERROR(dstSize_tooSmall); + DEBUGLOG(5, "ZSTD_writeFrameHeader : dictIDFlag : %u \n", !params.fParams.noDictIDFlag); + DEBUGLOG(5, "ZSTD_writeFrameHeader : dictID : %u \n", dictID); + DEBUGLOG(5, "ZSTD_writeFrameHeader : dictIDSizeCode : %u \n", dictIDSizeCode); + + MEM_writeLE32(dst, ZSTD_MAGICNUMBER); + op[4] = frameHeaderDecriptionByte; pos=5; + if (!singleSegment) op[pos++] = windowLogByte; + switch(dictIDSizeCode) + { + default: /* impossible */ + case 0 : break; + case 1 : op[pos] = (BYTE)(dictID); pos++; break; + case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break; + case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break; + } + switch(fcsCode) + { + default: /* impossible */ + case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break; + case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break; + case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break; + case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break; + } + return pos; +} + + +static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + U32 frame, U32 lastFrameChunk) +{ + const BYTE* const ip = (const BYTE*) src; + size_t fhSize = 0; + + if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */ + + if (frame && (cctx->stage==ZSTDcs_init)) { + fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, cctx->frameContentSize, cctx->dictID); + if (ZSTD_isError(fhSize)) return fhSize; + dstCapacity -= fhSize; + dst = (char*)dst + fhSize; + cctx->stage = ZSTDcs_ongoing; + } + + /* Check if blocks follow each other */ + if (src != cctx->nextSrc) { + /* not contiguous */ + ptrdiff_t const delta = cctx->nextSrc - ip; + cctx->lowLimit = cctx->dictLimit; + cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base); + cctx->dictBase = cctx->base; + cctx->base -= delta; + cctx->nextToUpdate = cctx->dictLimit; + if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE) cctx->lowLimit = cctx->dictLimit; /* too small extDict */ + } + + /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */ + if ((ip+srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) { + ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase; + U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx; + cctx->lowLimit = lowLimitMax; + } + + cctx->nextSrc = ip + srcSize; + + if (srcSize) { + size_t const cSize = frame ? 
+ ZSTD_compress_generic (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) : + ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize); + if (ZSTD_isError(cSize)) return cSize; + cctx->consumedSrcSize += srcSize; + return cSize + fhSize; + } else + return fhSize; +} + + +size_t ZSTD_compressContinue (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */); +} + + +size_t ZSTD_getBlockSizeMax(ZSTD_CCtx* cctx) +{ + return MIN (ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << cctx->params.cParams.windowLog); +} + +size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx); + if (srcSize > blockSizeMax) return ERROR(srcSize_wrong); + return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */); +} + +/*! ZSTD_loadDictionaryContent() : + * @return : 0, or an error code + */ +static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx* zc, const void* src, size_t srcSize) +{ + const BYTE* const ip = (const BYTE*) src; + const BYTE* const iend = ip + srcSize; + + /* input becomes current prefix */ + zc->lowLimit = zc->dictLimit; + zc->dictLimit = (U32)(zc->nextSrc - zc->base); + zc->dictBase = zc->base; + zc->base += ip - zc->nextSrc; + zc->nextToUpdate = zc->dictLimit; + zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base); + + zc->nextSrc = iend; + if (srcSize <= HASH_READ_SIZE) return 0; + + switch(zc->params.cParams.strategy) + { + case ZSTD_fast: + ZSTD_fillHashTable (zc, iend, zc->params.cParams.searchLength); + break; + + case ZSTD_dfast: + ZSTD_fillDoubleHashTable (zc, iend, zc->params.cParams.searchLength); + break; + + case ZSTD_greedy: + case ZSTD_lazy: + case ZSTD_lazy2: + if (srcSize >= HASH_READ_SIZE) + ZSTD_insertAndFindFirstIndex(zc, iend-HASH_READ_SIZE, zc->params.cParams.searchLength); + break; + + case ZSTD_btlazy2: + case ZSTD_btopt: + case ZSTD_btopt2: + if (srcSize >= HASH_READ_SIZE) + ZSTD_updateTree(zc, iend-HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength); + break; + + default: + return ERROR(GENERIC); /* strategy doesn't exist; impossible */ + } + + zc->nextToUpdate = (U32)(iend - zc->base); + return 0; +} + + +/* Dictionaries that assign zero probability to symbols that show up causes problems + when FSE encoding. Refuse dictionaries that assign zero probability to symbols + that we may encounter during compression. + NOTE: This behavior is not standard and could be improved in the future. */ +static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) { + U32 s; + if (dictMaxSymbolValue < maxSymbolValue) return ERROR(dictionary_corrupted); + for (s = 0; s <= maxSymbolValue; ++s) { + if (normalizedCounter[s] == 0) return ERROR(dictionary_corrupted); + } + return 0; +} + + +/* Dictionary format : + * See : + * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format + */ +/*! 
ZSTD_loadZstdDictionary() : + * @return : 0, or an error code + * assumptions : magic number supposed already checked + * dictSize supposed > 8 + */ +static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize) +{ + const BYTE* dictPtr = (const BYTE*)dict; + const BYTE* const dictEnd = dictPtr + dictSize; + short offcodeNCount[MaxOff+1]; + unsigned offcodeMaxValue = MaxOff; + BYTE scratchBuffer[1<<MAX(MLFSELog,LLFSELog)]; + + dictPtr += 4; /* skip magic number */ + cctx->dictID = cctx->params.fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr); + dictPtr += 4; + + { size_t const hufHeaderSize = HUF_readCTable(cctx->hufCTable, 255, dictPtr, dictEnd-dictPtr); + if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted); + dictPtr += hufHeaderSize; + } + + { unsigned offcodeLog; + size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted); + if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted); + /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */ + CHECK_E( FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, scratchBuffer, sizeof(scratchBuffer)), + dictionary_corrupted); + dictPtr += offcodeHeaderSize; + } + + { short matchlengthNCount[MaxML+1]; + unsigned matchlengthMaxValue = MaxML, matchlengthLog; + size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted); + if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted); + /* Every match length code must have non-zero probability */ + CHECK_F( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML)); + CHECK_E( FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, scratchBuffer, sizeof(scratchBuffer)), + dictionary_corrupted); + dictPtr += matchlengthHeaderSize; + } + + { short litlengthNCount[MaxLL+1]; + unsigned litlengthMaxValue = MaxLL, litlengthLog; + size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted); + if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted); + /* Every literal length code must have non-zero probability */ + CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL)); + CHECK_E( FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, scratchBuffer, sizeof(scratchBuffer)), + dictionary_corrupted); + dictPtr += litlengthHeaderSize; + } + + if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted); + cctx->rep[0] = MEM_readLE32(dictPtr+0); + cctx->rep[1] = MEM_readLE32(dictPtr+4); + cctx->rep[2] = MEM_readLE32(dictPtr+8); + dictPtr += 12; + + { size_t const dictContentSize = (size_t)(dictEnd - dictPtr); + U32 offcodeMax = MaxOff; + if (dictContentSize <= ((U32)-1) - 128 KB) { + U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */ + offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */ + } + /* All offset values <= dictContentSize + 128 KB must be representable */ + CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, 
MIN(offcodeMax, MaxOff))); + /* All repCodes must be <= dictContentSize and != 0*/ + { U32 u; + for (u=0; u<3; u++) { + if (cctx->rep[u] == 0) return ERROR(dictionary_corrupted); + if (cctx->rep[u] > dictContentSize) return ERROR(dictionary_corrupted); + } } + + cctx->fseCTables_ready = 1; + cctx->hufCTable_repeatMode = HUF_repeat_valid; + return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize); + } +} + +/** ZSTD_compress_insertDictionary() : +* @return : 0, or an error code */ +static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize) +{ + if ((dict==NULL) || (dictSize<=8)) return 0; + + /* dict as pure content */ + if ((MEM_readLE32(dict) != ZSTD_DICT_MAGIC) || (cctx->forceRawDict)) + return ZSTD_loadDictionaryContent(cctx, dict, dictSize); + + /* dict as zstd dictionary */ + return ZSTD_loadZstdDictionary(cctx, dict, dictSize); +} + +/*! ZSTD_compressBegin_internal() : +* @return : 0, or an error code */ +static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx, + const void* dict, size_t dictSize, + ZSTD_parameters params, U64 pledgedSrcSize) +{ + ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue; + assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); + CHECK_F(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize, crp)); + return ZSTD_compress_insertDictionary(cctx, dict, dictSize); +} + + +/*! ZSTD_compressBegin_advanced() : +* @return : 0, or an error code */ +size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, + const void* dict, size_t dictSize, + ZSTD_parameters params, unsigned long long pledgedSrcSize) +{ + /* compression parameters verification and optimization */ + CHECK_F(ZSTD_checkCParams(params.cParams)); + return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, pledgedSrcSize); +} + + +size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) +{ + ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize); + return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, 0); +} + + +size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel) +{ + return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); +} + + +/*! ZSTD_writeEpilogue() : +* Ends a frame. 
+* @return : nb of bytes written into dst (or an error code) */ +static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity) +{ + BYTE* const ostart = (BYTE*)dst; + BYTE* op = ostart; + size_t fhSize = 0; + + if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong); /* init missing */ + + /* special case : empty frame */ + if (cctx->stage == ZSTDcs_init) { + fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, 0, 0); + if (ZSTD_isError(fhSize)) return fhSize; + dstCapacity -= fhSize; + op += fhSize; + cctx->stage = ZSTDcs_ongoing; + } + + if (cctx->stage != ZSTDcs_ending) { + /* write one last empty block, make it the "last" block */ + U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0; + if (dstCapacity<4) return ERROR(dstSize_tooSmall); + MEM_writeLE32(op, cBlockHeader24); + op += ZSTD_blockHeaderSize; + dstCapacity -= ZSTD_blockHeaderSize; + } + + if (cctx->params.fParams.checksumFlag) { + U32 const checksum = (U32) XXH64_digest(&cctx->xxhState); + if (dstCapacity<4) return ERROR(dstSize_tooSmall); + MEM_writeLE32(op, checksum); + op += 4; + } + + cctx->stage = ZSTDcs_created; /* return to "created but no init" status */ + return op-ostart; +} + + +size_t ZSTD_compressEnd (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + size_t endResult; + size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 1 /* last chunk */); + if (ZSTD_isError(cSize)) return cSize; + endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize); + if (ZSTD_isError(endResult)) return endResult; + if (cctx->params.fParams.contentSizeFlag) { /* control src size */ + if (cctx->frameContentSize != cctx->consumedSrcSize) return ERROR(srcSize_wrong); + } + return cSize + endResult; +} + + +static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize, + ZSTD_parameters params) +{ + CHECK_F(ZSTD_compressBegin_internal(cctx, dict, dictSize, params, srcSize)); + return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); +} + +size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize, + ZSTD_parameters params) +{ + CHECK_F(ZSTD_checkCParams(params.cParams)); + return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params); +} + +size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, + const void* dict, size_t dictSize, int compressionLevel) +{ + ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, dict ? 
dictSize : 0); + params.fParams.contentSizeFlag = 1; + return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params); +} + +size_t ZSTD_compressCCtx (ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel) +{ + return ZSTD_compress_usingDict(ctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel); +} + +size_t ZSTD_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel) +{ + size_t result; + ZSTD_CCtx ctxBody; + memset(&ctxBody, 0, sizeof(ctxBody)); + memcpy(&ctxBody.customMem, &defaultCustomMem, sizeof(ZSTD_customMem)); + result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel); + ZSTD_free(ctxBody.workSpace, defaultCustomMem); /* can't free ctxBody itself, as it's on stack; free only heap content */ + return result; +} + + +/* ===== Dictionary API ===== */ + +struct ZSTD_CDict_s { + void* dictBuffer; + const void* dictContent; + size_t dictContentSize; + ZSTD_CCtx* refContext; +}; /* typedef'd tp ZSTD_CDict within "zstd.h" */ + +size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict) +{ + if (cdict==NULL) return 0; /* support sizeof on NULL */ + return ZSTD_sizeof_CCtx(cdict->refContext) + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict); +} + +static ZSTD_parameters ZSTD_makeParams(ZSTD_compressionParameters cParams, ZSTD_frameParameters fParams) +{ + ZSTD_parameters params; + params.cParams = cParams; + params.fParams = fParams; + return params; +} + +ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, unsigned byReference, + ZSTD_compressionParameters cParams, ZSTD_customMem customMem) +{ + if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem; + if (!customMem.customAlloc || !customMem.customFree) return NULL; + + { ZSTD_CDict* const cdict = (ZSTD_CDict*) ZSTD_malloc(sizeof(ZSTD_CDict), customMem); + ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(customMem); + + if (!cdict || !cctx) { + ZSTD_free(cdict, customMem); + ZSTD_freeCCtx(cctx); + return NULL; + } + + if ((byReference) || (!dictBuffer) || (!dictSize)) { + cdict->dictBuffer = NULL; + cdict->dictContent = dictBuffer; + } else { + void* const internalBuffer = ZSTD_malloc(dictSize, customMem); + if (!internalBuffer) { ZSTD_free(cctx, customMem); ZSTD_free(cdict, customMem); return NULL; } + memcpy(internalBuffer, dictBuffer, dictSize); + cdict->dictBuffer = internalBuffer; + cdict->dictContent = internalBuffer; + } + + { ZSTD_frameParameters const fParams = { 0 /* contentSizeFlag */, 0 /* checksumFlag */, 0 /* noDictIDFlag */ }; /* dummy */ + ZSTD_parameters const params = ZSTD_makeParams(cParams, fParams); + size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0); + if (ZSTD_isError(errorCode)) { + ZSTD_free(cdict->dictBuffer, customMem); + ZSTD_free(cdict, customMem); + ZSTD_freeCCtx(cctx); + return NULL; + } } + + cdict->refContext = cctx; + cdict->dictContentSize = dictSize; + return cdict; + } +} + +ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel) +{ + ZSTD_customMem const allocator = { NULL, NULL, NULL }; + ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize); + return ZSTD_createCDict_advanced(dict, dictSize, 0, cParams, allocator); +} + +ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel) +{ + ZSTD_customMem const allocator = { NULL, NULL, NULL }; + 
ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize); + return ZSTD_createCDict_advanced(dict, dictSize, 1, cParams, allocator); +} + +size_t ZSTD_freeCDict(ZSTD_CDict* cdict) +{ + if (cdict==NULL) return 0; /* support free on NULL */ + { ZSTD_customMem const cMem = cdict->refContext->customMem; + ZSTD_freeCCtx(cdict->refContext); + ZSTD_free(cdict->dictBuffer, cMem); + ZSTD_free(cdict, cMem); + return 0; + } +} + +static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict* cdict) { + return ZSTD_getParamsFromCCtx(cdict->refContext); +} + +/* ZSTD_compressBegin_usingCDict_advanced() : + * cdict must be != NULL */ +size_t ZSTD_compressBegin_usingCDict_advanced( + ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, + ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize) +{ + if (cdict==NULL) return ERROR(GENERIC); /* does not support NULL cdict */ + DEBUGLOG(5, "ZSTD_compressBegin_usingCDict_advanced : dictIDFlag == %u \n", !fParams.noDictIDFlag); + if (cdict->dictContentSize) + CHECK_F( ZSTD_copyCCtx_internal(cctx, cdict->refContext, fParams, pledgedSrcSize) ) + else { + ZSTD_parameters params = cdict->refContext->params; + params.fParams = fParams; + CHECK_F(ZSTD_compressBegin_internal(cctx, NULL, 0, params, pledgedSrcSize)); + } + return 0; +} + +/* ZSTD_compressBegin_usingCDict() : + * pledgedSrcSize=0 means "unknown" + * if pledgedSrcSize>0, it will enable contentSizeFlag */ +size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) +{ + ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; + DEBUGLOG(5, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u \n", !fParams.noDictIDFlag); + return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, 0); +} + +size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_CDict* cdict, ZSTD_frameParameters fParams) +{ + CHECK_F (ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize)); /* will check if cdict != NULL */ + return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); +} + +/*! ZSTD_compress_usingCDict() : + * Compression using a digested Dictionary. + * Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. 
+ * Note that compression parameters are decided at CDict creation time + * while frame parameters are hardcoded */ +size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_CDict* cdict) +{ + ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; + return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams); +} + + + +/* ****************************************************************** +* Streaming +********************************************************************/ + +typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage; + +struct ZSTD_CStream_s { + ZSTD_CCtx* cctx; + ZSTD_CDict* cdictLocal; + const ZSTD_CDict* cdict; + char* inBuff; + size_t inBuffSize; + size_t inToCompress; + size_t inBuffPos; + size_t inBuffTarget; + size_t blockSize; + char* outBuff; + size_t outBuffSize; + size_t outBuffContentSize; + size_t outBuffFlushedSize; + ZSTD_cStreamStage stage; + U32 checksum; + U32 frameEnded; + U64 pledgedSrcSize; + ZSTD_parameters params; + ZSTD_customMem customMem; +}; /* typedef'd to ZSTD_CStream within "zstd.h" */ + +ZSTD_CStream* ZSTD_createCStream(void) +{ + return ZSTD_createCStream_advanced(defaultCustomMem); +} + +ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem) +{ + ZSTD_CStream* zcs; + + if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem; + if (!customMem.customAlloc || !customMem.customFree) return NULL; + + zcs = (ZSTD_CStream*)ZSTD_malloc(sizeof(ZSTD_CStream), customMem); + if (zcs==NULL) return NULL; + memset(zcs, 0, sizeof(ZSTD_CStream)); + memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem)); + zcs->cctx = ZSTD_createCCtx_advanced(customMem); + if (zcs->cctx == NULL) { ZSTD_freeCStream(zcs); return NULL; } + return zcs; +} + +size_t ZSTD_freeCStream(ZSTD_CStream* zcs) +{ + if (zcs==NULL) return 0; /* support free on NULL */ + { ZSTD_customMem const cMem = zcs->customMem; + ZSTD_freeCCtx(zcs->cctx); + zcs->cctx = NULL; + ZSTD_freeCDict(zcs->cdictLocal); + zcs->cdictLocal = NULL; + ZSTD_free(zcs->inBuff, cMem); + zcs->inBuff = NULL; + ZSTD_free(zcs->outBuff, cMem); + zcs->outBuff = NULL; + ZSTD_free(zcs, cMem); + return 0; + } +} + + +/*====== Initialization ======*/ + +size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; } + +size_t ZSTD_CStreamOutSize(void) +{ + return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ; +} + +static size_t ZSTD_resetCStream_internal(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize) +{ + if (zcs->inBuffSize==0) return ERROR(stage_wrong); /* zcs has not been init at least once => can't reset */ + + DEBUGLOG(5, "ZSTD_resetCStream_internal : dictIDFlag == %u \n", !zcs->params.fParams.noDictIDFlag); + + if (zcs->cdict) CHECK_F(ZSTD_compressBegin_usingCDict_advanced(zcs->cctx, zcs->cdict, zcs->params.fParams, pledgedSrcSize)) + else CHECK_F(ZSTD_compressBegin_internal(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize)); + + zcs->inToCompress = 0; + zcs->inBuffPos = 0; + zcs->inBuffTarget = zcs->blockSize; + zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; + zcs->stage = zcss_load; + zcs->frameEnded = 0; + zcs->pledgedSrcSize = pledgedSrcSize; + return 0; /* ready to go */ +} + +size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize) +{ + + zcs->params.fParams.contentSizeFlag = (pledgedSrcSize > 0); + DEBUGLOG(5, "ZSTD_resetCStream 
: dictIDFlag == %u \n", !zcs->params.fParams.noDictIDFlag); + return ZSTD_resetCStream_internal(zcs, pledgedSrcSize); +} + +/* ZSTD_initCStream_internal() : + * params are supposed validated at this stage + * and zcs->cdict is supposed to be correct */ +static size_t ZSTD_initCStream_stage2(ZSTD_CStream* zcs, + const ZSTD_parameters params, + unsigned long long pledgedSrcSize) +{ + assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); + + /* allocate buffers */ + { size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog; + if (zcs->inBuffSize < neededInBuffSize) { + zcs->inBuffSize = 0; + ZSTD_free(zcs->inBuff, zcs->customMem); + zcs->inBuff = (char*) ZSTD_malloc(neededInBuffSize, zcs->customMem); + if (zcs->inBuff == NULL) return ERROR(memory_allocation); + zcs->inBuffSize = neededInBuffSize; + } + zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize); + } + if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize)+1) { + size_t const outBuffSize = ZSTD_compressBound(zcs->blockSize)+1; + zcs->outBuffSize = 0; + ZSTD_free(zcs->outBuff, zcs->customMem); + zcs->outBuff = (char*) ZSTD_malloc(outBuffSize, zcs->customMem); + if (zcs->outBuff == NULL) return ERROR(memory_allocation); + zcs->outBuffSize = outBuffSize; + } + + zcs->checksum = params.fParams.checksumFlag > 0; + zcs->params = params; + + DEBUGLOG(5, "ZSTD_initCStream_stage2 : dictIDFlag == %u \n", !params.fParams.noDictIDFlag); + return ZSTD_resetCStream_internal(zcs, pledgedSrcSize); +} + +/* ZSTD_initCStream_usingCDict_advanced() : + * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */ +size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize, ZSTD_frameParameters fParams) +{ + if (!cdict) return ERROR(GENERIC); /* cannot handle NULL cdict (does not know what to do) */ + { ZSTD_parameters params = ZSTD_getParamsFromCDict(cdict); + params.fParams = fParams; + zcs->cdict = cdict; + return ZSTD_initCStream_stage2(zcs, params, pledgedSrcSize); + } +} + +/* note : cdict must outlive compression session */ +size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict) +{ + ZSTD_frameParameters const fParams = { 0 /* content */, 0 /* checksum */, 0 /* noDictID */ }; + return ZSTD_initCStream_usingCDict_advanced(zcs, cdict, 0, fParams); +} + +static size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs, + const void* dict, size_t dictSize, + ZSTD_parameters params, unsigned long long pledgedSrcSize) +{ + assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); + zcs->cdict = NULL; + + if (dict && dictSize >= 8) { + ZSTD_freeCDict(zcs->cdictLocal); + zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0 /* copy */, params.cParams, zcs->customMem); + if (zcs->cdictLocal == NULL) return ERROR(memory_allocation); + zcs->cdict = zcs->cdictLocal; + } + + DEBUGLOG(5, "ZSTD_initCStream_internal : dictIDFlag == %u \n", !params.fParams.noDictIDFlag); + return ZSTD_initCStream_stage2(zcs, params, pledgedSrcSize); +} + +size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, + const void* dict, size_t dictSize, + ZSTD_parameters params, unsigned long long pledgedSrcSize) +{ + CHECK_F( ZSTD_checkCParams(params.cParams) ); + DEBUGLOG(5, "ZSTD_initCStream_advanced : dictIDFlag == %u \n", !params.fParams.noDictIDFlag); + return ZSTD_initCStream_internal(zcs, dict, dictSize, params, pledgedSrcSize); +} + +size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel) +{ 
+ ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize); + return ZSTD_initCStream_internal(zcs, dict, dictSize, params, 0); +} + +size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize) +{ + ZSTD_parameters params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0); + params.fParams.contentSizeFlag = (pledgedSrcSize>0); + return ZSTD_initCStream_internal(zcs, NULL, 0, params, pledgedSrcSize); +} + +size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel) +{ + ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, 0); + return ZSTD_initCStream_internal(zcs, NULL, 0, params, 0); +} + +size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs) +{ + if (zcs==NULL) return 0; /* support sizeof on NULL */ + return sizeof(*zcs) + ZSTD_sizeof_CCtx(zcs->cctx) + ZSTD_sizeof_CDict(zcs->cdictLocal) + zcs->outBuffSize + zcs->inBuffSize; +} + +/*====== Compression ======*/ + +typedef enum { zsf_gather, zsf_flush, zsf_end } ZSTD_flush_e; + +MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + size_t const length = MIN(dstCapacity, srcSize); + memcpy(dst, src, length); + return length; +} + +static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, + void* dst, size_t* dstCapacityPtr, + const void* src, size_t* srcSizePtr, + ZSTD_flush_e const flush) +{ + U32 someMoreWork = 1; + const char* const istart = (const char*)src; + const char* const iend = istart + *srcSizePtr; + const char* ip = istart; + char* const ostart = (char*)dst; + char* const oend = ostart + *dstCapacityPtr; + char* op = ostart; + + while (someMoreWork) { + switch(zcs->stage) + { + case zcss_init: return ERROR(init_missing); /* call ZBUFF_compressInit() first ! */ + + case zcss_load: + /* complete inBuffer */ + { size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; + size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend-ip); + zcs->inBuffPos += loaded; + ip += loaded; + if ( (zcs->inBuffPos==zcs->inToCompress) || (!flush && (toLoad != loaded)) ) { + someMoreWork = 0; break; /* not enough input to get a full block : stop there, wait for more */ + } } + /* compress current block (note : this stage cannot be stopped in the middle) */ + { void* cDst; + size_t cSize; + size_t const iSize = zcs->inBuffPos - zcs->inToCompress; + size_t oSize = oend-op; + if (oSize >= ZSTD_compressBound(iSize)) + cDst = op; /* compress directly into output buffer (avoid flush stage) */ + else + cDst = zcs->outBuff, oSize = zcs->outBuffSize; + cSize = (flush == zsf_end) ? 
+ ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize) : + ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize); + if (ZSTD_isError(cSize)) return cSize; + if (flush == zsf_end) zcs->frameEnded = 1; + /* prepare next block */ + zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize; + if (zcs->inBuffTarget > zcs->inBuffSize) + zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; /* note : inBuffSize >= blockSize */ + zcs->inToCompress = zcs->inBuffPos; + if (cDst == op) { op += cSize; break; } /* no need to flush */ + zcs->outBuffContentSize = cSize; + zcs->outBuffFlushedSize = 0; + zcs->stage = zcss_flush; /* pass-through to flush stage */ + } + + case zcss_flush: + { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; + size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush); + op += flushed; + zcs->outBuffFlushedSize += flushed; + if (toFlush!=flushed) { someMoreWork = 0; break; } /* dst too small to store flushed data : stop there */ + zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; + zcs->stage = zcss_load; + break; + } + + case zcss_final: + someMoreWork = 0; /* do nothing */ + break; + + default: + return ERROR(GENERIC); /* impossible */ + } + } + + *srcSizePtr = ip - istart; + *dstCapacityPtr = op - ostart; + if (zcs->frameEnded) return 0; + { size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos; + if (hintInSize==0) hintInSize = zcs->blockSize; + return hintInSize; + } +} + +size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input) +{ + size_t sizeRead = input->size - input->pos; + size_t sizeWritten = output->size - output->pos; + size_t const result = ZSTD_compressStream_generic(zcs, + (char*)(output->dst) + output->pos, &sizeWritten, + (const char*)(input->src) + input->pos, &sizeRead, zsf_gather); + input->pos += sizeRead; + output->pos += sizeWritten; + return result; +} + + +/*====== Finalize ======*/ + +/*! ZSTD_flushStream() : +* @return : amount of data remaining to flush */ +size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) +{ + size_t srcSize = 0; + size_t sizeWritten = output->size - output->pos; + size_t const result = ZSTD_compressStream_generic(zcs, + (char*)(output->dst) + output->pos, &sizeWritten, + &srcSize, &srcSize, /* use a valid src address instead of NULL */ + zsf_flush); + output->pos += sizeWritten; + if (ZSTD_isError(result)) return result; + return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */ +} + + +size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) +{ + BYTE* const ostart = (BYTE*)(output->dst) + output->pos; + BYTE* const oend = (BYTE*)(output->dst) + output->size; + BYTE* op = ostart; + + if (zcs->stage != zcss_final) { + /* flush whatever remains */ + size_t srcSize = 0; + size_t sizeWritten = output->size - output->pos; + size_t const notEnded = ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, + &srcSize /* use a valid src address instead of NULL */, &srcSize, zsf_end); + size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; + op += sizeWritten; + if (remainingToFlush) { + output->pos += sizeWritten; + return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */ + (zcs->checksum * 4); + } + /* create epilogue */ + zcs->stage = zcss_final; + zcs->outBuffContentSize = !notEnded ? 
0 : + /* write epilogue, including final empty block, into outBuff */ + ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL, 0); + if (ZSTD_isError(zcs->outBuffContentSize)) return zcs->outBuffContentSize; + } + + /* flush epilogue */ + { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; + size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush); + op += flushed; + zcs->outBuffFlushedSize += flushed; + output->pos += op-ostart; + if (toFlush==flushed) zcs->stage = zcss_init; /* end reached */ + return toFlush - flushed; + } +} + + + +/*-===== Pre-defined compression levels =====-*/ + +#define ZSTD_DEFAULT_CLEVEL 1 +#define ZSTD_MAX_CLEVEL 22 +int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; } + +static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = { +{ /* "default" */ + /* W, C, H, S, L, TL, strat */ + { 18, 12, 12, 1, 7, 16, ZSTD_fast }, /* level 0 - never used */ + { 19, 13, 14, 1, 7, 16, ZSTD_fast }, /* level 1 */ + { 19, 15, 16, 1, 6, 16, ZSTD_fast }, /* level 2 */ + { 20, 16, 17, 1, 5, 16, ZSTD_dfast }, /* level 3.*/ + { 20, 18, 18, 1, 5, 16, ZSTD_dfast }, /* level 4.*/ + { 20, 15, 18, 3, 5, 16, ZSTD_greedy }, /* level 5 */ + { 21, 16, 19, 2, 5, 16, ZSTD_lazy }, /* level 6 */ + { 21, 17, 20, 3, 5, 16, ZSTD_lazy }, /* level 7 */ + { 21, 18, 20, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */ + { 21, 20, 20, 3, 5, 16, ZSTD_lazy2 }, /* level 9 */ + { 21, 19, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */ + { 22, 20, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 11 */ + { 22, 20, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */ + { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 13 */ + { 22, 21, 22, 6, 5, 16, ZSTD_lazy2 }, /* level 14 */ + { 22, 21, 21, 5, 5, 16, ZSTD_btlazy2 }, /* level 15 */ + { 23, 22, 22, 5, 5, 16, ZSTD_btlazy2 }, /* level 16 */ + { 23, 21, 22, 4, 5, 24, ZSTD_btopt }, /* level 17 */ + { 23, 22, 22, 5, 4, 32, ZSTD_btopt }, /* level 18 */ + { 23, 23, 22, 6, 3, 48, ZSTD_btopt }, /* level 19 */ + { 25, 25, 23, 7, 3, 64, ZSTD_btopt2 }, /* level 20 */ + { 26, 26, 23, 7, 3,256, ZSTD_btopt2 }, /* level 21 */ + { 27, 27, 25, 9, 3,512, ZSTD_btopt2 }, /* level 22 */ +}, +{ /* for srcSize <= 256 KB */ + /* W, C, H, S, L, T, strat */ + { 0, 0, 0, 0, 0, 0, ZSTD_fast }, /* level 0 - not used */ + { 18, 13, 14, 1, 6, 8, ZSTD_fast }, /* level 1 */ + { 18, 14, 13, 1, 5, 8, ZSTD_dfast }, /* level 2 */ + { 18, 16, 15, 1, 5, 8, ZSTD_dfast }, /* level 3 */ + { 18, 15, 17, 1, 5, 8, ZSTD_greedy }, /* level 4.*/ + { 18, 16, 17, 4, 5, 8, ZSTD_greedy }, /* level 5.*/ + { 18, 16, 17, 3, 5, 8, ZSTD_lazy }, /* level 6.*/ + { 18, 17, 17, 4, 4, 8, ZSTD_lazy }, /* level 7 */ + { 18, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ + { 18, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ + { 18, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ + { 18, 18, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 11.*/ + { 18, 18, 17, 7, 4, 8, ZSTD_lazy2 }, /* level 12.*/ + { 18, 19, 17, 6, 4, 8, ZSTD_btlazy2 }, /* level 13 */ + { 18, 18, 18, 4, 4, 16, ZSTD_btopt }, /* level 14.*/ + { 18, 18, 18, 4, 3, 16, ZSTD_btopt }, /* level 15.*/ + { 18, 19, 18, 6, 3, 32, ZSTD_btopt }, /* level 16.*/ + { 18, 19, 18, 8, 3, 64, ZSTD_btopt }, /* level 17.*/ + { 18, 19, 18, 9, 3,128, ZSTD_btopt }, /* level 18.*/ + { 18, 19, 18, 10, 3,256, ZSTD_btopt }, /* level 19.*/ + { 18, 19, 18, 11, 3,512, ZSTD_btopt2 }, /* level 20.*/ + { 18, 19, 18, 12, 3,512, ZSTD_btopt2 }, /* level 21.*/ + { 18, 19, 18, 13, 3,512, ZSTD_btopt2 }, /* level 22.*/ +}, +{ /* for srcSize <= 128 KB 
*/ + /* W, C, H, S, L, T, strat */ + { 17, 12, 12, 1, 7, 8, ZSTD_fast }, /* level 0 - not used */ + { 17, 12, 13, 1, 6, 8, ZSTD_fast }, /* level 1 */ + { 17, 13, 16, 1, 5, 8, ZSTD_fast }, /* level 2 */ + { 17, 16, 16, 2, 5, 8, ZSTD_dfast }, /* level 3 */ + { 17, 13, 15, 3, 4, 8, ZSTD_greedy }, /* level 4 */ + { 17, 15, 17, 4, 4, 8, ZSTD_greedy }, /* level 5 */ + { 17, 16, 17, 3, 4, 8, ZSTD_lazy }, /* level 6 */ + { 17, 15, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 7 */ + { 17, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ + { 17, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ + { 17, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ + { 17, 17, 17, 7, 4, 8, ZSTD_lazy2 }, /* level 11 */ + { 17, 17, 17, 8, 4, 8, ZSTD_lazy2 }, /* level 12 */ + { 17, 18, 17, 6, 4, 8, ZSTD_btlazy2 }, /* level 13.*/ + { 17, 17, 17, 7, 3, 8, ZSTD_btopt }, /* level 14.*/ + { 17, 17, 17, 7, 3, 16, ZSTD_btopt }, /* level 15.*/ + { 17, 18, 17, 7, 3, 32, ZSTD_btopt }, /* level 16.*/ + { 17, 18, 17, 7, 3, 64, ZSTD_btopt }, /* level 17.*/ + { 17, 18, 17, 7, 3,256, ZSTD_btopt }, /* level 18.*/ + { 17, 18, 17, 8, 3,256, ZSTD_btopt }, /* level 19.*/ + { 17, 18, 17, 9, 3,256, ZSTD_btopt2 }, /* level 20.*/ + { 17, 18, 17, 10, 3,256, ZSTD_btopt2 }, /* level 21.*/ + { 17, 18, 17, 11, 3,512, ZSTD_btopt2 }, /* level 22.*/ +}, +{ /* for srcSize <= 16 KB */ + /* W, C, H, S, L, T, strat */ + { 14, 12, 12, 1, 7, 6, ZSTD_fast }, /* level 0 - not used */ + { 14, 14, 14, 1, 6, 6, ZSTD_fast }, /* level 1 */ + { 14, 14, 14, 1, 4, 6, ZSTD_fast }, /* level 2 */ + { 14, 14, 14, 1, 4, 6, ZSTD_dfast }, /* level 3.*/ + { 14, 14, 14, 4, 4, 6, ZSTD_greedy }, /* level 4.*/ + { 14, 14, 14, 3, 4, 6, ZSTD_lazy }, /* level 5.*/ + { 14, 14, 14, 4, 4, 6, ZSTD_lazy2 }, /* level 6 */ + { 14, 14, 14, 5, 4, 6, ZSTD_lazy2 }, /* level 7 */ + { 14, 14, 14, 6, 4, 6, ZSTD_lazy2 }, /* level 8.*/ + { 14, 15, 14, 6, 4, 6, ZSTD_btlazy2 }, /* level 9.*/ + { 14, 15, 14, 3, 3, 6, ZSTD_btopt }, /* level 10.*/ + { 14, 15, 14, 6, 3, 8, ZSTD_btopt }, /* level 11.*/ + { 14, 15, 14, 6, 3, 16, ZSTD_btopt }, /* level 12.*/ + { 14, 15, 14, 6, 3, 24, ZSTD_btopt }, /* level 13.*/ + { 14, 15, 15, 6, 3, 48, ZSTD_btopt }, /* level 14.*/ + { 14, 15, 15, 6, 3, 64, ZSTD_btopt }, /* level 15.*/ + { 14, 15, 15, 6, 3, 96, ZSTD_btopt }, /* level 16.*/ + { 14, 15, 15, 6, 3,128, ZSTD_btopt }, /* level 17.*/ + { 14, 15, 15, 6, 3,256, ZSTD_btopt }, /* level 18.*/ + { 14, 15, 15, 7, 3,256, ZSTD_btopt }, /* level 19.*/ + { 14, 15, 15, 8, 3,256, ZSTD_btopt2 }, /* level 20.*/ + { 14, 15, 15, 9, 3,256, ZSTD_btopt2 }, /* level 21.*/ + { 14, 15, 15, 10, 3,256, ZSTD_btopt2 }, /* level 22.*/ +}, +}; + +/*! ZSTD_getCParams() : +* @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`. +* Size values are optional, provide 0 if not known or unused */ +ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize) +{ + ZSTD_compressionParameters cp; + size_t const addedSize = srcSize ? 0 : 500; + U64 const rSize = srcSize+dictSize ? 
srcSize+dictSize+addedSize : (U64)-1; + U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */ + if (compressionLevel <= 0) compressionLevel = ZSTD_DEFAULT_CLEVEL; /* 0 == default; no negative compressionLevel yet */ + if (compressionLevel > ZSTD_MAX_CLEVEL) compressionLevel = ZSTD_MAX_CLEVEL; + cp = ZSTD_defaultCParameters[tableID][compressionLevel]; + if (MEM_32bits()) { /* auto-correction, for 32-bits mode */ + if (cp.windowLog > ZSTD_WINDOWLOG_MAX) cp.windowLog = ZSTD_WINDOWLOG_MAX; + if (cp.chainLog > ZSTD_CHAINLOG_MAX) cp.chainLog = ZSTD_CHAINLOG_MAX; + if (cp.hashLog > ZSTD_HASHLOG_MAX) cp.hashLog = ZSTD_HASHLOG_MAX; + } + cp = ZSTD_adjustCParams(cp, srcSize, dictSize); + return cp; +} + +/*! ZSTD_getParams() : +* same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`). +* All fields of `ZSTD_frameParameters` are set to default (0) */ +ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize) { + ZSTD_parameters params; + ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSize, dictSize); + memset(&params, 0, sizeof(params)); + params.cParams = cParams; + return params; +} diff --git a/thirdparty/zstd/compress/zstd_opt.h b/thirdparty/zstd/compress/zstd_opt.h new file mode 100644 index 0000000000..5437611912 --- /dev/null +++ b/thirdparty/zstd/compress/zstd_opt.h @@ -0,0 +1,921 @@ +/** + * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ + + +/* Note : this file is intended to be included within zstd_compress.c */ + + +#ifndef ZSTD_OPT_H_91842398743 +#define ZSTD_OPT_H_91842398743 + + +#define ZSTD_LITFREQ_ADD 2 +#define ZSTD_FREQ_DIV 4 +#define ZSTD_MAX_PRICE (1<<30) + +/*-************************************* +* Price functions for optimal parser +***************************************/ +FORCE_INLINE void ZSTD_setLog2Prices(seqStore_t* ssPtr) +{ + ssPtr->log2matchLengthSum = ZSTD_highbit32(ssPtr->matchLengthSum+1); + ssPtr->log2litLengthSum = ZSTD_highbit32(ssPtr->litLengthSum+1); + ssPtr->log2litSum = ZSTD_highbit32(ssPtr->litSum+1); + ssPtr->log2offCodeSum = ZSTD_highbit32(ssPtr->offCodeSum+1); + ssPtr->factor = 1 + ((ssPtr->litSum>>5) / ssPtr->litLengthSum) + ((ssPtr->litSum<<1) / (ssPtr->litSum + ssPtr->matchSum)); +} + + +MEM_STATIC void ZSTD_rescaleFreqs(seqStore_t* ssPtr, const BYTE* src, size_t srcSize) +{ + unsigned u; + + ssPtr->cachedLiterals = NULL; + ssPtr->cachedPrice = ssPtr->cachedLitLength = 0; + ssPtr->staticPrices = 0; + + if (ssPtr->litLengthSum == 0) { + if (srcSize <= 1024) ssPtr->staticPrices = 1; + + for (u=0; u<=MaxLit; u++) + ssPtr->litFreq[u] = 0; + for (u=0; u<srcSize; u++) + ssPtr->litFreq[src[u]]++; + + ssPtr->litSum = 0; + ssPtr->litLengthSum = MaxLL+1; + ssPtr->matchLengthSum = MaxML+1; + ssPtr->offCodeSum = (MaxOff+1); + ssPtr->matchSum = (ZSTD_LITFREQ_ADD<<Litbits); + + for (u=0; u<=MaxLit; u++) { + ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u]>>ZSTD_FREQ_DIV); + ssPtr->litSum += ssPtr->litFreq[u]; + } + for (u=0; u<=MaxLL; u++) + ssPtr->litLengthFreq[u] = 1; + for (u=0; u<=MaxML; u++) + ssPtr->matchLengthFreq[u] = 1; + for (u=0; u<=MaxOff; u++) + ssPtr->offCodeFreq[u] = 1; + } else { + ssPtr->matchLengthSum = 0; + ssPtr->litLengthSum = 0; + ssPtr->offCodeSum = 0; + ssPtr->matchSum = 0; + ssPtr->litSum = 0; + + for (u=0; u<=MaxLit; u++) { + ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u]>>(ZSTD_FREQ_DIV+1)); + ssPtr->litSum += ssPtr->litFreq[u]; + } + for (u=0; u<=MaxLL; u++) { + ssPtr->litLengthFreq[u] = 1 + (ssPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1)); + ssPtr->litLengthSum += ssPtr->litLengthFreq[u]; + } + for (u=0; u<=MaxML; u++) { + ssPtr->matchLengthFreq[u] = 1 + (ssPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV); + ssPtr->matchLengthSum += ssPtr->matchLengthFreq[u]; + ssPtr->matchSum += ssPtr->matchLengthFreq[u] * (u + 3); + } + ssPtr->matchSum *= ZSTD_LITFREQ_ADD; + for (u=0; u<=MaxOff; u++) { + ssPtr->offCodeFreq[u] = 1 + (ssPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV); + ssPtr->offCodeSum += ssPtr->offCodeFreq[u]; + } + } + + ZSTD_setLog2Prices(ssPtr); +} + + +FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t* ssPtr, U32 litLength, const BYTE* literals) +{ + U32 price, u; + + if (ssPtr->staticPrices) + return ZSTD_highbit32((U32)litLength+1) + (litLength*6); + + if (litLength == 0) + return ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[0]+1); + + /* literals */ + if (ssPtr->cachedLiterals == literals) { + U32 const additional = litLength - ssPtr->cachedLitLength; + const BYTE* literals2 = ssPtr->cachedLiterals + ssPtr->cachedLitLength; + price = ssPtr->cachedPrice + additional * ssPtr->log2litSum; + for (u=0; u < additional; u++) + price -= ZSTD_highbit32(ssPtr->litFreq[literals2[u]]+1); + ssPtr->cachedPrice = price; + ssPtr->cachedLitLength = litLength; + } else { + price = litLength * ssPtr->log2litSum; + for (u=0; u < litLength; u++) + price -= ZSTD_highbit32(ssPtr->litFreq[literals[u]]+1); + + if (litLength >= 12) { + ssPtr->cachedLiterals = literals; + ssPtr->cachedPrice = 
price; + ssPtr->cachedLitLength = litLength; + } + } + + /* literal Length */ + { const BYTE LL_deltaCode = 19; + const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; + price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode]+1); + } + + return price; +} + + +FORCE_INLINE U32 ZSTD_getPrice(seqStore_t* seqStorePtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength, const int ultra) +{ + /* offset */ + U32 price; + BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1); + + if (seqStorePtr->staticPrices) + return ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + ZSTD_highbit32((U32)matchLength+1) + 16 + offCode; + + price = offCode + seqStorePtr->log2offCodeSum - ZSTD_highbit32(seqStorePtr->offCodeFreq[offCode]+1); + if (!ultra && offCode >= 20) price += (offCode-19)*2; + + /* match Length */ + { const BYTE ML_deltaCode = 36; + const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; + price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode]+1); + } + + return price + ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + seqStorePtr->factor; +} + + +MEM_STATIC void ZSTD_updatePrice(seqStore_t* seqStorePtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength) +{ + U32 u; + + /* literals */ + seqStorePtr->litSum += litLength*ZSTD_LITFREQ_ADD; + for (u=0; u < litLength; u++) + seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD; + + /* literal Length */ + { const BYTE LL_deltaCode = 19; + const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; + seqStorePtr->litLengthFreq[llCode]++; + seqStorePtr->litLengthSum++; + } + + /* match offset */ + { BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1); + seqStorePtr->offCodeSum++; + seqStorePtr->offCodeFreq[offCode]++; + } + + /* match Length */ + { const BYTE ML_deltaCode = 36; + const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; + seqStorePtr->matchLengthFreq[mlCode]++; + seqStorePtr->matchLengthSum++; + } + + ZSTD_setLog2Prices(seqStorePtr); +} + + +#define SET_PRICE(pos, mlen_, offset_, litlen_, price_) \ + { \ + while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } \ + opt[pos].mlen = mlen_; \ + opt[pos].off = offset_; \ + opt[pos].litlen = litlen_; \ + opt[pos].price = price_; \ + } + + + +/* Update hashTable3 up to ip (excluded) + Assumption : always within prefix (i.e. 
not within extDict) */ +FORCE_INLINE +U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_CCtx* zc, const BYTE* ip) +{ + U32* const hashTable3 = zc->hashTable3; + U32 const hashLog3 = zc->hashLog3; + const BYTE* const base = zc->base; + U32 idx = zc->nextToUpdate3; + const U32 target = zc->nextToUpdate3 = (U32)(ip - base); + const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3); + + while(idx < target) { + hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx; + idx++; + } + + return hashTable3[hash3]; +} + + +/*-************************************* +* Binary Tree search +***************************************/ +static U32 ZSTD_insertBtAndGetAllMatches ( + ZSTD_CCtx* zc, + const BYTE* const ip, const BYTE* const iLimit, + U32 nbCompares, const U32 mls, + U32 extDict, ZSTD_match_t* matches, const U32 minMatchLen) +{ + const BYTE* const base = zc->base; + const U32 current = (U32)(ip-base); + const U32 hashLog = zc->params.cParams.hashLog; + const size_t h = ZSTD_hashPtr(ip, hashLog, mls); + U32* const hashTable = zc->hashTable; + U32 matchIndex = hashTable[h]; + U32* const bt = zc->chainTable; + const U32 btLog = zc->params.cParams.chainLog - 1; + const U32 btMask= (1U << btLog) - 1; + size_t commonLengthSmaller=0, commonLengthLarger=0; + const BYTE* const dictBase = zc->dictBase; + const U32 dictLimit = zc->dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const U32 btLow = btMask >= current ? 0 : current - btMask; + const U32 windowLow = zc->lowLimit; + U32* smallerPtr = bt + 2*(current&btMask); + U32* largerPtr = bt + 2*(current&btMask) + 1; + U32 matchEndIdx = current+8; + U32 dummy32; /* to be nullified at the end */ + U32 mnum = 0; + + const U32 minMatch = (mls == 3) ? 3 : 4; + size_t bestLength = minMatchLen-1; + + if (minMatch == 3) { /* HC3 match finder */ + U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3 (zc, ip); + if (matchIndex3>windowLow && (current - matchIndex3 < (1<<18))) { + const BYTE* match; + size_t currentMl=0; + if ((!extDict) || matchIndex3 >= dictLimit) { + match = base + matchIndex3; + if (match[bestLength] == ip[bestLength]) currentMl = ZSTD_count(ip, match, iLimit); + } else { + match = dictBase + matchIndex3; + if (MEM_readMINMATCH(match, MINMATCH) == MEM_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */ + currentMl = ZSTD_count_2segments(ip+MINMATCH, match+MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH; + } + + /* save best solution */ + if (currentMl > bestLength) { + bestLength = currentMl; + matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex3; + matches[mnum].len = (U32)currentMl; + mnum++; + if (currentMl > ZSTD_OPT_NUM) goto update; + if (ip+currentMl == iLimit) goto update; /* best possible, and avoid read overflow*/ + } + } + } + + hashTable[h] = current; /* Update Hash Table */ + + while (nbCompares-- && (matchIndex > windowLow)) { + U32* nextPtr = bt + 2*(matchIndex & btMask); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + const BYTE* match; + + if ((!extDict) || (matchIndex+matchLength >= dictLimit)) { + match = base + matchIndex; + if (match[matchLength] == ip[matchLength]) { + matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iLimit) +1; + } + } else { + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart); + if (matchIndex+matchLength >= dictLimit) + match = base 
+ matchIndex; /* to prepare for next usage of match[matchLength] */ + } + + if (matchLength > bestLength) { + if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; + bestLength = matchLength; + matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex; + matches[mnum].len = (U32)matchLength; + mnum++; + if (matchLength > ZSTD_OPT_NUM) break; + if (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */ + break; /* drop, to guarantee consistency (miss a little bit of compression) */ + } + + if (match[matchLength] < ip[matchLength]) { + /* match is smaller than current */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ + matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ + } else { + /* match is larger than current */ + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } } + + *smallerPtr = *largerPtr = 0; + +update: + zc->nextToUpdate = (matchEndIdx > current + 8) ? matchEndIdx - 8 : current+1; + return mnum; +} + + +/** Tree updater, providing best match */ +static U32 ZSTD_BtGetAllMatches ( + ZSTD_CCtx* zc, + const BYTE* const ip, const BYTE* const iLimit, + const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen) +{ + if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ + ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls); + return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen); +} + + +static U32 ZSTD_BtGetAllMatches_selectMLS ( + ZSTD_CCtx* zc, /* Index table will be updated */ + const BYTE* ip, const BYTE* const iHighLimit, + const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen) +{ + switch(matchLengthSearch) + { + case 3 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen); + default : + case 4 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen); + case 5 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen); + case 7 : + case 6 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen); + } +} + +/** Tree updater, providing best match */ +static U32 ZSTD_BtGetAllMatches_extDict ( + ZSTD_CCtx* zc, + const BYTE* const ip, const BYTE* const iLimit, + const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen) +{ + if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ + ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls); + return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen); +} + + +static U32 ZSTD_BtGetAllMatches_selectMLS_extDict ( + ZSTD_CCtx* zc, /* Index table will be updated */ + const BYTE* ip, const BYTE* const iHighLimit, + const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen) +{ + switch(matchLengthSearch) + { + case 3 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen); + default : + case 4 : return 
ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen); + case 5 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen); + case 7 : + case 6 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen); + } +} + + +/*-******************************* +* Optimal parser +*********************************/ +FORCE_INLINE +void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx, + const void* src, size_t srcSize, const int ultra) +{ + seqStore_t* seqStorePtr = &(ctx->seqStore); + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + const BYTE* const base = ctx->base; + const BYTE* const prefixStart = base + ctx->dictLimit; + + const U32 maxSearches = 1U << ctx->params.cParams.searchLog; + const U32 sufficient_len = ctx->params.cParams.targetLength; + const U32 mls = ctx->params.cParams.searchLength; + const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4; + + ZSTD_optimal_t* opt = seqStorePtr->priceTable; + ZSTD_match_t* matches = seqStorePtr->matchTable; + const BYTE* inr; + U32 offset, rep[ZSTD_REP_NUM]; + + /* init */ + ctx->nextToUpdate3 = ctx->nextToUpdate; + ZSTD_rescaleFreqs(seqStorePtr, (const BYTE*)src, srcSize); + ip += (ip==prefixStart); + { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=ctx->rep[i]; } + + /* Match Loop */ + while (ip < ilimit) { + U32 cur, match_num, last_pos, litlen, price; + U32 u, mlen, best_mlen, best_off, litLength; + memset(opt, 0, sizeof(ZSTD_optimal_t)); + last_pos = 0; + litlen = (U32)(ip - anchor); + + /* check repCode */ + { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor); + for (i=(ip == anchor); i<last_i; i++) { + const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i]; + if ( (repCur > 0) && (repCur < (S32)(ip-prefixStart)) + && (MEM_readMINMATCH(ip, minMatch) == MEM_readMINMATCH(ip - repCur, minMatch))) { + mlen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repCur, iend) + minMatch; + if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) { + best_mlen = mlen; best_off = i; cur = 0; last_pos = 1; + goto _storeSequence; + } + best_off = i - (ip == anchor); + do { + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); + if (mlen > last_pos || price < opt[mlen].price) + SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */ + mlen--; + } while (mlen >= minMatch); + } } } + + match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch); + + if (!last_pos && !match_num) { ip++; continue; } + + if (match_num && (matches[match_num-1].len > sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) { + best_mlen = matches[match_num-1].len; + best_off = matches[match_num-1].off; + cur = 0; + last_pos = 1; + goto _storeSequence; + } + + /* set prices using matches at position = 0 */ + best_mlen = (last_pos) ? last_pos : minMatch; + for (u = 0; u < match_num; u++) { + mlen = (u>0) ? 
matches[u-1].len+1 : best_mlen; + best_mlen = matches[u].len; + while (mlen <= best_mlen) { + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); + if (mlen > last_pos || price < opt[mlen].price) + SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */ + mlen++; + } } + + if (last_pos < minMatch) { ip++; continue; } + + /* initialize opt[0] */ + { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; } + opt[0].mlen = 1; + opt[0].litlen = litlen; + + /* check further positions */ + for (cur = 1; cur <= last_pos; cur++) { + inr = ip + cur; + + if (opt[cur-1].mlen == 1) { + litlen = opt[cur-1].litlen + 1; + if (cur > litlen) { + price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-litlen); + } else + price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor); + } else { + litlen = 1; + price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-1); + } + + if (cur > last_pos || price <= opt[cur].price) + SET_PRICE(cur, 1, 0, litlen, price); + + if (cur == last_pos) break; + + if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */ + continue; + + mlen = opt[cur].mlen; + if (opt[cur].off > ZSTD_REP_MOVE_OPT) { + opt[cur].rep[2] = opt[cur-mlen].rep[1]; + opt[cur].rep[1] = opt[cur-mlen].rep[0]; + opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT; + } else { + opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2]; + opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1]; + opt[cur].rep[0] = ((opt[cur].off==ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]); + } + + best_mlen = minMatch; + { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); + for (i=(opt[cur].mlen != 1); i<last_i; i++) { /* check rep */ + const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i]; + if ( (repCur > 0) && (repCur < (S32)(inr-prefixStart)) + && (MEM_readMINMATCH(inr, minMatch) == MEM_readMINMATCH(inr - repCur, minMatch))) { + mlen = (U32)ZSTD_count(inr+minMatch, inr+minMatch - repCur, iend) + minMatch; + + if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) { + best_mlen = mlen; best_off = i; last_pos = cur + 1; + goto _storeSequence; + } + + best_off = i - (opt[cur].mlen != 1); + if (mlen > best_mlen) best_mlen = mlen; + + do { + if (opt[cur].mlen == 1) { + litlen = opt[cur].litlen; + if (cur > litlen) { + price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra); + } else + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); + } else { + litlen = 0; + price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra); + } + + if (cur + mlen > last_pos || price <= opt[cur + mlen].price) + SET_PRICE(cur + mlen, mlen, i, litlen, price); + mlen--; + } while (mlen >= minMatch); + } } } + + match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen); + + if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) { + best_mlen = matches[match_num-1].len; + best_off = matches[match_num-1].off; + last_pos = cur + 1; + goto _storeSequence; + } + + /* set prices using matches at position = cur */ + for (u = 0; u < match_num; u++) { + mlen = (u>0) ? 
matches[u-1].len+1 : best_mlen; + best_mlen = matches[u].len; + + while (mlen <= best_mlen) { + if (opt[cur].mlen == 1) { + litlen = opt[cur].litlen; + if (cur > litlen) + price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra); + else + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); + } else { + litlen = 0; + price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra); + } + + if (cur + mlen > last_pos || (price < opt[cur + mlen].price)) + SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price); + + mlen++; + } } } + + best_mlen = opt[last_pos].mlen; + best_off = opt[last_pos].off; + cur = last_pos - best_mlen; + + /* store sequence */ +_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */ + opt[0].mlen = 1; + + while (1) { + mlen = opt[cur].mlen; + offset = opt[cur].off; + opt[cur].mlen = best_mlen; + opt[cur].off = best_off; + best_mlen = mlen; + best_off = offset; + if (mlen > cur) break; + cur -= mlen; + } + + for (u = 0; u <= last_pos;) { + u += opt[u].mlen; + } + + for (cur=0; cur < last_pos; ) { + mlen = opt[cur].mlen; + if (mlen == 1) { ip++; cur++; continue; } + offset = opt[cur].off; + cur += mlen; + litLength = (U32)(ip - anchor); + + if (offset > ZSTD_REP_MOVE_OPT) { + rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = offset - ZSTD_REP_MOVE_OPT; + offset--; + } else { + if (offset != 0) { + best_off = (offset==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]); + if (offset != 1) rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = best_off; + } + if (litLength==0) offset--; + } + + ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH); + ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH); + anchor = ip = ip + mlen; + } } /* for (cur=0; cur < last_pos; ) */ + + /* Save reps for next block */ + { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; } + + /* Last Literals */ + { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } +} + + +FORCE_INLINE +void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx, + const void* src, size_t srcSize, const int ultra) +{ + seqStore_t* seqStorePtr = &(ctx->seqStore); + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + const BYTE* const base = ctx->base; + const U32 lowestIndex = ctx->lowLimit; + const U32 dictLimit = ctx->dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* const dictBase = ctx->dictBase; + const BYTE* const dictEnd = dictBase + dictLimit; + + const U32 maxSearches = 1U << ctx->params.cParams.searchLog; + const U32 sufficient_len = ctx->params.cParams.targetLength; + const U32 mls = ctx->params.cParams.searchLength; + const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 
3 : 4; + + ZSTD_optimal_t* opt = seqStorePtr->priceTable; + ZSTD_match_t* matches = seqStorePtr->matchTable; + const BYTE* inr; + + /* init */ + U32 offset, rep[ZSTD_REP_NUM]; + { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=ctx->rep[i]; } + + ctx->nextToUpdate3 = ctx->nextToUpdate; + ZSTD_rescaleFreqs(seqStorePtr, (const BYTE*)src, srcSize); + ip += (ip==prefixStart); + + /* Match Loop */ + while (ip < ilimit) { + U32 cur, match_num, last_pos, litlen, price; + U32 u, mlen, best_mlen, best_off, litLength; + U32 current = (U32)(ip-base); + memset(opt, 0, sizeof(ZSTD_optimal_t)); + last_pos = 0; + opt[0].litlen = (U32)(ip - anchor); + + /* check repCode */ + { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor); + for (i = (ip==anchor); i<last_i; i++) { + const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i]; + const U32 repIndex = (U32)(current - repCur); + const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* const repMatch = repBase + repIndex; + if ( (repCur > 0 && repCur <= (S32)current) + && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */ + && (MEM_readMINMATCH(ip, minMatch) == MEM_readMINMATCH(repMatch, minMatch)) ) { + /* repcode detected we should take it */ + const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; + mlen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch; + + if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) { + best_mlen = mlen; best_off = i; cur = 0; last_pos = 1; + goto _storeSequence; + } + + best_off = i - (ip==anchor); + litlen = opt[0].litlen; + do { + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); + if (mlen > last_pos || price < opt[mlen].price) + SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */ + mlen--; + } while (mlen >= minMatch); + } } } + + match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch); /* first search (depth 0) */ + + if (!last_pos && !match_num) { ip++; continue; } + + { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; } + opt[0].mlen = 1; + + if (match_num && (matches[match_num-1].len > sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) { + best_mlen = matches[match_num-1].len; + best_off = matches[match_num-1].off; + cur = 0; + last_pos = 1; + goto _storeSequence; + } + + best_mlen = (last_pos) ? last_pos : minMatch; + + /* set prices using matches at position = 0 */ + for (u = 0; u < match_num; u++) { + mlen = (u>0) ? 
matches[u-1].len+1 : best_mlen; + best_mlen = matches[u].len; + litlen = opt[0].litlen; + while (mlen <= best_mlen) { + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); + if (mlen > last_pos || price < opt[mlen].price) + SET_PRICE(mlen, mlen, matches[u].off, litlen, price); + mlen++; + } } + + if (last_pos < minMatch) { + ip++; continue; + } + + /* check further positions */ + for (cur = 1; cur <= last_pos; cur++) { + inr = ip + cur; + + if (opt[cur-1].mlen == 1) { + litlen = opt[cur-1].litlen + 1; + if (cur > litlen) { + price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-litlen); + } else + price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor); + } else { + litlen = 1; + price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-1); + } + + if (cur > last_pos || price <= opt[cur].price) + SET_PRICE(cur, 1, 0, litlen, price); + + if (cur == last_pos) break; + + if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */ + continue; + + mlen = opt[cur].mlen; + if (opt[cur].off > ZSTD_REP_MOVE_OPT) { + opt[cur].rep[2] = opt[cur-mlen].rep[1]; + opt[cur].rep[1] = opt[cur-mlen].rep[0]; + opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT; + } else { + opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2]; + opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1]; + opt[cur].rep[0] = ((opt[cur].off==ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]); + } + + best_mlen = minMatch; + { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); + for (i = (mlen != 1); i<last_i; i++) { + const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i]; + const U32 repIndex = (U32)(current+cur - repCur); + const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* const repMatch = repBase + repIndex; + if ( (repCur > 0 && repCur <= (S32)(current+cur)) + && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */ + && (MEM_readMINMATCH(inr, minMatch) == MEM_readMINMATCH(repMatch, minMatch)) ) { + /* repcode detected */ + const BYTE* const repEnd = repIndex < dictLimit ? 
dictEnd : iend; + mlen = (U32)ZSTD_count_2segments(inr+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch; + + if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) { + best_mlen = mlen; best_off = i; last_pos = cur + 1; + goto _storeSequence; + } + + best_off = i - (opt[cur].mlen != 1); + if (mlen > best_mlen) best_mlen = mlen; + + do { + if (opt[cur].mlen == 1) { + litlen = opt[cur].litlen; + if (cur > litlen) { + price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra); + } else + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); + } else { + litlen = 0; + price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra); + } + + if (cur + mlen > last_pos || price <= opt[cur + mlen].price) + SET_PRICE(cur + mlen, mlen, i, litlen, price); + mlen--; + } while (mlen >= minMatch); + } } } + + match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch); + + if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) { + best_mlen = matches[match_num-1].len; + best_off = matches[match_num-1].off; + last_pos = cur + 1; + goto _storeSequence; + } + + /* set prices using matches at position = cur */ + for (u = 0; u < match_num; u++) { + mlen = (u>0) ? matches[u-1].len+1 : best_mlen; + best_mlen = matches[u].len; + + while (mlen <= best_mlen) { + if (opt[cur].mlen == 1) { + litlen = opt[cur].litlen; + if (cur > litlen) + price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra); + else + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); + } else { + litlen = 0; + price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra); + } + + if (cur + mlen > last_pos || (price < opt[cur + mlen].price)) + SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price); + + mlen++; + } } } /* for (cur = 1; cur <= last_pos; cur++) */ + + best_mlen = opt[last_pos].mlen; + best_off = opt[last_pos].off; + cur = last_pos - best_mlen; + + /* store sequence */ +_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */ + opt[0].mlen = 1; + + while (1) { + mlen = opt[cur].mlen; + offset = opt[cur].off; + opt[cur].mlen = best_mlen; + opt[cur].off = best_off; + best_mlen = mlen; + best_off = offset; + if (mlen > cur) break; + cur -= mlen; + } + + for (u = 0; u <= last_pos; ) { + u += opt[u].mlen; + } + + for (cur=0; cur < last_pos; ) { + mlen = opt[cur].mlen; + if (mlen == 1) { ip++; cur++; continue; } + offset = opt[cur].off; + cur += mlen; + litLength = (U32)(ip - anchor); + + if (offset > ZSTD_REP_MOVE_OPT) { + rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = offset - ZSTD_REP_MOVE_OPT; + offset--; + } else { + if (offset != 0) { + best_off = (offset==ZSTD_REP_MOVE_OPT) ? 
(rep[0] - 1) : (rep[offset]); + if (offset != 1) rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = best_off; + } + + if (litLength==0) offset--; + } + + ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH); + ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH); + anchor = ip = ip + mlen; + } } /* for (cur=0; cur < last_pos; ) */ + + /* Save reps for next block */ + { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; } + + /* Last Literals */ + { size_t lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } +} + +#endif /* ZSTD_OPT_H_91842398743 */ diff --git a/thirdparty/zstd/compress/zstdmt_compress.c b/thirdparty/zstd/compress/zstdmt_compress.c new file mode 100644 index 0000000000..fc7f52a290 --- /dev/null +++ b/thirdparty/zstd/compress/zstdmt_compress.c @@ -0,0 +1,751 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + + +/* ====== Tuning parameters ====== */ +#define ZSTDMT_NBTHREADS_MAX 128 + + +/* ====== Compiler specifics ====== */ +#if defined(_MSC_VER) +# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ +#endif + + +/* ====== Dependencies ====== */ +#include <stdlib.h> /* malloc */ +#include <string.h> /* memcpy */ +#include "pool.h" /* threadpool */ +#include "threading.h" /* mutex */ +#include "zstd_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */ +#include "zstdmt_compress.h" + + +/* ====== Debug ====== */ +#if 0 + +# include <stdio.h> +# include <unistd.h> +# include <sys/times.h> + static unsigned g_debugLevel = 5; +# define DEBUGLOGRAW(l, ...) if (l<=g_debugLevel) { fprintf(stderr, __VA_ARGS__); } +# define DEBUGLOG(l, ...) if (l<=g_debugLevel) { fprintf(stderr, __FILE__ ": "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, " \n"); } + +# define DEBUG_PRINTHEX(l,p,n) { \ + unsigned debug_u; \ + for (debug_u=0; debug_u<(n); debug_u++) \ + DEBUGLOGRAW(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \ + DEBUGLOGRAW(l, " \n"); \ +} + +static unsigned long long GetCurrentClockTimeMicroseconds(void) +{ + static clock_t _ticksPerSecond = 0; + if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK); + + { struct tms junk; clock_t newTicks = (clock_t) times(&junk); + return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond); } +} + +#define MUTEX_WAIT_TIME_DLEVEL 5 +#define PTHREAD_MUTEX_LOCK(mutex) \ +if (g_debugLevel>=MUTEX_WAIT_TIME_DLEVEL) { \ + unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \ + pthread_mutex_lock(mutex); \ + { unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \ + unsigned long long const elapsedTime = (afterTime-beforeTime); \ + if (elapsedTime > 1000) { /* or whatever threshold you like; I'm using 1 millisecond here */ \ + DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \ + elapsedTime, #mutex); \ + } } \ +} else pthread_mutex_lock(mutex); + +#else + +# define DEBUGLOG(l, ...) 
{} /* disabled */ +# define PTHREAD_MUTEX_LOCK(m) pthread_mutex_lock(m) +# define DEBUG_PRINTHEX(l,p,n) {} + +#endif + + +/* ===== Buffer Pool ===== */ + +typedef struct buffer_s { + void* start; + size_t size; +} buffer_t; + +static const buffer_t g_nullBuffer = { NULL, 0 }; + +typedef struct ZSTDMT_bufferPool_s { + unsigned totalBuffers; + unsigned nbBuffers; + buffer_t bTable[1]; /* variable size */ +} ZSTDMT_bufferPool; + +static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbThreads) +{ + unsigned const maxNbBuffers = 2*nbThreads + 2; + ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)calloc(1, sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t)); + if (bufPool==NULL) return NULL; + bufPool->totalBuffers = maxNbBuffers; + bufPool->nbBuffers = 0; + return bufPool; +} + +static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool) +{ + unsigned u; + if (!bufPool) return; /* compatibility with free on NULL */ + for (u=0; u<bufPool->totalBuffers; u++) + free(bufPool->bTable[u].start); + free(bufPool); +} + +/* assumption : invocation from main thread only ! */ +static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* pool, size_t bSize) +{ + if (pool->nbBuffers) { /* try to use an existing buffer */ + buffer_t const buf = pool->bTable[--(pool->nbBuffers)]; + size_t const availBufferSize = buf.size; + if ((availBufferSize >= bSize) & (availBufferSize <= 10*bSize)) /* large enough, but not too much */ + return buf; + free(buf.start); /* size conditions not respected : scratch this buffer and create a new one */ + } + /* create new buffer */ + { buffer_t buffer; + void* const start = malloc(bSize); + if (start==NULL) bSize = 0; + buffer.start = start; /* note : start can be NULL if malloc fails ! */ + buffer.size = bSize; + return buffer; + } +} + +/* store buffer for later re-use, up to pool capacity */ +static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* pool, buffer_t buf) +{ + if (buf.start == NULL) return; /* release on NULL */ + if (pool->nbBuffers < pool->totalBuffers) { + pool->bTable[pool->nbBuffers++] = buf; /* store for later re-use */ + return; + } + /* Reached bufferPool capacity (should not happen) */ + free(buf.start); +} + + +/* ===== CCtx Pool ===== */ + +typedef struct { + unsigned totalCCtx; + unsigned availCCtx; + ZSTD_CCtx* cctx[1]; /* variable size */ +} ZSTDMT_CCtxPool; + +/* assumption : CCtxPool invocation only from main thread */ + +/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */ +static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool) +{ + unsigned u; + for (u=0; u<pool->totalCCtx; u++) + ZSTD_freeCCtx(pool->cctx[u]); /* note : compatible with free on NULL */ + free(pool); +} + +/* ZSTDMT_createCCtxPool() : + * implies nbThreads >= 1 , checked by caller ZSTDMT_createCCtx() */ +static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbThreads) +{ + ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) calloc(1, sizeof(ZSTDMT_CCtxPool) + (nbThreads-1)*sizeof(ZSTD_CCtx*)); + if (!cctxPool) return NULL; + cctxPool->totalCCtx = nbThreads; + cctxPool->availCCtx = 1; /* at least one cctx for single-thread mode */ + cctxPool->cctx[0] = ZSTD_createCCtx(); + if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; } + DEBUGLOG(1, "cctxPool created, with %u threads", nbThreads); + return cctxPool; +} + +static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* pool) +{ + if (pool->availCCtx) { + pool->availCCtx--; + return pool->cctx[pool->availCCtx]; + } + return ZSTD_createCCtx(); /* note : 
can be NULL, when creation fails ! */ +} + +static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx) +{ + if (cctx==NULL) return; /* compatibility with release on NULL */ + if (pool->availCCtx < pool->totalCCtx) + pool->cctx[pool->availCCtx++] = cctx; + else + /* pool overflow : should not happen, since totalCCtx==nbThreads */ + ZSTD_freeCCtx(cctx); +} + + +/* ===== Thread worker ===== */ + +typedef struct { + buffer_t buffer; + size_t filled; +} inBuff_t; + +typedef struct { + ZSTD_CCtx* cctx; + buffer_t src; + const void* srcStart; + size_t srcSize; + size_t dictSize; + buffer_t dstBuff; + size_t cSize; + size_t dstFlushed; + unsigned firstChunk; + unsigned lastChunk; + unsigned jobCompleted; + unsigned jobScanned; + pthread_mutex_t* jobCompleted_mutex; + pthread_cond_t* jobCompleted_cond; + ZSTD_parameters params; + ZSTD_CDict* cdict; + unsigned long long fullFrameSize; +} ZSTDMT_jobDescription; + +/* ZSTDMT_compressChunk() : POOL_function type */ +void ZSTDMT_compressChunk(void* jobDescription) +{ + ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription; + const void* const src = (const char*)job->srcStart + job->dictSize; + buffer_t const dstBuff = job->dstBuff; + DEBUGLOG(3, "job (first:%u) (last:%u) : dictSize %u, srcSize %u", + job->firstChunk, job->lastChunk, (U32)job->dictSize, (U32)job->srcSize); + if (job->cdict) { /* should only happen for first segment */ + size_t const initError = ZSTD_compressBegin_usingCDict_advanced(job->cctx, job->cdict, job->params.fParams, job->fullFrameSize); + if (job->cdict) DEBUGLOG(3, "using CDict "); + if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; } + } else { /* srcStart points at reloaded section */ + if (!job->firstChunk) job->params.fParams.contentSizeFlag = 0; /* ensure no srcSize control */ + { size_t const dictModeError = ZSTD_setCCtxParameter(job->cctx, ZSTD_p_forceRawDict, 1); /* Force loading dictionary in "content-only" mode (no header analysis) */ + size_t const initError = ZSTD_compressBegin_advanced(job->cctx, job->srcStart, job->dictSize, job->params, job->fullFrameSize); + if (ZSTD_isError(initError) || ZSTD_isError(dictModeError)) { job->cSize = initError; goto _endJob; } + ZSTD_setCCtxParameter(job->cctx, ZSTD_p_forceWindow, 1); + } } + if (!job->firstChunk) { /* flush and overwrite frame header when it's not first segment */ + size_t const hSize = ZSTD_compressContinue(job->cctx, dstBuff.start, dstBuff.size, src, 0); + if (ZSTD_isError(hSize)) { job->cSize = hSize; goto _endJob; } + ZSTD_invalidateRepCodes(job->cctx); + } + + DEBUGLOG(4, "Compressing : "); + DEBUG_PRINTHEX(4, job->srcStart, 12); + job->cSize = (job->lastChunk) ? 
+ ZSTD_compressEnd (job->cctx, dstBuff.start, dstBuff.size, src, job->srcSize) : + ZSTD_compressContinue(job->cctx, dstBuff.start, dstBuff.size, src, job->srcSize); + DEBUGLOG(3, "compressed %u bytes into %u bytes (first:%u) (last:%u)", + (unsigned)job->srcSize, (unsigned)job->cSize, job->firstChunk, job->lastChunk); + DEBUGLOG(5, "dstBuff.size : %u ; => %s", (U32)dstBuff.size, ZSTD_getErrorName(job->cSize)); + +_endJob: + PTHREAD_MUTEX_LOCK(job->jobCompleted_mutex); + job->jobCompleted = 1; + job->jobScanned = 0; + pthread_cond_signal(job->jobCompleted_cond); + pthread_mutex_unlock(job->jobCompleted_mutex); +} + + +/* ------------------------------------------ */ +/* ===== Multi-threaded compression ===== */ +/* ------------------------------------------ */ + +struct ZSTDMT_CCtx_s { + POOL_ctx* factory; + ZSTDMT_bufferPool* buffPool; + ZSTDMT_CCtxPool* cctxPool; + pthread_mutex_t jobCompleted_mutex; + pthread_cond_t jobCompleted_cond; + size_t targetSectionSize; + size_t marginSize; + size_t inBuffSize; + size_t dictSize; + size_t targetDictSize; + inBuff_t inBuff; + ZSTD_parameters params; + XXH64_state_t xxhState; + unsigned nbThreads; + unsigned jobIDMask; + unsigned doneJobID; + unsigned nextJobID; + unsigned frameEnded; + unsigned allJobsCompleted; + unsigned overlapRLog; + unsigned long long frameContentSize; + size_t sectionSize; + ZSTD_CDict* cdict; + ZSTD_CStream* cstream; + ZSTDMT_jobDescription jobs[1]; /* variable size (must lies at the end) */ +}; + +ZSTDMT_CCtx *ZSTDMT_createCCtx(unsigned nbThreads) +{ + ZSTDMT_CCtx* cctx; + U32 const minNbJobs = nbThreads + 2; + U32 const nbJobsLog2 = ZSTD_highbit32(minNbJobs) + 1; + U32 const nbJobs = 1 << nbJobsLog2; + DEBUGLOG(5, "nbThreads : %u ; minNbJobs : %u ; nbJobsLog2 : %u ; nbJobs : %u \n", + nbThreads, minNbJobs, nbJobsLog2, nbJobs); + if ((nbThreads < 1) | (nbThreads > ZSTDMT_NBTHREADS_MAX)) return NULL; + cctx = (ZSTDMT_CCtx*) calloc(1, sizeof(ZSTDMT_CCtx) + nbJobs*sizeof(ZSTDMT_jobDescription)); + if (!cctx) return NULL; + cctx->nbThreads = nbThreads; + cctx->jobIDMask = nbJobs - 1; + cctx->allJobsCompleted = 1; + cctx->sectionSize = 0; + cctx->overlapRLog = 3; + cctx->factory = POOL_create(nbThreads, 1); + cctx->buffPool = ZSTDMT_createBufferPool(nbThreads); + cctx->cctxPool = ZSTDMT_createCCtxPool(nbThreads); + if (!cctx->factory | !cctx->buffPool | !cctx->cctxPool) { /* one object was not created */ + ZSTDMT_freeCCtx(cctx); + return NULL; + } + if (nbThreads==1) { + cctx->cstream = ZSTD_createCStream(); + if (!cctx->cstream) { + ZSTDMT_freeCCtx(cctx); return NULL; + } } + pthread_mutex_init(&cctx->jobCompleted_mutex, NULL); /* Todo : check init function return */ + pthread_cond_init(&cctx->jobCompleted_cond, NULL); + DEBUGLOG(4, "mt_cctx created, for %u threads \n", nbThreads); + return cctx; +} + +/* ZSTDMT_releaseAllJobResources() : + * Ensure all workers are killed first. 
*/ +static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx) +{ + unsigned jobID; + for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) { + ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[jobID].dstBuff); + mtctx->jobs[jobID].dstBuff = g_nullBuffer; + ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[jobID].src); + mtctx->jobs[jobID].src = g_nullBuffer; + ZSTDMT_releaseCCtx(mtctx->cctxPool, mtctx->jobs[jobID].cctx); + mtctx->jobs[jobID].cctx = NULL; + } + memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription)); + ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->inBuff.buffer); + mtctx->inBuff.buffer = g_nullBuffer; + mtctx->allJobsCompleted = 1; +} + +size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx) +{ + if (mtctx==NULL) return 0; /* compatible with free on NULL */ + POOL_free(mtctx->factory); + if (!mtctx->allJobsCompleted) ZSTDMT_releaseAllJobResources(mtctx); /* stop workers first */ + ZSTDMT_freeBufferPool(mtctx->buffPool); /* release job resources into pools first */ + ZSTDMT_freeCCtxPool(mtctx->cctxPool); + ZSTD_freeCDict(mtctx->cdict); + ZSTD_freeCStream(mtctx->cstream); + pthread_mutex_destroy(&mtctx->jobCompleted_mutex); + pthread_cond_destroy(&mtctx->jobCompleted_cond); + free(mtctx); + return 0; +} + +size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter, unsigned value) +{ + switch(parameter) + { + case ZSTDMT_p_sectionSize : + mtctx->sectionSize = value; + return 0; + case ZSTDMT_p_overlapSectionLog : + DEBUGLOG(4, "ZSTDMT_p_overlapSectionLog : %u", value); + mtctx->overlapRLog = (value >= 9) ? 0 : 9 - value; + return 0; + default : + return ERROR(compressionParameter_unsupported); + } +} + + +/* ------------------------------------------ */ +/* ===== Multi-threaded compression ===== */ +/* ------------------------------------------ */ + +size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + int compressionLevel) +{ + ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0); + U32 const overlapLog = (compressionLevel >= ZSTD_maxCLevel()) ? 0 : 3; + size_t const overlapSize = (size_t)1 << (params.cParams.windowLog - overlapLog); + size_t const chunkTargetSize = (size_t)1 << (params.cParams.windowLog + 2); + unsigned const nbChunksMax = (unsigned)(srcSize / chunkTargetSize) + 1; + unsigned nbChunks = MIN(nbChunksMax, mtctx->nbThreads); + size_t const proposedChunkSize = (srcSize + (nbChunks-1)) / nbChunks; + size_t const avgChunkSize = ((proposedChunkSize & 0x1FFFF) < 0xFFFF) ? proposedChunkSize + 0xFFFF : proposedChunkSize; /* avoid too small last block */ + size_t remainingSrcSize = srcSize; + const char* const srcStart = (const char*)src; + unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? 
nbChunks : (unsigned)(dstCapacity / ZSTD_compressBound(avgChunkSize)); /* presumes avgChunkSize >= 256 KB, which should be the case */ + size_t frameStartPos = 0, dstBufferPos = 0; + + DEBUGLOG(3, "windowLog : %2u => chunkTargetSize : %u bytes ", params.cParams.windowLog, (U32)chunkTargetSize); + DEBUGLOG(2, "nbChunks : %2u (chunkSize : %u bytes) ", nbChunks, (U32)avgChunkSize); + params.fParams.contentSizeFlag = 1; + + if (nbChunks==1) { /* fallback to single-thread mode */ + ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0]; + return ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel); + } + + { unsigned u; + for (u=0; u<nbChunks; u++) { + size_t const chunkSize = MIN(remainingSrcSize, avgChunkSize); + size_t const dstBufferCapacity = ZSTD_compressBound(chunkSize); + buffer_t const dstAsBuffer = { (char*)dst + dstBufferPos, dstBufferCapacity }; + buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : ZSTDMT_getBuffer(mtctx->buffPool, dstBufferCapacity); + ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(mtctx->cctxPool); + size_t dictSize = u ? overlapSize : 0; + + if ((cctx==NULL) || (dstBuffer.start==NULL)) { + mtctx->jobs[u].cSize = ERROR(memory_allocation); /* job result */ + mtctx->jobs[u].jobCompleted = 1; + nbChunks = u+1; + break; /* let's wait for previous jobs to complete, but don't start new ones */ + } + + mtctx->jobs[u].srcStart = srcStart + frameStartPos - dictSize; + mtctx->jobs[u].dictSize = dictSize; + mtctx->jobs[u].srcSize = chunkSize; + mtctx->jobs[u].fullFrameSize = srcSize; + mtctx->jobs[u].params = params; + mtctx->jobs[u].dstBuff = dstBuffer; + mtctx->jobs[u].cctx = cctx; + mtctx->jobs[u].firstChunk = (u==0); + mtctx->jobs[u].lastChunk = (u==nbChunks-1); + mtctx->jobs[u].jobCompleted = 0; + mtctx->jobs[u].jobCompleted_mutex = &mtctx->jobCompleted_mutex; + mtctx->jobs[u].jobCompleted_cond = &mtctx->jobCompleted_cond; + + DEBUGLOG(3, "posting job %u (%u bytes)", u, (U32)chunkSize); + DEBUG_PRINTHEX(3, mtctx->jobs[u].srcStart, 12); + POOL_add(mtctx->factory, ZSTDMT_compressChunk, &mtctx->jobs[u]); + + frameStartPos += chunkSize; + dstBufferPos += dstBufferCapacity; + remainingSrcSize -= chunkSize; + } } + /* note : since nbChunks <= nbThreads, all jobs should be running immediately in parallel */ + + { unsigned chunkID; + size_t error = 0, dstPos = 0; + for (chunkID=0; chunkID<nbChunks; chunkID++) { + DEBUGLOG(3, "waiting for chunk %u ", chunkID); + PTHREAD_MUTEX_LOCK(&mtctx->jobCompleted_mutex); + while (mtctx->jobs[chunkID].jobCompleted==0) { + DEBUGLOG(4, "waiting for jobCompleted signal from chunk %u", chunkID); + pthread_cond_wait(&mtctx->jobCompleted_cond, &mtctx->jobCompleted_mutex); + } + pthread_mutex_unlock(&mtctx->jobCompleted_mutex); + DEBUGLOG(3, "ready to write chunk %u ", chunkID); + + ZSTDMT_releaseCCtx(mtctx->cctxPool, mtctx->jobs[chunkID].cctx); + mtctx->jobs[chunkID].cctx = NULL; + mtctx->jobs[chunkID].srcStart = NULL; + { size_t const cSize = mtctx->jobs[chunkID].cSize; + if (ZSTD_isError(cSize)) error = cSize; + if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall); + if (chunkID) { /* note : chunk 0 is already written directly into dst */ + if (!error) + memmove((char*)dst + dstPos, mtctx->jobs[chunkID].dstBuff.start, cSize); /* may overlap if chunk decompressed within dst */ + if (chunkID >= compressWithinDst) /* otherwise, it decompresses within dst */ + ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[chunkID].dstBuff); + mtctx->jobs[chunkID].dstBuff = g_nullBuffer; + } + dstPos += cSize ; + } + 
} + if (!error) DEBUGLOG(3, "compressed size : %u ", (U32)dstPos); + return error ? error : dstPos; + } + +} + + +/* ====================================== */ +/* ======= Streaming API ======= */ +/* ====================================== */ + +static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* zcs) { + while (zcs->doneJobID < zcs->nextJobID) { + unsigned const jobID = zcs->doneJobID & zcs->jobIDMask; + PTHREAD_MUTEX_LOCK(&zcs->jobCompleted_mutex); + while (zcs->jobs[jobID].jobCompleted==0) { + DEBUGLOG(4, "waiting for jobCompleted signal from chunk %u", zcs->doneJobID); /* we want to block when waiting for data to flush */ + pthread_cond_wait(&zcs->jobCompleted_cond, &zcs->jobCompleted_mutex); + } + pthread_mutex_unlock(&zcs->jobCompleted_mutex); + zcs->doneJobID++; + } +} + + +static size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs, + const void* dict, size_t dictSize, unsigned updateDict, + ZSTD_parameters params, unsigned long long pledgedSrcSize) +{ + ZSTD_customMem const cmem = { NULL, NULL, NULL }; + DEBUGLOG(3, "Started new compression, with windowLog : %u", params.cParams.windowLog); + if (zcs->nbThreads==1) return ZSTD_initCStream_advanced(zcs->cstream, dict, dictSize, params, pledgedSrcSize); + if (zcs->allJobsCompleted == 0) { /* previous job not correctly finished */ + ZSTDMT_waitForAllJobsCompleted(zcs); + ZSTDMT_releaseAllJobResources(zcs); + zcs->allJobsCompleted = 1; + } + zcs->params = params; + if (updateDict) { + ZSTD_freeCDict(zcs->cdict); zcs->cdict = NULL; + if (dict && dictSize) { + zcs->cdict = ZSTD_createCDict_advanced(dict, dictSize, 0, params.cParams, cmem); + if (zcs->cdict == NULL) return ERROR(memory_allocation); + } } + zcs->frameContentSize = pledgedSrcSize; + zcs->targetDictSize = (zcs->overlapRLog>=9) ? 0 : (size_t)1 << (zcs->params.cParams.windowLog - zcs->overlapRLog); + DEBUGLOG(4, "overlapRLog : %u ", zcs->overlapRLog); + DEBUGLOG(3, "overlap Size : %u KB", (U32)(zcs->targetDictSize>>10)); + zcs->targetSectionSize = zcs->sectionSize ? 
zcs->sectionSize : (size_t)1 << (zcs->params.cParams.windowLog + 2); + zcs->targetSectionSize = MAX(ZSTDMT_SECTION_SIZE_MIN, zcs->targetSectionSize); + zcs->targetSectionSize = MAX(zcs->targetDictSize, zcs->targetSectionSize); + DEBUGLOG(3, "Section Size : %u KB", (U32)(zcs->targetSectionSize>>10)); + zcs->marginSize = zcs->targetSectionSize >> 2; + zcs->inBuffSize = zcs->targetDictSize + zcs->targetSectionSize + zcs->marginSize; + zcs->inBuff.buffer = ZSTDMT_getBuffer(zcs->buffPool, zcs->inBuffSize); + if (zcs->inBuff.buffer.start == NULL) return ERROR(memory_allocation); + zcs->inBuff.filled = 0; + zcs->dictSize = 0; + zcs->doneJobID = 0; + zcs->nextJobID = 0; + zcs->frameEnded = 0; + zcs->allJobsCompleted = 0; + if (params.fParams.checksumFlag) XXH64_reset(&zcs->xxhState, 0); + return 0; +} + +size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* zcs, + const void* dict, size_t dictSize, + ZSTD_parameters params, unsigned long long pledgedSrcSize) +{ + return ZSTDMT_initCStream_internal(zcs, dict, dictSize, 1, params, pledgedSrcSize); +} + +/* ZSTDMT_resetCStream() : + * pledgedSrcSize is optional and can be zero == unknown */ +size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* zcs, unsigned long long pledgedSrcSize) +{ + if (zcs->nbThreads==1) return ZSTD_resetCStream(zcs->cstream, pledgedSrcSize); + return ZSTDMT_initCStream_internal(zcs, NULL, 0, 0, zcs->params, pledgedSrcSize); +} + +size_t ZSTDMT_initCStream(ZSTDMT_CCtx* zcs, int compressionLevel) { + ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, 0); + return ZSTDMT_initCStream_internal(zcs, NULL, 0, 1, params, 0); +} + + +static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* zcs, size_t srcSize, unsigned endFrame) +{ + size_t const dstBufferCapacity = ZSTD_compressBound(srcSize); + buffer_t const dstBuffer = ZSTDMT_getBuffer(zcs->buffPool, dstBufferCapacity); + ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(zcs->cctxPool); + unsigned const jobID = zcs->nextJobID & zcs->jobIDMask; + + if ((cctx==NULL) || (dstBuffer.start==NULL)) { + zcs->jobs[jobID].jobCompleted = 1; + zcs->nextJobID++; + ZSTDMT_waitForAllJobsCompleted(zcs); + ZSTDMT_releaseAllJobResources(zcs); + return ERROR(memory_allocation); + } + + DEBUGLOG(4, "preparing job %u to compress %u bytes with %u preload ", zcs->nextJobID, (U32)srcSize, (U32)zcs->dictSize); + zcs->jobs[jobID].src = zcs->inBuff.buffer; + zcs->jobs[jobID].srcStart = zcs->inBuff.buffer.start; + zcs->jobs[jobID].srcSize = srcSize; + zcs->jobs[jobID].dictSize = zcs->dictSize; /* note : zcs->inBuff.filled is presumed >= srcSize + dictSize */ + zcs->jobs[jobID].params = zcs->params; + if (zcs->nextJobID) zcs->jobs[jobID].params.fParams.checksumFlag = 0; /* do not calculate checksum within sections, just keep it in header for first section */ + zcs->jobs[jobID].cdict = zcs->nextJobID==0 ? 
zcs->cdict : NULL; + zcs->jobs[jobID].fullFrameSize = zcs->frameContentSize; + zcs->jobs[jobID].dstBuff = dstBuffer; + zcs->jobs[jobID].cctx = cctx; + zcs->jobs[jobID].firstChunk = (zcs->nextJobID==0); + zcs->jobs[jobID].lastChunk = endFrame; + zcs->jobs[jobID].jobCompleted = 0; + zcs->jobs[jobID].dstFlushed = 0; + zcs->jobs[jobID].jobCompleted_mutex = &zcs->jobCompleted_mutex; + zcs->jobs[jobID].jobCompleted_cond = &zcs->jobCompleted_cond; + + /* get a new buffer for next input */ + if (!endFrame) { + size_t const newDictSize = MIN(srcSize + zcs->dictSize, zcs->targetDictSize); + zcs->inBuff.buffer = ZSTDMT_getBuffer(zcs->buffPool, zcs->inBuffSize); + if (zcs->inBuff.buffer.start == NULL) { /* not enough memory to allocate next input buffer */ + zcs->jobs[jobID].jobCompleted = 1; + zcs->nextJobID++; + ZSTDMT_waitForAllJobsCompleted(zcs); + ZSTDMT_releaseAllJobResources(zcs); + return ERROR(memory_allocation); + } + DEBUGLOG(5, "inBuff filled to %u", (U32)zcs->inBuff.filled); + zcs->inBuff.filled -= srcSize + zcs->dictSize - newDictSize; + DEBUGLOG(5, "new job : filled to %u, with %u dict and %u src", (U32)zcs->inBuff.filled, (U32)newDictSize, (U32)(zcs->inBuff.filled - newDictSize)); + memmove(zcs->inBuff.buffer.start, (const char*)zcs->jobs[jobID].srcStart + zcs->dictSize + srcSize - newDictSize, zcs->inBuff.filled); + DEBUGLOG(5, "new inBuff pre-filled"); + zcs->dictSize = newDictSize; + } else { + zcs->inBuff.buffer = g_nullBuffer; + zcs->inBuff.filled = 0; + zcs->dictSize = 0; + zcs->frameEnded = 1; + if (zcs->nextJobID == 0) + zcs->params.fParams.checksumFlag = 0; /* single chunk : checksum is calculated directly within worker thread */ + } + + DEBUGLOG(3, "posting job %u : %u bytes (end:%u) (note : doneJob = %u=>%u)", zcs->nextJobID, (U32)zcs->jobs[jobID].srcSize, zcs->jobs[jobID].lastChunk, zcs->doneJobID, zcs->doneJobID & zcs->jobIDMask); + POOL_add(zcs->factory, ZSTDMT_compressChunk, &zcs->jobs[jobID]); /* this call is blocking when thread worker pool is exhausted */ + zcs->nextJobID++; + return 0; +} + + +/* ZSTDMT_flushNextJob() : + * output : will be updated with amount of data flushed . + * blockToFlush : if >0, the function will block and wait if there is no data available to flush . + * @return : amount of data remaining within internal buffer, 1 if unknown but > 0, 0 if no more, or an error code */ +static size_t ZSTDMT_flushNextJob(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsigned blockToFlush) +{ + unsigned const wJobID = zcs->doneJobID & zcs->jobIDMask; + if (zcs->doneJobID == zcs->nextJobID) return 0; /* all flushed ! 
*/ + PTHREAD_MUTEX_LOCK(&zcs->jobCompleted_mutex); + while (zcs->jobs[wJobID].jobCompleted==0) { + DEBUGLOG(5, "waiting for jobCompleted signal from job %u", zcs->doneJobID); + if (!blockToFlush) { pthread_mutex_unlock(&zcs->jobCompleted_mutex); return 0; } /* nothing ready to be flushed => skip */ + pthread_cond_wait(&zcs->jobCompleted_cond, &zcs->jobCompleted_mutex); /* block when nothing available to flush */ + } + pthread_mutex_unlock(&zcs->jobCompleted_mutex); + /* compression job completed : output can be flushed */ + { ZSTDMT_jobDescription job = zcs->jobs[wJobID]; + if (!job.jobScanned) { + if (ZSTD_isError(job.cSize)) { + DEBUGLOG(5, "compression error detected "); + ZSTDMT_waitForAllJobsCompleted(zcs); + ZSTDMT_releaseAllJobResources(zcs); + return job.cSize; + } + ZSTDMT_releaseCCtx(zcs->cctxPool, job.cctx); + zcs->jobs[wJobID].cctx = NULL; + DEBUGLOG(5, "zcs->params.fParams.checksumFlag : %u ", zcs->params.fParams.checksumFlag); + if (zcs->params.fParams.checksumFlag) { + XXH64_update(&zcs->xxhState, (const char*)job.srcStart + job.dictSize, job.srcSize); + if (zcs->frameEnded && (zcs->doneJobID+1 == zcs->nextJobID)) { /* write checksum at end of last section */ + U32 const checksum = (U32)XXH64_digest(&zcs->xxhState); + DEBUGLOG(4, "writing checksum : %08X \n", checksum); + MEM_writeLE32((char*)job.dstBuff.start + job.cSize, checksum); + job.cSize += 4; + zcs->jobs[wJobID].cSize += 4; + } } + ZSTDMT_releaseBuffer(zcs->buffPool, job.src); + zcs->jobs[wJobID].srcStart = NULL; + zcs->jobs[wJobID].src = g_nullBuffer; + zcs->jobs[wJobID].jobScanned = 1; + } + { size_t const toWrite = MIN(job.cSize - job.dstFlushed, output->size - output->pos); + DEBUGLOG(4, "Flushing %u bytes from job %u ", (U32)toWrite, zcs->doneJobID); + memcpy((char*)output->dst + output->pos, (const char*)job.dstBuff.start + job.dstFlushed, toWrite); + output->pos += toWrite; + job.dstFlushed += toWrite; + } + if (job.dstFlushed == job.cSize) { /* output buffer fully flushed => move to next one */ + ZSTDMT_releaseBuffer(zcs->buffPool, job.dstBuff); + zcs->jobs[wJobID].dstBuff = g_nullBuffer; + zcs->jobs[wJobID].jobCompleted = 0; + zcs->doneJobID++; + } else { + zcs->jobs[wJobID].dstFlushed = job.dstFlushed; + } + /* return value : how many bytes left in buffer ; fake it to 1 if unknown but >0 */ + if (job.cSize > job.dstFlushed) return (job.cSize - job.dstFlushed); + if (zcs->doneJobID < zcs->nextJobID) return 1; /* still some buffer to flush */ + zcs->allJobsCompleted = zcs->frameEnded; /* frame completed and entirely flushed */ + return 0; /* everything flushed */ +} } + + +size_t ZSTDMT_compressStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input) +{ + size_t const newJobThreshold = zcs->dictSize + zcs->targetSectionSize + zcs->marginSize; + if (zcs->frameEnded) return ERROR(stage_wrong); /* current frame being ended. Only flush is allowed. 
Restart with init */ + if (zcs->nbThreads==1) return ZSTD_compressStream(zcs->cstream, output, input); + + /* fill input buffer */ + { size_t const toLoad = MIN(input->size - input->pos, zcs->inBuffSize - zcs->inBuff.filled); + memcpy((char*)zcs->inBuff.buffer.start + zcs->inBuff.filled, input->src, toLoad); + input->pos += toLoad; + zcs->inBuff.filled += toLoad; + } + + if ( (zcs->inBuff.filled >= newJobThreshold) /* filled enough : let's compress */ + && (zcs->nextJobID <= zcs->doneJobID + zcs->jobIDMask) ) { /* avoid overwriting job round buffer */ + CHECK_F( ZSTDMT_createCompressionJob(zcs, zcs->targetSectionSize, 0) ); + } + + /* check for data to flush */ + CHECK_F( ZSTDMT_flushNextJob(zcs, output, (zcs->inBuff.filled == zcs->inBuffSize)) ); /* block if it wasn't possible to create new job due to saturation */ + + /* recommended next input size : fill current input buffer */ + return zcs->inBuffSize - zcs->inBuff.filled; /* note : could be zero when input buffer is fully filled and no more availability to create new job */ +} + + +static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output, unsigned endFrame) +{ + size_t const srcSize = zcs->inBuff.filled - zcs->dictSize; + + if (srcSize) DEBUGLOG(4, "flushing : %u bytes left to compress", (U32)srcSize); + if ( ((srcSize > 0) || (endFrame && !zcs->frameEnded)) + && (zcs->nextJobID <= zcs->doneJobID + zcs->jobIDMask) ) { + CHECK_F( ZSTDMT_createCompressionJob(zcs, srcSize, endFrame) ); + } + + /* check if there is any data available to flush */ + DEBUGLOG(5, "zcs->doneJobID : %u ; zcs->nextJobID : %u ", zcs->doneJobID, zcs->nextJobID); + return ZSTDMT_flushNextJob(zcs, output, 1); +} + + +size_t ZSTDMT_flushStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output) +{ + if (zcs->nbThreads==1) return ZSTD_flushStream(zcs->cstream, output); + return ZSTDMT_flushStream_internal(zcs, output, 0); +} + +size_t ZSTDMT_endStream(ZSTDMT_CCtx* zcs, ZSTD_outBuffer* output) +{ + if (zcs->nbThreads==1) return ZSTD_endStream(zcs->cstream, output); + return ZSTDMT_flushStream_internal(zcs, output, 1); +} diff --git a/thirdparty/zstd/compress/zstdmt_compress.h b/thirdparty/zstd/compress/zstdmt_compress.h new file mode 100644 index 0000000000..27f78ee031 --- /dev/null +++ b/thirdparty/zstd/compress/zstdmt_compress.h @@ -0,0 +1,78 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + + #ifndef ZSTDMT_COMPRESS_H + #define ZSTDMT_COMPRESS_H + + #if defined (__cplusplus) + extern "C" { + #endif + + +/* Note : All prototypes defined in this file shall be considered experimental. 
+ * There is no guarantee of API continuity (yet) on any of these prototypes */ + +/* === Dependencies === */ +#include <stddef.h> /* size_t */ +#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */ +#include "zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */ + + +/* === Simple one-pass functions === */ + +typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx; +ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbThreads); +ZSTDLIB_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* cctx); + +ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + int compressionLevel); + + +/* === Streaming functions === */ + +ZSTDLIB_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel); +ZSTDLIB_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize); /**< pledgedSrcSize is optional and can be zero == unknown */ + +ZSTDLIB_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input); + +ZSTDLIB_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */ +ZSTDLIB_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */ + + +/* === Advanced functions and parameters === */ + +#ifndef ZSTDMT_SECTION_SIZE_MIN +# define ZSTDMT_SECTION_SIZE_MIN (1U << 20) /* 1 MB - Minimum size of each compression job */ +#endif + +ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx, const void* dict, size_t dictSize, /**< dict can be released after init, a local copy is preserved within zcs */ + ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize is optional and can be zero == unknown */ + +/* ZSDTMT_parameter : + * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */ +typedef enum { + ZSTDMT_p_sectionSize, /* size of input "section". Each section is compressed in parallel. 0 means default, which is dynamically determined within compression functions */ + ZSTDMT_p_overlapSectionLog /* Log of overlapped section; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window */ +} ZSDTMT_parameter; + +/* ZSTDMT_setMTCtxParameter() : + * allow setting individual parameters, one at a time, among a list of enums defined in ZSTDMT_parameter. + * The function must be called typically after ZSTD_createCCtx(). + * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions. + * @return : 0, or an error code (which can be tested using ZSTD_isError()) */ +ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSDTMT_parameter parameter, unsigned value); + + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTDMT_COMPRESS_H */ diff --git a/thirdparty/zstd/decompress/huf_decompress.c b/thirdparty/zstd/decompress/huf_decompress.c new file mode 100644 index 0000000000..ea35c36201 --- /dev/null +++ b/thirdparty/zstd/decompress/huf_decompress.c @@ -0,0 +1,888 @@ +/* ****************************************************************** + Huffman decoder, part of New Generation Entropy library + Copyright (C) 2013-2016, Yann Collet. 
+ + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ + +/* ************************************************************** +* Compiler specifics +****************************************************************/ +#ifdef _MSC_VER /* Visual Studio */ +# define FORCE_INLINE static __forceinline +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +#else +# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# ifdef __GNUC__ +# define FORCE_INLINE static inline __attribute__((always_inline)) +# else +# define FORCE_INLINE static inline +# endif +# else +# define FORCE_INLINE static +# endif /* __STDC_VERSION__ */ +#endif + + +/* ************************************************************** +* Dependencies +****************************************************************/ +#include <string.h> /* memcpy, memset */ +#include "bitstream.h" /* BIT_* */ +#include "fse.h" /* header compression */ +#define HUF_STATIC_LINKING_ONLY +#include "huf.h" + + +/* ************************************************************** +* Error Management +****************************************************************/ +#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ + + +/*-***************************/ +/* generic DTableDesc */ +/*-***************************/ + +typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc; + +static DTableDesc HUF_getDTableDesc(const HUF_DTable* table) +{ + DTableDesc dtd; + memcpy(&dtd, table, sizeof(dtd)); + return dtd; +} + + +/*-***************************/ +/* single-symbol decoding */ +/*-***************************/ + +typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */ + +size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize) +{ + BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; + U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 
1]; /* large enough for values from 0 to 16 */ + U32 tableLog = 0; + U32 nbSymbols = 0; + size_t iSize; + void* const dtPtr = DTable + 1; + HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr; + + HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable)); + /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */ + + iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); + if (HUF_isError(iSize)) return iSize; + + /* Table header */ + { DTableDesc dtd = HUF_getDTableDesc(DTable); + if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */ + dtd.tableType = 0; + dtd.tableLog = (BYTE)tableLog; + memcpy(DTable, &dtd, sizeof(dtd)); + } + + /* Calculate starting value for each rank */ + { U32 n, nextRankStart = 0; + for (n=1; n<tableLog+1; n++) { + U32 const current = nextRankStart; + nextRankStart += (rankVal[n] << (n-1)); + rankVal[n] = current; + } } + + /* fill DTable */ + { U32 n; + for (n=0; n<nbSymbols; n++) { + U32 const w = huffWeight[n]; + U32 const length = (1 << w) >> 1; + U32 u; + HUF_DEltX2 D; + D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w); + for (u = rankVal[w]; u < rankVal[w] + length; u++) + dt[u] = D; + rankVal[w] += length; + } } + + return iSize; +} + + +static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog) +{ + size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ + BYTE const c = dt[val].byte; + BIT_skipBits(Dstream, dt[val].nbBits); + return c; +} + +#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ + *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ + if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ + HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) + +#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ + if (MEM_64bits()) \ + HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) + +FORCE_INLINE size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog) +{ + BYTE* const pStart = p; + + /* up to 4 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4)) { + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_1(p, bitDPtr); + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + } + + /* closer to the end */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd)) + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + + /* no more data to retrieve from bitstream, hence no need to reload */ + while (p < pEnd) + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + + return pEnd-pStart; +} + +static size_t HUF_decompress1X2_usingDTable_internal( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + BYTE* op = (BYTE*)dst; + BYTE* const oend = op + dstSize; + const void* dtPtr = DTable + 1; + const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; + BIT_DStream_t bitD; + DTableDesc const dtd = HUF_getDTableDesc(DTable); + U32 const dtLog = dtd.tableLog; + + { size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); + if (HUF_isError(errorCode)) return errorCode; } + + HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog); + + /* check */ + if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected); + + return dstSize; +} + +size_t HUF_decompress1X2_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* 
DTable) +{ + DTableDesc dtd = HUF_getDTableDesc(DTable); + if (dtd.tableType != 0) return ERROR(GENERIC); + return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); +} + +size_t HUF_decompress1X2_DCtx (HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + const BYTE* ip = (const BYTE*) cSrc; + + size_t const hSize = HUF_readDTableX2 (DCtx, cSrc, cSrcSize); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; cSrcSize -= hSize; + + return HUF_decompress1X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx); +} + +size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); + return HUF_decompress1X2_DCtx (DTable, dst, dstSize, cSrc, cSrcSize); +} + + +static size_t HUF_decompress4X2_usingDTable_internal( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + /* Check */ + if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ + + { const BYTE* const istart = (const BYTE*) cSrc; + BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + const void* const dtPtr = DTable + 1; + const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; + + /* Init */ + BIT_DStream_t bitD1; + BIT_DStream_t bitD2; + BIT_DStream_t bitD3; + BIT_DStream_t bitD4; + size_t const length1 = MEM_readLE16(istart); + size_t const length2 = MEM_readLE16(istart+2); + size_t const length3 = MEM_readLE16(istart+4); + size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); + const BYTE* const istart1 = istart + 6; /* jumpTable */ + const BYTE* const istart2 = istart1 + length1; + const BYTE* const istart3 = istart2 + length2; + const BYTE* const istart4 = istart3 + length3; + const size_t segmentSize = (dstSize+3) / 4; + BYTE* const opStart2 = ostart + segmentSize; + BYTE* const opStart3 = opStart2 + segmentSize; + BYTE* const opStart4 = opStart3 + segmentSize; + BYTE* op1 = ostart; + BYTE* op2 = opStart2; + BYTE* op3 = opStart3; + BYTE* op4 = opStart4; + U32 endSignal; + DTableDesc const dtd = HUF_getDTableDesc(DTable); + U32 const dtLog = dtd.tableLog; + + if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + { size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1); + if (HUF_isError(errorCode)) return errorCode; } + { size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2); + if (HUF_isError(errorCode)) return errorCode; } + { size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3); + if (HUF_isError(errorCode)) return errorCode; } + { size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4); + if (HUF_isError(errorCode)) return errorCode; } + + /* 16-32 symbols per loop (4-8 symbols per stream) */ + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) { + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_1(op1, &bitD1); + HUF_DECODE_SYMBOLX2_1(op2, &bitD2); + HUF_DECODE_SYMBOLX2_1(op3, &bitD3); + HUF_DECODE_SYMBOLX2_1(op4, &bitD4); + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, 
&bitD4); + HUF_DECODE_SYMBOLX2_0(op1, &bitD1); + HUF_DECODE_SYMBOLX2_0(op2, &bitD2); + HUF_DECODE_SYMBOLX2_0(op3, &bitD3); + HUF_DECODE_SYMBOLX2_0(op4, &bitD4); + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + } + + /* check corruption */ + if (op1 > opStart2) return ERROR(corruption_detected); + if (op2 > opStart3) return ERROR(corruption_detected); + if (op3 > opStart4) return ERROR(corruption_detected); + /* note : op4 supposed already verified within main loop */ + + /* finish bitStreams one by one */ + HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); + HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); + HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); + HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); + + /* check */ + endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); + if (!endSignal) return ERROR(corruption_detected); + + /* decoded size */ + return dstSize; + } +} + + +size_t HUF_decompress4X2_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc dtd = HUF_getDTableDesc(DTable); + if (dtd.tableType != 0) return ERROR(GENERIC); + return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); +} + + +size_t HUF_decompress4X2_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + const BYTE* ip = (const BYTE*) cSrc; + + size_t const hSize = HUF_readDTableX2 (dctx, cSrc, cSrcSize); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; cSrcSize -= hSize; + + return HUF_decompress4X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, dctx); +} + +size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); + return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); +} + + +/* *************************/ +/* double-symbols decoding */ +/* *************************/ +typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */ + +typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t; + +/* HUF_fillDTableX4Level2() : + * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */ +static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed, + const U32* rankValOrigin, const int minWeight, + const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, + U32 nbBitsBaseline, U16 baseSeq) +{ + HUF_DEltX4 DElt; + U32 rankVal[HUF_TABLELOG_MAX + 1]; + + /* get pre-calculated rankVal */ + memcpy(rankVal, rankValOrigin, sizeof(rankVal)); + + /* fill skipped values */ + if (minWeight>1) { + U32 i, skipSize = rankVal[minWeight]; + MEM_writeLE16(&(DElt.sequence), baseSeq); + DElt.nbBits = (BYTE)(consumed); + DElt.length = 1; + for (i = 0; i < skipSize; i++) + DTable[i] = DElt; + } + + /* fill DTable */ + { U32 s; for (s=0; s<sortedListSize; s++) { /* note : sortedSymbols already skipped */ + const U32 symbol = sortedSymbols[s].symbol; + const U32 weight = sortedSymbols[s].weight; + const U32 nbBits = nbBitsBaseline - weight; + const U32 length = 1 << (sizeLog-nbBits); + const U32 start = rankVal[weight]; + U32 i = start; + const U32 end = start + length; + + MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8))); + DElt.nbBits = (BYTE)(nbBits + consumed); + DElt.length = 2; + 
do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */ + + rankVal[weight] += length; + } } +} + +typedef U32 rankVal_t[HUF_TABLELOG_MAX][HUF_TABLELOG_MAX + 1]; + +static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog, + const sortedSymbol_t* sortedList, const U32 sortedListSize, + const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, + const U32 nbBitsBaseline) +{ + U32 rankVal[HUF_TABLELOG_MAX + 1]; + const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ + const U32 minBits = nbBitsBaseline - maxWeight; + U32 s; + + memcpy(rankVal, rankValOrigin, sizeof(rankVal)); + + /* fill DTable */ + for (s=0; s<sortedListSize; s++) { + const U16 symbol = sortedList[s].symbol; + const U32 weight = sortedList[s].weight; + const U32 nbBits = nbBitsBaseline - weight; + const U32 start = rankVal[weight]; + const U32 length = 1 << (targetLog-nbBits); + + if (targetLog-nbBits >= minBits) { /* enough room for a second symbol */ + U32 sortedRank; + int minWeight = nbBits + scaleLog; + if (minWeight < 1) minWeight = 1; + sortedRank = rankStart[minWeight]; + HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits, + rankValOrigin[nbBits], minWeight, + sortedList+sortedRank, sortedListSize-sortedRank, + nbBitsBaseline, symbol); + } else { + HUF_DEltX4 DElt; + MEM_writeLE16(&(DElt.sequence), symbol); + DElt.nbBits = (BYTE)(nbBits); + DElt.length = 1; + { U32 const end = start + length; + U32 u; + for (u = start; u < end; u++) DTable[u] = DElt; + } } + rankVal[weight] += length; + } +} + +size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize) +{ + BYTE weightList[HUF_SYMBOLVALUE_MAX + 1]; + sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1]; + U32 rankStats[HUF_TABLELOG_MAX + 1] = { 0 }; + U32 rankStart0[HUF_TABLELOG_MAX + 2] = { 0 }; + U32* const rankStart = rankStart0+1; + rankVal_t rankVal; + U32 tableLog, maxW, sizeOfSort, nbSymbols; + DTableDesc dtd = HUF_getDTableDesc(DTable); + U32 const maxTableLog = dtd.maxTableLog; + size_t iSize; + void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */ + HUF_DEltX4* const dt = (HUF_DEltX4*)dtPtr; + + HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */ + if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); + /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... 
*/ + + iSize = HUF_readStats(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); + if (HUF_isError(iSize)) return iSize; + + /* check result */ + if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ + + /* find maxWeight */ + for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */ + + /* Get start index of each weight */ + { U32 w, nextRankStart = 0; + for (w=1; w<maxW+1; w++) { + U32 current = nextRankStart; + nextRankStart += rankStats[w]; + rankStart[w] = current; + } + rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/ + sizeOfSort = nextRankStart; + } + + /* sort symbols by weight */ + { U32 s; + for (s=0; s<nbSymbols; s++) { + U32 const w = weightList[s]; + U32 const r = rankStart[w]++; + sortedSymbol[r].symbol = (BYTE)s; + sortedSymbol[r].weight = (BYTE)w; + } + rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */ + } + + /* Build rankVal */ + { U32* const rankVal0 = rankVal[0]; + { int const rescale = (maxTableLog-tableLog) - 1; /* tableLog <= maxTableLog */ + U32 nextRankVal = 0; + U32 w; + for (w=1; w<maxW+1; w++) { + U32 current = nextRankVal; + nextRankVal += rankStats[w] << (w+rescale); + rankVal0[w] = current; + } } + { U32 const minBits = tableLog+1 - maxW; + U32 consumed; + for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) { + U32* const rankValPtr = rankVal[consumed]; + U32 w; + for (w = 1; w < maxW+1; w++) { + rankValPtr[w] = rankVal0[w] >> consumed; + } } } } + + HUF_fillDTableX4(dt, maxTableLog, + sortedSymbol, sizeOfSort, + rankStart0, rankVal, maxW, + tableLog+1); + + dtd.tableLog = (BYTE)maxTableLog; + dtd.tableType = 1; + memcpy(DTable, &dtd, sizeof(dtd)); + return iSize; +} + + +static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) +{ + size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ + memcpy(op, dt+val, 2); + BIT_skipBits(DStream, dt[val].nbBits); + return dt[val].length; +} + +static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) +{ + size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ + memcpy(op, dt+val, 1); + if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits); + else { + if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) { + BIT_skipBits(DStream, dt[val].nbBits); + if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) + DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. 
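Skipping the full pair's nbBits can push bitsConsumed past the bit container, so it is clamped to the container size instead.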
Note : can't easily extract nbBits from just this symbol */ + } } + return 1; +} + + +#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \ + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \ + if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \ + if (MEM_64bits()) \ + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) + +FORCE_INLINE size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog) +{ + BYTE* const pStart = p; + + /* up to 8 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) { + HUF_DECODE_SYMBOLX4_2(p, bitDPtr); + HUF_DECODE_SYMBOLX4_1(p, bitDPtr); + HUF_DECODE_SYMBOLX4_2(p, bitDPtr); + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); + } + + /* closer to end : up to 2 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2)) + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); + + while (p <= pEnd-2) + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ + + if (p < pEnd) + p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog); + + return p-pStart; +} + + +static size_t HUF_decompress1X4_usingDTable_internal( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + BIT_DStream_t bitD; + + /* Init */ + { size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); + if (HUF_isError(errorCode)) return errorCode; + } + + /* decode */ + { BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */ + const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr; + DTableDesc const dtd = HUF_getDTableDesc(DTable); + HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog); + } + + /* check */ + if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected); + + /* decoded size */ + return dstSize; +} + +size_t HUF_decompress1X4_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc dtd = HUF_getDTableDesc(DTable); + if (dtd.tableType != 1) return ERROR(GENERIC); + return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); +} + +size_t HUF_decompress1X4_DCtx (HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + const BYTE* ip = (const BYTE*) cSrc; + + size_t const hSize = HUF_readDTableX4 (DCtx, cSrc, cSrcSize); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; cSrcSize -= hSize; + + return HUF_decompress1X4_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx); +} + +size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX); + return HUF_decompress1X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); +} + +static size_t HUF_decompress4X4_usingDTable_internal( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ + + { const BYTE* const istart = (const BYTE*) cSrc; + BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + const void* const dtPtr = DTable+1; + const 
HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr; + + /* Init */ + BIT_DStream_t bitD1; + BIT_DStream_t bitD2; + BIT_DStream_t bitD3; + BIT_DStream_t bitD4; + size_t const length1 = MEM_readLE16(istart); + size_t const length2 = MEM_readLE16(istart+2); + size_t const length3 = MEM_readLE16(istart+4); + size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); + const BYTE* const istart1 = istart + 6; /* jumpTable */ + const BYTE* const istart2 = istart1 + length1; + const BYTE* const istart3 = istart2 + length2; + const BYTE* const istart4 = istart3 + length3; + size_t const segmentSize = (dstSize+3) / 4; + BYTE* const opStart2 = ostart + segmentSize; + BYTE* const opStart3 = opStart2 + segmentSize; + BYTE* const opStart4 = opStart3 + segmentSize; + BYTE* op1 = ostart; + BYTE* op2 = opStart2; + BYTE* op3 = opStart3; + BYTE* op4 = opStart4; + U32 endSignal; + DTableDesc const dtd = HUF_getDTableDesc(DTable); + U32 const dtLog = dtd.tableLog; + + if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + { size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1); + if (HUF_isError(errorCode)) return errorCode; } + { size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2); + if (HUF_isError(errorCode)) return errorCode; } + { size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3); + if (HUF_isError(errorCode)) return errorCode; } + { size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4); + if (HUF_isError(errorCode)) return errorCode; } + + /* 16-32 symbols per loop (4-8 symbols per stream) */ + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) { + HUF_DECODE_SYMBOLX4_2(op1, &bitD1); + HUF_DECODE_SYMBOLX4_2(op2, &bitD2); + HUF_DECODE_SYMBOLX4_2(op3, &bitD3); + HUF_DECODE_SYMBOLX4_2(op4, &bitD4); + HUF_DECODE_SYMBOLX4_1(op1, &bitD1); + HUF_DECODE_SYMBOLX4_1(op2, &bitD2); + HUF_DECODE_SYMBOLX4_1(op3, &bitD3); + HUF_DECODE_SYMBOLX4_1(op4, &bitD4); + HUF_DECODE_SYMBOLX4_2(op1, &bitD1); + HUF_DECODE_SYMBOLX4_2(op2, &bitD2); + HUF_DECODE_SYMBOLX4_2(op3, &bitD3); + HUF_DECODE_SYMBOLX4_2(op4, &bitD4); + HUF_DECODE_SYMBOLX4_0(op1, &bitD1); + HUF_DECODE_SYMBOLX4_0(op2, &bitD2); + HUF_DECODE_SYMBOLX4_0(op3, &bitD3); + HUF_DECODE_SYMBOLX4_0(op4, &bitD4); + + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + } + + /* check corruption */ + if (op1 > opStart2) return ERROR(corruption_detected); + if (op2 > opStart3) return ERROR(corruption_detected); + if (op3 > opStart4) return ERROR(corruption_detected); + /* note : op4 already verified within main loop */ + + /* finish bitStreams one by one */ + HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog); + HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog); + HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog); + HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog); + + /* check */ + { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); + if (!endCheck) return ERROR(corruption_detected); } + + /* decoded size */ + return dstSize; + } +} + + +size_t HUF_decompress4X4_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc dtd = HUF_getDTableDesc(DTable); + if (dtd.tableType != 1) return ERROR(GENERIC); + return 
HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); +} + + +size_t HUF_decompress4X4_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + const BYTE* ip = (const BYTE*) cSrc; + + size_t hSize = HUF_readDTableX4 (dctx, cSrc, cSrcSize); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; cSrcSize -= hSize; + + return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx); +} + +size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX); + return HUF_decompress4X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); +} + + +/* ********************************/ +/* Generic decompression selector */ +/* ********************************/ + +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc const dtd = HUF_getDTableDesc(DTable); + return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) : + HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable); +} + +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc const dtd = HUF_getDTableDesc(DTable); + return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) : + HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable); +} + + +typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; +static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = +{ + /* single, double, quad */ + {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */ + {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */ + {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */ + {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */ + {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */ + {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */ + {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */ + {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */ + {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */ + {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */ + {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */ + {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */ + {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */ + {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */ + {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */ + {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */ +}; + +/** HUF_selectDecoder() : +* Tells which decoder is likely to decode faster, +* based on a set of pre-determined metrics. +* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 . 
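+* Example (derived from the algoTime[] table above) : dstSize = 64 KB, cSrcSize = 32 KB => Q = 8, D256 = 256 ; +* DTime0 = 926 + 128*256 = 33694 ; DTime1 = 1613 + 75*256 = 20813, plus the 1/8 penalty => 23414 ; +* since 23414 < 33694, the double-symbols decoder (HUF_decompress4X4) is selected.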
+* Assumption : 0 < cSrcSize < dstSize <= 128 KB */ +U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize) +{ + /* decoder timing evaluation */ + U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */ + U32 const D256 = (U32)(dstSize >> 8); + U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256); + U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256); + DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, for cache eviction */ + + return DTime1 < DTime0; +} + + +typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); + +size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + static const decompressionAlgo decompress[2] = { HUF_decompress4X2, HUF_decompress4X4 }; + + /* validation checks */ + if (dstSize == 0) return ERROR(dstSize_tooSmall); + if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ + if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ + if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ + + { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); + return decompress[algoNb](dst, dstSize, cSrc, cSrcSize); + } +} + +size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + /* validation checks */ + if (dstSize == 0) return ERROR(dstSize_tooSmall); + if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ + if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ + if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ + + { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); + return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : + HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; + } +} + +size_t HUF_decompress4X_hufOnly (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + /* validation checks */ + if (dstSize == 0) return ERROR(dstSize_tooSmall); + if ((cSrcSize >= dstSize) || (cSrcSize <= 1)) return ERROR(corruption_detected); /* invalid */ + + { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); + return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : + HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; + } +} + +size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + /* validation checks */ + if (dstSize == 0) return ERROR(dstSize_tooSmall); + if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ + if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ + if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ + + { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); + return algoNb ? HUF_decompress1X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : + HUF_decompress1X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; + } +} diff --git a/thirdparty/zstd/decompress/zstd_decompress.c b/thirdparty/zstd/decompress/zstd_decompress.c new file mode 100644 index 0000000000..910f9ab783 --- /dev/null +++ b/thirdparty/zstd/decompress/zstd_decompress.c @@ -0,0 +1,2376 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + + +/* *************************************************************** +* Tuning parameters +*****************************************************************/ +/*! + * HEAPMODE : + * Select how default decompression function ZSTD_decompress() will allocate memory, + * in memory stack (0), or in memory heap (1, requires malloc()) + */ +#ifndef ZSTD_HEAPMODE +# define ZSTD_HEAPMODE 1 +#endif + +/*! +* LEGACY_SUPPORT : +* if set to 1, ZSTD_decompress() can decode older formats (v0.1+) +*/ +#ifndef ZSTD_LEGACY_SUPPORT +# define ZSTD_LEGACY_SUPPORT 0 +#endif + +/*! +* MAXWINDOWSIZE_DEFAULT : +* maximum window size accepted by DStream, by default. +* Frames requiring more memory will be rejected. +*/ +#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT +# define ZSTD_MAXWINDOWSIZE_DEFAULT ((1 << ZSTD_WINDOWLOG_MAX) + 1) /* defined within zstd.h */ +#endif + + +/*-******************************************************* +* Dependencies +*********************************************************/ +#include <string.h> /* memcpy, memmove, memset */ +#include "mem.h" /* low level memory routines */ +#define FSE_STATIC_LINKING_ONLY +#include "fse.h" +#define HUF_STATIC_LINKING_ONLY +#include "huf.h" +#include "zstd_internal.h" + +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) +# include "zstd_legacy.h" +#endif + + +#if defined(_MSC_VER) +# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ +# define ZSTD_PREFETCH(ptr) _mm_prefetch((const char*)ptr, _MM_HINT_T0) +#elif defined(__GNUC__) +# define ZSTD_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 0) +#else +# define ZSTD_PREFETCH(ptr) /* disabled */ +#endif + +/*-************************************* +* Macros +***************************************/ +#define ZSTD_isError ERR_isError /* for inlining */ +#define FSE_isError ERR_isError +#define HUF_isError ERR_isError + + +/*_******************************************************* +* Memory operations +**********************************************************/ +static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); } + + +/*-************************************************************* +* Context management +***************************************************************/ +typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader, + ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock, + ZSTDds_decompressLastBlock, ZSTDds_checkChecksum, + ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage; + +typedef struct { + FSE_DTable LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)]; + FSE_DTable OFTable[FSE_DTABLE_SIZE_U32(OffFSELog)]; + FSE_DTable MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)]; + HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */ + U32 rep[ZSTD_REP_NUM]; +} ZSTD_entropyTables_t; + +struct ZSTD_DCtx_s +{ + const FSE_DTable* LLTptr; + const FSE_DTable* MLTptr; + const FSE_DTable* OFTptr; + const HUF_DTable* HUFptr; + ZSTD_entropyTables_t entropy; + const void* previousDstEnd; /* detect continuity */ + const void* base; /* start of current segment */ + const void* vBase; /* virtual start of previous segment if it was just before current one */ + const void* dictEnd; /* end of previous segment */ + size_t expected; + ZSTD_frameParams fParams; + blockType_e bType; /* used in 
ZSTD_decompressContinue(), to transfer blockType between header decoding and block decoding stages */ + ZSTD_dStage stage; + U32 litEntropy; + U32 fseEntropy; + XXH64_state_t xxhState; + size_t headerSize; + U32 dictID; + const BYTE* litPtr; + ZSTD_customMem customMem; + size_t litSize; + size_t rleSize; + BYTE litBuffer[ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH]; + BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; +}; /* typedef'd to ZSTD_DCtx within "zstd.h" */ + +size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx) { return (dctx==NULL) ? 0 : sizeof(ZSTD_DCtx); } + +size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); } + +size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx) +{ + dctx->expected = ZSTD_frameHeaderSize_prefix; + dctx->stage = ZSTDds_getFrameHeaderSize; + dctx->previousDstEnd = NULL; + dctx->base = NULL; + dctx->vBase = NULL; + dctx->dictEnd = NULL; + dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ + dctx->litEntropy = dctx->fseEntropy = 0; + dctx->dictID = 0; + MEM_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue)); + memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */ + dctx->LLTptr = dctx->entropy.LLTable; + dctx->MLTptr = dctx->entropy.MLTable; + dctx->OFTptr = dctx->entropy.OFTable; + dctx->HUFptr = dctx->entropy.hufTable; + return 0; +} + +ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem) +{ + ZSTD_DCtx* dctx; + + if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem; + if (!customMem.customAlloc || !customMem.customFree) return NULL; + + dctx = (ZSTD_DCtx*)ZSTD_malloc(sizeof(ZSTD_DCtx), customMem); + if (!dctx) return NULL; + memcpy(&dctx->customMem, &customMem, sizeof(customMem)); + ZSTD_decompressBegin(dctx); + return dctx; +} + +ZSTD_DCtx* ZSTD_createDCtx(void) +{ + return ZSTD_createDCtx_advanced(defaultCustomMem); +} + +size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx) +{ + if (dctx==NULL) return 0; /* support free on NULL */ + ZSTD_free(dctx, dctx->customMem); + return 0; /* reserved as a potential error code in the future */ +} + +void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx) +{ + size_t const workSpaceSize = (ZSTD_BLOCKSIZE_ABSOLUTEMAX+WILDCOPY_OVERLENGTH) + ZSTD_frameHeaderSize_max; + memcpy(dstDCtx, srcDCtx, sizeof(ZSTD_DCtx) - workSpaceSize); /* no need to copy workspace */ +} + +static void ZSTD_refDDict(ZSTD_DCtx* dstDCtx, const ZSTD_DDict* ddict); + + +/*-************************************************************* +* Decompression section +***************************************************************/ + +/*! ZSTD_isFrame() : + * Tells if the content of `buffer` starts with a valid Frame Identifier. + * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. + * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. + * Note 3 : Skippable Frame Identifiers are considered valid. */ +unsigned ZSTD_isFrame(const void* buffer, size_t size) +{ + if (size < 4) return 0; + { U32 const magic = MEM_readLE32(buffer); + if (magic == ZSTD_MAGICNUMBER) return 1; + if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) return 1; + } +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) + if (ZSTD_isLegacy(buffer, size)) return 1; +#endif + return 0; +} + + +/** ZSTD_frameHeaderSize() : +* srcSize must be >= ZSTD_frameHeaderSize_prefix. 
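+* The 5th byte is the frame header descriptor : bits 0-1 encode the size of the Dictionary_ID field, +* bit 5 the single-segment flag, bits 6-7 the size of the Frame_Content_Size field ; +* the total header size is derived from these flags, as computed below.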
+* @return : size of the Frame Header */ +static size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize) +{ + if (srcSize < ZSTD_frameHeaderSize_prefix) return ERROR(srcSize_wrong); + { BYTE const fhd = ((const BYTE*)src)[4]; + U32 const dictID= fhd & 3; + U32 const singleSegment = (fhd >> 5) & 1; + U32 const fcsId = fhd >> 6; + return ZSTD_frameHeaderSize_prefix + !singleSegment + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] + + (singleSegment && !fcsId); + } +} + + +/** ZSTD_getFrameParams() : +* decode Frame Header, or require larger `srcSize`. +* @return : 0, `fparamsPtr` is correctly filled, +* >0, `srcSize` is too small, result is expected `srcSize`, +* or an error code, which can be tested using ZSTD_isError() */ +size_t ZSTD_getFrameParams(ZSTD_frameParams* fparamsPtr, const void* src, size_t srcSize) +{ + const BYTE* ip = (const BYTE*)src; + + if (srcSize < ZSTD_frameHeaderSize_prefix) return ZSTD_frameHeaderSize_prefix; + if (MEM_readLE32(src) != ZSTD_MAGICNUMBER) { + if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { + if (srcSize < ZSTD_skippableHeaderSize) return ZSTD_skippableHeaderSize; /* magic number + skippable frame length */ + memset(fparamsPtr, 0, sizeof(*fparamsPtr)); + fparamsPtr->frameContentSize = MEM_readLE32((const char *)src + 4); + fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */ + return 0; + } + return ERROR(prefix_unknown); + } + + /* ensure there is enough `srcSize` to fully read/decode frame header */ + { size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize); + if (srcSize < fhsize) return fhsize; } + + { BYTE const fhdByte = ip[4]; + size_t pos = 5; + U32 const dictIDSizeCode = fhdByte&3; + U32 const checksumFlag = (fhdByte>>2)&1; + U32 const singleSegment = (fhdByte>>5)&1; + U32 const fcsID = fhdByte>>6; + U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; + U32 windowSize = 0; + U32 dictID = 0; + U64 frameContentSize = 0; + if ((fhdByte & 0x08) != 0) return ERROR(frameParameter_unsupported); /* reserved bits, which must be zero */ + if (!singleSegment) { + BYTE const wlByte = ip[pos++]; + U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN; + if (windowLog > ZSTD_WINDOWLOG_MAX) return ERROR(frameParameter_windowTooLarge); /* avoids issue with 1 << windowLog */ + windowSize = (1U << windowLog); + windowSize += (windowSize >> 3) * (wlByte&7); + } + + switch(dictIDSizeCode) + { + default: /* impossible */ + case 0 : break; + case 1 : dictID = ip[pos]; pos++; break; + case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break; + case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break; + } + switch(fcsID) + { + default: /* impossible */ + case 0 : if (singleSegment) frameContentSize = ip[pos]; break; + case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break; + case 2 : frameContentSize = MEM_readLE32(ip+pos); break; + case 3 : frameContentSize = MEM_readLE64(ip+pos); break; + } + if (!windowSize) windowSize = (U32)frameContentSize; + if (windowSize > windowSizeMax) return ERROR(frameParameter_windowTooLarge); + fparamsPtr->frameContentSize = frameContentSize; + fparamsPtr->windowSize = windowSize; + fparamsPtr->dictID = dictID; + fparamsPtr->checksumFlag = checksumFlag; + } + return 0; +} + +/** ZSTD_getFrameContentSize() : +* compatible with legacy mode +* @return : decompressed size of the single frame pointed to be `src` if known, otherwise +* - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined +* - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. 
invalid magic number, srcSize too small) */ +unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize) +{ +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) + if (ZSTD_isLegacy(src, srcSize)) { + unsigned long long const ret = ZSTD_getDecompressedSize_legacy(src, srcSize); + return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret; + } +#endif + { + ZSTD_frameParams fParams; + if (ZSTD_getFrameParams(&fParams, src, srcSize) != 0) return ZSTD_CONTENTSIZE_ERROR; + if (fParams.windowSize == 0) { + /* Either skippable or empty frame, size == 0 either way */ + return 0; + } else if (fParams.frameContentSize != 0) { + return fParams.frameContentSize; + } else { + return ZSTD_CONTENTSIZE_UNKNOWN; + } + } +} + +/** ZSTD_findDecompressedSize() : + * compatible with legacy mode + * `srcSize` must be the exact length of some number of ZSTD compressed and/or + * skippable frames + * @return : decompressed size of the frames contained */ +unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize) +{ + { + unsigned long long totalDstSize = 0; + while (srcSize >= ZSTD_frameHeaderSize_prefix) { + const U32 magicNumber = MEM_readLE32(src); + + if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { + size_t skippableSize; + if (srcSize < ZSTD_skippableHeaderSize) + return ERROR(srcSize_wrong); + skippableSize = MEM_readLE32((const BYTE *)src + 4) + + ZSTD_skippableHeaderSize; + if (srcSize < skippableSize) { + return ZSTD_CONTENTSIZE_ERROR; + } + + src = (const BYTE *)src + skippableSize; + srcSize -= skippableSize; + continue; + } + + { + unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize); + if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret; + + /* check for overflow */ + if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR; + totalDstSize += ret; + } + { + size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize); + if (ZSTD_isError(frameSrcSize)) { + return ZSTD_CONTENTSIZE_ERROR; + } + + src = (const BYTE *)src + frameSrcSize; + srcSize -= frameSrcSize; + } + } + + if (srcSize) { + return ZSTD_CONTENTSIZE_ERROR; + } + + return totalDstSize; + } +} + +/** ZSTD_getDecompressedSize() : +* compatible with legacy mode +* @return : decompressed size if known, 0 otherwise + note : 0 can mean any of the following : + - decompressed size is not present within frame header + - frame header unknown / not supported + - frame header not complete (`srcSize` too small) */ +unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize) +{ + unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize); + return ret >= ZSTD_CONTENTSIZE_ERROR ? 0 : ret; +} + + +/** ZSTD_decodeFrameHeader() : +* `headerSize` must be the size provided by ZSTD_frameHeaderSize(). +* @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */ +static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize) +{ + size_t const result = ZSTD_getFrameParams(&(dctx->fParams), src, headerSize); + if (ZSTD_isError(result)) return result; /* invalid header */ + if (result>0) return ERROR(srcSize_wrong); /* headerSize too small */ + if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID)) return ERROR(dictionary_wrong); + if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0); + return 0; +} + + +typedef struct +{ + blockType_e blockType; + U32 lastBlock; + U32 origSize; +} blockProperties_t; + +/*! 
ZSTD_getcBlockSize() : +* Provides the size of compressed block from block header `src` */ +size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, + blockProperties_t* bpPtr) +{ + if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); + { U32 const cBlockHeader = MEM_readLE24(src); + U32 const cSize = cBlockHeader >> 3; + bpPtr->lastBlock = cBlockHeader & 1; + bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3); + bpPtr->origSize = cSize; /* only useful for RLE */ + if (bpPtr->blockType == bt_rle) return 1; + if (bpPtr->blockType == bt_reserved) return ERROR(corruption_detected); + return cSize; + } +} + + +static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall); + memcpy(dst, src, srcSize); + return srcSize; +} + + +static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + size_t regenSize) +{ + if (srcSize != 1) return ERROR(srcSize_wrong); + if (regenSize > dstCapacity) return ERROR(dstSize_tooSmall); + memset(dst, *(const BYTE*)src, regenSize); + return regenSize; +} + +/*! ZSTD_decodeLiteralsBlock() : + @return : nb of bytes read from src (< srcSize ) */ +size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, + const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */ +{ + if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected); + + { const BYTE* const istart = (const BYTE*) src; + symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3); + + switch(litEncType) + { + case set_repeat: + if (dctx->litEntropy==0) return ERROR(dictionary_corrupted); + /* fall-through */ + case set_compressed: + if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */ + { size_t lhSize, litSize, litCSize; + U32 singleStream=0; + U32 const lhlCode = (istart[0] >> 2) & 3; + U32 const lhc = MEM_readLE32(istart); + switch(lhlCode) + { + case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */ + /* 2 - 2 - 10 - 10 */ + singleStream = !lhlCode; + lhSize = 3; + litSize = (lhc >> 4) & 0x3FF; + litCSize = (lhc >> 14) & 0x3FF; + break; + case 2: + /* 2 - 2 - 14 - 14 */ + lhSize = 4; + litSize = (lhc >> 4) & 0x3FFF; + litCSize = lhc >> 18; + break; + case 3: + /* 2 - 2 - 18 - 18 */ + lhSize = 5; + litSize = (lhc >> 4) & 0x3FFFF; + litCSize = (lhc >> 22) + (istart[4] << 10); + break; + } + if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected); + if (litCSize + lhSize > srcSize) return ERROR(corruption_detected); + + if (HUF_isError((litEncType==set_repeat) ? + ( singleStream ? + HUF_decompress1X_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr) : + HUF_decompress4X_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr) ) : + ( singleStream ? 
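/* table not reused from a previous block : read the Huffman table from this block, then decode a single stream or 4 interleaved streams */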
+ HUF_decompress1X2_DCtx(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize) : + HUF_decompress4X_hufOnly (dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize)) )) + return ERROR(corruption_detected); + + dctx->litPtr = dctx->litBuffer; + dctx->litSize = litSize; + dctx->litEntropy = 1; + if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable; + memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); + return litCSize + lhSize; + } + + case set_basic: + { size_t litSize, lhSize; + U32 const lhlCode = ((istart[0]) >> 2) & 3; + switch(lhlCode) + { + case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ + lhSize = 1; + litSize = istart[0] >> 3; + break; + case 1: + lhSize = 2; + litSize = MEM_readLE16(istart) >> 4; + break; + case 3: + lhSize = 3; + litSize = MEM_readLE24(istart) >> 4; + break; + } + + if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */ + if (litSize+lhSize > srcSize) return ERROR(corruption_detected); + memcpy(dctx->litBuffer, istart+lhSize, litSize); + dctx->litPtr = dctx->litBuffer; + dctx->litSize = litSize; + memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); + return lhSize+litSize; + } + /* direct reference into compressed stream */ + dctx->litPtr = istart+lhSize; + dctx->litSize = litSize; + return lhSize+litSize; + } + + case set_rle: + { U32 const lhlCode = ((istart[0]) >> 2) & 3; + size_t litSize, lhSize; + switch(lhlCode) + { + case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ + lhSize = 1; + litSize = istart[0] >> 3; + break; + case 1: + lhSize = 2; + litSize = MEM_readLE16(istart) >> 4; + break; + case 3: + lhSize = 3; + litSize = MEM_readLE24(istart) >> 4; + if (srcSize<4) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */ + break; + } + if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected); + memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH); + dctx->litPtr = dctx->litBuffer; + dctx->litSize = litSize; + return lhSize+1; + } + default: + return ERROR(corruption_detected); /* impossible */ + } + } +} + + +typedef union { + FSE_decode_t realData; + U32 alignedBy4; +} FSE_decode_t4; + +/* Default FSE distribution table for Literal Lengths */ +static const FSE_decode_t4 LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = { + { { LL_DEFAULTNORMLOG, 1, 1 } }, /* header : tableLog, fastMode, fastMode */ + /* base, symbol, bits */ + { { 0, 0, 4 } }, { { 16, 0, 4 } }, { { 32, 1, 5 } }, { { 0, 3, 5 } }, + { { 0, 4, 5 } }, { { 0, 6, 5 } }, { { 0, 7, 5 } }, { { 0, 9, 5 } }, + { { 0, 10, 5 } }, { { 0, 12, 5 } }, { { 0, 14, 6 } }, { { 0, 16, 5 } }, + { { 0, 18, 5 } }, { { 0, 19, 5 } }, { { 0, 21, 5 } }, { { 0, 22, 5 } }, + { { 0, 24, 5 } }, { { 32, 25, 5 } }, { { 0, 26, 5 } }, { { 0, 27, 6 } }, + { { 0, 29, 6 } }, { { 0, 31, 6 } }, { { 32, 0, 4 } }, { { 0, 1, 4 } }, + { { 0, 2, 5 } }, { { 32, 4, 5 } }, { { 0, 5, 5 } }, { { 32, 7, 5 } }, + { { 0, 8, 5 } }, { { 32, 10, 5 } }, { { 0, 11, 5 } }, { { 0, 13, 6 } }, + { { 32, 16, 5 } }, { { 0, 17, 5 } }, { { 32, 19, 5 } }, { { 0, 20, 5 } }, + { { 32, 22, 5 } }, { { 0, 23, 5 } }, { { 0, 25, 4 } }, { { 16, 25, 4 } }, + { { 32, 26, 5 } }, { { 0, 28, 6 } }, { { 0, 30, 6 } }, { { 48, 0, 4 } }, + { { 16, 1, 4 } }, { { 32, 2, 5 } }, { { 32, 3, 5 } }, { { 32, 5, 5 } }, + { { 32, 6, 5 } }, { { 32, 8, 5 } }, { { 32, 9, 5 } }, { { 32, 11, 5 } }, + { { 32, 
12, 5 } }, { { 0, 15, 6 } }, { { 32, 17, 5 } }, { { 32, 18, 5 } }, + { { 32, 20, 5 } }, { { 32, 21, 5 } }, { { 32, 23, 5 } }, { { 32, 24, 5 } }, + { { 0, 35, 6 } }, { { 0, 34, 6 } }, { { 0, 33, 6 } }, { { 0, 32, 6 } }, +}; /* LL_defaultDTable */ + +/* Default FSE distribution table for Match Lengths */ +static const FSE_decode_t4 ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = { + { { ML_DEFAULTNORMLOG, 1, 1 } }, /* header : tableLog, fastMode, fastMode */ + /* base, symbol, bits */ + { { 0, 0, 6 } }, { { 0, 1, 4 } }, { { 32, 2, 5 } }, { { 0, 3, 5 } }, + { { 0, 5, 5 } }, { { 0, 6, 5 } }, { { 0, 8, 5 } }, { { 0, 10, 6 } }, + { { 0, 13, 6 } }, { { 0, 16, 6 } }, { { 0, 19, 6 } }, { { 0, 22, 6 } }, + { { 0, 25, 6 } }, { { 0, 28, 6 } }, { { 0, 31, 6 } }, { { 0, 33, 6 } }, + { { 0, 35, 6 } }, { { 0, 37, 6 } }, { { 0, 39, 6 } }, { { 0, 41, 6 } }, + { { 0, 43, 6 } }, { { 0, 45, 6 } }, { { 16, 1, 4 } }, { { 0, 2, 4 } }, + { { 32, 3, 5 } }, { { 0, 4, 5 } }, { { 32, 6, 5 } }, { { 0, 7, 5 } }, + { { 0, 9, 6 } }, { { 0, 12, 6 } }, { { 0, 15, 6 } }, { { 0, 18, 6 } }, + { { 0, 21, 6 } }, { { 0, 24, 6 } }, { { 0, 27, 6 } }, { { 0, 30, 6 } }, + { { 0, 32, 6 } }, { { 0, 34, 6 } }, { { 0, 36, 6 } }, { { 0, 38, 6 } }, + { { 0, 40, 6 } }, { { 0, 42, 6 } }, { { 0, 44, 6 } }, { { 32, 1, 4 } }, + { { 48, 1, 4 } }, { { 16, 2, 4 } }, { { 32, 4, 5 } }, { { 32, 5, 5 } }, + { { 32, 7, 5 } }, { { 32, 8, 5 } }, { { 0, 11, 6 } }, { { 0, 14, 6 } }, + { { 0, 17, 6 } }, { { 0, 20, 6 } }, { { 0, 23, 6 } }, { { 0, 26, 6 } }, + { { 0, 29, 6 } }, { { 0, 52, 6 } }, { { 0, 51, 6 } }, { { 0, 50, 6 } }, + { { 0, 49, 6 } }, { { 0, 48, 6 } }, { { 0, 47, 6 } }, { { 0, 46, 6 } }, +}; /* ML_defaultDTable */ + +/* Default FSE distribution table for Offset Codes */ +static const FSE_decode_t4 OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = { + { { OF_DEFAULTNORMLOG, 1, 1 } }, /* header : tableLog, fastMode, fastMode */ + /* base, symbol, bits */ + { { 0, 0, 5 } }, { { 0, 6, 4 } }, + { { 0, 9, 5 } }, { { 0, 15, 5 } }, + { { 0, 21, 5 } }, { { 0, 3, 5 } }, + { { 0, 7, 4 } }, { { 0, 12, 5 } }, + { { 0, 18, 5 } }, { { 0, 23, 5 } }, + { { 0, 5, 5 } }, { { 0, 8, 4 } }, + { { 0, 14, 5 } }, { { 0, 20, 5 } }, + { { 0, 2, 5 } }, { { 16, 7, 4 } }, + { { 0, 11, 5 } }, { { 0, 17, 5 } }, + { { 0, 22, 5 } }, { { 0, 4, 5 } }, + { { 16, 8, 4 } }, { { 0, 13, 5 } }, + { { 0, 19, 5 } }, { { 0, 1, 5 } }, + { { 16, 6, 4 } }, { { 0, 10, 5 } }, + { { 0, 16, 5 } }, { { 0, 28, 5 } }, + { { 0, 27, 5 } }, { { 0, 26, 5 } }, + { { 0, 25, 5 } }, { { 0, 24, 5 } }, +}; /* OF_defaultDTable */ + +/*! 
ZSTD_buildSeqTable() : + @return : nb bytes read from src, + or an error code if it fails, testable with ZSTD_isError() +*/ +static size_t ZSTD_buildSeqTable(FSE_DTable* DTableSpace, const FSE_DTable** DTablePtr, + symbolEncodingType_e type, U32 max, U32 maxLog, + const void* src, size_t srcSize, + const FSE_decode_t4* defaultTable, U32 flagRepeatTable) +{ + const void* const tmpPtr = defaultTable; /* bypass strict aliasing */ + switch(type) + { + case set_rle : + if (!srcSize) return ERROR(srcSize_wrong); + if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected); + FSE_buildDTable_rle(DTableSpace, *(const BYTE*)src); + *DTablePtr = DTableSpace; + return 1; + case set_basic : + *DTablePtr = (const FSE_DTable*)tmpPtr; + return 0; + case set_repeat: + if (!flagRepeatTable) return ERROR(corruption_detected); + return 0; + default : /* impossible */ + case set_compressed : + { U32 tableLog; + S16 norm[MaxSeq+1]; + size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize); + if (FSE_isError(headerSize)) return ERROR(corruption_detected); + if (tableLog > maxLog) return ERROR(corruption_detected); + FSE_buildDTable(DTableSpace, norm, max, tableLog); + *DTablePtr = DTableSpace; + return headerSize; + } } +} + +size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, + const void* src, size_t srcSize) +{ + const BYTE* const istart = (const BYTE* const)src; + const BYTE* const iend = istart + srcSize; + const BYTE* ip = istart; + + /* check */ + if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong); + + /* SeqHead */ + { int nbSeq = *ip++; + if (!nbSeq) { *nbSeqPtr=0; return 1; } + if (nbSeq > 0x7F) { + if (nbSeq == 0xFF) { + if (ip+2 > iend) return ERROR(srcSize_wrong); + nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2; + } else { + if (ip >= iend) return ERROR(srcSize_wrong); + nbSeq = ((nbSeq-0x80)<<8) + *ip++; + } + } + *nbSeqPtr = nbSeq; + } + + /* FSE table descriptors */ + if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */ + { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); + symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3); + symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3); + ip++; + + /* Build DTables */ + { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, + LLtype, MaxLL, LLFSELog, + ip, iend-ip, LL_defaultDTable, dctx->fseEntropy); + if (ZSTD_isError(llhSize)) return ERROR(corruption_detected); + ip += llhSize; + } + { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, + OFtype, MaxOff, OffFSELog, + ip, iend-ip, OF_defaultDTable, dctx->fseEntropy); + if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected); + ip += ofhSize; + } + { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, + MLtype, MaxML, MLFSELog, + ip, iend-ip, ML_defaultDTable, dctx->fseEntropy); + if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected); + ip += mlhSize; + } + } + + return ip-istart; +} + + +typedef struct { + size_t litLength; + size_t matchLength; + size_t offset; + const BYTE* match; +} seq_t; + +typedef struct { + BIT_DStream_t DStream; + FSE_DState_t stateLL; + FSE_DState_t stateOffb; + FSE_DState_t stateML; + size_t prevOffset[ZSTD_REP_NUM]; + const BYTE* base; + size_t pos; + uPtrDiff gotoDict; +} seqState_t; + + +FORCE_NOINLINE +size_t ZSTD_execSequenceLast7(BYTE* op, + BYTE* const oend, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* 
const base, const BYTE* const vBase, const BYTE* const dictEnd) +{ + BYTE* const oLitEnd = op + sequence.litLength; + size_t const sequenceLength = sequence.litLength + sequence.matchLength; + BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ + BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; + const BYTE* const iLitEnd = *litPtr + sequence.litLength; + const BYTE* match = oLitEnd - sequence.offset; + + /* check */ + if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ + if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ + if (oLitEnd <= oend_w) return ERROR(GENERIC); /* Precondition */ + + /* copy literals */ + if (op < oend_w) { + ZSTD_wildcopy(op, *litPtr, oend_w - op); + *litPtr += oend_w - op; + op = oend_w; + } + while (op < oLitEnd) *op++ = *(*litPtr)++; + + /* copy Match */ + if (sequence.offset > (size_t)(oLitEnd - base)) { + /* offset beyond prefix */ + if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected); + match = dictEnd - (base-match); + if (match + sequence.matchLength <= dictEnd) { + memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } + /* span extDict & currentPrefixSegment */ + { size_t const length1 = dictEnd - match; + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = base; + } } + while (op < oMatchEnd) *op++ = *match++; + return sequenceLength; +} + + +static seq_t ZSTD_decodeSequence(seqState_t* seqState) +{ + seq_t seq; + + U32 const llCode = FSE_peekSymbol(&seqState->stateLL); + U32 const mlCode = FSE_peekSymbol(&seqState->stateML); + U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */ + + U32 const llBits = LL_bits[llCode]; + U32 const mlBits = ML_bits[mlCode]; + U32 const ofBits = ofCode; + U32 const totalBits = llBits+mlBits+ofBits; + + static const U32 LL_base[MaxLL+1] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 18, 20, 22, 24, 28, 32, 40, + 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, + 0x2000, 0x4000, 0x8000, 0x10000 }; + + static const U32 ML_base[MaxML+1] = { + 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, + 35, 37, 39, 41, 43, 47, 51, 59, + 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, + 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 }; + + static const U32 OF_base[MaxOff+1] = { + 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, + 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, + 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, + 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD }; + + /* sequence */ + { size_t offset; + if (!ofCode) + offset = 0; + else { + offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); + } + + if (ofCode <= 1) { + offset += (llCode==0); + if (offset) { + size_t temp = (offset==3) ? 
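/* repcode values 1 and 2 select the 2nd and 3rd most recent offsets ; value 3 means the most recent offset minus one */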
seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; + temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ + if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset = temp; + } else { + offset = seqState->prevOffset[0]; + } + } else { + seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset; + } + seq.offset = offset; + } + + seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */ + if (MEM_32bits() && (mlBits+llBits>24)) BIT_reloadDStream(&seqState->DStream); + + seq.litLength = LL_base[llCode] + ((llCode>15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */ + if (MEM_32bits() || + (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BIT_reloadDStream(&seqState->DStream); + + /* ANS state update */ + FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ + FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ + FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ + + return seq; +} + + +FORCE_INLINE +size_t ZSTD_execSequence(BYTE* op, + BYTE* const oend, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd) +{ + BYTE* const oLitEnd = op + sequence.litLength; + size_t const sequenceLength = sequence.litLength + sequence.matchLength; + BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ + BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; + const BYTE* const iLitEnd = *litPtr + sequence.litLength; + const BYTE* match = oLitEnd - sequence.offset; + + /* check */ + if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ + if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ + if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd); + + /* copy Literals */ + ZSTD_copy8(op, *litPtr); + if (sequence.litLength > 8) + ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ + op = oLitEnd; + *litPtr = iLitEnd; /* update for next sequence */ + + /* copy Match */ + if (sequence.offset > (size_t)(oLitEnd - base)) { + /* offset beyond prefix -> go into extDict */ + if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected); + match = dictEnd + (match - base); + if (match + sequence.matchLength <= dictEnd) { + memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } + /* span extDict & currentPrefixSegment */ + { size_t const length1 = dictEnd - match; + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = base; + if (op > oend_w || sequence.matchLength < MINMATCH) { + U32 i; + for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i]; + return sequenceLength; + } + } } + /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */ + + /* match within prefix */ + if (sequence.offset < 8) { + /* close range match, overlap */ + static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* 
added */ + static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ + int const sub2 = dec64table[sequence.offset]; + op[0] = match[0]; + op[1] = match[1]; + op[2] = match[2]; + op[3] = match[3]; + match += dec32table[sequence.offset]; + ZSTD_copy4(op+4, match); + match -= sub2; + } else { + ZSTD_copy8(op, match); + } + op += 8; match += 8; + + if (oMatchEnd > oend-(16-MINMATCH)) { + if (op < oend_w) { + ZSTD_wildcopy(op, match, oend_w - op); + match += oend_w - op; + op = oend_w; + } + while (op < oMatchEnd) *op++ = *match++; + } else { + ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */ + } + return sequenceLength; +} + + +static size_t ZSTD_decompressSequences( + ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize) +{ + const BYTE* ip = (const BYTE*)seqStart; + const BYTE* const iend = ip + seqSize; + BYTE* const ostart = (BYTE* const)dst; + BYTE* const oend = ostart + maxDstSize; + BYTE* op = ostart; + const BYTE* litPtr = dctx->litPtr; + const BYTE* const litEnd = litPtr + dctx->litSize; + const BYTE* const base = (const BYTE*) (dctx->base); + const BYTE* const vBase = (const BYTE*) (dctx->vBase); + const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); + int nbSeq; + + /* Build Decoding Tables */ + { size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); + if (ZSTD_isError(seqHSize)) return seqHSize; + ip += seqHSize; + } + + /* Regen sequences */ + if (nbSeq) { + seqState_t seqState; + dctx->fseEntropy = 1; + { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } + CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected); + FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); + FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); + FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); + + for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) { + nbSeq--; + { seq_t const sequence = ZSTD_decodeSequence(&seqState); + size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd); + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + op += oneSeqSize; + } } + + /* check if reached exact end */ + if (nbSeq) return ERROR(corruption_detected); + /* save reps for next block */ + { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); } + } + + /* last literal segment */ + { size_t const lastLLSize = litEnd - litPtr; + if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall); + memcpy(op, litPtr, lastLLSize); + op += lastLLSize; + } + + return op-ostart; +} + + +FORCE_INLINE seq_t ZSTD_decodeSequenceLong_generic(seqState_t* seqState, int const longOffsets) +{ + seq_t seq; + + U32 const llCode = FSE_peekSymbol(&seqState->stateLL); + U32 const mlCode = FSE_peekSymbol(&seqState->stateML); + U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */ + + U32 const llBits = LL_bits[llCode]; + U32 const mlBits = ML_bits[mlCode]; + U32 const ofBits = ofCode; + U32 const totalBits = llBits+mlBits+ofBits; + + static const U32 LL_base[MaxLL+1] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 18, 20, 22, 24, 28, 32, 40, + 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, + 0x2000, 0x4000, 0x8000, 0x10000 }; + + static const U32 ML_base[MaxML+1] = { + 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 
+ 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, + 35, 37, 39, 41, 43, 47, 51, 59, + 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, + 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 }; + + static const U32 OF_base[MaxOff+1] = { + 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, + 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, + 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, + 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD }; + + /* sequence */ + { size_t offset; + if (!ofCode) + offset = 0; + else { + if (longOffsets) { + int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN); + offset = OF_base[ofCode] + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); + if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream); + if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits); + } else { + offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); + } + } + + if (ofCode <= 1) { + offset += (llCode==0); + if (offset) { + size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; + temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ + if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset = temp; + } else { + offset = seqState->prevOffset[0]; + } + } else { + seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset; + } + seq.offset = offset; + } + + seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */ + if (MEM_32bits() && (mlBits+llBits>24)) BIT_reloadDStream(&seqState->DStream); + + seq.litLength = LL_base[llCode] + ((llCode>15) ? 
BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */ + if (MEM_32bits() || + (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BIT_reloadDStream(&seqState->DStream); + + { size_t const pos = seqState->pos + seq.litLength; + seq.match = seqState->base + pos - seq.offset; /* single memory segment */ + if (seq.offset > pos) seq.match += seqState->gotoDict; /* separate memory segment */ + seqState->pos = pos + seq.matchLength; + } + + /* ANS state update */ + FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ + FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ + FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ + + return seq; +} + +static seq_t ZSTD_decodeSequenceLong(seqState_t* seqState, unsigned const windowSize) { + if (ZSTD_highbit32(windowSize) > STREAM_ACCUMULATOR_MIN) { + return ZSTD_decodeSequenceLong_generic(seqState, 1); + } else { + return ZSTD_decodeSequenceLong_generic(seqState, 0); + } +} + +FORCE_INLINE +size_t ZSTD_execSequenceLong(BYTE* op, + BYTE* const oend, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd) +{ + BYTE* const oLitEnd = op + sequence.litLength; + size_t const sequenceLength = sequence.litLength + sequence.matchLength; + BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ + BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; + const BYTE* const iLitEnd = *litPtr + sequence.litLength; + const BYTE* match = sequence.match; + + /* check */ +#if 1 + if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ + if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ + if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd); +#endif + + /* copy Literals */ + ZSTD_copy8(op, *litPtr); + if (sequence.litLength > 8) + ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ + op = oLitEnd; + *litPtr = iLitEnd; /* update for next sequence */ + + /* copy Match */ +#if 1 + if (sequence.offset > (size_t)(oLitEnd - base)) { + /* offset beyond prefix */ + if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected); + if (match + sequence.matchLength <= dictEnd) { + memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } + /* span extDict & currentPrefixSegment */ + { size_t const length1 = dictEnd - match; + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = base; + if (op > oend_w || sequence.matchLength < MINMATCH) { + U32 i; + for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i]; + return sequenceLength; + } + } } + /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */ +#endif + + /* match within prefix */ + if (sequence.offset < 8) { + /* close range match, overlap */ + static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ + static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ + int const sub2 = dec64table[sequence.offset]; + op[0] = match[0]; + op[1] = match[1]; + op[2] = match[2]; + op[3] = match[3]; + match += dec32table[sequence.offset]; + ZSTD_copy4(op+4, match); + 
match -= sub2; + } else { + ZSTD_copy8(op, match); + } + op += 8; match += 8; + + if (oMatchEnd > oend-(16-MINMATCH)) { + if (op < oend_w) { + ZSTD_wildcopy(op, match, oend_w - op); + match += oend_w - op; + op = oend_w; + } + while (op < oMatchEnd) *op++ = *match++; + } else { + ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */ + } + return sequenceLength; +} + +static size_t ZSTD_decompressSequencesLong( + ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize) +{ + const BYTE* ip = (const BYTE*)seqStart; + const BYTE* const iend = ip + seqSize; + BYTE* const ostart = (BYTE* const)dst; + BYTE* const oend = ostart + maxDstSize; + BYTE* op = ostart; + const BYTE* litPtr = dctx->litPtr; + const BYTE* const litEnd = litPtr + dctx->litSize; + const BYTE* const base = (const BYTE*) (dctx->base); + const BYTE* const vBase = (const BYTE*) (dctx->vBase); + const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); + unsigned const windowSize = dctx->fParams.windowSize; + int nbSeq; + + /* Build Decoding Tables */ + { size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); + if (ZSTD_isError(seqHSize)) return seqHSize; + ip += seqHSize; + } + + /* Regen sequences */ + if (nbSeq) { +#define STORED_SEQS 4 +#define STOSEQ_MASK (STORED_SEQS-1) +#define ADVANCED_SEQS 4 + seq_t sequences[STORED_SEQS]; + int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS); + seqState_t seqState; + int seqNb; + dctx->fseEntropy = 1; + { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } + seqState.base = base; + seqState.pos = (size_t)(op-base); + seqState.gotoDict = (uPtrDiff)dictEnd - (uPtrDiff)base; /* cast to avoid undefined behaviour */ + CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected); + FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); + FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); + FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); + + /* prepare in advance */ + for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && seqNb<seqAdvance; seqNb++) { + sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, windowSize); + } + if (seqNb<seqAdvance) return ERROR(corruption_detected); + + /* decode and decompress */ + for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && seqNb<nbSeq ; seqNb++) { + seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, windowSize); + size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd); + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + ZSTD_PREFETCH(sequence.match); + sequences[seqNb&STOSEQ_MASK] = sequence; + op += oneSeqSize; + } + if (seqNb<nbSeq) return ERROR(corruption_detected); + + /* finish queue */ + seqNb -= seqAdvance; + for ( ; seqNb<nbSeq ; seqNb++) { + size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb&STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd); + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + op += oneSeqSize; + } + + /* save reps for next block */ + { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); } + } + + /* last literal segment */ + { size_t const lastLLSize = litEnd - litPtr; + if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall); + memcpy(op, litPtr, lastLLSize); + op += lastLLSize; + } + + return op-ostart; +} + + 
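ZSTD_decompressSequencesLong() above hides match-fetch latency by decoding a few sequences ahead of execution: it keeps a small ring of pre-decoded sequences, prefetches each upcoming match pointer, and executes a sequence only several iterations after it was decoded. A minimal stand-alone sketch of that decode-ahead pattern, using invented demo_* names rather than zstd internals:

    #define DEMO_LOOKAHEAD 4   /* plays the role of ADVANCED_SEQS / STORED_SEQS above */
    typedef struct { const unsigned char *match; size_t length; } demo_seq;

    static void demo_decode_ahead(demo_seq (*decode)(void *st), void *st,
                                  void (*execute)(const demo_seq *s), int nbSeq)
    {
        demo_seq ring[DEMO_LOOKAHEAD];
        int const adv = (nbSeq < DEMO_LOOKAHEAD) ? nbSeq : DEMO_LOOKAHEAD;
        int n;
        for (n = 0; n < adv; n++)                       /* prime the queue */
            ring[n & (DEMO_LOOKAHEAD-1)] = decode(st);
        for ( ; n < nbSeq; n++) {
            demo_seq const next = decode(st);           /* decode ahead ... */
            /* prefetch next.match here (what ZSTD_PREFETCH does in the real code) */
            execute(&ring[(n - adv) & (DEMO_LOOKAHEAD-1)]);  /* ... execute the oldest */
            ring[n & (DEMO_LOOKAHEAD-1)] = next;
        }
        for (n -= adv; n < nbSeq; n++)                  /* drain what is still queued */
            execute(&ring[n & (DEMO_LOOKAHEAD-1)]);
    }

By the time the oldest queued sequence is executed, its prefetched match bytes have usually arrived in cache, which is what the 4-sequence advance buys.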
+static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ /* blockType == blockCompressed */ + const BYTE* ip = (const BYTE*)src; + + if (srcSize >= ZSTD_BLOCKSIZE_ABSOLUTEMAX) return ERROR(srcSize_wrong); + + /* Decode literals section */ + { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); + if (ZSTD_isError(litCSize)) return litCSize; + ip += litCSize; + srcSize -= litCSize; + } + if (sizeof(size_t) > 4) /* do not enable prefetching on 32-bits x86, as it's performance detrimental */ + /* likely because of register pressure */ + /* if that's the correct cause, then 32-bits ARM should be affected differently */ + /* it would be good to test this on ARM real hardware, to see if prefetch version improves speed */ + if (dctx->fParams.windowSize > (1<<23)) + return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize); + return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize); +} + + +static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst) +{ + if (dst != dctx->previousDstEnd) { /* not contiguous */ + dctx->dictEnd = dctx->previousDstEnd; + dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base)); + dctx->base = dst; + dctx->previousDstEnd = dst; + } +} + +size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + size_t dSize; + ZSTD_checkContinuity(dctx, dst); + dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); + dctx->previousDstEnd = (char*)dst + dSize; + return dSize; +} + + +/** ZSTD_insertBlock() : + insert `src` block into `dctx` history. Useful to track uncompressed blocks. */ +ZSTDLIB_API size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize) +{ + ZSTD_checkContinuity(dctx, blockStart); + dctx->previousDstEnd = (const char*)blockStart + blockSize; + return blockSize; +} + + +size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length) +{ + if (length > dstCapacity) return ERROR(dstSize_tooSmall); + memset(dst, byte, length); + return length; +} + +/** ZSTD_findFrameCompressedSize() : + * compatible with legacy mode + * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame + * `srcSize` must be at least as large as the frame contained + * @return : the compressed size of the frame starting at `src` */ +size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize) +{ +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) + if (ZSTD_isLegacy(src, srcSize)) return ZSTD_findFrameCompressedSizeLegacy(src, srcSize); +#endif + if (srcSize >= ZSTD_skippableHeaderSize && + (MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { + return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + 4); + } else { + const BYTE* ip = (const BYTE*)src; + const BYTE* const ipstart = ip; + size_t remainingSize = srcSize; + ZSTD_frameParams fParams; + + size_t const headerSize = ZSTD_frameHeaderSize(ip, remainingSize); + if (ZSTD_isError(headerSize)) return headerSize; + + /* Frame Header */ + { size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize); + if (ZSTD_isError(ret)) return ret; + if (ret > 0) return ERROR(srcSize_wrong); + } + + ip += headerSize; + remainingSize -= headerSize; + + /* Loop on each block */ + while (1) { + blockProperties_t blockProperties; + size_t const cBlockSize = ZSTD_getcBlockSize(ip, 
remainingSize, &blockProperties); + if (ZSTD_isError(cBlockSize)) return cBlockSize; + + if (ZSTD_blockHeaderSize + cBlockSize > remainingSize) return ERROR(srcSize_wrong); + + ip += ZSTD_blockHeaderSize + cBlockSize; + remainingSize -= ZSTD_blockHeaderSize + cBlockSize; + + if (blockProperties.lastBlock) break; + } + + if (fParams.checksumFlag) { /* Frame content checksum */ + if (remainingSize < 4) return ERROR(srcSize_wrong); + ip += 4; + remainingSize -= 4; + } + + return ip - ipstart; + } +} + +/*! ZSTD_decompressFrame() : +* @dctx must be properly initialized */ +static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void** srcPtr, size_t *srcSizePtr) +{ + const BYTE* ip = (const BYTE*)(*srcPtr); + BYTE* const ostart = (BYTE* const)dst; + BYTE* const oend = ostart + dstCapacity; + BYTE* op = ostart; + size_t remainingSize = *srcSizePtr; + + /* check */ + if (remainingSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); + + /* Frame Header */ + { size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix); + if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize; + if (remainingSize < frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); + CHECK_F(ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize)); + ip += frameHeaderSize; remainingSize -= frameHeaderSize; + } + + /* Loop on each block */ + while (1) { + size_t decodedSize; + blockProperties_t blockProperties; + size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); + if (ZSTD_isError(cBlockSize)) return cBlockSize; + + ip += ZSTD_blockHeaderSize; + remainingSize -= ZSTD_blockHeaderSize; + if (cBlockSize > remainingSize) return ERROR(srcSize_wrong); + + switch(blockProperties.blockType) + { + case bt_compressed: + decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize); + break; + case bt_raw : + decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize); + break; + case bt_rle : + decodedSize = ZSTD_generateNxBytes(op, oend-op, *ip, blockProperties.origSize); + break; + case bt_reserved : + default: + return ERROR(corruption_detected); + } + + if (ZSTD_isError(decodedSize)) return decodedSize; + if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, op, decodedSize); + op += decodedSize; + ip += cBlockSize; + remainingSize -= cBlockSize; + if (blockProperties.lastBlock) break; + } + + if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */ + U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState); + U32 checkRead; + if (remainingSize<4) return ERROR(checksum_wrong); + checkRead = MEM_readLE32(ip); + if (checkRead != checkCalc) return ERROR(checksum_wrong); + ip += 4; + remainingSize -= 4; + } + + /* Allow caller to get size read */ + *srcPtr = ip; + *srcSizePtr = remainingSize; + return op-ostart; +} + +static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict); +static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict); + +static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void *dict, size_t dictSize, + const ZSTD_DDict* ddict) +{ + void* const dststart = dst; + + if (ddict) { + if (dict) { + /* programmer error, these two cases should be mutually exclusive */ + return ERROR(GENERIC); + } + + dict = ZSTD_DDictDictContent(ddict); + dictSize = ZSTD_DDictDictSize(ddict); + } + + while (srcSize >= ZSTD_frameHeaderSize_prefix) { + U32 magicNumber; + +#if 
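ZSTD_findFrameCompressedSize() above measures a frame by walking its block headers without decompressing anything, which keeps buffers of concatenated frames cheap to index. A hedged sketch of a caller using it to count frames (demo_count_frames is an invented name; the function is assumed here to be exposed via the ZSTD_STATIC_LINKING_ONLY section of zstd.h):

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_findFrameCompressedSize is an advanced API */
    #include "zstd.h"

    static unsigned demo_count_frames(const void *src, size_t srcSize)
    {
        const char *ip = (const char *)src;
        unsigned nbFrames = 0;
        while (srcSize > 0) {
            size_t const frameSize = ZSTD_findFrameCompressedSize(ip, srcSize);
            if (ZSTD_isError(frameSize)) break;   /* malformed or truncated input */
            ip += frameSize;
            srcSize -= frameSize;
            nbFrames++;
        }
        return nbFrames;
    }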
defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) + if (ZSTD_isLegacy(src, srcSize)) { + size_t decodedSize; + size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize); + if (ZSTD_isError(frameSize)) return frameSize; + + decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize); + + dst = (BYTE*)dst + decodedSize; + dstCapacity -= decodedSize; + + src = (const BYTE*)src + frameSize; + srcSize -= frameSize; + + continue; + } +#endif + + magicNumber = MEM_readLE32(src); + if (magicNumber != ZSTD_MAGICNUMBER) { + if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { + size_t skippableSize; + if (srcSize < ZSTD_skippableHeaderSize) + return ERROR(srcSize_wrong); + skippableSize = MEM_readLE32((const BYTE *)src + 4) + + ZSTD_skippableHeaderSize; + if (srcSize < skippableSize) { + return ERROR(srcSize_wrong); + } + + src = (const BYTE *)src + skippableSize; + srcSize -= skippableSize; + continue; + } else { + return ERROR(prefix_unknown); + } + } + + if (ddict) { + /* we were called from ZSTD_decompress_usingDDict */ + ZSTD_refDDict(dctx, ddict); + } else { + /* this will initialize correctly with no dict if dict == NULL, so + * use this in all cases but ddict */ + CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize)); + } + ZSTD_checkContinuity(dctx, dst); + + { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, + &src, &srcSize); + if (ZSTD_isError(res)) return res; + /* don't need to bounds check this, ZSTD_decompressFrame will have + * already */ + dst = (BYTE*)dst + res; + dstCapacity -= res; + } + } + + if (srcSize) return ERROR(srcSize_wrong); /* input not entirely consumed */ + + return (BYTE*)dst - (BYTE*)dststart; +} + +size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize) +{ + return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL); +} + + +size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0); +} + + +size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ +#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE==1) + size_t regenSize; + ZSTD_DCtx* const dctx = ZSTD_createDCtx(); + if (dctx==NULL) return ERROR(memory_allocation); + regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize); + ZSTD_freeDCtx(dctx); + return regenSize; +#else /* stack mode */ + ZSTD_DCtx dctx; + return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize); +#endif +} + + +/*-************************************** +* Advanced Streaming Decompression API +* Bufferless and synchronous +****************************************/ +size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; } + +ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) { + switch(dctx->stage) + { + default: /* should not happen */ + case ZSTDds_getFrameHeaderSize: + case ZSTDds_decodeFrameHeader: + return ZSTDnit_frameHeader; + case ZSTDds_decodeBlockHeader: + return ZSTDnit_blockHeader; + case ZSTDds_decompressBlock: + return ZSTDnit_block; + case ZSTDds_decompressLastBlock: + return ZSTDnit_lastBlock; + case ZSTDds_checkChecksum: + return ZSTDnit_checksum; + case ZSTDds_decodeSkippableHeader: + case ZSTDds_skipFrame: + return ZSTDnit_skippableFrame; + } +} + +int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return 
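The one-shot entry points just defined (ZSTD_decompress, ZSTD_decompressDCtx, ZSTD_decompress_usingDict) all route through ZSTD_decompressMultiFrame(), so a caller gets multi-frame and skippable-frame handling for free. A minimal caller-side sketch that reuses one context (demo_decompress is an invented name):

    #include <stdio.h>
    #include "zstd.h"

    static size_t demo_decompress(ZSTD_DCtx *dctx,             /* created once with ZSTD_createDCtx() */
                                  void *dst, size_t dstCapacity,
                                  const void *src, size_t srcSize)
    {
        size_t const dSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
        if (ZSTD_isError(dSize))
            fprintf(stderr, "zstd: %s\n", ZSTD_getErrorName(dSize));
        return dSize;   /* bytes written into dst, or an error code */
    }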
dctx->stage == ZSTDds_skipFrame; } /* for zbuff */ + +/** ZSTD_decompressContinue() : +* @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity) +* or an error code, which can be tested using ZSTD_isError() */ +size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + /* Sanity check */ + if (srcSize != dctx->expected) return ERROR(srcSize_wrong); + if (dstCapacity) ZSTD_checkContinuity(dctx, dst); + + switch (dctx->stage) + { + case ZSTDds_getFrameHeaderSize : + if (srcSize != ZSTD_frameHeaderSize_prefix) return ERROR(srcSize_wrong); /* impossible */ + if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ + memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix); + dctx->expected = ZSTD_skippableHeaderSize - ZSTD_frameHeaderSize_prefix; /* magic number + skippable frame length */ + dctx->stage = ZSTDds_decodeSkippableHeader; + return 0; + } + dctx->headerSize = ZSTD_frameHeaderSize(src, ZSTD_frameHeaderSize_prefix); + if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize; + memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix); + if (dctx->headerSize > ZSTD_frameHeaderSize_prefix) { + dctx->expected = dctx->headerSize - ZSTD_frameHeaderSize_prefix; + dctx->stage = ZSTDds_decodeFrameHeader; + return 0; + } + dctx->expected = 0; /* not necessary to copy more */ + + case ZSTDds_decodeFrameHeader: + memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected); + CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize)); + dctx->expected = ZSTD_blockHeaderSize; + dctx->stage = ZSTDds_decodeBlockHeader; + return 0; + + case ZSTDds_decodeBlockHeader: + { blockProperties_t bp; + size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); + if (ZSTD_isError(cBlockSize)) return cBlockSize; + dctx->expected = cBlockSize; + dctx->bType = bp.blockType; + dctx->rleSize = bp.origSize; + if (cBlockSize) { + dctx->stage = bp.lastBlock ? 
ZSTDds_decompressLastBlock : ZSTDds_decompressBlock; + return 0; + } + /* empty block */ + if (bp.lastBlock) { + if (dctx->fParams.checksumFlag) { + dctx->expected = 4; + dctx->stage = ZSTDds_checkChecksum; + } else { + dctx->expected = 0; /* end of frame */ + dctx->stage = ZSTDds_getFrameHeaderSize; + } + } else { + dctx->expected = 3; /* go directly to next header */ + dctx->stage = ZSTDds_decodeBlockHeader; + } + return 0; + } + case ZSTDds_decompressLastBlock: + case ZSTDds_decompressBlock: + { size_t rSize; + switch(dctx->bType) + { + case bt_compressed: + rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); + break; + case bt_raw : + rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); + break; + case bt_rle : + rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize); + break; + case bt_reserved : /* should never happen */ + default: + return ERROR(corruption_detected); + } + if (ZSTD_isError(rSize)) return rSize; + if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize); + + if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */ + if (dctx->fParams.checksumFlag) { /* another round for frame checksum */ + dctx->expected = 4; + dctx->stage = ZSTDds_checkChecksum; + } else { + dctx->expected = 0; /* ends here */ + dctx->stage = ZSTDds_getFrameHeaderSize; + } + } else { + dctx->stage = ZSTDds_decodeBlockHeader; + dctx->expected = ZSTD_blockHeaderSize; + dctx->previousDstEnd = (char*)dst + rSize; + } + return rSize; + } + case ZSTDds_checkChecksum: + { U32 const h32 = (U32)XXH64_digest(&dctx->xxhState); + U32 const check32 = MEM_readLE32(src); /* srcSize == 4, guaranteed by dctx->expected */ + if (check32 != h32) return ERROR(checksum_wrong); + dctx->expected = 0; + dctx->stage = ZSTDds_getFrameHeaderSize; + return 0; + } + case ZSTDds_decodeSkippableHeader: + { memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected); + dctx->expected = MEM_readLE32(dctx->headerBuffer + 4); + dctx->stage = ZSTDds_skipFrame; + return 0; + } + case ZSTDds_skipFrame: + { dctx->expected = 0; + dctx->stage = ZSTDds_getFrameHeaderSize; + return 0; + } + default: + return ERROR(GENERIC); /* impossible */ + } +} + + +static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) +{ + dctx->dictEnd = dctx->previousDstEnd; + dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base)); + dctx->base = dict; + dctx->previousDstEnd = (const char*)dict + dictSize; + return 0; +} + +/* ZSTD_loadEntropy() : + * dict : must point at beginning of a valid zstd dictionary + * @return : size of entropy tables read */ +static size_t ZSTD_loadEntropy(ZSTD_entropyTables_t* entropy, const void* const dict, size_t const dictSize) +{ + const BYTE* dictPtr = (const BYTE*)dict; + const BYTE* const dictEnd = dictPtr + dictSize; + + if (dictSize <= 8) return ERROR(dictionary_corrupted); + dictPtr += 8; /* skip header = magic + dictID */ + + + { size_t const hSize = HUF_readDTableX4(entropy->hufTable, dictPtr, dictEnd-dictPtr); + if (HUF_isError(hSize)) return ERROR(dictionary_corrupted); + dictPtr += hSize; + } + + { short offcodeNCount[MaxOff+1]; + U32 offcodeMaxValue = MaxOff, offcodeLog; + size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted); + if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted); + 
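ZSTD_decompressContinue() above is a strict state machine: every call must be fed exactly ZSTD_nextSrcSizeToDecompress() bytes, and a hint of 0 means the current frame is finished. A hedged sketch of the resulting caller loop, written as if it lived in this file (so it reuses the CHECK_F/ERROR helpers); it assumes the whole frame is already in memory and demo_bufferless_decompress is an invented name:

    static size_t demo_bufferless_decompress(ZSTD_DCtx* dctx,
                                             void* dst, size_t dstCapacity,
                                             const void* src, size_t srcSize)
    {
        const char* ip = (const char*)src;
        const char* const iend = ip + srcSize;
        char* op = (char*)dst;
        char* const oend = op + dstCapacity;

        CHECK_F(ZSTD_decompressBegin(dctx));          /* or ZSTD_decompressBegin_usingDict() */
        while (1) {
            size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
            size_t produced;
            if (toRead == 0) break;                   /* frame fully decoded */
            if (toRead > (size_t)(iend - ip)) return ERROR(srcSize_wrong);   /* truncated input */
            produced = ZSTD_decompressContinue(dctx, op, (size_t)(oend - op), ip, toRead);
            if (ZSTD_isError(produced)) return produced;
            ip += toRead;
            op += produced;
        }
        return (size_t)(op - (char*)dst);             /* total regenerated size */
    }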
CHECK_E(FSE_buildDTable(entropy->OFTable, offcodeNCount, offcodeMaxValue, offcodeLog), dictionary_corrupted); + dictPtr += offcodeHeaderSize; + } + + { short matchlengthNCount[MaxML+1]; + unsigned matchlengthMaxValue = MaxML, matchlengthLog; + size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted); + if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted); + CHECK_E(FSE_buildDTable(entropy->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog), dictionary_corrupted); + dictPtr += matchlengthHeaderSize; + } + + { short litlengthNCount[MaxLL+1]; + unsigned litlengthMaxValue = MaxLL, litlengthLog; + size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted); + if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted); + CHECK_E(FSE_buildDTable(entropy->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog), dictionary_corrupted); + dictPtr += litlengthHeaderSize; + } + + if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted); + { int i; + size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12)); + for (i=0; i<3; i++) { + U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4; + if (rep==0 || rep >= dictContentSize) return ERROR(dictionary_corrupted); + entropy->rep[i] = rep; + } } + + return dictPtr - (const BYTE*)dict; +} + +static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) +{ + if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize); + { U32 const magic = MEM_readLE32(dict); + if (magic != ZSTD_DICT_MAGIC) { + return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */ + } } + dctx->dictID = MEM_readLE32((const char*)dict + 4); + + /* load entropy tables */ + { size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize); + if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted); + dict = (const char*)dict + eSize; + dictSize -= eSize; + } + dctx->litEntropy = dctx->fseEntropy = 1; + + /* reference dictionary content */ + return ZSTD_refDictContent(dctx, dict, dictSize); +} + +size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) +{ + CHECK_F(ZSTD_decompressBegin(dctx)); + if (dict && dictSize) CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted); + return 0; +} + + +/* ====== ZSTD_DDict ====== */ + +struct ZSTD_DDict_s { + void* dictBuffer; + const void* dictContent; + size_t dictSize; + ZSTD_entropyTables_t entropy; + U32 dictID; + U32 entropyPresent; + ZSTD_customMem cMem; +}; /* typedef'd to ZSTD_DDict within "zstd.h" */ + +static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict) +{ + return ddict->dictContent; +} + +static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict) +{ + return ddict->dictSize; +} + +static void ZSTD_refDDict(ZSTD_DCtx* dstDCtx, const ZSTD_DDict* ddict) +{ + ZSTD_decompressBegin(dstDCtx); /* init */ + if (ddict) { /* support refDDict on NULL */ + dstDCtx->dictID = ddict->dictID; + dstDCtx->base = ddict->dictContent; + dstDCtx->vBase = ddict->dictContent; + dstDCtx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize; + dstDCtx->previousDstEnd = dstDCtx->dictEnd; + if (ddict->entropyPresent) { + dstDCtx->litEntropy = 1; + dstDCtx->fseEntropy = 1; + 
dstDCtx->LLTptr = ddict->entropy.LLTable; + dstDCtx->MLTptr = ddict->entropy.MLTable; + dstDCtx->OFTptr = ddict->entropy.OFTable; + dstDCtx->HUFptr = ddict->entropy.hufTable; + dstDCtx->entropy.rep[0] = ddict->entropy.rep[0]; + dstDCtx->entropy.rep[1] = ddict->entropy.rep[1]; + dstDCtx->entropy.rep[2] = ddict->entropy.rep[2]; + } else { + dstDCtx->litEntropy = 0; + dstDCtx->fseEntropy = 0; + } + } +} + +static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict) +{ + ddict->dictID = 0; + ddict->entropyPresent = 0; + if (ddict->dictSize < 8) return 0; + { U32 const magic = MEM_readLE32(ddict->dictContent); + if (magic != ZSTD_DICT_MAGIC) return 0; /* pure content mode */ + } + ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + 4); + + /* load entropy tables */ + CHECK_E( ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted ); + ddict->entropyPresent = 1; + return 0; +} + + +ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, unsigned byReference, ZSTD_customMem customMem) +{ + if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem; + if (!customMem.customAlloc || !customMem.customFree) return NULL; + + { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem); + if (!ddict) return NULL; + ddict->cMem = customMem; + + if ((byReference) || (!dict) || (!dictSize)) { + ddict->dictBuffer = NULL; + ddict->dictContent = dict; + } else { + void* const internalBuffer = ZSTD_malloc(dictSize, customMem); + if (!internalBuffer) { ZSTD_freeDDict(ddict); return NULL; } + memcpy(internalBuffer, dict, dictSize); + ddict->dictBuffer = internalBuffer; + ddict->dictContent = internalBuffer; + } + ddict->dictSize = dictSize; + ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ + /* parse dictionary content */ + { size_t const errorCode = ZSTD_loadEntropy_inDDict(ddict); + if (ZSTD_isError(errorCode)) { + ZSTD_freeDDict(ddict); + return NULL; + } } + + return ddict; + } +} + +/*! ZSTD_createDDict() : +* Create a digested dictionary, to start decompression without startup delay. +* `dict` content is copied inside DDict. +* Consequently, `dict` can be released after `ZSTD_DDict` creation */ +ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize) +{ + ZSTD_customMem const allocator = { NULL, NULL, NULL }; + return ZSTD_createDDict_advanced(dict, dictSize, 0, allocator); +} + + +/*! ZSTD_createDDict_byReference() : + * Create a digested dictionary, to start decompression without startup delay. + * Dictionary content is simply referenced, it will be accessed during decompression. + * Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */ +ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize) +{ + ZSTD_customMem const allocator = { NULL, NULL, NULL }; + return ZSTD_createDDict_advanced(dictBuffer, dictSize, 1, allocator); +} + + +size_t ZSTD_freeDDict(ZSTD_DDict* ddict) +{ + if (ddict==NULL) return 0; /* support free on NULL */ + { ZSTD_customMem const cMem = ddict->cMem; + ZSTD_free(ddict->dictBuffer, cMem); + ZSTD_free(ddict, cMem); + return 0; + } +} + +size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict) +{ + if (ddict==NULL) return 0; /* support sizeof on NULL */ + return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ; +} + +/*! ZSTD_getDictID_fromDict() : + * Provides the dictID stored within dictionary. 
+ * if @return == 0, the dictionary is not conformant with Zstandard specification. + * It can still be loaded, but as a content-only dictionary. */ +unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize) +{ + if (dictSize < 8) return 0; + if (MEM_readLE32(dict) != ZSTD_DICT_MAGIC) return 0; + return MEM_readLE32((const char*)dict + 4); +} + +/*! ZSTD_getDictID_fromDDict() : + * Provides the dictID of the dictionary loaded into `ddict`. + * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. + * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ +unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict) +{ + if (ddict==NULL) return 0; + return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize); +} + +/*! ZSTD_getDictID_fromFrame() : + * Provides the dictID required to decompresse frame stored within `src`. + * If @return == 0, the dictID could not be decoded. + * This could for one of the following reasons : + * - The frame does not require a dictionary (most common case). + * - The frame was built with dictID intentionally removed. + * Needed dictionary is a hidden information. + * Note : this use case also happens when using a non-conformant dictionary. + * - `srcSize` is too small, and as a result, frame header could not be decoded. + * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`. + * - This is not a Zstandard frame. + * When identifying the exact failure cause, it's possible to use + * ZSTD_getFrameParams(), which will provide a more precise error code. */ +unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize) +{ + ZSTD_frameParams zfp = { 0 , 0 , 0 , 0 }; + size_t const hError = ZSTD_getFrameParams(&zfp, src, srcSize); + if (ZSTD_isError(hError)) return 0; + return zfp.dictID; +} + + +/*! ZSTD_decompress_usingDDict() : +* Decompression using a pre-digested Dictionary +* Use dictionary without significant overhead. 
*/ +size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_DDict* ddict) +{ + /* pass content and size in case legacy frames are encountered */ + return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, + NULL, 0, + ddict); +} + + +/*===================================== +* Streaming decompression +*====================================*/ + +typedef enum { zdss_init, zdss_loadHeader, + zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage; + +/* *** Resource management *** */ +struct ZSTD_DStream_s { + ZSTD_DCtx* dctx; + ZSTD_DDict* ddictLocal; + const ZSTD_DDict* ddict; + ZSTD_frameParams fParams; + ZSTD_dStreamStage stage; + char* inBuff; + size_t inBuffSize; + size_t inPos; + size_t maxWindowSize; + char* outBuff; + size_t outBuffSize; + size_t outStart; + size_t outEnd; + size_t blockSize; + BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; /* tmp buffer to store frame header */ + size_t lhSize; + ZSTD_customMem customMem; + void* legacyContext; + U32 previousLegacyVersion; + U32 legacyVersion; + U32 hostageByte; +}; /* typedef'd to ZSTD_DStream within "zstd.h" */ + + +ZSTD_DStream* ZSTD_createDStream(void) +{ + return ZSTD_createDStream_advanced(defaultCustomMem); +} + +ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem) +{ + ZSTD_DStream* zds; + + if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem; + if (!customMem.customAlloc || !customMem.customFree) return NULL; + + zds = (ZSTD_DStream*) ZSTD_malloc(sizeof(ZSTD_DStream), customMem); + if (zds==NULL) return NULL; + memset(zds, 0, sizeof(ZSTD_DStream)); + memcpy(&zds->customMem, &customMem, sizeof(ZSTD_customMem)); + zds->dctx = ZSTD_createDCtx_advanced(customMem); + if (zds->dctx == NULL) { ZSTD_freeDStream(zds); return NULL; } + zds->stage = zdss_init; + zds->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT; + return zds; +} + +size_t ZSTD_freeDStream(ZSTD_DStream* zds) +{ + if (zds==NULL) return 0; /* support free on null */ + { ZSTD_customMem const cMem = zds->customMem; + ZSTD_freeDCtx(zds->dctx); + zds->dctx = NULL; + ZSTD_freeDDict(zds->ddictLocal); + zds->ddictLocal = NULL; + ZSTD_free(zds->inBuff, cMem); + zds->inBuff = NULL; + ZSTD_free(zds->outBuff, cMem); + zds->outBuff = NULL; +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) + if (zds->legacyContext) + ZSTD_freeLegacyStreamContext(zds->legacyContext, zds->previousLegacyVersion); +#endif + ZSTD_free(zds, cMem); + return 0; + } +} + + +/* *** Initialization *** */ + +size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX + ZSTD_blockHeaderSize; } +size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; } + +size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize) +{ + zds->stage = zdss_loadHeader; + zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; + ZSTD_freeDDict(zds->ddictLocal); + if (dict && dictSize >= 8) { + zds->ddictLocal = ZSTD_createDDict(dict, dictSize); + if (zds->ddictLocal == NULL) return ERROR(memory_allocation); + } else zds->ddictLocal = NULL; + zds->ddict = zds->ddictLocal; + zds->legacyVersion = 0; + zds->hostageByte = 0; + return ZSTD_frameHeaderSize_prefix; +} + +size_t ZSTD_initDStream(ZSTD_DStream* zds) +{ + return ZSTD_initDStream_usingDict(zds, NULL, 0); +} + +/* ZSTD_initDStream_usingDDict() : + * ddict will just be referenced, and must outlive decompression session */ +size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* 
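ZSTD_decompress_usingDDict() above is the payoff of the ZSTD_DDict machinery: the dictionary is parsed once in ZSTD_createDDict() and merely referenced for every subsequent frame. A hedged caller-side sketch (demo_* and the buffer names are placeholders):

    static size_t demo_decompress_with_ddict(void *dst, size_t dstCapacity,
                                             const void *src, size_t srcSize,
                                             const void *dictBuf, size_t dictSize)
    {
        size_t dSize = 0;
        ZSTD_DDict *const ddict = ZSTD_createDDict(dictBuf, dictSize);   /* copies dictBuf */
        ZSTD_DCtx  *const dctx  = ZSTD_createDCtx();
        if (ddict != NULL && dctx != NULL)
            dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ddict);
        if (dctx) ZSTD_freeDCtx(dctx);
        ZSTD_freeDDict(ddict);   /* accepts NULL, as shown above */
        return dSize;            /* test with ZSTD_isError() */
    }

In a real decoder both the DDict and the DCtx would be created once and reused across frames; creating them per call here is only for brevity.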
ddict) +{ + size_t const initResult = ZSTD_initDStream(zds); + zds->ddict = ddict; + return initResult; +} + +size_t ZSTD_resetDStream(ZSTD_DStream* zds) +{ + zds->stage = zdss_loadHeader; + zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; + zds->legacyVersion = 0; + zds->hostageByte = 0; + return ZSTD_frameHeaderSize_prefix; +} + +size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, + ZSTD_DStreamParameter_e paramType, unsigned paramValue) +{ + switch(paramType) + { + default : return ERROR(parameter_unknown); + case DStream_p_maxWindowSize : zds->maxWindowSize = paramValue ? paramValue : (U32)(-1); break; + } + return 0; +} + + +size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds) +{ + if (zds==NULL) return 0; /* support sizeof NULL */ + return sizeof(*zds) + + ZSTD_sizeof_DCtx(zds->dctx) + + ZSTD_sizeof_DDict(zds->ddictLocal) + + zds->inBuffSize + zds->outBuffSize; +} + + +/* ***** Decompression ***** */ + +MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + size_t const length = MIN(dstCapacity, srcSize); + memcpy(dst, src, length); + return length; +} + + +size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input) +{ + const char* const istart = (const char*)(input->src) + input->pos; + const char* const iend = (const char*)(input->src) + input->size; + const char* ip = istart; + char* const ostart = (char*)(output->dst) + output->pos; + char* const oend = (char*)(output->dst) + output->size; + char* op = ostart; + U32 someMoreWork = 1; + +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) + if (zds->legacyVersion) + return ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input); +#endif + + while (someMoreWork) { + switch(zds->stage) + { + case zdss_init : + ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */ + /* fall-through */ + + case zdss_loadHeader : + { size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize); + if (ZSTD_isError(hSize)) +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) + { U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart); + if (legacyVersion) { + const void* const dict = zds->ddict ? zds->ddict->dictContent : NULL; + size_t const dictSize = zds->ddict ? 
zds->ddict->dictSize : 0; + CHECK_F(ZSTD_initLegacyStream(&zds->legacyContext, zds->previousLegacyVersion, legacyVersion, + dict, dictSize)); + zds->legacyVersion = zds->previousLegacyVersion = legacyVersion; + return ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input); + } else { + return hSize; /* error */ + } } +#else + return hSize; +#endif + if (hSize != 0) { /* need more input */ + size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */ + if (toLoad > (size_t)(iend-ip)) { /* not enough input to load full header */ + memcpy(zds->headerBuffer + zds->lhSize, ip, iend-ip); + zds->lhSize += iend-ip; + input->pos = input->size; + return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */ + } + memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad; + break; + } } + + /* check for single-pass mode opportunity */ + if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */ + && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) { + size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart); + if (cSize <= (size_t)(iend-istart)) { + size_t const decompressedSize = ZSTD_decompress_usingDDict(zds->dctx, op, oend-op, istart, cSize, zds->ddict); + if (ZSTD_isError(decompressedSize)) return decompressedSize; + ip = istart + cSize; + op += decompressedSize; + zds->dctx->expected = 0; + zds->stage = zdss_init; + someMoreWork = 0; + break; + } } + + /* Consume header */ + ZSTD_refDDict(zds->dctx, zds->ddict); + { size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */ + CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer, h1Size)); + { size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); + CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer+h1Size, h2Size)); + } } + + zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN); + if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge); + + /* Adapt buffer sizes to frame header instructions */ + { size_t const blockSize = MIN(zds->fParams.windowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX); + size_t const neededOutSize = zds->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2; + zds->blockSize = blockSize; + if (zds->inBuffSize < blockSize) { + ZSTD_free(zds->inBuff, zds->customMem); + zds->inBuffSize = 0; + zds->inBuff = (char*)ZSTD_malloc(blockSize, zds->customMem); + if (zds->inBuff == NULL) return ERROR(memory_allocation); + zds->inBuffSize = blockSize; + } + if (zds->outBuffSize < neededOutSize) { + ZSTD_free(zds->outBuff, zds->customMem); + zds->outBuffSize = 0; + zds->outBuff = (char*)ZSTD_malloc(neededOutSize, zds->customMem); + if (zds->outBuff == NULL) return ERROR(memory_allocation); + zds->outBuffSize = neededOutSize; + } } + zds->stage = zdss_read; + /* pass-through */ + + case zdss_read: + { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); + if (neededInSize==0) { /* end of frame */ + zds->stage = zdss_init; + someMoreWork = 0; + break; + } + if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */ + const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx); + size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, + zds->outBuff + zds->outStart, (isSkipFrame ? 
0 : zds->outBuffSize - zds->outStart), + ip, neededInSize); + if (ZSTD_isError(decodedSize)) return decodedSize; + ip += neededInSize; + if (!decodedSize && !isSkipFrame) break; /* this was just a header */ + zds->outEnd = zds->outStart + decodedSize; + zds->stage = zdss_flush; + break; + } + if (ip==iend) { someMoreWork = 0; break; } /* no more input */ + zds->stage = zdss_load; + /* pass-through */ + } + + case zdss_load: + { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); + size_t const toLoad = neededInSize - zds->inPos; /* should always be <= remaining space within inBuff */ + size_t loadedSize; + if (toLoad > zds->inBuffSize - zds->inPos) return ERROR(corruption_detected); /* should never happen */ + loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip); + ip += loadedSize; + zds->inPos += loadedSize; + if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */ + + /* decode loaded input */ + { const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx); + size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, + zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart, + zds->inBuff, neededInSize); + if (ZSTD_isError(decodedSize)) return decodedSize; + zds->inPos = 0; /* input is consumed */ + if (!decodedSize && !isSkipFrame) { zds->stage = zdss_read; break; } /* this was just a header */ + zds->outEnd = zds->outStart + decodedSize; + zds->stage = zdss_flush; + /* pass-through */ + } } + + case zdss_flush: + { size_t const toFlushSize = zds->outEnd - zds->outStart; + size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize); + op += flushedSize; + zds->outStart += flushedSize; + if (flushedSize == toFlushSize) { /* flush completed */ + zds->stage = zdss_read; + if (zds->outStart + zds->blockSize > zds->outBuffSize) + zds->outStart = zds->outEnd = 0; + break; + } + /* cannot complete flush */ + someMoreWork = 0; + break; + } + default: return ERROR(GENERIC); /* impossible */ + } } + + /* result */ + input->pos += (size_t)(ip-istart); + output->pos += (size_t)(op-ostart); + { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx); + if (!nextSrcSizeHint) { /* frame fully decoded */ + if (zds->outEnd == zds->outStart) { /* output fully flushed */ + if (zds->hostageByte) { + if (input->pos >= input->size) { zds->stage = zdss_read; return 1; } /* can't release hostage (not present) */ + input->pos++; /* release hostage */ + } + return 0; + } + if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */ + input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */ + zds->hostageByte=1; + } + return 1; + } + nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds->dctx) == ZSTDnit_block); /* preload header of next block */ + if (zds->inPos > nextSrcSizeHint) return ERROR(GENERIC); /* should never happen */ + nextSrcSizeHint -= zds->inPos; /* already loaded*/ + return nextSrcSizeHint; + } +} diff --git a/thirdparty/zstd/zstd.h b/thirdparty/zstd/zstd.h new file mode 100644 index 0000000000..f8050c1361 --- /dev/null +++ b/thirdparty/zstd/zstd.h @@ -0,0 +1,795 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +#if defined (__cplusplus) +extern "C" { +#endif + +#ifndef ZSTD_H_235446 +#define ZSTD_H_235446 + +/* ====== Dependency ======*/ +#include <stddef.h> /* size_t */ + + +/* ===== ZSTDLIB_API : control library symbols visibility ===== */ +#if defined(__GNUC__) && (__GNUC__ >= 4) +# define ZSTDLIB_VISIBILITY __attribute__ ((visibility ("default"))) +#else +# define ZSTDLIB_VISIBILITY +#endif +#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) +# define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBILITY +#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) +# define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +#else +# define ZSTDLIB_API ZSTDLIB_VISIBILITY +#endif + + +/******************************************************************************************************* + Introduction + + zstd, short for Zstandard, is a fast lossless compression algorithm, targeting real-time compression scenarios + at zlib-level and better compression ratios. The zstd compression library provides in-memory compression and + decompression functions. The library supports compression levels from 1 up to ZSTD_maxCLevel() which is 22. + Levels >= 20, labeled `--ultra`, should be used with caution, as they require more memory. + Compression can be done in: + - a single step (described as Simple API) + - a single step, reusing a context (described as Explicit memory management) + - unbounded multiple steps (described as Streaming compression) + The compression ratio achievable on small data can be highly improved using compression with a dictionary in: + - a single step (described as Simple dictionary API) + - a single step, reusing a dictionary (described as Fast dictionary API) + + Advanced experimental functions can be accessed using #define ZSTD_STATIC_LINKING_ONLY before including zstd.h. + These APIs shall never be used with a dynamic library. + They are not "stable", their definition may change in the future. Only static linking is allowed. +*********************************************************************************************************/ + +/*------ Version ------*/ +#define ZSTD_VERSION_MAJOR 1 +#define ZSTD_VERSION_MINOR 2 +#define ZSTD_VERSION_RELEASE 0 + +#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE +#define ZSTD_QUOTE(str) #str +#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str) +#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION) + +#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) +ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< library version number; to be used when checking dll version */ + + +/*************************************** +* Simple API +***************************************/ +/*! ZSTD_compress() : + * Compresses `src` content as a single zstd compressed frame into already allocated `dst`. + * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`. + * @return : compressed size written into `dst` (<= `dstCapacity), + * or an error code if it fails (which can be tested using ZSTD_isError()). */ +ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + int compressionLevel); + +/*! 
ZSTD_decompress() : + * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames. + * `dstCapacity` is an upper bound of originalSize. + * If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data. + * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), + * or an errorCode if it fails (which can be tested using ZSTD_isError()). */ +ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity, + const void* src, size_t compressedSize); + +/*! ZSTD_getDecompressedSize() : + * NOTE: This function is planned to be obsolete, in favour of ZSTD_getFrameContentSize. + * ZSTD_getFrameContentSize functions the same way, returning the decompressed size of a single + * frame, but distinguishes empty frames from frames with an unknown size, or errors. + * + * Additionally, ZSTD_findDecompressedSize can be used instead. It can handle multiple + * concatenated frames in one buffer, and so is more general. + * As a result however, it requires more computation and entire frames to be passed to it, + * as opposed to ZSTD_getFrameContentSize which requires only a single frame's header. + * + * 'src' is the start of a zstd compressed frame. + * @return : content size to be decompressed, as a 64-bits value _if known_, 0 otherwise. + * note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode. + * When `return==0`, data to decompress could be any size. + * In which case, it's necessary to use streaming mode to decompress data. + * Optionally, application can still use ZSTD_decompress() while relying on implied limits. + * (For example, data may be necessarily cut into blocks <= 16 KB). + * note 2 : decompressed size is always present when compression is done with ZSTD_compress() + * note 3 : decompressed size can be very large (64-bits value), + * potentially larger than what local system can handle as a single memory segment. + * In which case, it's necessary to use streaming mode to decompress data. + * note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified. + * Always ensure result fits within application's authorized limits. + * Each application can set its own limits. + * note 5 : when `return==0`, if precise failure cause is needed, use ZSTD_getFrameParams() to know more. */ +ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize); + + +/*====== Helper functions ======*/ +ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */ +ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case scenario */ +ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */ +ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */ + + +/*************************************** +* Explicit memory management +***************************************/ +/*= Compression context + * When compressing many times, + * it is recommended to allocate a context just once, and re-use it for each successive compression operation. + * This will make workload friendlier for system's memory. + * Use one context per thread for parallel execution in multi-threaded environments. */ +typedef struct ZSTD_CCtx_s ZSTD_CCtx; +ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void); +ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); + +/*! 
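A hedged round-trip through the Simple API documented above: ZSTD_compressBound() sizes the worst-case destination, and both directions are checked with ZSTD_isError(). Buffer handling and the demo_ name are illustrative only:

    #include <string.h>
    #include "zstd.h"

    static int demo_roundtrip(const void *src, size_t srcSize,
                              void *scratch, size_t scratchSize,   /* >= ZSTD_compressBound(srcSize) */
                              void *regen, size_t regenSize)       /* >= srcSize */
    {
        size_t const cSize = ZSTD_compress(scratch, scratchSize, src, srcSize, 3 /* level */);
        if (ZSTD_isError(cSize)) return -1;
        {   size_t const dSize = ZSTD_decompress(regen, regenSize, scratch, cSize);
            if (ZSTD_isError(dSize) || dSize != srcSize) return -1;
        }
        return memcmp(src, regen, srcSize) == 0 ? 0 : -1;
    }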
ZSTD_compressCCtx() : + * Same as ZSTD_compress(), requires an allocated ZSTD_CCtx (see ZSTD_createCCtx()). */ +ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel); + +/*= Decompression context + * When decompressing many times, + * it is recommended to allocate a context just once, and re-use it for each successive compression operation. + * This will make workload friendlier for system's memory. + * Use one context per thread for parallel execution in multi-threaded environments. */ +typedef struct ZSTD_DCtx_s ZSTD_DCtx; +ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void); +ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); + +/*! ZSTD_decompressDCtx() : + * Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx (see ZSTD_createDCtx()). */ +ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); + + +/************************** +* Simple dictionary API +***************************/ +/*! ZSTD_compress_usingDict() : +* Compression using a predefined Dictionary (see dictBuilder/zdict.h). +* Note : This function loads the dictionary, resulting in significant startup delay. +* Note : When `dict == NULL || dictSize < 8` no dictionary is used. */ +ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize, + int compressionLevel); + +/*! ZSTD_decompress_usingDict() : +* Decompression using a predefined Dictionary (see dictBuilder/zdict.h). +* Dictionary must be identical to the one used during compression. +* Note : This function loads the dictionary, resulting in significant startup delay. +* Note : When `dict == NULL || dictSize < 8` no dictionary is used. */ +ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize); + + +/**************************** +* Fast dictionary API +****************************/ +typedef struct ZSTD_CDict_s ZSTD_CDict; + +/*! ZSTD_createCDict() : +* When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once. +* ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay. +* ZSTD_CDict can be created once and used by multiple threads concurrently, as its usage is read-only. +* `dictBuffer` can be released after ZSTD_CDict creation, as its content is copied within CDict */ +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize, int compressionLevel); + +/*! ZSTD_freeCDict() : +* Function frees memory allocated by ZSTD_createCDict(). */ +ZSTDLIB_API size_t ZSTD_freeCDict(ZSTD_CDict* CDict); + +/*! ZSTD_compress_usingCDict() : + * Compression using a digested Dictionary. + * Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. + * Note that compression level is decided during dictionary creation. + * Frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */ +ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_CDict* cdict); + + +typedef struct ZSTD_DDict_s ZSTD_DDict; + +/*! ZSTD_createDDict() : +* Create a digested dictionary, ready to start decompression operation without startup delay. 
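The Fast dictionary API documented above mirrors the decompression side: digest the dictionary once with ZSTD_createCDict() (the compression level is fixed at that point), then compress any number of inputs against it. A hedged sketch with invented demo_ names:

    static size_t demo_compress_with_dict(void *dst, size_t dstCapacity,
                                          const void *src, size_t srcSize,
                                          const void *dictBuf, size_t dictSize)
    {
        size_t cSize = 0;
        ZSTD_CDict *const cdict = ZSTD_createCDict(dictBuf, dictSize, 3 /* level, fixed at creation */);
        ZSTD_CCtx  *const cctx  = ZSTD_createCCtx();
        if (cdict != NULL && cctx != NULL)
            cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
        if (cctx)  ZSTD_freeCCtx(cctx);
        if (cdict) ZSTD_freeCDict(cdict);
        return cSize;   /* test with ZSTD_isError(); reuse cdict/cctx across calls in real code */
    }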
+* dictBuffer can be released after DDict creation, as its content is copied inside DDict */ +ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize); + +/*! ZSTD_freeDDict() : +* Function frees memory allocated with ZSTD_createDDict() */ +ZSTDLIB_API size_t ZSTD_freeDDict(ZSTD_DDict* ddict); + +/*! ZSTD_decompress_usingDDict() : +* Decompression using a digested Dictionary. +* Faster startup than ZSTD_decompress_usingDict(), recommended when same dictionary is used multiple times. */ +ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_DDict* ddict); + + +/**************************** +* Streaming +****************************/ + +typedef struct ZSTD_inBuffer_s { + const void* src; /**< start of input buffer */ + size_t size; /**< size of input buffer */ + size_t pos; /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */ +} ZSTD_inBuffer; + +typedef struct ZSTD_outBuffer_s { + void* dst; /**< start of output buffer */ + size_t size; /**< size of output buffer */ + size_t pos; /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */ +} ZSTD_outBuffer; + + + +/*-*********************************************************************** +* Streaming compression - HowTo +* +* A ZSTD_CStream object is required to track streaming operation. +* Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources. +* ZSTD_CStream objects can be reused multiple times on consecutive compression operations. +* It is recommended to re-use ZSTD_CStream in situations where many streaming operations will be achieved consecutively, +* since it will play nicer with system's memory, by re-using already allocated memory. +* Use one separate ZSTD_CStream per thread for parallel execution. +* +* Start a new compression by initializing ZSTD_CStream. +* Use ZSTD_initCStream() to start a new compression operation. +* Use ZSTD_initCStream_usingDict() or ZSTD_initCStream_usingCDict() for a compression which requires a dictionary (experimental section) +* +* Use ZSTD_compressStream() repetitively to consume input stream. +* The function will automatically update both `pos` fields. +* Note that it may not consume the entire input, in which case `pos < size`, +* and it's up to the caller to present again remaining data. +* @return : a size hint, preferred nb of bytes to use as input for next function call +* or an error code, which can be tested using ZSTD_isError(). +* Note 1 : it's just a hint, to help latency a little, any other value will work fine. +* Note 2 : size hint is guaranteed to be <= ZSTD_CStreamInSize() +* +* At any moment, it's possible to flush whatever data remains within internal buffer, using ZSTD_flushStream(). +* `output->pos` will be updated. +* Note that some content might still be left within internal buffer if `output->size` is too small. +* @return : nb of bytes still present within internal buffer (0 if it's empty) +* or an error code, which can be tested using ZSTD_isError(). +* +* ZSTD_endStream() instructs to finish a frame. +* It will perform a flush and write frame epilogue. +* The epilogue is required for decoders to consider a frame completed. +* Similar to ZSTD_flushStream(), it may not be able to flush the full content if `output->size` is too small. +* In which case, call again ZSTD_endStream() to complete the flush. 
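A hedged sketch of the compression loop described in the HowTo above: feed the input until it is fully consumed, then call ZSTD_endStream() until it reports 0 bytes left so the frame epilogue is written. The emit() sink and demo_ names are invented; only the ZSTD_* calls come from this header:

    static int demo_compress_stream(ZSTD_CStream *zcs,                   /* from ZSTD_createCStream() */
                                    const void *src, size_t srcSize,
                                    void *dstChunk, size_t dstChunkSize, /* e.g. ZSTD_CStreamOutSize() */
                                    void (*emit)(const void *p, size_t n))
    {
        ZSTD_inBuffer in = { src, srcSize, 0 };
        size_t remaining;
        if (ZSTD_isError(ZSTD_initCStream(zcs, 3 /* level */))) return -1;
        while (in.pos < in.size) {
            ZSTD_outBuffer out = { dstChunk, dstChunkSize, 0 };
            if (ZSTD_isError(ZSTD_compressStream(zcs, &out, &in))) return -1;
            emit(dstChunk, out.pos);
        }
        do {   /* flush remaining data and write the frame epilogue */
            ZSTD_outBuffer out = { dstChunk, dstChunkSize, 0 };
            remaining = ZSTD_endStream(zcs, &out);
            if (ZSTD_isError(remaining)) return -1;
            emit(dstChunk, out.pos);
        } while (remaining != 0);
        return 0;
    }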
+*  @return : nb of bytes still present within internal buffer (0 if it's empty, hence compression completed)
+*            or an error code, which can be tested using ZSTD_isError().
+*
+* *******************************************************************/
+
+typedef struct ZSTD_CStream_s ZSTD_CStream;
+/*===== ZSTD_CStream management functions =====*/
+ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
+ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
+
+/*===== Streaming compression functions =====*/
+ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+
+ZSTDLIB_API size_t ZSTD_CStreamInSize(void);    /**< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guaranteed to successfully flush at least one complete compressed block in all circumstances. */
+
+
+
+/*-***************************************************************************
+*  Streaming decompression - HowTo
+*
+*  A ZSTD_DStream object is required to track streaming operations.
+*  Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
+*  ZSTD_DStream objects can be re-used multiple times.
+*
+*  Use ZSTD_initDStream() to start a new decompression operation,
+*   or ZSTD_initDStream_usingDict() if decompression requires a dictionary.
+*   @return : recommended first input size
+*
+*  Use ZSTD_decompressStream() repetitively to consume your input.
+*  The function will update both `pos` fields.
+*  If `input.pos < input.size`, some input has not been consumed.
+*  It's up to the caller to present the remaining data again.
+*  If `output.pos < output.size`, the decoder has flushed everything it could.
+*  @return : 0 when a frame is completely decoded and fully flushed,
+*            an error code, which can be tested using ZSTD_isError(),
+*            any other value > 0, which means there is still some decoding to do to complete the current frame.
+*            The return value is a suggested next input size (a hint to improve latency) that will never load more than the current frame.
+* *******************************************************************************/
+
+typedef struct ZSTD_DStream_s ZSTD_DStream;
+/*===== ZSTD_DStream management functions =====*/
+ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
+ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);
+
+/*===== Streaming decompression functions =====*/
+ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
+ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+
+ZSTDLIB_API size_t ZSTD_DStreamInSize(void);    /*!< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guaranteed to successfully flush at least one complete block in all circumstances. */
+
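Editor's note: an illustrative sketch of the streaming compression loop described in the HowTo above, not upstream code. The helper name `compress_stream` and the FILE*-based I/O are assumptions; only the ZSTD_CStream calls come from the header, and error handling is abbreviated to ZSTD_isError() checks.

    #include <stdio.h>
    #include <stdlib.h>
    #include "zstd.h"

    /* Streams `fin` into `fout` as a single zstd frame; returns 0 on success. */
    static int compress_stream(FILE* fin, FILE* fout, int level)
    {
        size_t const inSize  = ZSTD_CStreamInSize();   /* recommended input chunk size */
        size_t const outSize = ZSTD_CStreamOutSize();  /* always large enough to flush one block */
        void* const inBuf  = malloc(inSize);
        void* const outBuf = malloc(outSize);
        ZSTD_CStream* const zcs = ZSTD_createCStream();
        int ret = -1;

        if (inBuf && outBuf && zcs && !ZSTD_isError(ZSTD_initCStream(zcs, level))) {
            size_t readBytes;
            while ((readBytes = fread(inBuf, 1, inSize, fin)) > 0) {
                ZSTD_inBuffer input = { inBuf, readBytes, 0 };
                while (input.pos < input.size) {        /* keep going until the chunk is consumed */
                    ZSTD_outBuffer output = { outBuf, outSize, 0 };
                    size_t const hint = ZSTD_compressStream(zcs, &output, &input);
                    if (ZSTD_isError(hint)) goto done;
                    fwrite(outBuf, 1, output.pos, fout);
                }
            }
            for (;;) {                                  /* finish the frame: flush + epilogue */
                ZSTD_outBuffer output = { outBuf, outSize, 0 };
                size_t const remaining = ZSTD_endStream(zcs, &output);
                if (ZSTD_isError(remaining)) goto done;
                fwrite(outBuf, 1, output.pos, fout);
                if (remaining == 0) break;              /* internal buffer fully flushed */
            }
            ret = 0;
        }
    done:
        ZSTD_freeCStream(zcs);
        free(outBuf);
        free(inBuf);
        return ret;
    }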
+#endif  /* ZSTD_H_235446 */
+
+
+#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
+#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
+
+/****************************************************************************************
+ * START OF ADVANCED AND EXPERIMENTAL FUNCTIONS
+ * The definitions in this section are considered experimental.
+ * They should never be used with a dynamic library, as they may change in the future.
+ * They are provided for advanced usages.
+ * Use them only in association with static linking.
+ * ***************************************************************************************/
+
+/* --- Constants ---*/
+#define ZSTD_MAGICNUMBER            0xFD2FB528   /* >= v0.8.0 */
+#define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50U
+
+#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
+#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
+
+#define ZSTD_WINDOWLOG_MAX_32  27
+#define ZSTD_WINDOWLOG_MAX_64  27
+#define ZSTD_WINDOWLOG_MAX    ((unsigned)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
+#define ZSTD_WINDOWLOG_MIN     10
+#define ZSTD_HASHLOG_MAX       ZSTD_WINDOWLOG_MAX
+#define ZSTD_HASHLOG_MIN        6
+#define ZSTD_CHAINLOG_MAX     (ZSTD_WINDOWLOG_MAX+1)
+#define ZSTD_CHAINLOG_MIN      ZSTD_HASHLOG_MIN
+#define ZSTD_HASHLOG3_MAX      17
+#define ZSTD_SEARCHLOG_MAX    (ZSTD_WINDOWLOG_MAX-1)
+#define ZSTD_SEARCHLOG_MIN      1
+#define ZSTD_SEARCHLENGTH_MAX   7   /* only for ZSTD_fast, other strategies are limited to 6 */
+#define ZSTD_SEARCHLENGTH_MIN   3   /* only for ZSTD_btopt, other strategies are limited to 4 */
+#define ZSTD_TARGETLENGTH_MIN   4
+#define ZSTD_TARGETLENGTH_MAX 999
+
+#define ZSTD_FRAMEHEADERSIZE_MAX 18    /* for static allocation */
+#define ZSTD_FRAMEHEADERSIZE_MIN  6
+static const size_t ZSTD_frameHeaderSize_prefix = 5;
+static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN;
+static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX;
+static const size_t ZSTD_skippableHeaderSize = 8;  /* magic number + skippable frame length */
+
+
+/*--- Advanced types ---*/
+typedef enum { ZSTD_fast, ZSTD_dfast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2, ZSTD_btlazy2, ZSTD_btopt, ZSTD_btopt2 } ZSTD_strategy;   /* from faster to stronger */
+
+typedef struct {
+    unsigned windowLog;      /**< largest match distance : larger == more compression, more memory needed during decompression */
+    unsigned chainLog;       /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
+    unsigned hashLog;        /**< dispatch table : larger == faster, more memory */
+    unsigned searchLog;      /**< nb of searches : larger == more compression, slower */
+    unsigned searchLength;   /**< match length searched : larger == faster decompression, sometimes less compression */
+    unsigned targetLength;   /**< acceptable match size for optimal parser (only) : larger == more compression, slower */
+    ZSTD_strategy strategy;
+} ZSTD_compressionParameters;
+
+typedef struct {
+    unsigned contentSizeFlag; /**< 1: content size will be in frame header (when known) */
+    unsigned checksumFlag;    /**< 1: generate a 32-bit checksum at end of frame, for error detection */
+    unsigned noDictIDFlag;    /**< 1: no dictID will be saved into frame header (if dictionary compression) */
+} ZSTD_frameParameters;
+
+typedef struct {
+    ZSTD_compressionParameters cParams;
+    ZSTD_frameParameters fParams;
+} ZSTD_parameters;
+
+/*= Custom memory allocation functions */
+typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
+typedef void  (*ZSTD_freeFunction) (void* opaque, void* address);
+typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
+
+/***************************************
+*  Compressed size functions
+***************************************/
+
+/*! ZSTD_findFrameCompressedSize() :
+ *  `src` should point to the start of a ZSTD encoded frame or skippable frame
+ *  `srcSize` must be at least as large as the frame
+ *  @return : the compressed size of the frame pointed to by `src`, suitable to pass to
+ *      `ZSTD_decompress` or similar, or an error code if given invalid input. */
+ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
+
+/***************************************
+*  Decompressed size functions
+***************************************/
+/*! ZSTD_getFrameContentSize() :
+*   `src` should point to the start of a ZSTD encoded frame
+*   `srcSize` must be at least as large as the frame header.  A value greater than or equal
+*       to `ZSTD_frameHeaderSize_max` is guaranteed to be large enough in all cases.
+*   @return : decompressed size of the frame pointed to by `src` if known, otherwise
+*             - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+*             - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
+ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
+
+/*! ZSTD_findDecompressedSize() :
+*   `src` should point to the start of a series of ZSTD encoded and/or skippable frames
+*   `srcSize` must be the _exact_ size of this series
+*       (i.e. there should be a frame boundary exactly `srcSize` bytes after `src`)
+*   @return : the decompressed size of all data in the contained frames, as a 64-bit value _if known_
+*             - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
+*             - if an error occurred: ZSTD_CONTENTSIZE_ERROR
+*
+*    note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
+*             When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+*             In which case, it's necessary to use streaming mode to decompress data.
+*             Optionally, application can still use ZSTD_decompress() while relying on implied limits.
+*             (For example, data may be necessarily cut into blocks <= 16 KB).
+*    note 2 : decompressed size is always present when compression is done with ZSTD_compress()
+*    note 3 : decompressed size can be very large (64-bits value),
+*             potentially larger than what local system can handle as a single memory segment.
+*             In which case, it's necessary to use streaming mode to decompress data.
+*    note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+*             Always ensure the result fits within the application's authorized limits.
+*             Each application can set its own limits.
+*    note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
+*             read each contained frame header.  This is efficient as most of the data is skipped,
+*             however it does mean that all frame data must be present and valid. */
+ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
+
+
+/***************************************
+*  Advanced compression functions
+***************************************/
+/*! ZSTD_estimateCCtxSize() :
+ *  Gives the amount of memory allocated for a ZSTD_CCtx given a set of compression parameters. */
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize(ZSTD_compressionParameters cParams);
+
+/*! ZSTD_createCCtx_advanced() :
+ *  Create a ZSTD compression context using external alloc and free functions */
+ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
+
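Editor's note: a hedged sketch of the custom-allocation hooks above, not upstream code. It routes context allocation through tracing wrappers around malloc/free via ZSTD_customMem and ZSTD_createCCtx_advanced(); the wrapper names and the stderr logging are purely illustrative.

    #include <stdio.h>
    #include <stdlib.h>
    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_customMem and the *_advanced entry points live behind this macro */
    #include "zstd.h"

    static void* traceAlloc(void* opaque, size_t size)
    {
        (void)opaque;                                    /* no allocator state in this sketch */
        fprintf(stderr, "zstd alloc: %zu bytes\n", size);
        return malloc(size);
    }

    static void traceFree(void* opaque, void* address)
    {
        (void)opaque;
        free(address);
    }

    static ZSTD_CCtx* createTracingCCtx(void)
    {
        ZSTD_customMem const mem = { traceAlloc, traceFree, NULL /* opaque */ };
        return ZSTD_createCCtx_advanced(mem);            /* released as usual with ZSTD_freeCCtx() */
    }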
+/*! ZSTD_sizeof_CCtx() :
+ *  Gives the amount of memory used by a given ZSTD_CCtx */
+ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
+
+typedef enum {
+    ZSTD_p_forceWindow,   /* Force back-references to remain < windowSize, even when referencing Dictionary content (default:0) */
+    ZSTD_p_forceRawDict   /* Force loading dictionary in "content-only" mode (no header analysis) */
+} ZSTD_CCtxParameter;
+/*! ZSTD_setCCtxParameter() :
+ *  Set advanced parameters, selected through enum ZSTD_CCtxParameter
+ *  @result : 0, or an error code (which can be tested with ZSTD_isError()) */
+ZSTDLIB_API size_t ZSTD_setCCtxParameter(ZSTD_CCtx* cctx, ZSTD_CCtxParameter param, unsigned value);
+
+/*! ZSTD_createCDict_byReference() :
+ *  Create a digested dictionary for compression
+ *  Dictionary content is simply referenced, and therefore stays in dictBuffer.
+ *  It is important that dictBuffer outlives CDict; it must remain read accessible throughout the lifetime of CDict */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
+
+/*! ZSTD_createCDict_advanced() :
+ *  Create a ZSTD_CDict using external alloc and free, and customized compression parameters */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, unsigned byReference,
+                                                  ZSTD_compressionParameters cParams, ZSTD_customMem customMem);
+
+/*! ZSTD_sizeof_CDict() :
+ *  Gives the amount of memory used by a given ZSTD_CDict */
+ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
+
+/*! ZSTD_getCParams() :
+*   @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
+*   `estimatedSrcSize` value is optional, select 0 if not known */
+ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*! ZSTD_getParams() :
+*   same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
+*   All fields of `ZSTD_frameParameters` are set to default (0) */
+ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*! ZSTD_checkCParams() :
+*   Ensure param values remain within the authorized range */
+ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
+
+/*! ZSTD_adjustCParams() :
+*   optimize params for a given `srcSize` and `dictSize`.
+*   both values are optional, select `0` if unknown. */
+ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
+
+/*! ZSTD_compress_advanced() :
+*   Same as ZSTD_compress_usingDict(), with fine-tuned control over each compression parameter */
+ZSTDLIB_API size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
+                                           void* dst, size_t dstCapacity,
+                                           const void* src, size_t srcSize,
+                                           const void* dict, size_t dictSize,
+                                           ZSTD_parameters params);
+
+/*! ZSTD_compress_usingCDict_advanced() :
+*   Same as ZSTD_compress_usingCDict(), with fine-tuned control over frame parameters */
+ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
+                                                     void* dst, size_t dstCapacity,
+                                                     const void* src, size_t srcSize,
+                                                     const ZSTD_CDict* cdict, ZSTD_frameParameters fParams);
+
+
+/*--- Advanced decompression functions ---*/
+
+/*! ZSTD_isFrame() :
+ *  Tells if the content of `buffer` starts with a valid Frame Identifier.
+ *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
+ *  Note 3 : Skippable Frame Identifiers are considered valid. */
+ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
+
+/*! ZSTD_estimateDCtxSize() :
+ *  Gives the potential amount of memory allocated to create a ZSTD_DCtx */
+ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
+
+/*! ZSTD_createDCtx_advanced() :
+ *  Create a ZSTD decompression context using external alloc and free functions */
+ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
+
+/*! ZSTD_sizeof_DCtx() :
+ *  Gives the amount of memory used by a given ZSTD_DCtx */
+ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
+
+/*! ZSTD_createDDict_byReference() :
+ *  Create a digested dictionary, ready to start decompression operations without startup delay.
+ *  Dictionary content is simply referenced, and therefore stays in dictBuffer.
+ *  It is important that dictBuffer outlives DDict; it must remain read accessible throughout the lifetime of DDict */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
+
+/*! ZSTD_createDDict_advanced() :
+ *  Create a ZSTD_DDict using external alloc and free, optionally by reference */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
+                                                  unsigned byReference, ZSTD_customMem customMem);
+
+/*! ZSTD_sizeof_DDict() :
+ *  Gives the amount of memory used by a given ZSTD_DDict */
+ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
+
+/*! ZSTD_getDictID_fromDict() :
+ *  Provides the dictID stored within the dictionary.
+ *  If @return == 0, the dictionary is not conformant with the Zstandard specification.
+ *  It can still be loaded, but as a content-only dictionary. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
+
+/*! ZSTD_getDictID_fromDDict() :
+ *  Provides the dictID of the dictionary loaded into `ddict`.
+ *  If @return == 0, the dictionary is not conformant to the Zstandard specification, or empty.
+ *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
+
+/*! ZSTD_getDictID_fromFrame() :
+ *  Provides the dictID required to decompress the frame stored within `src`.
+ *  If @return == 0, the dictID could not be decoded.
+ *  This could be for one of the following reasons :
+ *  - The frame does not require a dictionary to be decoded (most common case).
+ *  - The frame was built with the dictID intentionally removed. Whatever dictionary is necessary is hidden information.
+ *    Note : this use case also happens when using a non-conformant dictionary.
+ *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
+ *  - This is not a Zstandard frame.
+ *  To identify the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
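Editor's note: an illustrative sketch of the dictID helpers above, not upstream code. It checks that a frame was produced with the dictionary loaded into a ZSTD_DDict before decompressing; the helper name and the (size_t)-1 failure convention are assumptions of this sketch.

    #include "zstd.h"
    /* assumes ZSTD_STATIC_LINKING_ONLY was defined before including zstd.h */

    static size_t decompress_checked(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize, const ZSTD_DDict* ddict)
    {
        unsigned const frameID = ZSTD_getDictID_fromFrame(src, srcSize);
        unsigned const dictID  = ZSTD_getDictID_fromDDict(ddict);
        /* frameID == 0 : the frame doesn't declare a dictionary (or the dictID was stripped),
         * so there is nothing to verify; proceed and let decompression report any mismatch. */
        if (frameID != 0 && frameID != dictID)
            return (size_t)-1;   /* "wrong dictionary" convention chosen for this sketch only */
        return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ddict);
    }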
+
+
+/********************************************************************
+*  Advanced streaming functions
+********************************************************************/
+
+/*=====   Advanced Streaming compression functions  =====*/
+ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);    /**< size of CStream is variable, depending primarily on compression level */
+ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);   /**< pledgedSrcSize must be correct, a size of 0 means unknown.  for a frame size of 0 use initCStream_advanced */
+ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< note: a dict will not be used if dict == NULL or dictSize < 8 */
+ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
+                                             ZSTD_parameters params, unsigned long long pledgedSrcSize);  /**< pledgedSrcSize is optional and can be 0 (meaning unknown). note: if the contentSizeFlag is set, pledgedSrcSize == 0 means the source size is actually 0 */
+ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);  /**< note : cdict will just be referenced, and must outlive the compression session */
+ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize, ZSTD_frameParameters fParams);  /**< same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
+
+/*! ZSTD_resetCStream() :
+ *  start a new compression job, using the same parameters as the previous job.
+ *  This is typically useful to skip the dictionary loading stage, since it will re-use it in place.
+ *  Note that zcs must be initialized at least once before using ZSTD_resetCStream().
+ *  pledgedSrcSize==0 means "srcSize unknown".
+ *  If pledgedSrcSize > 0, its value must be correct, as it will be written in the header, and controlled at the end.
+ *  @return : 0, or an error code (which can be tested using ZSTD_isError()) */
+ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
+
+
+/*=====   Advanced Streaming decompression functions  =====*/
+typedef enum { DStream_p_maxWindowSize } ZSTD_DStreamParameter_e;
+ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: a dict will not be used if dict == NULL or dictSize < 8 */
+ZSTDLIB_API size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue);
+ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); /**< note : ddict will just be referenced, and must outlive the decompression session */
+ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);  /**< re-use decompression parameters from previous init; saves dictionary loading */
+ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
+
+
+/*********************************************************************
+*  Buffer-less and synchronous inner streaming functions
+*
+*  This is an advanced API, giving full control over buffer management, for users who need direct control over memory.
+*  But it's also a complex one, with many restrictions (documented below).
+*  Prefer using the normal streaming API for an easier experience.
+********************************************************************* */
+
+/**
+  Buffer-less streaming compression (synchronous mode)
+
+  A ZSTD_CCtx object is required to track streaming operations.
+  Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resources.
+  A ZSTD_CCtx object can be re-used multiple times within successive compression operations.
+
+  Start by initializing a context.
+  Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression,
+  or ZSTD_compressBegin_advanced(), for finer parameter control.
+  It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx()
+
+  Then, consume your input using ZSTD_compressContinue().
+  There are some important considerations to keep in mind when using this advanced function :
+  - ZSTD_compressContinue() has no internal buffer. It uses the externally provided buffer only.
+  - Interface is synchronous : input is consumed entirely and produces one or more compressed blocks.
+  - Caller must ensure there is enough space in `dst` to store compressed data under the worst case scenario.
+    Worst case evaluation is provided by ZSTD_compressBound().
+    ZSTD_compressContinue() doesn't guarantee recovery after a failed compression.
+  - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).
+    It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consist of multiple contiguous blocks)
+  - ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps.
+    In which case, it will "discard" the relevant memory section from its history.
+
+  Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
+  It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
+  Without the last block mark, frames will be considered unfinished (corrupted) by decoders.
+
+  A `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress a new frame.
+*/
+
+/*=====   Buffer-less streaming compression functions  =====*/
+ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize is optional and can be 0 (meaning unknown). note: if the contentSizeFlag is set, pledgedSrcSize == 0 means the source size is actually 0 */
+ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
+ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize);   /* compression parameters are already set within cdict. pledgedSrcSize=0 means null-size */
+ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: pledgedSrcSize can be 0, indicating unknown size; if it is non-zero, it must be accurate. For 0-size frames, use compressBegin_advanced */
+
+ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
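Editor's note: a sketch of the buffer-less compression entry points above, not upstream code. It assumes the caller sized `dst` with at least ZSTD_compressBound(srcSize) so every call can succeed; the two-chunk split, the compression level 3, and the helper name are illustrative.

    #include "zstd.h"
    /* assumes ZSTD_STATIC_LINKING_ONLY was defined before including zstd.h */

    /* Compresses `src` as one frame in two ZSTD_compressContinue()/ZSTD_compressEnd() steps.
     * Returns the total compressed size, or a zstd error code (test with ZSTD_isError()). */
    static size_t bufferless_compress(ZSTD_CCtx* cctx,
                                      void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize)
    {
        size_t const half = srcSize / 2;   /* arbitrary split, for illustration only */
        size_t pos = 0;

        size_t r = ZSTD_compressBegin(cctx, 3 /* level, assumed */);
        if (ZSTD_isError(r)) return r;

        /* first chunk : may emit zero or more complete blocks into dst;
         * `src` must stay accessible and unmodified until the frame is finished */
        r = ZSTD_compressContinue(cctx, dst, dstCapacity, src, half);
        if (ZSTD_isError(r)) return r;
        pos += r;

        /* last chunk + frame epilogue (last block mark, optional checksum) */
        r = ZSTD_compressEnd(cctx, (char*)dst + pos, dstCapacity - pos,
                             (const char*)src + half, srcSize - half);
        if (ZSTD_isError(r)) return r;
        return pos + r;
    }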
+
+
+
+/*-
+  Buffer-less streaming decompression (synchronous mode)
+
+  A ZSTD_DCtx object is required to track streaming operations.
+  Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
+  A ZSTD_DCtx object can be re-used multiple times.
+
+  The first typical operation is to retrieve frame parameters, using ZSTD_getFrameParams().
+  It fills a ZSTD_frameParams structure which provides important information to correctly decode the frame,
+  such as the minimum rolling buffer size to allocate to decompress data (`windowSize`),
+  and the dictionary ID used.
+  (Note : content size is optional, it may not be present. 0 means : content size unknown).
+  Note that these values could be wrong, either because of data malformation, or because an attacker is spoofing deliberate false information.
+  As a consequence, check that values remain within valid application range, especially `windowSize`, before allocation.
+  Each application can set its own limit, depending on local restrictions. For extended interoperability, it is recommended to support at least 8 MB.
+  Frame parameters are extracted from the beginning of the compressed frame.
+  The data fragment must be large enough to ensure successful decoding, typically `ZSTD_frameHeaderSize_max` bytes.
+  @result : 0 : successful decoding, the `ZSTD_frameParams` structure is correctly filled.
+           >0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
+           errorCode, which can be tested using ZSTD_isError().
+
+  Start decompression, with ZSTD_decompressBegin() or ZSTD_decompressBegin_usingDict().
+  Alternatively, you can copy a prepared context, using ZSTD_copyDCtx().
+
+  Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternately.
+  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
+  ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
+
+  @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
+  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some metadata item.
+  It can also be an error code, which can be tested with ZSTD_isError().
+
+  ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize`.
+  They should preferably be located contiguously, prior to the current block.
+  Alternatively, a round buffer of sufficient size is also possible. Sufficient size is determined by frame parameters.
+  ZSTD_decompressContinue() is very sensitive to contiguity;
+  if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
+    or that the previous contiguous segment is large enough to properly handle the maximum back-reference.
+
+  A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
+  Context can then be reset to start a new decompression.
+
+  Note : it's possible to know if the next input to present is a header or a block, using ZSTD_nextInputType().
+  This information is not required to properly decode a frame.
+
+  == Special case : skippable frames ==
+
+  Skippable frames allow integration of user-defined data into a flow of concatenated frames.
+  Skippable frames will be ignored (skipped) by a decompressor. The format of skippable frames is as follows :
+  a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
+  b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
+  c) Frame Content - any content (User Data) of length equal to Frame Size
+  For skippable frames ZSTD_decompressContinue() always returns 0.
+  For skippable frames ZSTD_getFrameParams() returns fparamsPtr->windowSize==0, which means that the frame is skippable.
+    Note : If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might actually be a Zstd encoded frame with no content.
+           For purposes of decompression, it is valid in both cases to skip the frame using
+           ZSTD_findFrameCompressedSize to find its size in bytes.
+  It also returns the Frame Size as fparamsPtr->frameContentSize.
+*/
+
+typedef struct {
+    unsigned long long frameContentSize;
+    unsigned windowSize;
+    unsigned dictID;
+    unsigned checksumFlag;
+} ZSTD_frameParams;
+
+/*=====   Buffer-less streaming decompression functions  =====*/
+ZSTDLIB_API size_t ZSTD_getFrameParams(ZSTD_frameParams* fparamsPtr, const void* src, size_t srcSize);   /**< doesn't consume input, see details below */
+ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIB_API void   ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
+ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
+ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
+
+/**
+  Block functions
+
+  Block functions produce and decode raw zstd blocks, without frame metadata.
+  Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
+  The user will have to keep track of the information required to regenerate the data, such as compressed and content sizes.
+
+  A few rules to respect :
+  - Compressing and decompressing require a context structure
+    + Use ZSTD_createCCtx() and ZSTD_createDCtx()
+  - It is necessary to init the context before starting
+    + compression : any ZSTD_compressBegin*() variant, including with dictionary
+    + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
+    + copyCCtx() and copyDCtx() can be used too
+  - Block size is limited, it must be <= ZSTD_getBlockSizeMax() <= ZSTD_BLOCKSIZE_ABSOLUTEMAX
+    + If input is larger than a block size, it's necessary to split input data into multiple blocks
+    + For inputs larger than a single block size, consider using the regular ZSTD_compress() instead.
+      Frame metadata is not that costly, and quickly becomes negligible as the source size grows larger.
+  - When a block is considered not compressible enough, ZSTD_compressBlock() result will be zero.
+    In which case, nothing is produced into `dst`.
+    + User must test for such an outcome and deal directly with uncompressed data
+    + ZSTD_decompressBlock() doesn't accept uncompressed data as input !!!
+    + In case of multiple successive blocks, should some of them be uncompressed,
+      the decoder must be informed of their existence in order to follow proper history.
+      Use ZSTD_insertBlock() for such a case.
+*/
+
+#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024)   /* define, for static allocation */
+/*=====   Raw zstd block functions  =====*/
+ZSTDLIB_API size_t ZSTD_getBlockSizeMax(ZSTD_CCtx* cctx);
+ZSTDLIB_API size_t ZSTD_compressBlock  (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert block into `dctx` history. Useful for uncompressed blocks */
+
+
+#endif   /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
+
+#if defined (__cplusplus)
+}
+#endif
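Editor's note: an illustrative single-block round trip using the raw block functions declared above, not upstream code. It falls back to memcpy() plus ZSTD_insertBlock() when ZSTD_compressBlock() reports an incompressible block (return value 0); the helper name, level, and buffer handling are assumptions of this sketch.

    #include <string.h>
    #include "zstd.h"
    /* assumes ZSTD_STATIC_LINKING_ONLY was defined before including zstd.h */

    /* Round-trips one block; returns 0 on success. `srcSize` must be <= ZSTD_getBlockSizeMax(cctx). */
    static int block_roundtrip(ZSTD_CCtx* cctx, ZSTD_DCtx* dctx,
                               const void* src, size_t srcSize,
                               void* compressed, size_t compressedCapacity,
                               void* regenerated, size_t regeneratedCapacity)
    {
        size_t cSize, dSize;

        /* both contexts must be initialized before block-level calls */
        if (ZSTD_isError(ZSTD_compressBegin(cctx, 3))) return -1;
        if (ZSTD_isError(ZSTD_decompressBegin(dctx))) return -1;

        cSize = ZSTD_compressBlock(cctx, compressed, compressedCapacity, src, srcSize);
        if (ZSTD_isError(cSize)) return -1;

        if (cSize == 0) {
            /* Not compressible : the caller stores the block raw, and the decoder
             * must be told about it so its history stays consistent. */
            if (srcSize > regeneratedCapacity) return -1;
            memcpy(regenerated, src, srcSize);
            return ZSTD_isError(ZSTD_insertBlock(dctx, regenerated, srcSize)) ? -1 : 0;
        }

        dSize = ZSTD_decompressBlock(dctx, regenerated, regeneratedCapacity, compressed, cSize);
        if (ZSTD_isError(dSize) || dSize != srcSize) return -1;
        return memcmp(src, regenerated, srcSize) == 0 ? 0 : -1;
    }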